Compare commits

5 Commits

Author SHA1 Message Date
Marios Christou c3c128c45a fmt 2025-10-09 17:39:41 +03:00
Marios Christou 10472bae22 Merge branch 'main' into refactor_zombinet_substrate 2025-10-09 17:36:43 +03:00
Omar ebc24a588b Add different output formats (#185)
* Add different output formats

* Add the mode to the output
2025-10-09 14:24:14 +00:00
Omar 21e25f09e6 Zombienet & Benchmarks Cleanups (#184)
* Minor zombienet cleanups

* Remove un-necessary trace call from the benchmark driver

* Improve the benchmarks driver

* Ignore the lighthouse tests

* Allow for the consensus to be specified for the revive dev node

* Ignore the zombienet tests for the time being
2025-10-09 11:41:01 +00:00
Marios Christou 8bf0d69e54 Refactor: move chainspec patching and address conversion to common module, remove duplicated logic and tests 2025-10-08 16:33:19 +03:00
51 changed files with 1411 additions and 2327 deletions
-19
@@ -18,28 +18,9 @@ env:
POLKADOT_VERSION: polkadot-stable2506-2
jobs:
fmt:
runs-on: ubuntu-latest
steps:
- name: Checkout repo
uses: actions/checkout@v4
- name: Setup Rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Install nightly toolchain
run: rustup toolchain install nightly
- name: Install rustfmt for nightly
run: rustup component add --toolchain nightly rustfmt
- name: Cargo fmt
run: cargo +nightly fmt --all -- --check
cache-polkadot:
name: Build and cache Polkadot binaries on ${{ matrix.os }}
runs-on: ${{ matrix.os }}
needs: [fmt]
strategy:
matrix:
os: [ubuntu-24.04, macos-14]
+1
@@ -13,3 +13,4 @@ resolc-compiler-tests
workdir
!/schema.json
!/dev-genesis.json
Generated
+13 -23
@@ -782,6 +782,15 @@ dependencies = [
"libc",
]
[[package]]
name = "ansi_term"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
dependencies = [
"winapi",
]
[[package]]
name = "anstream"
version = "0.6.18"
@@ -2328,7 +2337,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976"
dependencies = [
"data-encoding",
"syn 1.0.109",
"syn 2.0.101",
]
[[package]]
@@ -4526,27 +4535,6 @@ dependencies = [
"windows-sys 0.59.0",
]
[[package]]
name = "ml-test-runner"
version = "0.1.0"
dependencies = [
"alloy",
"anyhow",
"clap",
"revive-dt-common",
"revive-dt-compiler",
"revive-dt-config",
"revive-dt-core",
"revive-dt-format",
"revive-dt-node",
"revive-dt-node-interaction",
"revive-dt-report",
"temp-dir",
"tokio",
"tracing",
"tracing-subscriber",
]
[[package]]
name = "moka"
version = "0.12.10"
@@ -5632,6 +5620,7 @@ name = "revive-dt-core"
version = "0.1.0"
dependencies = [
"alloy",
"ansi_term",
"anyhow",
"bson",
"cacache",
@@ -5679,6 +5668,7 @@ version = "0.1.0"
dependencies = [
"alloy",
"anyhow",
"async-stream",
"futures",
"revive-common",
"revive-dt-common",
@@ -5963,7 +5953,7 @@ dependencies = [
"security-framework 3.3.0",
"security-framework-sys",
"webpki-root-certs 0.26.11",
"windows-sys 0.52.0",
"windows-sys 0.59.0",
]
[[package]]
+3 -1
@@ -22,7 +22,9 @@ revive-dt-node-pool = { version = "0.1.0", path = "crates/node-pool" }
revive-dt-report = { version = "0.1.0", path = "crates/report" }
revive-dt-solc-binaries = { version = "0.1.0", path = "crates/solc-binaries" }
ansi_term = "0.12.1"
anyhow = "1.0"
async-stream = { version = "0.3.6" }
bson = { version = "2.15.0" }
cacache = { version = "13.1.0" }
clap = { version = "4", features = ["derive"] }
@@ -73,7 +75,7 @@ revive-solc-json-interface = { git = "https://github.com/paritytech/revive", rev
revive-common = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
revive-differential = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
zombienet-sdk = { git = "https://github.com/paritytech/zombienet-sdk.git", rev ="891f6554354ce466abd496366dbf8b4f82141241" }
zombienet-sdk = { git = "https://github.com/paritytech/zombienet-sdk.git", rev = "891f6554354ce466abd496366dbf8b4f82141241" }
[workspace.dependencies.alloy]
version = "1.0.37"
+3 -5
@@ -1,11 +1,9 @@
//! This module implements a cached file system allowing for results to be stored in-memory rather
//! than being queried from the file system again.
use std::{
fs,
io::{Error, Result},
path::{Path, PathBuf},
};
use std::fs;
use std::io::{Error, Result};
use std::path::{Path, PathBuf};
use moka::sync::Cache;
use once_cell::sync::Lazy;
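
The hunk above rewrites the imports of the cached file-system module, which pairs moka's sync `Cache` with a `once_cell` `Lazy` static. A minimal sketch of that pattern is shown below; the key and value types and the capacity are assumptions, not the module's actual shape.

```rust
// A minimal sketch of the cached-FS pattern, assuming hypothetical key/value
// types and capacity; the real module's cache shape may differ.
use std::path::PathBuf;
use std::sync::Arc;

use moka::sync::Cache;
use once_cell::sync::Lazy;

// Process-wide cache: file path -> file contents, capped at 10_000 entries.
static FILE_CACHE: Lazy<Cache<PathBuf, Arc<String>>> =
    Lazy::new(|| Cache::new(10_000));

fn read_cached(path: PathBuf) -> std::io::Result<Arc<String>> {
    if let Some(hit) = FILE_CACHE.get(&path) {
        return Ok(hit); // Served from memory, no file-system access.
    }
    let contents = Arc::new(std::fs::read_to_string(&path)?);
    FILE_CACHE.insert(path, Arc::clone(&contents));
    Ok(contents)
}
```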
+2 -1
@@ -1,4 +1,5 @@
use std::{ops::ControlFlow, time::Duration};
use std::ops::ControlFlow;
use std::time::Duration;
use anyhow::{Context as _, Result, anyhow};
@@ -135,6 +135,6 @@ macro_rules! define_wrapper_type {
};
}
/// Technically not needed but this allows for the macro to be found in the `macros` module of
/// the crate in addition to being found in the root of the crate.
/// Technically not needed but this allows for the macro to be found in the `macros` module of the
/// crate in addition to being found in the root of the crate.
pub use {define_wrapper_type, impl_for_wrapper};
+3 -1
@@ -1,7 +1,9 @@
use crate::types::VersionOrRequirement;
use semver::Version;
use serde::{Deserialize, Serialize};
use std::{fmt::Display, str::FromStr, sync::LazyLock};
use std::fmt::Display;
use std::str::FromStr;
use std::sync::LazyLock;
/// This represents a mode that a given test should be run with, if possible.
///
@@ -1,4 +1,5 @@
use alloy::{primitives::U256, signers::local::PrivateKeySigner};
use alloy::primitives::U256;
use alloy::signers::local::PrivateKeySigner;
use anyhow::{Context, Result, bail};
/// This is a sequential private key allocator. When instantiated, it allocates private keys in
+2 -1
@@ -10,7 +10,8 @@ use std::{
pin::Pin,
};
use alloy::{json_abi::JsonAbi, primitives::Address};
use alloy::json_abi::JsonAbi;
use alloy::primitives::Address;
use anyhow::{Context as _, Result};
use semver::Version;
use serde::{Deserialize, Serialize};
+38 -37
@@ -253,44 +253,45 @@ impl SolidityCompiler for Resolc {
.evm
.and_then(|evm| evm.bytecode.clone())
.context("Unexpected - Contract compiled with resolc has no bytecode")?;
let abi =
{
let metadata = contract_information
.metadata
.as_ref()
.context("No metadata found for the contract")?;
let solc_metadata_str = match metadata {
serde_json::Value::String(solc_metadata_str) => {
solc_metadata_str.as_str()
}
serde_json::Value::Object(metadata_object) => {
let solc_metadata_value = metadata_object
.get("solc_metadata")
.context("Contract doesn't have a 'solc_metadata' field")?;
solc_metadata_value
.as_str()
.context("The 'solc_metadata' field is not a string")?
}
serde_json::Value::Null
| serde_json::Value::Bool(_)
| serde_json::Value::Number(_)
| serde_json::Value::Array(_) => {
anyhow::bail!("Unsupported type of metadata {metadata:?}")
}
};
let solc_metadata =
serde_json::from_str::<serde_json::Value>(solc_metadata_str).context(
"Failed to deserialize the solc_metadata as a serde_json generic value",
)?;
let output_value = solc_metadata
.get("output")
.context("solc_metadata doesn't have an output field")?;
let abi_value = output_value
.get("abi")
.context("solc_metadata output doesn't contain an abi field")?;
serde_json::from_value::<JsonAbi>(abi_value.clone())
.context("ABI found in solc_metadata output is not valid ABI")?
let abi = {
let metadata = contract_information
.metadata
.as_ref()
.context("No metadata found for the contract")?;
let solc_metadata_str = match metadata {
serde_json::Value::String(solc_metadata_str) => {
solc_metadata_str.as_str()
}
serde_json::Value::Object(metadata_object) => {
let solc_metadata_value = metadata_object
.get("solc_metadata")
.context("Contract doesn't have a 'solc_metadata' field")?;
solc_metadata_value
.as_str()
.context("The 'solc_metadata' field is not a string")?
}
serde_json::Value::Null
| serde_json::Value::Bool(_)
| serde_json::Value::Number(_)
| serde_json::Value::Array(_) => {
anyhow::bail!("Unsupported type of metadata {metadata:?}")
}
};
let solc_metadata = serde_json::from_str::<serde_json::Value>(
solc_metadata_str,
)
.context(
"Failed to deserialize the solc_metadata as a serde_json generic value",
)?;
let output_value = solc_metadata
.get("output")
.context("solc_metadata doesn't have an output field")?;
let abi_value = output_value
.get("abi")
.context("solc_metadata output doesn't contain an abi field")?;
serde_json::from_value::<JsonAbi>(abi_value.clone())
.context("ABI found in solc_metadata output is not valid ABI")?
};
map.insert(contract_name, (bytecode.object, abi));
}
}
+4 -4
@@ -21,7 +21,8 @@ use foundry_compilers_artifacts::{
output_selection::{
BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection,
},
solc::{CompilerOutput as SolcOutput, *},
solc::CompilerOutput as SolcOutput,
solc::*,
};
use semver::Version;
use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
@@ -283,9 +284,8 @@ impl SolidityCompiler for Solc {
_optimize_setting: ModeOptimizerSetting,
pipeline: ModePipeline,
) -> bool {
// solc 0.8.13 and above supports --via-ir, and less than that does not. Thus, we support
// mode E (ie no Yul IR) in either case, but only support Y (via Yul IR) if the compiler
// is new enough.
// solc 0.8.13 and above support --via-ir, while earlier versions do not. Thus, we support mode E
// (i.e. no Yul IR) in either case, but only support Y (via Yul IR) if the compiler is new enough.
pipeline == ModePipeline::ViaEVMAssembly
|| (pipeline == ModePipeline::ViaYulIR && self.compiler_supports_yul())
}
@@ -7,7 +7,10 @@ pragma solidity >=0.6.9;
import "./callable.sol";
contract Main {
function main(uint[1] calldata p1, Callable callable) public returns(uint) {
function main(
uint[1] calldata p1,
Callable callable
) public pure returns (uint) {
return callable.f(p1);
}
}
+61 -23
@@ -202,19 +202,6 @@ impl AsRef<ReportConfiguration> for Context {
#[derive(Clone, Debug, Parser, Serialize)]
pub struct TestExecutionContext {
/// The working directory that the program will use for all of the temporary artifacts needed
/// at runtime.
///
/// If not specified, then a temporary directory will be created and used by the program for
/// all temporary artifacts.
#[clap(
short,
long,
default_value = "",
value_hint = ValueHint::DirPath,
)]
pub working_directory: WorkingDirectoryConfiguration,
/// The set of platforms that the differential tests should run on.
#[arg(
short = 'p',
@@ -223,6 +210,23 @@ pub struct TestExecutionContext {
)]
pub platforms: Vec<PlatformIdentifier>,
/// The output format to use for the tool's output.
#[arg(short, long, default_value_t = OutputFormat::CargoTestLike)]
pub output_format: OutputFormat,
/// The working directory that the program will use for all of the temporary artifacts needed at
/// runtime.
///
/// If not specified, then a temporary directory will be created and used by the program for all
/// temporary artifacts.
#[clap(
short,
long,
default_value = "",
value_hint = ValueHint::DirPath,
)]
pub working_directory: WorkingDirectoryConfiguration,
/// Configuration parameters for the corpus files to use.
#[clap(flatten, next_help_heading = "Corpus Configuration")]
pub corpus_configuration: CorpusConfiguration,
@@ -282,11 +286,11 @@ pub struct TestExecutionContext {
#[derive(Clone, Debug, Parser, Serialize)]
pub struct BenchmarkingContext {
/// The working directory that the program will use for all of the temporary artifacts needed
/// at runtime.
/// The working directory that the program will use for all of the temporary artifacts needed at
/// runtime.
///
/// If not specified, then a temporary directory will be created and used by the program for
/// all temporary artifacts.
/// If not specified, then a temporary directory will be created and used by the program for all
/// temporary artifacts.
#[clap(
short,
long,
@@ -580,8 +584,8 @@ pub struct ResolcConfiguration {
pub struct PolkadotParachainConfiguration {
/// Specifies the path of the polkadot-parachain node to be used by the tool.
///
/// If this is not specified, then the tool assumes that it should use the polkadot-parachain
/// binary that's provided in the user's $PATH.
/// If this is not specified, then the tool assumes that it should use the polkadot-parachain binary
/// that's provided in the user's $PATH.
#[clap(
id = "polkadot-parachain.path",
long = "polkadot-parachain.path",
@@ -624,8 +628,8 @@ pub struct GethConfiguration {
pub struct KurtosisConfiguration {
/// Specifies the path of the kurtosis node to be used by the tool.
///
/// If this is not specified, then the tool assumes that it should use the kurtosis binary
/// that's provided in the user's $PATH.
/// If this is not specified, then the tool assumes that it should use the kurtosis binary that's
/// provided in the user's $PATH.
#[clap(
id = "kurtosis.path",
long = "kurtosis.path",
@@ -663,8 +667,8 @@ pub struct KitchensinkConfiguration {
pub struct ReviveDevNodeConfiguration {
/// Specifies the path of the revive dev node to be used by the tool.
///
/// If this is not specified, then the tool assumes that it should use the revive dev node
/// binary that's provided in the user's $PATH.
/// If this is not specified, then the tool assumes that it should use the revive dev node binary
/// that's provided in the user's $PATH.
#[clap(
id = "revive-dev-node.path",
long = "revive-dev-node.path",
@@ -680,6 +684,14 @@ pub struct ReviveDevNodeConfiguration {
value_parser = parse_duration
)]
pub start_timeout_ms: Duration,
/// The consensus to use for the spawned revive-dev-node.
#[clap(
id = "revive-dev-node.consensus",
long = "revive-dev-node.consensus",
default_value = "instant-seal"
)]
pub consensus: String,
}
/// A set of configuration parameters for the ETH RPC.
@@ -950,3 +962,29 @@ pub enum TestingPlatform {
/// A polkadot/Substrate based network
Zombienet,
}
/// The output format to use for the test execution output.
#[derive(
Clone,
Copy,
Debug,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Serialize,
ValueEnum,
EnumString,
Display,
AsRefStr,
IntoStaticStr,
)]
#[strum(serialize_all = "kebab-case")]
pub enum OutputFormat {
/// The legacy format that was used in the past for the output.
Legacy,
/// An output format that closely resembles the output of `cargo test`.
CargoTestLike,
}
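
The new `OutputFormat` enum leans on strum's kebab-case serialization so that clap's `ValueEnum`, `Display`, and `FromStr` all agree on the same spellings. A minimal sketch of that round-trip, assuming the strum crate with its `derive` feature enabled:

```rust
// A minimal sketch of the kebab-case round-trip provided by the derives
// above; assumes strum with the `derive` feature.
use std::str::FromStr;

use strum::{Display, EnumString};

#[derive(Debug, PartialEq, EnumString, Display)]
#[strum(serialize_all = "kebab-case")]
enum OutputFormat {
    Legacy,
    CargoTestLike,
}

fn main() {
    // clap's ValueEnum accepts the same kebab-case spellings on the CLI.
    let parsed = OutputFormat::from_str("cargo-test-like").unwrap();
    assert_eq!(parsed, OutputFormat::CargoTestLike);
    assert_eq!(OutputFormat::Legacy.to_string(), "legacy");
}
```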
+1
@@ -21,6 +21,7 @@ revive-dt-node = { workspace = true }
revive-dt-node-interaction = { workspace = true }
revive-dt-report = { workspace = true }
ansi_term = { workspace = true }
alloy = { workspace = true }
anyhow = { workspace = true }
bson = { workspace = true }
@@ -22,6 +22,7 @@ use alloy::{
},
};
use anyhow::{Context as _, Result, bail};
use futures::TryFutureExt;
use indexmap::IndexMap;
use revive_dt_common::{
futures::{PollingWaitBehavior, poll},
@@ -35,7 +36,7 @@ use revive_dt_format::{
},
traits::{ResolutionContext, ResolverApi},
};
use tokio::sync::{Mutex, mpsc::UnboundedSender};
use tokio::sync::{Mutex, OnceCell, mpsc::UnboundedSender};
use tracing::{Instrument, Span, debug, error, field::display, info, info_span, instrument};
use crate::{
@@ -59,8 +60,8 @@ pub struct Driver<'a, I> {
/// The definition of the test that the driver is instructed to execute.
test_definition: &'a TestDefinition<'a>,
/// The private key allocator used by this driver and other drivers when account allocations
/// are needed.
/// The private key allocator used by this driver and other drivers when account allocations are
/// needed.
private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
/// The execution state associated with the platform.
@@ -123,13 +124,7 @@ where
&self.platform_information.reporter,
)
.await
.inspect_err(|err| {
error!(
?err,
platform_identifier = %self.platform_information.platform.platform_identifier(),
"Pre-linking compilation failed"
)
})
.inspect_err(|err| error!(?err, "Pre-linking compilation failed"))
.context("Failed to produce the pre-linking compiled contracts")?;
let mut deployed_libraries = None::<HashMap<_, _>>;
@@ -137,13 +132,7 @@ where
.test_definition
.metadata
.contract_sources()
.inspect_err(|err| {
error!(
?err,
platform_identifier = %self.platform_information.platform.platform_identifier(),
"Failed to retrieve contract sources from metadata"
)
})
.inspect_err(|err| error!(?err, "Failed to retrieve contract sources from metadata"))
.context("Failed to get the contract instances from the metadata file")?;
for library_instance in self
.test_definition
@@ -191,20 +180,19 @@ where
TransactionRequest::default().from(deployer_address),
code,
);
let receipt = self.execute_transaction(tx).await.inspect_err(|err| {
error!(
?err,
%library_instance,
platform_identifier = %self.platform_information.platform.platform_identifier(),
"Failed to deploy the library"
)
})?;
let receipt = self
.execute_transaction(tx)
.and_then(|(_, receipt_fut)| receipt_fut)
.await
.inspect_err(|err| {
error!(
?err,
%library_instance,
"Failed to deploy the library"
)
})?;
debug!(
?library_instance,
platform_identifier = %self.platform_information.platform.platform_identifier(),
"Deployed library"
);
debug!(?library_instance, "Deployed library");
let library_address = receipt
.contract_address
@@ -227,13 +215,7 @@ where
&self.platform_information.reporter,
)
.await
.inspect_err(|err| {
error!(
?err,
platform_identifier = %self.platform_information.platform.platform_identifier(),
"Post-linking compilation failed"
)
})
.inspect_err(|err| error!(?err, "Post-linking compilation failed"))
.context("Failed to compile the post-link contracts")?;
self.execution_state = ExecutionState::new(
@@ -269,7 +251,6 @@ where
skip_all,
fields(
driver_id = self.driver_id,
platform_identifier = %self.platform_information.platform.platform_identifier(),
%step_path,
),
err(Debug),
@@ -305,15 +286,11 @@ where
.handle_function_call_contract_deployment(step)
.await
.context("Failed to deploy contracts for the function call step")?;
let execution_receipt = self
let transaction_hash = self
.handle_function_call_execution(step, deployment_receipts)
.await
.context("Failed to handle the function call execution")?;
let tracing_result = self
.handle_function_call_call_frame_tracing(execution_receipt.transaction_hash)
.await
.context("Failed to handle the function call call frame tracing")?;
self.handle_function_call_variable_assignment(step, &tracing_result)
self.handle_function_call_variable_assignment(step, transaction_hash)
.await
.context("Failed to handle function call variable assignment")?;
Ok(1)
@@ -367,18 +344,19 @@ where
&mut self,
step: &FunctionCallStep,
mut deployment_receipts: HashMap<ContractInstance, TransactionReceipt>,
) -> Result<TransactionReceipt> {
) -> Result<TxHash> {
match step.method {
// This step was already executed when `handle_step` was called. We just need to
// lookup the transaction receipt in this case and continue on.
Method::Deployer => deployment_receipts
.remove(&step.instance)
.context("Failed to find deployment receipt for constructor call"),
.context("Failed to find deployment receipt for constructor call")
.map(|receipt| receipt.transaction_hash),
Method::Fallback | Method::FunctionName(_) => {
let tx = step
.as_transaction(self.resolver.as_ref(), self.default_resolution_context())
.await?;
self.execute_transaction(tx).await
Ok(self.execute_transaction(tx).await?.0)
}
}
}
@@ -417,15 +395,19 @@ where
async fn handle_function_call_variable_assignment(
&mut self,
step: &FunctionCallStep,
tracing_result: &CallFrame,
tx_hash: TxHash,
) -> Result<()> {
let Some(ref assignments) = step.variable_assignments else {
return Ok(());
};
// Handling the return data variable assignments.
let callframe = OnceCell::new();
for (variable_name, output_word) in assignments.return_data.iter().zip(
tracing_result
callframe
.get_or_try_init(|| self.handle_function_call_call_frame_tracing(tx_hash))
.await
.context("Failed to get the callframe trace for transaction")?
.output
.as_ref()
.unwrap_or_default()
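
The hunk above makes call-frame tracing lazy: a tokio `OnceCell` fetches the trace at most once, and only when a variable assignment actually needs it. A minimal sketch of the `get_or_try_init` pattern, with `fetch_trace` as a stand-in for the real tracing call:

```rust
// A minimal sketch of the lazy, at-most-once trace fetch; fetch_trace is a
// stand-in for the real call-frame tracing RPC.
use anyhow::Result;
use tokio::sync::OnceCell;

async fn fetch_trace() -> Result<String> {
    Ok("call frame".to_string()) // Imagine an expensive tracing call here.
}

#[tokio::main]
async fn main() -> Result<()> {
    let callframe: OnceCell<String> = OnceCell::new();
    for _ in 0..3 {
        // The first iteration triggers fetch_trace; later ones reuse the value.
        let trace = callframe.get_or_try_init(fetch_trace).await?;
        println!("{trace}");
    }
    Ok(())
}
```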
@@ -547,7 +529,6 @@ where
skip_all,
fields(
driver_id = self.driver_id,
platform_identifier = %self.platform_information.platform.platform_identifier(),
%contract_instance,
%deployer
),
@@ -590,7 +571,6 @@ where
skip_all,
fields(
driver_id = self.driver_id,
platform_identifier = %self.platform_information.platform.platform_identifier(),
%contract_instance,
%deployer
),
@@ -660,7 +640,11 @@ where
TransactionBuilder::<Ethereum>::with_deploy_code(tx, code)
};
let receipt = match self.execute_transaction(tx).await {
let receipt = match self
.execute_transaction(tx)
.and_then(|(_, receipt_fut)| receipt_fut)
.await
{
Ok(receipt) => receipt,
Err(error) => {
tracing::error!(?error, "Contract deployment transaction failed.");
@@ -734,7 +718,7 @@ where
async fn execute_transaction(
&self,
transaction: TransactionRequest,
) -> anyhow::Result<TransactionReceipt> {
) -> anyhow::Result<(TxHash, impl Future<Output = Result<TransactionReceipt>>)> {
let node = self.platform_information.node;
let transaction_hash = node
.submit_transaction(transaction)
@@ -747,24 +731,28 @@ where
.send(WatcherEvent::SubmittedTransaction { transaction_hash })
.context("Failed to send the transaction hash to the watcher")?;
info!("Starting to poll for transaction receipt");
poll(
Duration::from_secs(30 * 60),
PollingWaitBehavior::Constant(Duration::from_secs(1)),
|| {
async move {
match node.get_receipt(transaction_hash).await {
Ok(receipt) => {
info!("Polling succeeded, receipt found");
Ok(ControlFlow::Break(receipt))
Ok((transaction_hash, async move {
info!("Starting to poll for transaction receipt");
poll(
Duration::from_secs(30 * 60),
PollingWaitBehavior::Constant(Duration::from_secs(1)),
|| {
async move {
match node.get_receipt(transaction_hash).await {
Ok(receipt) => {
info!("Polling succeeded, receipt found");
Ok(ControlFlow::Break(receipt))
}
Err(_) => Ok(ControlFlow::Continue(())),
}
Err(_) => Ok(ControlFlow::Continue(())),
}
}
.instrument(info_span!("Polling for receipt"))
},
)
.await
.instrument(info_span!("Polling for receipt"))
},
)
.instrument(info_span!("Polling for receipt", %transaction_hash))
.await
.inspect(|_| info!("Found the transaction receipt"))
}))
}
// endregion:Transaction Execution
}
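
This hunk splits `execute_transaction` into an eager submit and a lazy receipt future: the transaction hash is returned as soon as the transaction is submitted, while receipt polling only runs if the caller awaits the returned future. A minimal sketch of the pattern with stand-in types (the real driver uses alloy's `TxHash` and `TransactionReceipt`):

```rust
// A minimal sketch of the submit-then-await split, with stand-in types;
// the real driver uses alloy's TxHash and TransactionReceipt.
use std::future::Future;

use anyhow::Result;

type TxHash = [u8; 32];
struct TransactionReceipt;

async fn execute_transaction(
) -> Result<(TxHash, impl Future<Output = Result<TransactionReceipt>>)> {
    let hash = [0u8; 32]; // Available as soon as the transaction is submitted.
    Ok((hash, async move {
        // Receipt polling runs lazily, only if the caller awaits this future.
        Ok(TransactionReceipt)
    }))
}

#[tokio::main]
async fn main() -> Result<()> {
    // The hash can be used immediately (e.g. for variable assignment) while
    // awaiting the receipt is deferred until it is actually needed.
    let (_hash, receipt_fut) = execute_transaction().await?;
    let _receipt = receipt_fut.await?;
    Ok(())
}
```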
@@ -8,7 +8,7 @@ use revive_dt_common::types::PrivateKeyAllocator;
use revive_dt_core::Platform;
use revive_dt_format::steps::{Step, StepIdx, StepPath};
use tokio::sync::Mutex;
use tracing::{error, info, info_span, instrument, warn};
use tracing::{Instrument, error, info, info_span, instrument, warn};
use revive_dt_config::{BenchmarkingContext, Context};
use revive_dt_report::Reporter;
@@ -159,12 +159,15 @@ pub async fn handle_differential_benchmarks(
futures::future::try_join(
watcher.run(),
driver.execute_all().inspect(|_| {
info!("All transactions submitted - driver completed execution");
watcher_tx
.send(WatcherEvent::AllTransactionsSubmitted)
.unwrap()
}),
driver
.execute_all()
.instrument(info_span!("Executing Benchmarks", %platform_identifier))
.inspect(|_| {
info!("All transactions submitted - driver completed execution");
watcher_tx
.send(WatcherEvent::AllTransactionsSubmitted)
.unwrap()
}),
)
.await
.context("Failed to run the driver and executor")
@@ -10,15 +10,14 @@ use revive_dt_format::metadata::{ContractIdent, ContractInstance};
#[derive(Clone)]
/// The state associated with the test execution of one of the workloads.
pub struct ExecutionState {
/// The compiled contracts, these contracts have been compiled and have had the libraries
/// linked against them and therefore they're ready to be deployed on-demand.
/// The compiled contracts, these contracts have been compiled and have had the libraries linked
/// against them and therefore they're ready to be deployed on-demand.
pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
/// A map of all of the deployed contracts and information about them.
pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
/// This map stores the variables used for each one of the cases contained in the metadata
/// file.
/// This map stores the variables used for each one of the cases contained in the metadata file.
pub variables: HashMap<String, U256>,
}
@@ -104,6 +104,10 @@ impl Watcher {
async move {
let mut mined_blocks_information = Vec::new();
// region:TEMPORARY
eprintln!("Watcher information for {}", self.platform_identifier);
eprintln!("block_number,block_timestamp,mined_gas,block_gas_limit,tx_count");
// endregion:TEMPORARY
while let Some(block) = blocks_information_stream.next().await {
// If the block number is equal to or less than the last block before the
// repetition then we ignore it and continue on to the next block.
@@ -118,8 +122,9 @@ impl Watcher {
}
info!(
remaining_transactions = watch_for_transaction_hashes.read().await.len(),
block_number = block.block_number,
block_tx_count = block.transaction_hashes.len(),
remaining_transactions = watch_for_transaction_hashes.read().await.len(),
"Observed a block"
);
@@ -131,6 +136,20 @@ impl Watcher {
watch_for_transaction_hashes.remove(tx_hash);
}
// region:TEMPORARY
// TODO: The following code is TEMPORARY and will be removed once we have proper
// reporting in place. This serves as a way of doing some very simple reporting
// for the time being.
eprintln!(
"\"{}\",\"{}\",\"{}\",\"{}\",\"{}\"",
block.block_number,
block.block_timestamp,
block.mined_gas,
block.block_gas_limit,
block.transaction_hashes.len()
);
// endregion:TEMPORARY
mined_blocks_information.push(block);
}
@@ -139,41 +158,10 @@ impl Watcher {
}
};
let (_, mined_blocks_information) =
let (_, _) =
futures::future::join(watcher_event_watching_task, block_information_watching_task)
.await;
// region:TEMPORARY
{
// TODO: The following code is TEMPORARY and will be removed once we have proper
// reporting in place. This serves as a way of doing some very simple reporting
// for the time being.
use std::io::Write;
let mut stderr = std::io::stderr().lock();
writeln!(
stderr,
"Watcher information for {}",
self.platform_identifier
)?;
writeln!(
stderr,
"block_number,block_timestamp,mined_gas,block_gas_limit,tx_count"
)?;
for block in mined_blocks_information {
writeln!(
stderr,
"{},{},{},{},{}",
block.block_number,
block.block_timestamp,
block.mined_gas,
block.block_gas_limit,
block.transaction_hashes.len()
)?
}
}
// endregion:TEMPORARY
Ok(())
}
}
+5 -22
@@ -131,8 +131,8 @@ pub struct PlatformDriver<'a, I> {
/// The definition of the test that the driver is instructed to execute.
test_definition: &'a TestDefinition<'a>,
/// The private key allocator used by this driver and other drivers when account allocations
/// are needed.
/// The private key allocator used by this driver and other drivers when account allocations are
/// needed.
private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
/// The execution state associated with the platform.
@@ -415,13 +415,9 @@ where
let caller = {
let context = self.default_resolution_context();
let resolver = self.platform_information.node.resolver().await?;
let resolved = step
.caller
step.caller
.resolve_address(resolver.as_ref(), context)
.await?;
self.platform_information
.node
.resolve_signer_or_default(resolved)
.await?
};
if let (_, _, Some(receipt)) = self
.get_or_deploy_contract_instance(&instance, caller, calldata, value)
@@ -449,7 +445,7 @@ where
.context("Failed to find deployment receipt for constructor call"),
Method::Fallback | Method::FunctionName(_) => {
let resolver = self.platform_information.node.resolver().await?;
let mut tx = match step
let tx = match step
.as_transaction(resolver.as_ref(), self.default_resolution_context())
.await
{
@@ -459,15 +455,6 @@ where
}
};
// Resolve the signer to ensure we use an address that has keys
if let Some(from) = tx.from {
tx.from = Some(
self.platform_information
.node
.resolve_signer_or_default(from),
);
}
self.platform_information.node.execute_transaction(tx).await
}
}
@@ -967,10 +954,6 @@ where
}
let tx = {
let deployer = self
.platform_information
.node
.resolve_signer_or_default(deployer);
let tx = TransactionRequest::default().from(deployer);
let tx = match value {
Some(ref value) => tx.value(value.into_inner()),
+123 -58
@@ -7,14 +7,15 @@ use std::{
time::{Duration, Instant},
};
use crate::Platform;
use ansi_term::{ANSIStrings, Color};
use anyhow::Context as _;
use futures::{FutureExt, StreamExt};
use revive_dt_common::types::PrivateKeyAllocator;
use revive_dt_core::Platform;
use tokio::sync::{Mutex, RwLock, Semaphore};
use tracing::{Instrument, error, info, info_span, instrument};
use revive_dt_config::{Context, TestExecutionContext};
use revive_dt_config::{Context, OutputFormat, TestExecutionContext};
use revive_dt_report::{Reporter, ReporterEvent, TestCaseStatus};
use crate::{
@@ -176,7 +177,7 @@ pub async fn handle_differential_tests(
.report_completion_event()
.expect("Can't fail")
});
let cli_reporting_task = start_cli_reporting_task(reporter);
let cli_reporting_task = start_cli_reporting_task(context.output_format, reporter);
tokio::task::spawn(async move {
loop {
@@ -196,21 +197,15 @@ pub async fn handle_differential_tests(
}
#[allow(irrefutable_let_patterns, clippy::uninlined_format_args)]
async fn start_cli_reporting_task(reporter: Reporter) {
async fn start_cli_reporting_task(output_format: OutputFormat, reporter: Reporter) {
let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail");
drop(reporter);
let start = Instant::now();
const GREEN: &str = "\x1B[32m";
const RED: &str = "\x1B[31m";
const GREY: &str = "\x1B[90m";
const COLOR_RESET: &str = "\x1B[0m";
const BOLD: &str = "\x1B[1m";
const BOLD_RESET: &str = "\x1B[22m";
let mut number_of_successes = 0;
let mut number_of_failures = 0;
let mut global_success_count = 0;
let mut global_failure_count = 0;
let mut global_ignore_count = 0;
let mut buf = BufWriter::new(stderr());
while let Ok(event) = aggregator_events_rx.recv().await {
@@ -223,55 +218,125 @@ async fn start_cli_reporting_task(reporter: Reporter) {
continue;
};
let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display());
for (case_idx, case_status) in case_status.into_iter() {
let _ = write!(buf, "\tCase Index {case_idx:>3}: ");
let _ = match case_status {
TestCaseStatus::Succeeded { steps_executed } => {
number_of_successes += 1;
writeln!(
buf,
"{}{}Case Succeeded{} - Steps Executed: {}{}",
GREEN, BOLD, BOLD_RESET, steps_executed, COLOR_RESET
)
match output_format {
OutputFormat::Legacy => {
let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display());
for (case_idx, case_status) in case_status.into_iter() {
let _ = write!(buf, "\tCase Index {case_idx:>3}: ");
let _ = match case_status {
TestCaseStatus::Succeeded { steps_executed } => {
global_success_count += 1;
writeln!(
buf,
"{}",
ANSIStrings(&[
Color::Green.bold().paint("Case Succeeded"),
Color::Green
.paint(format!(" - Steps Executed: {steps_executed}")),
])
)
}
TestCaseStatus::Failed { reason } => {
global_failure_count += 1;
writeln!(
buf,
"{}",
ANSIStrings(&[
Color::Red.bold().paint("Case Failed"),
Color::Red.paint(format!(" - Reason: {}", reason.trim())),
])
)
}
TestCaseStatus::Ignored { reason, .. } => {
global_ignore_count += 1;
writeln!(
buf,
"{}",
ANSIStrings(&[
Color::Yellow.bold().paint("Case Ignored"),
Color::Yellow.paint(format!(" - Reason: {}", reason.trim())),
])
)
}
};
}
TestCaseStatus::Failed { reason } => {
number_of_failures += 1;
writeln!(
buf,
"{}{}Case Failed{} - Reason: {}{}",
RED,
BOLD,
BOLD_RESET,
reason.trim(),
COLOR_RESET,
)
}
TestCaseStatus::Ignored { reason, .. } => writeln!(
let _ = writeln!(buf);
}
OutputFormat::CargoTestLike => {
writeln!(
buf,
"{}{}Case Ignored{} - Reason: {}{}",
GREY,
BOLD,
BOLD_RESET,
reason.trim(),
COLOR_RESET,
),
};
"\t{} {} - {}\n",
Color::Green.paint("Running"),
metadata_file_path.display(),
mode
)
.unwrap();
let mut success_count = 0;
let mut failure_count = 0;
let mut ignored_count = 0;
writeln!(buf, "running {} tests", case_status.len()).unwrap();
for (case_idx, case_result) in case_status.iter() {
let status = match case_result {
TestCaseStatus::Succeeded { .. } => {
success_count += 1;
global_success_count += 1;
Color::Green.paint("ok")
}
TestCaseStatus::Failed { reason } => {
failure_count += 1;
global_failure_count += 1;
Color::Red.paint(format!("FAILED, {reason}"))
}
TestCaseStatus::Ignored { reason, .. } => {
ignored_count += 1;
global_ignore_count += 1;
Color::Yellow.paint(format!("ignored, {reason:?}"))
}
};
writeln!(buf, "test case_idx_{} ... {}", case_idx, status).unwrap();
}
writeln!(buf).unwrap();
let status = if failure_count > 0 {
Color::Red.paint("FAILED")
} else {
Color::Green.paint("ok")
};
writeln!(
buf,
"test result: {}. {} passed; {} failed; {} ignored",
status, success_count, failure_count, ignored_count,
)
.unwrap();
writeln!(buf).unwrap()
}
}
let _ = writeln!(buf);
}
// Summary at the end.
let _ = writeln!(
buf,
"{} cases: {}{}{} cases succeeded, {}{}{} cases failed in {} seconds",
number_of_successes + number_of_failures,
GREEN,
number_of_successes,
COLOR_RESET,
RED,
number_of_failures,
COLOR_RESET,
start.elapsed().as_secs()
);
match output_format {
OutputFormat::Legacy => {
writeln!(
buf,
"{} cases: {} cases succeeded, {} cases failed in {} seconds",
global_success_count + global_failure_count + global_ignore_count,
Color::Green.paint(global_success_count.to_string()),
Color::Red.paint(global_failure_count.to_string()),
start.elapsed().as_secs()
)
.unwrap();
}
OutputFormat::CargoTestLike => {
writeln!(
buf,
"run finished. {} passed; {} failed; {} ignored; finished in {}s",
global_success_count,
global_failure_count,
global_ignore_count,
start.elapsed().as_secs()
)
.unwrap();
}
}
}
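
The reporting task above replaces hand-rolled ANSI escape constants with the `ansi_term` crate. A minimal sketch of the `ANSIStrings` usage, assuming `ansi_term` 0.12; the message text is illustrative:

```rust
// A minimal sketch of the ANSIStrings usage, assuming ansi_term 0.12; the
// message text is illustrative.
use ansi_term::{ANSIStrings, Color};

fn main() {
    // ANSIStrings groups styled segments and minimises the escape codes
    // emitted, replacing the hand-rolled GREEN/BOLD constants.
    let parts = [
        Color::Green.bold().paint("Case Succeeded"),
        Color::Green.paint(" - Steps Executed: 4"),
    ];
    eprintln!("{}", ANSIStrings(&parts));
}
```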
@@ -10,15 +10,14 @@ use revive_dt_format::metadata::{ContractIdent, ContractInstance};
#[derive(Clone)]
/// The state associated with the test execution of one of the tests.
pub struct ExecutionState {
/// The compiled contracts, these contracts have been compiled and have had the libraries
/// linked against them and therefore they're ready to be deployed on-demand.
/// The compiled contracts, these contracts have been compiled and have had the libraries linked
/// against them and therefore they're ready to be deployed on-demand.
pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
/// A map of all of the deployed contracts and information about them.
pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
/// This map stores the variables used for each one of the cases contained in the metadata
/// file.
/// This map stores the variables used for each one of the cases contained in the metadata file.
pub variables: HashMap<String, U256>,
}
+1 -1
@@ -8,10 +8,10 @@ use std::{
sync::{Arc, LazyLock},
};
use crate::Platform;
use futures::FutureExt;
use revive_dt_common::{iterators::FilesWithExtensionIterator, types::CompilerIdentifier};
use revive_dt_compiler::{Compiler, CompilerOutput, Mode, SolidityCompiler};
use revive_dt_core::Platform;
use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata};
use alloy::{hex::ToHexExt, json_abi::JsonAbi, primitives::Address};
+1 -1
@@ -2,9 +2,9 @@
use std::sync::atomic::{AtomicUsize, Ordering};
use crate::Platform;
use anyhow::Context as _;
use revive_dt_config::*;
use revive_dt_core::Platform;
use revive_dt_node_interaction::EthereumNode;
/// The node pool starts one or more [Node] which then can be accessed
+13 -7
@@ -1,22 +1,28 @@
use std::{borrow::Cow, collections::BTreeMap, path::Path, sync::Arc};
use std::collections::BTreeMap;
use std::sync::Arc;
use std::{borrow::Cow, path::Path};
use futures::{Stream, StreamExt, stream};
use indexmap::{IndexMap, indexmap};
use revive_dt_common::{iterators::EitherIter, types::PlatformIdentifier};
use revive_dt_common::iterators::EitherIter;
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_config::Context;
use revive_dt_format::mode::ParsedMode;
use serde_json::{Value, json};
use revive_dt_compiler::{Mode, SolidityCompiler};
use revive_dt_compiler::Mode;
use revive_dt_compiler::SolidityCompiler;
use revive_dt_format::{
case::{Case, CaseIdx},
metadata::MetadataFile,
};
use revive_dt_node_interaction::EthereumNode;
use revive_dt_report::{ExecutionSpecificReporter, Reporter, TestSpecificReporter, TestSpecifier};
use revive_dt_report::{ExecutionSpecificReporter, Reporter};
use revive_dt_report::{TestSpecificReporter, TestSpecifier};
use tracing::{debug, error, info};
use crate::{Platform, helpers::NodePool};
use crate::Platform;
use crate::helpers::NodePool;
pub async fn create_test_definitions_stream<'a>(
// This is only required for creating the compiler objects and is not used anywhere else in the
@@ -63,8 +69,8 @@ pub async fn create_test_definitions_stream<'a>(
)
})
})
// Inform the reporter of each one of the test cases that were discovered which we
// expect to run.
// Inform the reporter of each one of the test cases that were discovered which we expect to
// run.
.inspect(|(_, _, _, _, reporter)| {
reporter
.report_test_case_discovery_event()
+20 -19
@@ -3,9 +3,6 @@
//! This crate defines the testing configuration and
//! provides a helper utility to execute tests.
pub mod differential_tests;
pub mod helpers;
use std::{
pin::Pin,
thread::{self, JoinHandle},
@@ -17,17 +14,13 @@ use revive_dt_common::types::*;
use revive_dt_compiler::{SolidityCompiler, revive_resolc::Resolc, solc::Solc};
use revive_dt_config::*;
use revive_dt_node::{
Node,
node_implementations::{
geth::GethNode, lighthouse_geth::LighthouseGethNode, substrate::SubstrateNode,
zombienet::ZombieNode,
},
Node, node_implementations::geth::GethNode,
node_implementations::lighthouse_geth::LighthouseGethNode,
node_implementations::substrate::SubstrateNode, node_implementations::zombienet::ZombienetNode,
};
use revive_dt_node_interaction::EthereumNode;
use tracing::info;
pub use helpers::CachedCompiler;
/// A trait that describes the interface for the platforms that are supported by the tool.
#[allow(clippy::type_complexity)]
pub trait Platform {
@@ -191,6 +184,7 @@ impl Platform for KitchensinkPolkavmResolcPlatform {
let node = SubstrateNode::new(
kitchensink_path,
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
None,
context,
);
let node = spawn_node(node, genesis)?;
@@ -243,6 +237,7 @@ impl Platform for KitchensinkRevmSolcPlatform {
let node = SubstrateNode::new(
kitchensink_path,
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
None,
context,
);
let node = spawn_node(node, genesis)?;
@@ -287,14 +282,17 @@ impl Platform for ReviveDevNodePolkavmResolcPlatform {
context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
.path
.clone();
let revive_dev_node_configuration = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context);
let revive_dev_node_path = revive_dev_node_configuration.path.clone();
let revive_dev_node_consensus = revive_dev_node_configuration.consensus.clone();
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let node = SubstrateNode::new(
revive_dev_node_path,
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
Some(revive_dev_node_consensus),
context,
);
let node = spawn_node(node, genesis)?;
@@ -339,14 +337,17 @@ impl Platform for ReviveDevNodeRevmSolcPlatform {
context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
.path
.clone();
let revive_dev_node_configuration = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context);
let revive_dev_node_path = revive_dev_node_configuration.path.clone();
let revive_dev_node_consensus = revive_dev_node_configuration.consensus.clone();
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let node = SubstrateNode::new(
revive_dev_node_path,
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
Some(revive_dev_node_consensus),
context,
);
let node = spawn_node(node, genesis)?;
@@ -396,7 +397,7 @@ impl Platform for ZombienetPolkavmResolcPlatform {
.clone();
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let node = ZombieNode::new(polkadot_parachain_path, context);
let node = ZombienetNode::new(polkadot_parachain_path, context);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
@@ -408,7 +409,7 @@ impl Platform for ZombienetPolkavmResolcPlatform {
version: Option<VersionOrRequirement>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
Box::pin(async move {
let compiler = Solc::new(context, version).await;
let compiler = Resolc::new(context, version).await;
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
})
}
@@ -444,7 +445,7 @@ impl Platform for ZombienetRevmSolcPlatform {
.clone();
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let node = ZombieNode::new(polkadot_parachain_path, context);
let node = ZombienetNode::new(polkadot_parachain_path, context);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
+2 -2
@@ -45,8 +45,8 @@ pub struct Case {
#[serde(skip_serializing_if = "Option::is_none")]
pub expected: Option<Expected>,
/// An optional boolean which defines if the case as a whole should be ignored. If null then
/// the case will not be ignored.
/// An optional boolean which defines if the case as a whole should be ignored. If null then the
/// case will not be ignored.
#[serde(skip_serializing_if = "Option::is_none")]
pub ignore: Option<bool>,
}
+11 -13
@@ -31,8 +31,8 @@ pub struct MetadataFile {
/// The path of the metadata file. This will either be a JSON or solidity file.
pub metadata_file_path: PathBuf,
/// This is the path contained within the corpus file. This could either be the path of some
/// dir or could be the actual metadata file path.
/// This is the path contained within the corpus file. This could either be the path of some dir
/// or could be the actual metadata file path.
pub corpus_file_path: PathBuf,
/// The metadata contained within the file.
@@ -69,13 +69,13 @@ impl Deref for MetadataFile {
/// of steps and assertions that should be performed as part of the test case.
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema, Clone, Eq, PartialEq)]
pub struct Metadata {
/// This is an optional comment on the metadata file which has no impact on the execution in
/// any way.
/// This is an optional comment on the metadata file which has no impact on the execution in any
/// way.
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
/// An optional boolean which defines if the metadata file as a whole should be ignored. If
/// null then the metadata file will not be ignored.
/// An optional boolean which defines if the metadata file as a whole should be ignored. If null
/// then the metadata file will not be ignored.
#[serde(skip_serializing_if = "Option::is_none")]
pub ignore: Option<bool>,
@@ -94,8 +94,8 @@ pub struct Metadata {
/// This is a map where the key is the name of the contract instance and the value is the
/// contract's path and ident in the file.
///
/// If any contract is to be used by the test then it must be included in here first so that
/// the framework is aware of its path, compiles it, and prepares it.
/// If any contract is to be used by the test then it must be included in here first so that the
/// framework is aware of its path, compiles it, and prepares it.
#[serde(skip_serializing_if = "Option::is_none")]
pub contracts: Option<BTreeMap<ContractInstance, ContractPathAndIdent>>,
@@ -123,9 +123,8 @@ pub struct Metadata {
pub required_evm_version: Option<EvmVersionRequirement>,
/// A set of compilation directives that will be passed to the compiler whenever the contracts
/// for the test are being compiled. Note that this differs from the [`Mode`]s in that a
/// [`Mode`] is just a filter for when a test can run whereas this is an instruction to the
/// compiler.
/// for the test are being compiled. Note that this differs from the [`Mode`]s in that a [`Mode`]
/// is just a filter for when a test can run whereas this is an instruction to the compiler.
#[serde(skip_serializing_if = "Option::is_none")]
pub compiler_directives: Option<CompilationDirectives>,
}
@@ -327,8 +326,7 @@ define_wrapper_type!(
)]
#[serde(try_from = "String", into = "String")]
pub struct ContractPathAndIdent {
/// The path of the contract source code relative to the directory containing the metadata
/// file.
/// The path of the contract source code relative to the directory containing the metadata file.
pub contract_source_path: PathBuf,
/// The identifier of the contract.
+6 -5
@@ -1,12 +1,13 @@
use anyhow::Context as _;
use regex::Regex;
use revive_dt_common::{
iterators::EitherIter,
types::{Mode, ModeOptimizerSetting, ModePipeline},
};
use revive_dt_common::iterators::EitherIter;
use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::{collections::HashSet, fmt::Display, str::FromStr, sync::LazyLock};
use std::collections::HashSet;
use std::fmt::Display;
use std::str::FromStr;
use std::sync::LazyLock;
/// This represents a mode that has been parsed from test metadata.
///
+26 -31
@@ -1,10 +1,11 @@
use std::{collections::HashMap, fmt::Display, str::FromStr};
use alloy::primitives::{FixedBytes, utils::parse_units};
use alloy::{
eips::BlockNumberOrTag,
json_abi::Function,
network::TransactionBuilder,
primitives::{Address, Bytes, FixedBytes, U256, utils::parse_units},
primitives::{Address, Bytes, U256},
rpc::types::TransactionRequest,
};
use anyhow::Context as _;
@@ -16,10 +17,8 @@ use serde::{Deserialize, Serialize};
use revive_dt_common::macros::define_wrapper_type;
use tracing::{Instrument, info_span, instrument};
use crate::{
metadata::ContractInstance,
traits::{ResolutionContext, ResolverApi},
};
use crate::traits::ResolverApi;
use crate::{metadata::ContractInstance, traits::ResolutionContext};
/// A test step.
///
@@ -148,8 +147,8 @@ pub struct FunctionCallStep {
#[schemars(skip)]
pub storage: Option<HashMap<String, Calldata>>,
/// Variable assignment to perform in the framework allowing us to reference them again later
/// on during the execution.
/// Variable assignment to perform in the framework allowing us to reference them again later on
/// during the execution.
#[serde(skip_serializing_if = "Option::is_none")]
pub variable_assignments: Option<VariableAssignments>,
}
@@ -489,20 +488,21 @@ impl FunctionCallStep {
anyhow::bail!("ABI for instance '{}' not found", self.instance.as_ref());
};
// We follow the same logic that's implemented in the matter-labs-tester where they
// resolve the function name into a function selector and they assume that the
// function doesn't have any existing overloads.
// We follow the same logic that's implemented in the matter-labs-tester where they resolve
// the function name into a function selector and they assume that the function doesn't have
// any existing overloads.
// Overloads are handled by providing the full function signature in the "function
// name".
// https://github.com/matter-labs/era-compiler-tester/blob/1dfa7d07cba0734ca97e24704f12dd57f6990c2c/compiler_tester/src/test/case/input/mod.rs#L158-L190
let selector = if function_name.contains('(') && function_name.contains(')') {
Function::parse(function_name)
let selector =
if function_name.contains('(') && function_name.contains(')') {
Function::parse(function_name)
.context(
"Failed to parse the provided function name into a function signature",
)?
.selector()
} else {
abi.functions()
} else {
abi.functions()
.find(|function| function.signature().starts_with(function_name))
.ok_or_else(|| {
anyhow::anyhow!(
@@ -511,21 +511,19 @@ impl FunctionCallStep {
&self.instance
)
})
.with_context(|| {
format!(
"Failed to resolve function selector for {:?} on instance {:?}",
function_name, &self.instance
)
})?
.with_context(|| format!(
"Failed to resolve function selector for {:?} on instance {:?}",
function_name, &self.instance
))?
.selector()
};
};
// Allocating a vector that we will be using for the calldata. The vector size will
// be: 4 bytes for the function selector.
// Allocating a vector that we will be using for the calldata. The vector size will be:
// 4 bytes for the function selector.
// function.inputs.len() * 32 bytes for the arguments (each argument is a U256).
//
// We're using indices in the following code in order to avoid the need for us to
// allocate a new buffer for each one of the resolved arguments.
// We're using indices in the following code in order to avoid the need for us to allocate
// a new buffer for each one of the resolved arguments.
let mut calldata = Vec::<u8>::with_capacity(4 + self.calldata.size_requirement());
calldata.extend(selector.0);
self.calldata
@@ -961,12 +959,9 @@ impl<'de> Deserialize<'de> for EtherValue {
#[cfg(test)]
mod tests {
use alloy::{
eips::BlockNumberOrTag,
json_abi::JsonAbi,
primitives::{BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, address},
sol_types::SolValue,
};
use alloy::primitives::{BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, address};
use alloy::sol_types::SolValue;
use alloy::{eips::BlockNumberOrTag, json_abi::JsonAbi};
use std::{collections::HashMap, pin::Pin};
use super::*;
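
The selector-resolution hunk above parses a full signature with `Function::parse` whenever the "function name" contains parentheses, which is how overloads are disambiguated. A minimal sketch using alloy's `json_abi` API (the signature string is illustrative):

```rust
// A minimal sketch of the selector resolution, using alloy's json_abi API
// (the signature string is illustrative).
use alloy::json_abi::Function;

fn main() {
    // A full signature (it contains parentheses) is parsed directly and the
    // 4-byte selector is derived from it; this is how overloads are handled.
    let function = Function::parse("f(uint256[1])")
        .expect("valid human-readable function signature");
    println!("selector: {}", function.selector());
}
```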
+6 -6
@@ -1,10 +1,10 @@
use std::{collections::HashMap, pin::Pin};
use std::collections::HashMap;
use std::pin::Pin;
use alloy::{
eips::BlockNumberOrTag,
json_abi::JsonAbi,
primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, U256},
};
use alloy::eips::BlockNumberOrTag;
use alloy::json_abi::JsonAbi;
use alloy::primitives::TxHash;
use alloy::primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, U256};
use anyhow::Result;
use crate::metadata::{ContractIdent, ContractInstance};
-34
@@ -1,34 +0,0 @@
[package]
name = "ml-test-runner"
description = "ML-based test runner for executing differential tests file by file"
version.workspace = true
authors.workspace = true
license.workspace = true
edition.workspace = true
repository.workspace = true
rust-version.workspace = true
[[bin]]
name = "ml-test-runner"
path = "src/main.rs"
[dependencies]
revive-dt-common = { workspace = true }
revive-dt-compiler = { workspace = true }
revive-dt-config = { workspace = true }
revive-dt-core = { workspace = true }
revive-dt-format = { workspace = true }
revive-dt-node = { workspace = true }
revive-dt-node-interaction = { workspace = true }
revive-dt-report = { workspace = true }
alloy = { workspace = true }
anyhow = { workspace = true }
clap = { workspace = true }
tokio = { workspace = true }
temp-dir = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
[lints]
workspace = true
-74
@@ -1,74 +0,0 @@
# ML Test Runner
A test runner for executing Revive differential tests file-by-file with cargo-test-style output.
This is similar to the `retester` binary but designed for ML-based test execution with a focus on:
- Running tests file-by-file (rather than in bulk)
- Caching passed tests to skip them in future runs
- Providing cargo-test-style output for easy integration with ML pipelines
- Single platform testing (rather than differential testing)
## Features
- **File-by-file execution**: Run tests on individual `.sol` files, corpus files (`.json`), or recursively walk directories
- **Cached results**: Skip tests that have already passed using `--cached-passed`
- **Fail fast**: Stop on first failure with `--bail`
- **Cargo-like output**: Familiar test output format with colored pass/fail indicators
- **Platform support**: Test against `geth` or `kitchensink` platforms
## Usage
```bash
# Run a single .sol file (compile-only mode, default)
./ml-test-runner path/to/test.sol --platform geth
# Run all tests in a corpus file
./ml-test-runner path/to/corpus.json --platform kitchensink
# Walk a directory recursively for .sol files
./ml-test-runner path/to/tests/ --platform geth
# Use cached results and bail on first failure
./ml-test-runner path/to/tests/ --cached-passed ./cache.txt --bail
# Start the platform and execute tests (full mode)
./ml-test-runner path/to/tests/ --platform geth --start-platform
# Enable verbose logging (info, debug, or trace level)
RUST_LOG=info ./ml-test-runner path/to/tests/
RUST_LOG=debug ./ml-test-runner path/to/tests/ --start-platform
RUST_LOG=trace ./ml-test-runner path/to/tests/ --start-platform
```
## Arguments
- `<PATH>` - Path to test file (`.sol`), corpus file (`.json`), or folder of `.sol` files
- `--cached-passed <FILE>` - File to track tests that have already passed
- `--bail` - Stop after the first file failure
- `--platform <PLATFORM>` - Platform to test against (`geth`, `kitchensink`, or `zombienet`, default: `geth`)
- `--start-platform` - Start the platform and execute tests (default: `false`, compile-only mode)
## Output Format
The runner produces cargo-test-style output:
```
test path/to/test1.sol ... ok
test path/to/test2.sol ... FAILED
test path/to/test3.sol ... cached
failures:
---- path/to/test2.sol ----
Error: ...
test result: FAILED. 1 passed; 1 failed; 1 cached; finished in 2.34s
```
## Building
```bash
cargo build --release -p ml-test-runner
```
The binary will be available at `target/release/ml-test-runner`.
-541
@@ -1,541 +0,0 @@
use anyhow::Context;
use clap::Parser;
use revive_dt_common::{
iterators::FilesWithExtensionIterator,
types::{PlatformIdentifier, PrivateKeyAllocator},
};
use revive_dt_config::TestExecutionContext;
use revive_dt_core::{
CachedCompiler, Platform,
helpers::{TestDefinition, TestPlatformInformation},
};
use revive_dt_format::{
case::CaseIdx,
corpus::Corpus,
metadata::{Metadata, MetadataFile},
};
use std::{
borrow::Cow,
collections::{BTreeMap, HashSet},
fs::File,
io::{BufRead, BufReader, BufWriter, Write},
path::{Path, PathBuf},
sync::Arc,
time::Instant,
};
use temp_dir::TempDir;
use tokio::sync::Mutex;
use tracing::info;
use tracing_subscriber::{EnvFilter, FmtSubscriber};
/// ML-based test runner for executing differential tests file by file
#[derive(Debug, Parser)]
#[command(name = "ml-test-runner")]
struct MlTestRunnerArgs {
/// Path to test file (.sol), corpus file (.json), or folder containing .sol files
#[arg(value_name = "PATH")]
path: PathBuf,
/// File to cache tests that have already passed
#[arg(long = "cached-passed")]
cached_passed: Option<PathBuf>,
/// Stop after the first file failure
#[arg(long = "bail")]
bail: bool,
/// Platform to test against (e.g., geth-evm-solc, kitchensink-polkavm-resolc)
#[arg(long = "platform", default_value = "geth-evm-solc")]
platform: PlatformIdentifier,
/// Start the platform and wait for RPC readiness
#[arg(long = "start-platform", default_value = "false")]
start_platform: bool,
/// Private key to use for wallet initialization (hex string with or without 0x prefix)
#[arg(
long = "private-key",
default_value = "0x5fb92d6e98884f76de468fa3f6278f8807c48bebc13595d45af5bdc4da702133"
)]
private_key: String,
/// RPC port to connect to when using existing node
#[arg(long = "rpc-port", default_value = "8545")]
rpc_port: u16,
}
fn main() -> anyhow::Result<()> {
let subscriber = FmtSubscriber::builder()
.with_env_filter(EnvFilter::from_default_env())
.with_writer(std::io::stderr)
.finish();
tracing::subscriber::set_global_default(subscriber).expect("Failed to set tracing subscriber");
let args = MlTestRunnerArgs::parse();
info!("ML test runner starting");
info!("Platform: {:?}", args.platform);
info!("Start platform: {}", args.start_platform);
tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.expect("Failed building the Runtime")
.block_on(run(args))
}
async fn run(args: MlTestRunnerArgs) -> anyhow::Result<()> {
let start_time = Instant::now();
info!("Discovering test files from: {}", args.path.display());
let test_files = discover_test_files(&args.path)?;
info!("Found {} test file(s)", test_files.len());
let cached_passed = if let Some(cache_file) = &args.cached_passed {
let cached = load_cached_passed(cache_file)?;
info!("Loaded {} cached passed test(s)", cached.len());
cached
} else {
HashSet::new()
};
let cached_passed = Arc::new(Mutex::new(cached_passed));
let mut passed_files = 0;
let mut failed_files = 0;
let mut skipped_files = 0;
let mut failures = Vec::new();
const GREEN: &str = "\x1B[32m";
const RED: &str = "\x1B[31m";
const YELLOW: &str = "\x1B[33m";
const COLOUR_RESET: &str = "\x1B[0m";
const BOLD: &str = "\x1B[1m";
const BOLD_RESET: &str = "\x1B[22m";
for test_file in test_files {
let file_display = test_file.display().to_string();
// Check if already passed
{
let cache = cached_passed.lock().await;
if cache.contains(&file_display) {
println!("test {} ... {YELLOW}cached{COLOUR_RESET}", file_display);
skipped_files += 1;
continue;
}
}
info!("Loading metadata from: {}", test_file.display());
let metadata_file = match load_metadata_file(&test_file) {
Ok(mf) => {
info!("Loaded metadata with {} case(s)", mf.cases.len());
mf
}
Err(e) => {
println!("test {} ... {RED}FAILED{COLOUR_RESET}", file_display);
println!(" Error loading metadata: {}", e);
failed_files += 1;
failures.push((
file_display.clone(),
format!("Error loading metadata: {}", e),
));
if args.bail {
break;
}
continue;
}
};
info!("Executing test file: {}", file_display);
match execute_test_file(&args, &metadata_file).await {
Ok(_) => {
println!("test {} ... {GREEN}ok{COLOUR_RESET}", file_display);
info!("Test file passed: {}", file_display);
passed_files += 1;
{
let mut cache = cached_passed.lock().await;
cache.insert(file_display);
}
}
Err(e) => {
println!("test {} ... {RED}FAILED{COLOUR_RESET}", file_display);
failed_files += 1;
failures.push((file_display, format!("{:?}", e)));
if args.bail {
info!("Bailing after first failure");
break;
}
}
}
}
if let Some(cache_file) = &args.cached_passed {
let cache = cached_passed.lock().await;
info!("Saving {} cached passed test(s)", cache.len());
save_cached_passed(cache_file, &cache)?;
}
// Print summary
println!();
if !failures.is_empty() {
println!("{BOLD}failures:{BOLD_RESET}");
println!();
for (file, error) in &failures {
println!("---- {} ----", file);
println!("{}", error);
println!();
}
}
let elapsed = start_time.elapsed();
println!(
"test result: {}. {} passed; {} failed; {} cached; finished in {:.2}s",
if failed_files == 0 {
format!("{GREEN}ok{COLOUR_RESET}")
} else {
format!("{RED}FAILED{COLOUR_RESET}")
},
passed_files,
failed_files,
skipped_files,
elapsed.as_secs_f64()
);
if failed_files > 0 {
std::process::exit(1);
}
Ok(())
}
/// Discover test files from the given path
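/// Accepts a single `.sol` file, a `.json` corpus file (whose tests are enumerated), or a
/// directory that is walked recursively for `.sol` files.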
fn discover_test_files(path: &Path) -> anyhow::Result<Vec<PathBuf>> {
if !path.exists() {
anyhow::bail!("Path does not exist: {}", path.display());
}
let mut files = Vec::new();
if path.is_file() {
let extension = path.extension().and_then(|s| s.to_str()).unwrap_or("");
match extension {
"sol" => {
// Single .sol file
files.push(path.to_path_buf());
}
"json" => {
// Corpus file - enumerate its tests
let corpus = Corpus::try_from_path(path)?;
let metadata_files = corpus.enumerate_tests();
for metadata in metadata_files {
files.push(metadata.metadata_file_path);
}
}
_ => anyhow::bail!(
"Unsupported file extension: {}. Expected .sol or .json",
extension
),
}
} else if path.is_dir() {
// Walk directory recursively for .sol files
for entry in FilesWithExtensionIterator::new(path)
.with_allowed_extension("sol")
.with_use_cached_fs(true)
{
files.push(entry);
}
} else {
anyhow::bail!("Path is neither a file nor a directory: {}", path.display());
}
Ok(files)
}
/// Load metadata from a test file
fn load_metadata_file(path: &Path) -> anyhow::Result<MetadataFile> {
let metadata = Metadata::try_from_file(path)
.ok_or_else(|| anyhow::anyhow!("Failed to load metadata from {}", path.display()))?;
Ok(MetadataFile {
metadata_file_path: path.to_path_buf(),
corpus_file_path: path.to_path_buf(),
content: metadata,
})
}
/// Execute all test cases in a metadata file
async fn execute_test_file(
args: &MlTestRunnerArgs,
metadata_file: &MetadataFile,
) -> anyhow::Result<()> {
if metadata_file.cases.is_empty() {
anyhow::bail!("No test cases found in file");
}
info!("Processing {} test case(s)", metadata_file.cases.len());
// Get the platform based on CLI args
let platform: &dyn Platform = match args.platform {
PlatformIdentifier::GethEvmSolc => &revive_dt_core::GethEvmSolcPlatform,
PlatformIdentifier::LighthouseGethEvmSolc => &revive_dt_core::LighthouseGethEvmSolcPlatform,
PlatformIdentifier::KitchensinkPolkavmResolc => {
&revive_dt_core::KitchensinkPolkavmResolcPlatform
}
PlatformIdentifier::KitchensinkRevmSolc => &revive_dt_core::KitchensinkRevmSolcPlatform,
PlatformIdentifier::ReviveDevNodePolkavmResolc => {
&revive_dt_core::ReviveDevNodePolkavmResolcPlatform
}
PlatformIdentifier::ReviveDevNodeRevmSolc => &revive_dt_core::ReviveDevNodeRevmSolcPlatform,
PlatformIdentifier::ZombienetPolkavmResolc => {
&revive_dt_core::ZombienetPolkavmResolcPlatform
}
PlatformIdentifier::ZombienetRevmSolc => &revive_dt_core::ZombienetRevmSolcPlatform,
};
let temp_dir = TempDir::new()?;
info!("Created temporary directory: {}", temp_dir.path().display());
let test_context = TestExecutionContext::default();
let context = revive_dt_config::Context::Test(Box::new(test_context));
let node: &'static dyn revive_dt_node_interaction::EthereumNode = if args.start_platform {
info!("Starting blockchain node...");
let node_handle = platform
.new_node(context.clone())
.context("Failed to spawn node thread")?;
info!("Waiting for node to start...");
let node = node_handle
.join()
.map_err(|e| anyhow::anyhow!("Node thread panicked: {:?}", e))?
.context("Failed to start node")?;
info!(
"Node started with ID: {}, connection: {}",
node.id(),
node.connection_string()
);
let node = Box::leak(node);
info!("Running pre-transactions...");
node.pre_transactions()
.await
.context("Failed to run pre-transactions")?;
info!("Pre-transactions completed");
node
} else {
info!("Using existing node");
let existing_node: Box<dyn revive_dt_node_interaction::EthereumNode> = match args.platform {
PlatformIdentifier::GethEvmSolc | PlatformIdentifier::LighthouseGethEvmSolc => {
Box::new(
revive_dt_node::node_implementations::geth::GethNode::new_existing(
&args.private_key,
args.rpc_port,
)
.await?,
)
}
PlatformIdentifier::KitchensinkPolkavmResolc
| PlatformIdentifier::KitchensinkRevmSolc
| PlatformIdentifier::ReviveDevNodePolkavmResolc
| PlatformIdentifier::ReviveDevNodeRevmSolc
| PlatformIdentifier::ZombienetPolkavmResolc
| PlatformIdentifier::ZombienetRevmSolc => Box::new(
revive_dt_node::node_implementations::substrate::SubstrateNode::new_existing(
&args.private_key,
args.rpc_port,
)
.await?,
),
};
Box::leak(existing_node)
};
info!("Initializing cached compiler");
let cached_compiler = CachedCompiler::new(temp_dir.path().join("compilation_cache"), false)
.await
.map(Arc::new)
.context("Failed to create cached compiler")?;
let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new(
alloy::primitives::U256::from(100),
)));
let (reporter, report_task) =
revive_dt_report::ReportAggregator::new(context.clone()).into_task();
tokio::spawn(report_task);
info!(
"Building test definitions for {} case(s)",
metadata_file.cases.len()
);
let mut test_definitions = Vec::new();
for (case_idx, case) in metadata_file.cases.iter().enumerate() {
info!("Building test definition for case {}", case_idx);
let test_def = build_test_definition(
metadata_file,
case,
case_idx,
platform,
node,
&context,
&reporter,
)
.await?;
if let Some(test_def) = test_def {
info!("Test definition for case {} created successfully", case_idx);
test_definitions.push(test_def);
}
}
info!("Executing {} test definition(s)", test_definitions.len());
for (idx, test_definition) in test_definitions.iter().enumerate() {
info!("─────────────────────────────────────────────────────────────────");
info!(
"Executing case {}/{}: case_idx={}, mode={}, steps={}",
idx + 1,
test_definitions.len(),
test_definition.case_idx,
test_definition.mode,
test_definition.case.steps.len()
);
info!("Creating driver for case {}", test_definition.case_idx);
let driver = revive_dt_core::differential_tests::Driver::new_root(
test_definition,
private_key_allocator.clone(),
&cached_compiler,
)
.await
.context("Failed to create driver")?;
info!(
"Running {} step(s) for case {}",
test_definition.case.steps.len(),
test_definition.case_idx
);
let steps_executed = driver.execute_all().await.context(format!(
"Failed to execute case {}",
test_definition.case_idx
))?;
info!(
"✓ Case {} completed successfully, executed {} step(s)",
test_definition.case_idx, steps_executed
);
}
info!("─────────────────────────────────────────────────────────────────");
info!(
"All {} test case(s) executed successfully",
test_definitions.len()
);
Ok(())
}
/// Build a test definition for a single test case
async fn build_test_definition<'a>(
metadata_file: &'a MetadataFile,
case: &'a revive_dt_format::case::Case,
case_idx: usize,
platform: &'a dyn Platform,
node: &'a dyn revive_dt_node_interaction::EthereumNode,
context: &revive_dt_config::Context,
reporter: &revive_dt_report::Reporter,
) -> anyhow::Result<Option<TestDefinition<'a>>> {
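// Mode selection precedence: the case's own `modes`, then the file-level `modes`, and
// finally the first of all known compiler modes as a fallback.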
let mode = case
.modes
.as_ref()
.or(metadata_file.modes.as_ref())
.and_then(|modes| modes.first())
.and_then(|parsed_mode| parsed_mode.to_modes().next())
.map(Cow::Owned)
.or_else(|| revive_dt_compiler::Mode::all().next().map(Cow::Borrowed))
.unwrap();
let compiler = platform
.new_compiler(context.clone(), mode.version.clone().map(Into::into))
.await
.context("Failed to create compiler")?;
let test_reporter =
reporter.test_specific_reporter(Arc::new(revive_dt_report::TestSpecifier {
solc_mode: mode.as_ref().clone(),
metadata_file_path: metadata_file.metadata_file_path.clone(),
case_idx: CaseIdx::new(case_idx),
}));
let execution_reporter =
test_reporter.execution_specific_reporter(node.id(), platform.platform_identifier());
let mut platforms = BTreeMap::new();
platforms.insert(
platform.platform_identifier(),
TestPlatformInformation {
platform,
node,
compiler,
reporter: execution_reporter,
},
);
let test_definition = TestDefinition {
metadata: metadata_file,
metadata_file_path: &metadata_file.metadata_file_path,
mode,
case_idx: CaseIdx::new(case_idx),
case,
platforms,
reporter: test_reporter,
};
if let Err((reason, _)) = test_definition.check_compatibility() {
println!(" Skipping case {}: {}", case_idx, reason);
return Ok(None);
}
Ok(Some(test_definition))
}
/// Load cached passed tests from file
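/// The cache is a plain-text file with one previously-passed test path per line; blank lines
/// are ignored.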
fn load_cached_passed(path: &Path) -> anyhow::Result<HashSet<String>> {
if !path.exists() {
return Ok(HashSet::new());
}
let file = File::open(path).context("Failed to open cached-passed file")?;
let reader = BufReader::new(file);
let mut cache = HashSet::new();
for line in reader.lines() {
let line = line?;
let trimmed = line.trim();
if !trimmed.is_empty() {
cache.insert(trimmed.to_string());
}
}
Ok(cache)
}
/// Save cached passed tests to file
fn save_cached_passed(path: &Path, cache: &HashSet<String>) -> anyhow::Result<()> {
let file = File::create(path).context("Failed to create cached-passed file")?;
let mut writer = BufWriter::new(file);
let mut entries: Vec<_> = cache.iter().collect();
entries.sort();
for entry in entries {
writeln!(writer, "{}", entry)?;
}
writer.flush()?;
Ok(())
}
+5 -12
View File
@@ -1,14 +1,11 @@
//! This crate implements all node interactions.
use std::{pin::Pin, sync::Arc};
use std::pin::Pin;
use std::sync::Arc;
use alloy::{
primitives::{Address, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
rpc::types::{
EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace},
},
};
use alloy::primitives::{Address, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256};
use alloy::rpc::types::trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace};
use alloy::rpc::types::{EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest};
use anyhow::Result;
use futures::Stream;
@@ -77,10 +74,6 @@ pub trait EthereumNode {
+ '_,
>,
>;
/// Checks if the provided address is in the wallet. If it is, returns the address.
/// Otherwise, returns the default signer's address.
fn resolve_signer_or_default(&self, address: Address) -> Address;
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+1
View File
@@ -11,6 +11,7 @@ rust-version.workspace = true
[dependencies]
anyhow = { workspace = true }
alloy = { workspace = true }
async-stream = { workspace = true }
futures = { workspace = true }
tracing = { workspace = true }
tower = { workspace = true }
@@ -0,0 +1,193 @@
use alloy::{
genesis::{Genesis, GenesisAccount},
network::{Ethereum, NetworkWallet},
primitives::{Address, U256},
};
use anyhow::{Context, Result};
use serde_json::{Value as JsonValue, json};
use sp_core::crypto::Ss58Codec;
use sp_runtime::AccountId32;
use std::{fs::File, path::Path, process::Command};
pub fn export_and_patch_chainspec_json(
binary_path: &Path,
export_command: &str,
chain_arg: &str,
output_path: &Path,
genesis: &mut Genesis,
wallet: &impl NetworkWallet<Ethereum>,
initial_balance: u128,
) -> Result<()> {
// Note: we do not pipe the logs of this process to a separate file since this is just a
// once-off export of the default chain spec and not part of the long-running node process.
let output = Command::new(binary_path)
.arg(export_command)
.arg("--chain")
.arg(chain_arg)
.env_remove("RUST_LOG")
.output()
.context("Failed to export the chain-spec")?;
if !output.status.success() {
anyhow::bail!(
"Export chain-spec failed: {}",
String::from_utf8_lossy(&output.stderr)
);
}
let content =
String::from_utf8(output.stdout).context("Failed to decode chain-spec output as UTF-8")?;
let mut chainspec_json: JsonValue =
serde_json::from_str(&content).context("Failed to parse chain spec JSON")?;
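// The chain spec stores endowed accounts as `[ss58_address, balance]` pairs under
// `genesis.runtimeGenesis.patch.balances.balances`; the EVM genesis balances are merged
// into this list below.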
let existing_chainspec_balances =
chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"]
.as_array()
.cloned()
.unwrap_or_default();
let mut merged_balances: Vec<(String, u128)> = existing_chainspec_balances
.into_iter()
.filter_map(|val| {
if let Some(arr) = val.as_array() {
if arr.len() == 2 {
let account = arr[0].as_str()?.to_string();
let balance = arr[1].as_f64()? as u128;
return Some((account, balance));
}
}
None
})
.collect();
for signer_address in wallet.signer_addresses() {
genesis
.alloc
.entry(signer_address)
.or_insert(GenesisAccount::default().with_balance(U256::from(initial_balance)));
}
let mut eth_balances =
crate::node_implementations::common::chainspec::extract_balance_from_genesis_file(genesis)
.context("Failed to extract balances from EVM genesis JSON")?;
merged_balances.append(&mut eth_balances);
chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"] =
json!(merged_balances);
serde_json::to_writer_pretty(
File::create(output_path).context("Failed to create chainspec file")?,
&chainspec_json,
)
.context("Failed to write chainspec JSON")?;
Ok(())
}
pub fn extract_balance_from_genesis_file(genesis: &Genesis) -> anyhow::Result<Vec<(String, u128)>> {
genesis
.alloc
.iter()
.try_fold(Vec::new(), |mut vec, (address, acc)| {
let substrate_address = eth_to_polkadot_address(address);
let balance = acc.balance.try_into()?;
vec.push((substrate_address, balance));
Ok(vec)
})
}
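/// Maps a 20-byte Ethereum address to an SS58-encoded `AccountId32` by right-padding it with
/// `0xEE` bytes, matching the pallet-revive address-mapping convention.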
pub fn eth_to_polkadot_address(address: &Address) -> String {
let eth_bytes = address.0.0;
let mut padded = [0xEEu8; 32];
padded[..20].copy_from_slice(&eth_bytes);
let account_id = AccountId32::from(padded);
account_id.to_ss58check()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_genesis_alloc() {
// Create test genesis file
let genesis_json = r#"
{
"alloc": {
"0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1": { "balance": "1000000000000000000" },
"0x0000000000000000000000000000000000000000": { "balance": "0xDE0B6B3A7640000" },
"0xffffffffffffffffffffffffffffffffffffffff": { "balance": "123456789" }
}
}
"#;
let result =
extract_balance_from_genesis_file(&serde_json::from_str(genesis_json).unwrap())
.unwrap();
let result_map: std::collections::HashMap<_, _> = result.into_iter().collect();
assert_eq!(
result_map.get("5FLneRcWAfk3X3tg6PuGyLNGAquPAZez5gpqvyuf3yUK8VaV"),
Some(&1_000_000_000_000_000_000u128)
);
assert_eq!(
result_map.get("5C4hrfjw9DjXZTzV3MwzrrAr9P1MLDHajjSidz9bR544LEq1"),
Some(&1_000_000_000_000_000_000u128)
);
assert_eq!(
result_map.get("5HrN7fHLXWcFiXPwwtq2EkSGns9eMmoUQnbVKweNz3VVr6N4"),
Some(&123_456_789u128)
);
}
#[test]
fn test_eth_to_polkadot_address() {
let cases = vec![
(
"0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1",
"5FLneRcWAfk3X3tg6PuGyLNGAquPAZez5gpqvyuf3yUK8VaV",
),
(
"90F8bf6A479f320ead074411a4B0e7944Ea8c9C1",
"5FLneRcWAfk3X3tg6PuGyLNGAquPAZez5gpqvyuf3yUK8VaV",
),
(
"0x0000000000000000000000000000000000000000",
"5C4hrfjw9DjXZTzV3MwzrrAr9P1MLDHajjSidz9bR544LEq1",
),
(
"0xffffffffffffffffffffffffffffffffffffffff",
"5HrN7fHLXWcFiXPwwtq2EkSGns9eMmoUQnbVKweNz3VVr6N4",
),
];
for (eth_addr, expected_ss58) in cases {
let result = eth_to_polkadot_address(&eth_addr.parse().unwrap());
assert_eq!(
result, expected_ss58,
"Mismatch for Ethereum address {eth_addr}"
);
}
}
#[test]
fn print_eth_to_polkadot_mappings() {
let eth_addresses = vec![
"0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1",
"0xffffffffffffffffffffffffffffffffffffffff",
"90F8bf6A479f320ead074411a4B0e7944Ea8c9C1",
];
for eth_addr in eth_addresses {
let ss58 = eth_to_polkadot_address(&eth_addr.parse().unwrap());
println!("Ethereum: {eth_addr} -> Polkadot SS58: {ss58}");
}
}
}
@@ -0,0 +1,3 @@
pub(crate) mod chainspec;
pub(crate) mod process;
pub(crate) mod revive;
@@ -0,0 +1,60 @@
use anyhow::Result;
use std::{
process::{Command, Stdio},
{path::Path, time::Duration},
};
use crate::helpers::{Process, ProcessReadinessWaitBehavior};
const PROXY_LOG_ENV: &str = "info,eth-rpc=debug";
pub fn spawn_eth_rpc_process(
logs_directory: &Path,
eth_proxy_binary: &Path,
node_rpc_url: &str,
eth_rpc_port: u16,
extra_args: &[&str],
ready_marker: &str,
) -> anyhow::Result<Process> {
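// The proxy is considered ready once `ready_marker` appears on its stderr; the wait is
// bounded to 30 seconds below.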
let ready_marker = ready_marker.to_owned();
Process::new(
"proxy",
logs_directory,
eth_proxy_binary,
|command, stdout_file, stderr_file| {
command
.arg("--node-rpc-url")
.arg(node_rpc_url)
.arg("--rpc-cors")
.arg("all")
.arg("--rpc-max-connections")
.arg(u32::MAX.to_string())
.arg("--rpc-port")
.arg(eth_rpc_port.to_string())
.env("RUST_LOG", PROXY_LOG_ENV);
for arg in extra_args {
command.arg(arg);
}
command.stdout(stdout_file).stderr(stderr_file);
},
ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
max_wait_duration: Duration::from_secs(30),
check_function: Box::new(move |_, stderr_line| match stderr_line {
Some(line) => Ok(line.contains(&ready_marker)),
None => Ok(false),
}),
},
)
}
pub fn command_version(command_binary: &Path) -> Result<String> {
let output = Command::new(command_binary)
.arg("--version")
.stdin(Stdio::null())
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn()?
.wait_with_output()?
.stdout;
Ok(String::from_utf8_lossy(&output).trim().to_string())
}
@@ -0,0 +1,434 @@
use alloy::{
consensus::{BlockHeader, TxEnvelope},
network::{
Ethereum, Network, TransactionBuilder, TransactionBuilderError, UnbuiltTransactionError,
},
primitives::{Address, B64, B256, BlockNumber, Bloom, Bytes, U256},
rpc::types::eth::{Block, Header, Transaction},
};
use serde::{Deserialize, Serialize};
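/// A `Network` implementation mirroring `Ethereum` in every associated type except the header:
/// [`ReviveHeader`] carries a `u128` gas limit so blocks whose gas limit exceeds `u64::MAX`
/// can still be deserialized.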
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct ReviveNetwork;
impl Network for ReviveNetwork {
type TxType = <Ethereum as Network>::TxType;
type TxEnvelope = <Ethereum as Network>::TxEnvelope;
type UnsignedTx = <Ethereum as Network>::UnsignedTx;
type ReceiptEnvelope = <Ethereum as Network>::ReceiptEnvelope;
type Header = ReviveHeader;
type TransactionRequest = <Ethereum as Network>::TransactionRequest;
type TransactionResponse = <Ethereum as Network>::TransactionResponse;
type ReceiptResponse = <Ethereum as Network>::ReceiptResponse;
type HeaderResponse = Header<ReviveHeader>;
type BlockResponse = Block<Transaction<TxEnvelope>, Header<ReviveHeader>>;
}
impl TransactionBuilder<ReviveNetwork> for <Ethereum as Network>::TransactionRequest {
fn chain_id(&self) -> Option<alloy::primitives::ChainId> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::chain_id(self)
}
fn set_chain_id(&mut self, chain_id: alloy::primitives::ChainId) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_chain_id(
self, chain_id,
)
}
fn nonce(&self) -> Option<u64> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::nonce(self)
}
fn set_nonce(&mut self, nonce: u64) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_nonce(
self, nonce,
)
}
fn take_nonce(&mut self) -> Option<u64> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::take_nonce(
self,
)
}
fn input(&self) -> Option<&alloy::primitives::Bytes> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::input(self)
}
fn set_input<T: Into<alloy::primitives::Bytes>>(&mut self, input: T) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_input(
self, input,
)
}
fn from(&self) -> Option<Address> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::from(self)
}
fn set_from(&mut self, from: Address) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_from(
self, from,
)
}
fn kind(&self) -> Option<alloy::primitives::TxKind> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::kind(self)
}
fn clear_kind(&mut self) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::clear_kind(
self,
)
}
fn set_kind(&mut self, kind: alloy::primitives::TxKind) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_kind(
self, kind,
)
}
fn value(&self) -> Option<alloy::primitives::U256> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::value(self)
}
fn set_value(&mut self, value: alloy::primitives::U256) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_value(
self, value,
)
}
fn gas_price(&self) -> Option<u128> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::gas_price(self)
}
fn set_gas_price(&mut self, gas_price: u128) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_gas_price(
self, gas_price,
)
}
fn max_fee_per_gas(&self) -> Option<u128> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::max_fee_per_gas(
self,
)
}
fn set_max_fee_per_gas(&mut self, max_fee_per_gas: u128) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_max_fee_per_gas(
self, max_fee_per_gas
)
}
fn max_priority_fee_per_gas(&self) -> Option<u128> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::max_priority_fee_per_gas(
self,
)
}
fn set_max_priority_fee_per_gas(&mut self, max_priority_fee_per_gas: u128) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_max_priority_fee_per_gas(
self, max_priority_fee_per_gas
)
}
fn gas_limit(&self) -> Option<u64> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::gas_limit(self)
}
fn set_gas_limit(&mut self, gas_limit: u64) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_gas_limit(
self, gas_limit,
)
}
fn access_list(&self) -> Option<&alloy::rpc::types::AccessList> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::access_list(
self,
)
}
fn set_access_list(&mut self, access_list: alloy::rpc::types::AccessList) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_access_list(
self,
access_list,
)
}
fn complete_type(
&self,
ty: <ReviveNetwork as Network>::TxType,
) -> Result<(), Vec<&'static str>> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::complete_type(
self, ty,
)
}
fn can_submit(&self) -> bool {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::can_submit(
self,
)
}
fn can_build(&self) -> bool {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::can_build(self)
}
fn output_tx_type(&self) -> <ReviveNetwork as Network>::TxType {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::output_tx_type(
self,
)
}
fn output_tx_type_checked(&self) -> Option<<ReviveNetwork as Network>::TxType> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::output_tx_type_checked(
self,
)
}
fn prep_for_submission(&mut self) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::prep_for_submission(
self,
)
}
fn build_unsigned(
self,
) -> alloy::network::BuildResult<<ReviveNetwork as Network>::UnsignedTx, ReviveNetwork> {
let result = <<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::build_unsigned(
self,
);
match result {
Ok(unsigned_tx) => Ok(unsigned_tx),
Err(UnbuiltTransactionError { request, error }) => {
Err(UnbuiltTransactionError::<ReviveNetwork> {
request,
error: match error {
TransactionBuilderError::InvalidTransactionRequest(tx_type, items) => {
TransactionBuilderError::InvalidTransactionRequest(tx_type, items)
}
TransactionBuilderError::UnsupportedSignatureType => {
TransactionBuilderError::UnsupportedSignatureType
}
TransactionBuilderError::Signer(error) => {
TransactionBuilderError::Signer(error)
}
TransactionBuilderError::Custom(error) => {
TransactionBuilderError::Custom(error)
}
},
})
}
}
}
async fn build<W: alloy::network::NetworkWallet<ReviveNetwork>>(
self,
wallet: &W,
) -> Result<<ReviveNetwork as Network>::TxEnvelope, TransactionBuilderError<ReviveNetwork>>
{
Ok(wallet.sign_request(self).await?)
}
}
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ReviveHeader {
/// The Keccak 256-bit hash of the parent
/// block's header, in its entirety; formally Hp.
pub parent_hash: B256,
/// The Keccak 256-bit hash of the ommers list portion of this block; formally Ho.
#[serde(rename = "sha3Uncles", alias = "ommersHash")]
pub ommers_hash: B256,
/// The 160-bit address to which all fees collected from the successful mining of this block
/// be transferred; formally Hc.
#[serde(rename = "miner", alias = "beneficiary")]
pub beneficiary: Address,
/// The Keccak 256-bit hash of the root node of the state trie, after all transactions are
/// executed and finalisations applied; formally Hr.
pub state_root: B256,
/// The Keccak 256-bit hash of the root node of the trie structure populated with each
/// transaction in the transactions list portion of the block; formally Ht.
pub transactions_root: B256,
/// The Keccak 256-bit hash of the root node of the trie structure populated with the receipts
/// of each transaction in the transactions list portion of the block; formally He.
pub receipts_root: B256,
/// The Bloom filter composed from indexable information (logger address and log topics)
/// contained in each log entry from the receipt of each transaction in the transactions list;
/// formally Hb.
pub logs_bloom: Bloom,
/// A scalar value corresponding to the difficulty level of this block. This can be calculated
/// from the previous block's difficulty level and the timestamp; formally Hd.
pub difficulty: U256,
/// A scalar value equal to the number of ancestor blocks. The genesis block has a number of
/// zero; formally Hi.
#[serde(with = "alloy::serde::quantity")]
pub number: BlockNumber,
/// A scalar value equal to the current limit of gas expenditure per block; formally Hl.
// This is the main difference over the Ethereum network implementation. We use u128 here and
// not u64.
#[serde(with = "alloy::serde::quantity")]
pub gas_limit: u128,
/// A scalar value equal to the total gas used in transactions in this block; formally Hg.
#[serde(with = "alloy::serde::quantity")]
pub gas_used: u64,
/// A scalar value equal to the reasonable output of Unix's time() at this block's inception;
/// formally Hs.
#[serde(with = "alloy::serde::quantity")]
pub timestamp: u64,
/// An arbitrary byte array containing data relevant to this block. This must be 32 bytes or
/// fewer; formally Hx.
pub extra_data: Bytes,
/// A 256-bit hash which, combined with the
/// nonce, proves that a sufficient amount of computation has been carried out on this block;
/// formally Hm.
pub mix_hash: B256,
/// A 64-bit value which, combined with the mixhash, proves that a sufficient amount of
/// computation has been carried out on this block; formally Hn.
pub nonce: B64,
/// A scalar representing EIP1559 base fee which can move up or down each block according
/// to a formula which is a function of gas used in parent block and gas target
/// (block gas limit divided by elasticity multiplier) of parent block.
/// The algorithm results in the base fee per gas increasing when blocks are
/// above the gas target, and decreasing when blocks are below the gas target. The base fee per
/// gas is burned.
#[serde(
default,
with = "alloy::serde::quantity::opt",
skip_serializing_if = "Option::is_none"
)]
pub base_fee_per_gas: Option<u64>,
/// The Keccak 256-bit hash of the withdrawals list portion of this block.
/// <https://eips.ethereum.org/EIPS/eip-4895>
#[serde(default, skip_serializing_if = "Option::is_none")]
pub withdrawals_root: Option<B256>,
/// The total amount of blob gas consumed by the transactions within the block, added in
/// EIP-4844.
#[serde(
default,
with = "alloy::serde::quantity::opt",
skip_serializing_if = "Option::is_none"
)]
pub blob_gas_used: Option<u64>,
/// A running total of blob gas consumed in excess of the target, prior to the block. Blocks
/// with above-target blob gas consumption increase this value, blocks with below-target blob
/// gas consumption decrease it (bounded at 0). This was added in EIP-4844.
#[serde(
default,
with = "alloy::serde::quantity::opt",
skip_serializing_if = "Option::is_none"
)]
pub excess_blob_gas: Option<u64>,
/// The hash of the parent beacon block's root is included in execution blocks, as proposed by
/// EIP-4788.
///
/// This enables trust-minimized access to consensus state, supporting staking pools, bridges,
/// and more.
///
/// The beacon roots contract handles root storage, enhancing Ethereum's functionalities.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub parent_beacon_block_root: Option<B256>,
/// The Keccak 256-bit hash of an RLP-encoded list with each
/// [EIP-7685] request in the block body.
///
/// [EIP-7685]: https://eips.ethereum.org/EIPS/eip-7685
#[serde(default, skip_serializing_if = "Option::is_none")]
pub requests_hash: Option<B256>,
}
impl BlockHeader for ReviveHeader {
fn parent_hash(&self) -> B256 {
self.parent_hash
}
fn ommers_hash(&self) -> B256 {
self.ommers_hash
}
fn beneficiary(&self) -> Address {
self.beneficiary
}
fn state_root(&self) -> B256 {
self.state_root
}
fn transactions_root(&self) -> B256 {
self.transactions_root
}
fn receipts_root(&self) -> B256 {
self.receipts_root
}
fn withdrawals_root(&self) -> Option<B256> {
self.withdrawals_root
}
fn logs_bloom(&self) -> Bloom {
self.logs_bloom
}
fn difficulty(&self) -> U256 {
self.difficulty
}
fn number(&self) -> BlockNumber {
self.number
}
// There's sadly nothing that we can do about this. We're required to implement this trait on
// any type that represents a header and the gas limit type used here is a u64.
fn gas_limit(&self) -> u64 {
self.gas_limit.try_into().unwrap_or(u64::MAX)
}
fn gas_used(&self) -> u64 {
self.gas_used
}
fn timestamp(&self) -> u64 {
self.timestamp
}
fn mix_hash(&self) -> Option<B256> {
Some(self.mix_hash)
}
fn nonce(&self) -> Option<B64> {
Some(self.nonce)
}
fn base_fee_per_gas(&self) -> Option<u64> {
self.base_fee_per_gas
}
fn blob_gas_used(&self) -> Option<u64> {
self.blob_gas_used
}
fn excess_blob_gas(&self) -> Option<u64> {
self.excess_blob_gas
}
fn parent_beacon_block_root(&self) -> Option<B256> {
self.parent_beacon_block_root
}
fn requests_hash(&self) -> Option<B256> {
self.requests_hash
}
fn extra_data(&self) -> &Bytes {
&self.extra_data
}
}
+17 -143
View File
@@ -18,9 +18,7 @@ use alloy::{
eips::BlockNumberOrTag,
genesis::{Genesis, GenesisAccount},
network::{Ethereum, EthereumWallet, NetworkWallet},
primitives::{
Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, StorageKey, TxHash, U256,
},
primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
providers::{
Provider,
ext::DebugApi,
@@ -77,7 +75,6 @@ pub struct GethNode {
wallet: Arc<EthereumWallet>,
nonce_manager: CachedNonceManager,
provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
chain_id: ChainId,
}
impl GethNode {
@@ -128,120 +125,9 @@ impl GethNode {
wallet: wallet.clone(),
nonce_manager: Default::default(),
provider: Default::default(),
chain_id: CHAIN_ID,
}
}
pub async fn new_existing(private_key: &str, rpc_port: u16) -> anyhow::Result<Self> {
use alloy::{
primitives::FixedBytes,
providers::{Provider, ProviderBuilder},
signers::local::PrivateKeySigner,
};
let key_str = private_key
.trim()
.strip_prefix("0x")
.unwrap_or(private_key.trim());
let key_bytes = alloy::hex::decode(key_str)
.map_err(|e| anyhow::anyhow!("Failed to decode private key hex: {}", e))?;
if key_bytes.len() != 32 {
anyhow::bail!(
"Private key must be 32 bytes (64 hex characters), got {}",
key_bytes.len()
);
}
let mut bytes = [0u8; 32];
bytes.copy_from_slice(&key_bytes);
let signer = PrivateKeySigner::from_bytes(&FixedBytes(bytes))
.map_err(|e| anyhow::anyhow!("Failed to create signer from private key: {}", e))?;
let address = signer.address();
let wallet = Arc::new(EthereumWallet::new(signer));
let connection_string = format!("http://localhost:{}", rpc_port);
let chain_id = ProviderBuilder::new()
.connect_http(connection_string.parse()?)
.get_chain_id()
.await
.context("Failed to query chain ID from RPC")?;
let node = Self {
connection_string: format!("http://localhost:{}", rpc_port),
base_directory: PathBuf::new(),
data_directory: PathBuf::new(),
logs_directory: PathBuf::new(),
geth: PathBuf::new(),
id: 0,
chain_id,
handle: None,
start_timeout: Duration::from_secs(0),
wallet,
nonce_manager: Default::default(),
provider: Default::default(),
};
// Check balance and fund if needed
node.ensure_funded(address).await?;
Ok(node)
}
/// Ensure that the given address has at least 1000 ETH, funding it from the node's managed
/// account if necessary.
async fn ensure_funded(&self, address: Address) -> anyhow::Result<()> {
use alloy::{
primitives::utils::{format_ether, parse_ether},
providers::{Provider, ProviderBuilder},
};
let provider = ProviderBuilder::new().connect_http(self.connection_string.parse()?);
let balance = provider.get_balance(address).await?;
let min_balance = parse_ether("1000")?;
if balance >= min_balance {
tracing::info!(
"Wallet {} already has sufficient balance: {} ETH",
address,
format_ether(balance)
);
return Ok(());
}
tracing::info!(
"Funding wallet {} (current: {} ETH, target: 1000 ETH)",
address,
format_ether(balance)
);
// Get the node's managed account
let accounts = provider.get_accounts().await?;
if accounts.is_empty() {
anyhow::bail!("No managed accounts available on the node to fund wallet");
}
let from_account = accounts[0];
let funding_amount = min_balance - balance;
let tx = TransactionRequest::default()
.from(from_account)
.to(address)
.value(funding_amount);
provider
.send_transaction(tx)
.await?
.get_receipt()
.await
.context("Failed to get receipt for funding transaction")?;
tracing::info!("Successfully funded wallet {}", address);
Ok(())
}
/// Create the node directory and call `geth init` to configure the genesis.
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
@@ -369,7 +255,7 @@ impl GethNode {
construct_concurrency_limited_provider::<Ethereum, _>(
self.connection_string.as_str(),
FallbackGasFiller::default(),
ChainIdFiller::new(Some(self.chain_id)),
ChainIdFiller::new(Some(CHAIN_ID)),
NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(),
)
@@ -463,25 +349,23 @@ impl EthereumNode for GethNode {
.context("Failed to submit transaction to geth node")?;
let transaction_hash = *pending_transaction.tx_hash();
// The following is a fix for the "transaction indexing is in progress" error that we
// used to get. You can find more information on this in the following GH issue in
// geth https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on,
// The following is a fix for the "transaction indexing is in progress" error that we used
// to get. You can find more information on this in the following GH issue in geth
// https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on,
// before we can get the receipt of the transaction it needs to have been indexed by the
// node's indexer. Just because the transaction has been confirmed it doesn't mean that
// it has been indexed. When we call alloy's `get_receipt` it checks if the
// transaction was confirmed. If it has been, then it will call
// `eth_getTransactionReceipt` method which _might_ return the above error if the tx
// has not yet been indexed yet. So, we need to implement a retry mechanism for the
// receipt to keep retrying to get it until it eventually works, but we only do that
// if the error we get back is the "transaction indexing is in progress" error or if
// the receipt is None.
// node's indexer. Just because the transaction has been confirmed it doesn't mean that it
// has been indexed. When we call alloy's `get_receipt` it checks if the transaction was
// confirmed. If it has been, then it will call `eth_getTransactionReceipt` method which
// _might_ return the above error if the tx has not been indexed yet. So, we need to
// implement a retry mechanism for the receipt to keep retrying to get it until it
// eventually works, but we only do that if the error we get back is the "transaction
// indexing is in progress" error or if the receipt is None.
//
// Getting the transaction indexed and taking a receipt can take a long time especially
// when a lot of transactions are being submitted to the node. Thus, while initially
// we only allowed for 60 seconds of waiting with a 1 second delay in polling, we
// need to allow for a larger wait time. Therefore, in here we allow for 5 minutes of
// waiting with exponential backoff each time we attempt to get the receipt and find
// that it's not available.
// Getting the transaction indexed and taking a receipt can take a long time especially when
// a lot of transactions are being submitted to the node. Thus, while initially we only
// allowed for 60 seconds of waiting with a 1 second delay in polling, we need to allow for
// a larger wait time. Therefore, in here we allow for 5 minutes of waiting with exponential
// backoff each time we attempt to get the receipt and find that it's not available.
poll(
Self::RECEIPT_POLLING_DURATION,
PollingWaitBehavior::Constant(Duration::from_millis(200)),
@@ -658,16 +542,6 @@ impl EthereumNode for GethNode {
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
})
}
fn resolve_signer_or_default(&self, address: Address) -> Address {
let signer_addresses: Vec<_> =
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet).collect();
if signer_addresses.contains(&address) {
address
} else {
self.wallet.default_signer().address()
}
}
}
pub struct GethNodeResolver {
@@ -761,16 +761,6 @@ impl EthereumNode for LighthouseGethNode {
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
})
}
fn resolve_signer_or_default(&self, address: Address) -> Address {
let signer_addresses: Vec<_> =
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet).collect();
if signer_addresses.contains(&address) {
address
} else {
self.wallet.default_signer().address()
}
}
}
pub struct LighthouseGethNodeResolver<F: TxFiller<Ethereum>, P: Provider<Ethereum>> {
@@ -1141,6 +1131,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
// Arrange
let (context, node) = new_node();
@@ -1,3 +1,5 @@
mod common;
pub mod geth;
pub mod lighthouse_geth;
pub mod substrate;
File diff suppressed because it is too large
+137 -326
View File
@@ -3,17 +3,25 @@
//! ## Required Binaries
//! This module requires the following binaries to be compiled and available in your PATH:
//!
//! 1. **polkadot-parachain**: ```bash git clone https://github.com/paritytech/polkadot-sdk.git cd
//! polkadot-sdk cargo build --release --locked -p polkadot-parachain-bin --bin
//! polkadot-parachain ```
//! 1. **polkadot-parachain**:
//! ```bash
//! git clone https://github.com/paritytech/polkadot-sdk.git
//! cd polkadot-sdk
//! cargo build --release --locked -p polkadot-parachain-bin --bin polkadot-parachain
//! ```
//!
//! 2. **eth-rpc** (Revive EVM RPC server): ```bash git clone https://github.com/paritytech/polkadot-sdk.git
//! cd polkadot-sdk cargo build --locked --profile production -p pallet-revive-eth-rpc --bin
//! eth-rpc ```
//! 2. **eth-rpc** (Revive EVM RPC server):
//! ```bash
//! git clone https://github.com/paritytech/polkadot-sdk.git
//! cd polkadot-sdk
//! cargo build --locked --profile production -p pallet-revive-eth-rpc --bin eth-rpc
//! ```
//!
//! 3. **polkadot** (for the relay chain): ```bash # In polkadot-sdk directory cargo build --locked
//! --profile testnet --features fast-runtime --bin polkadot --bin polkadot-prepare-worker --bin
//! polkadot-execute-worker ```
//! 3. **polkadot** (for the relay chain):
//! ```bash
//! # In polkadot-sdk directory
//! cargo build --locked --profile testnet --features fast-runtime --bin polkadot --bin polkadot-prepare-worker --bin polkadot-execute-worker
//! ```
//!
//! Make sure to add the build output directories to your PATH or provide
//! the full paths in your configuration.
@@ -22,7 +30,6 @@ use std::{
fs::{create_dir_all, remove_dir_all},
path::PathBuf,
pin::Pin,
process::{Command, Stdio},
sync::{
Arc,
atomic::{AtomicU32, Ordering},
@@ -32,8 +39,8 @@ use std::{
use alloy::{
eips::BlockNumberOrTag,
genesis::{Genesis, GenesisAccount},
network::{Ethereum, EthereumWallet, NetworkWallet},
genesis::Genesis,
network::EthereumWallet,
primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
providers::{
Provider,
@@ -47,15 +54,13 @@ use alloy::{
};
use anyhow::Context as _;
use futures::{Stream, StreamExt};
use async_stream::stream;
use futures::Stream;
use revive_common::EVMVersion;
use revive_dt_common::fs::clear_directory;
use revive_dt_config::*;
use revive_dt_format::traits::ResolverApi;
use revive_dt_node_interaction::{EthereumNode, MinedBlockInformation};
use serde_json::{Value as JsonValue, json};
use sp_core::crypto::Ss58Codec;
use sp_runtime::AccountId32;
use tokio::sync::OnceCell;
use tracing::instrument;
use zombienet_sdk::{LocalFileSystem, NetworkConfigBuilder, NetworkConfigExt};
@@ -63,18 +68,25 @@ use zombienet_sdk::{LocalFileSystem, NetworkConfigBuilder, NetworkConfigExt};
use crate::{
Node,
constants::INITIAL_BALANCE,
helpers::{Process, ProcessReadinessWaitBehavior},
node_implementations::substrate::ReviveNetwork,
provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider},
helpers::Process,
node_implementations::common::{
chainspec::export_and_patch_chainspec_json,
process::{command_version, spawn_eth_rpc_process},
revive::ReviveNetwork,
},
provider_utils::{
ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider,
execute_transaction,
},
};
static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
/// A Zombienet network where collator is `polkadot-parachain` node with `eth-rpc`
/// [`ZombieNode`] abstracts away the details of managing the zombienet network and provides
/// an interface to interact with the parachain's Ethereum RPC.
/// A Zombienet network where the collator is a `polkadot-parachain` node with `eth-rpc`.
/// [`ZombienetNode`] abstracts away the details of managing the zombienet network and provides
/// an interface to interact with the parachain's Ethereum RPC.
#[derive(Debug, Default)]
pub struct ZombieNode {
pub struct ZombienetNode {
/* Node Identifier */
id: u32,
connection_string: String,
@@ -102,7 +114,7 @@ pub struct ZombieNode {
provider: OnceCell<ConcreteProvider<ReviveNetwork, Arc<EthereumWallet>>>,
}
impl ZombieNode {
impl ZombienetNode {
const BASE_DIRECTORY: &str = "zombienet";
const DATA_DIRECTORY: &str = "data";
const LOGS_DIRECTORY: &str = "logs";
@@ -151,7 +163,7 @@ impl ZombieNode {
}
}
fn init(&mut self, genesis: Genesis) -> anyhow::Result<&mut Self> {
fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
let _ = clear_directory(&self.base_directory);
let _ = clear_directory(&self.logs_directory);
@@ -161,7 +173,16 @@ impl ZombieNode {
.context("Failed to create logs directory for zombie node")?;
let template_chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE);
self.prepare_chainspec(template_chainspec_path.clone(), genesis)?;
export_and_patch_chainspec_json(
&self.polkadot_parachain_path,
Self::EXPORT_CHAINSPEC_COMMAND,
"asset-hub-westend-local",
&template_chainspec_path,
&mut genesis,
&self.wallet,
INITIAL_BALANCE,
)?;
let polkadot_parachain_path = self
.polkadot_parachain_path
.to_str()
@@ -170,25 +191,35 @@ impl ZombieNode {
let node_rpc_port = Self::NODE_BASE_RPC_PORT + self.id as u16;
let network_config = NetworkConfigBuilder::new()
.with_relaychain(|r| {
r.with_chain("westend-local")
.with_relaychain(|relay_chain| {
relay_chain
.with_chain("westend-local")
.with_default_command("polkadot")
.with_node(|node| node.with_name("alice"))
.with_node(|node| node.with_name("bob"))
})
.with_global_settings(|g| g.with_base_dir(&self.base_directory))
.with_parachain(|p| {
p.with_id(Self::PARACHAIN_ID)
.with_chain_spec_path(template_chainspec_path.to_str().unwrap())
.with_global_settings(|global_settings| {
// global_settings.with_base_dir(&self.base_directory)
global_settings
})
.with_parachain(|parachain| {
parachain
.with_id(Self::PARACHAIN_ID)
.with_chain_spec_path(template_chainspec_path.to_path_buf())
.with_chain("asset-hub-westend-local")
.with_collator(|n| {
n.with_name("Collator")
.with_collator(|node_config| {
node_config
.with_name("Collator")
.with_command(polkadot_parachain_path)
.with_rpc_port(node_rpc_port)
.with_args(vec![
("--pool-limit", u32::MAX.to_string().as_str()).into(),
("--pool-kbytes", u32::MAX.to_string().as_str()).into(),
])
})
})
.build()
.map_err(|e| anyhow::anyhow!("Failed to build zombienet network config: {e:?}"))?;
.map_err(|err| anyhow::anyhow!("Failed to build zombienet network config: {err:?}"))?;
self.node_rpc_port = Some(node_rpc_port);
self.network_config = Some(network_config);
@@ -202,6 +233,9 @@ impl ZombieNode {
.clone()
.context("Node not initialized, call init() first")?;
// TODO: Look into the possibility of removing this in the future, perhaps by reintroducing
// the blocking runtime abstraction and making it available to the entire program so that we
// don't need to be spawning multiple different runtimes.
let rt = tokio::runtime::Runtime::new().unwrap();
let network = rt.block_on(async {
network_config
@@ -215,30 +249,13 @@ impl ZombieNode {
let node_url = format!("ws://localhost:{}", self.node_rpc_port.unwrap());
let eth_rpc_port = Self::ETH_RPC_BASE_PORT + self.id as u16;
let eth_rpc_process = Process::new(
"proxy",
let eth_rpc_process = spawn_eth_rpc_process(
self.logs_directory.as_path(),
self.eth_proxy_binary.as_path(),
|command, stdout_file, stderr_file| {
command
.arg("--node-rpc-url")
.arg(node_url)
.arg("--rpc-cors")
.arg("all")
.arg("--rpc-max-connections")
.arg(u32::MAX.to_string())
.arg("--rpc-port")
.arg(eth_rpc_port.to_string())
.stdout(stdout_file)
.stderr(stderr_file);
},
ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
max_wait_duration: Duration::from_secs(30),
check_function: Box::new(|_, stderr_line| match stderr_line {
Some(line) => Ok(line.contains(Self::ETH_RPC_READY_MARKER)),
None => Ok(false),
}),
},
&node_url,
eth_rpc_port,
&[], // extra args
Self::ETH_RPC_READY_MARKER,
);
match eth_rpc_process {
@@ -259,115 +276,8 @@ impl ZombieNode {
Ok(())
}
fn prepare_chainspec(
&mut self,
template_chainspec_path: PathBuf,
mut genesis: Genesis,
) -> anyhow::Result<()> {
let mut cmd: Command = std::process::Command::new(&self.polkadot_parachain_path);
cmd.arg(Self::EXPORT_CHAINSPEC_COMMAND)
.arg("--chain")
.arg("asset-hub-westend-local");
let output = cmd.output().context("Failed to export the chain-spec")?;
if !output.status.success() {
anyhow::bail!(
"Build chain-spec failed: {}",
String::from_utf8_lossy(&output.stderr)
);
}
let content = String::from_utf8(output.stdout)
.context("Failed to decode collators chain-spec output as UTF-8")?;
let mut chainspec_json: JsonValue =
serde_json::from_str(&content).context("Failed to parse collators chain spec JSON")?;
let existing_chainspec_balances =
chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"]
.as_array()
.cloned()
.unwrap_or_default();
let mut merged_balances: Vec<(String, u128)> = existing_chainspec_balances
.into_iter()
.filter_map(|val| {
if let Some(arr) = val.as_array() {
if arr.len() == 2 {
let account = arr[0].as_str()?.to_string();
let balance = arr[1].as_f64()? as u128;
return Some((account, balance));
}
}
None
})
.collect();
let mut eth_balances = {
for signer_address in
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
{
// Note, the use of the entry API here means that we only modify the entries for any
// account that is not in the `alloc` field of the genesis state.
genesis
.alloc
.entry(signer_address)
.or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
}
self.extract_balance_from_genesis_file(&genesis)
.context("Failed to extract balances from EVM genesis JSON")?
};
merged_balances.append(&mut eth_balances);
chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"] =
json!(merged_balances);
let writer = std::fs::File::create(&template_chainspec_path)
.context("Failed to create template chainspec file")?;
serde_json::to_writer_pretty(writer, &chainspec_json)
.context("Failed to write template chainspec JSON")?;
Ok(())
}
fn extract_balance_from_genesis_file(
&self,
genesis: &Genesis,
) -> anyhow::Result<Vec<(String, u128)>> {
genesis
.alloc
.iter()
.try_fold(Vec::new(), |mut vec, (address, acc)| {
let polkadot_address = Self::eth_to_polkadot_address(address);
let balance = acc.balance.try_into()?;
vec.push((polkadot_address, balance));
Ok(vec)
})
}
fn eth_to_polkadot_address(address: &Address) -> String {
let eth_bytes = address.0.0;
let mut padded = [0xEEu8; 32];
padded[..20].copy_from_slice(&eth_bytes);
let account_id = AccountId32::from(padded);
account_id.to_ss58check()
}
pub fn eth_rpc_version(&self) -> anyhow::Result<String> {
let output = Command::new(&self.eth_proxy_binary)
.arg("--version")
.stdin(Stdio::null())
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn()?
.wait_with_output()?
.stdout;
Ok(String::from_utf8_lossy(&output).trim().to_string())
command_version(&self.eth_proxy_binary)
}
async fn provider(
@@ -377,7 +287,7 @@ impl ZombieNode {
.get_or_try_init(|| async move {
construct_concurrency_limited_provider::<ReviveNetwork, _>(
self.connection_string.as_str(),
FallbackGasFiller::new(250_000_000, 5_000_000_000, 1_000_000_000),
FallbackGasFiller::new(u64::MAX, 5_000_000_000, 1_000_000_000),
ChainIdFiller::default(), // TODO: use CHAIN_ID constant
NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(),
@@ -390,7 +300,7 @@ impl ZombieNode {
}
}
impl EthereumNode for ZombieNode {
impl EthereumNode for ZombienetNode {
fn pre_transactions(&mut self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + '_>> {
Box::pin(async move { Ok(()) })
}
@@ -440,17 +350,11 @@ impl EthereumNode for ZombieNode {
transaction: alloy::rpc::types::TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
Box::pin(async move {
let receipt = self
let provider = self
.provider()
.await
.context("Failed to create provider for transaction submission")?
.send_transaction(transaction)
.await
.context("Failed to submit transaction to proxy")?
.get_receipt()
.await
.context("Failed to fetch transaction receipt from proxy")?;
Ok(receipt)
.context("Failed to create the provider")?;
execute_transaction(provider, transaction).await
})
}
@@ -544,49 +448,48 @@ impl EthereumNode for ZombieNode {
+ '_,
>,
> {
fn create_stream(
provider: ConcreteProvider<ReviveNetwork, Arc<EthereumWallet>>,
) -> impl Stream<Item = MinedBlockInformation> {
stream! {
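// Poll once per second for the next block by number; if it is not available yet,
// simply retry the same height on the next tick.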
let mut block_number = provider.get_block_number().await.expect("Failed to get the block number");
loop {
tokio::time::sleep(Duration::from_secs(1)).await;
let Ok(Some(block)) = provider.get_block_by_number(BlockNumberOrTag::Number(block_number)).await
else {
continue;
};
block_number += 1;
yield MinedBlockInformation {
block_number: block.number(),
block_timestamp: block.header.timestamp,
mined_gas: block.header.gas_used as _,
block_gas_limit: block.header.gas_limit,
transaction_hashes: block
.transactions
.into_hashes()
.as_hashes()
.expect("Must be hashes")
.to_vec(),
};
};
}
}
Box::pin(async move {
let provider = self
.provider()
.await
.context("Failed to create the provider for block subscription")?;
let mut block_subscription = provider
.watch_full_blocks()
.await
.context("Failed to create the blocks stream")?;
block_subscription.set_channel_size(0xFFFF);
block_subscription.set_poll_interval(Duration::from_secs(1));
let block_stream = block_subscription.into_stream();
.context("Failed to create the provider for a block subscription")?;
let mined_block_information_stream = block_stream.filter_map(|block| async {
let block = block.ok()?;
Some(MinedBlockInformation {
block_number: block.number(),
block_timestamp: block.header.timestamp,
mined_gas: block.header.gas_used as _,
block_gas_limit: block.header.gas_limit,
transaction_hashes: block
.transactions
.into_hashes()
.as_hashes()
.expect("Must be hashes")
.to_vec(),
})
});
let stream = Box::pin(create_stream(provider))
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>;
Ok(Box::pin(mined_block_information_stream)
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
Ok(stream)
})
}
fn resolve_signer_or_default(&self, address: Address) -> Address {
let signer_addresses: Vec<_> =
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet).collect();
if signer_addresses.contains(&address) {
address
} else {
self.wallet.default_signer().address()
}
}
}
pub struct ZombieNodeResolver<F: TxFiller<ReviveNetwork>, P: Provider<ReviveNetwork>> {
@@ -719,7 +622,7 @@ impl<F: TxFiller<ReviveNetwork>, P: Provider<ReviveNetwork>> ResolverApi
}
}
impl Node for ZombieNode {
impl Node for ZombienetNode {
fn shutdown(&mut self) -> anyhow::Result<()> {
// Kill the eth_rpc process
drop(self.eth_rpc_process.take());
@@ -750,21 +653,11 @@ impl Node for ZombieNode {
}
fn version(&self) -> anyhow::Result<String> {
let output = Command::new(&self.polkadot_parachain_path)
.arg("--version")
.stdin(Stdio::null())
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn()
.context("Failed execute --version")?
.wait_with_output()
.context("Failed to wait --version")?
.stdout;
Ok(String::from_utf8_lossy(&output).into())
command_version(&self.polkadot_parachain_path)
}
}
impl Drop for ZombieNode {
impl Drop for ZombienetNode {
fn drop(&mut self) {
let _ = self.shutdown();
}
@@ -774,7 +667,9 @@ impl Drop for ZombieNode {
mod tests {
use alloy::rpc::types::TransactionRequest;
use crate::node_implementations::zombienet::tests::utils::shared_node;
use crate::node_implementations::{
common::chainspec::eth_to_polkadot_address, zombienet::tests::utils::shared_node,
};
use super::*;
@@ -788,9 +683,9 @@ mod tests {
TestExecutionContext::default()
}
pub async fn new_node() -> (TestExecutionContext, ZombieNode) {
pub async fn new_node() -> (TestExecutionContext, ZombienetNode) {
let context = test_config();
let mut node = ZombieNode::new(
let mut node = ZombienetNode::new(
context.polkadot_parachain_configuration.path.clone(),
&context,
);
@@ -808,8 +703,9 @@ mod tests {
(context, node)
}
pub async fn shared_state() -> &'static (TestExecutionContext, Arc<ZombieNode>) {
static NODE: OnceCell<(TestExecutionContext, Arc<ZombieNode>)> = OnceCell::const_new();
pub async fn shared_state() -> &'static (TestExecutionContext, Arc<ZombienetNode>) {
static NODE: OnceCell<(TestExecutionContext, Arc<ZombienetNode>)> =
OnceCell::const_new();
NODE.get_or_init(|| async {
let (context, node) = new_node().await;
@@ -818,13 +714,14 @@ mod tests {
.await
}
pub async fn shared_node() -> &'static Arc<ZombieNode> {
pub async fn shared_node() -> &'static Arc<ZombienetNode> {
&shared_state().await.1
}
}
use utils::{new_node, test_config};
#[tokio::test]
#[ignore = "Ignored for the time being"]
async fn test_transfer_transaction_should_return_receipt() {
let (ctx, node) = new_node().await;
@@ -858,7 +755,7 @@ mod tests {
"#;
let context = test_config();
let mut node = ZombieNode::new(
let mut node = ZombienetNode::new(
context.polkadot_parachain_configuration.path.clone(),
&context,
);
@@ -868,19 +765,19 @@ mod tests {
.expect("init failed");
// Check that the patched chainspec file was generated
let final_chainspec_path = node.base_directory.join(ZombieNode::CHAIN_SPEC_JSON_FILE);
let final_chainspec_path = node
.base_directory
.join(ZombienetNode::CHAIN_SPEC_JSON_FILE);
assert!(final_chainspec_path.exists(), "Chainspec file should exist");
let contents =
std::fs::read_to_string(&final_chainspec_path).expect("Failed to read chainspec");
// Validate that the Polkadot addresses derived from the Ethereum addresses are in the file
let first_eth_addr = ZombieNode::eth_to_polkadot_address(
&"90F8bf6A479f320ead074411a4B0e7944Ea8c9C1".parse().unwrap(),
);
let second_eth_addr = ZombieNode::eth_to_polkadot_address(
&"Ab8483F64d9C6d1EcF9b849Ae677dD3315835cb2".parse().unwrap(),
);
let first_eth_addr =
eth_to_polkadot_address(&"90F8bf6A479f320ead074411a4B0e7944Ea8c9C1".parse().unwrap());
let second_eth_addr =
eth_to_polkadot_address(&"Ab8483F64d9C6d1EcF9b849Ae677dD3315835cb2".parse().unwrap());
assert!(
contents.contains(&first_eth_addr),
@@ -892,97 +789,11 @@ mod tests {
);
}
#[tokio::test]
async fn test_parse_genesis_alloc() {
// Create test genesis file
let genesis_json = r#"
{
"alloc": {
"0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1": { "balance": "1000000000000000000" },
"0x0000000000000000000000000000000000000000": { "balance": "0xDE0B6B3A7640000" },
"0xffffffffffffffffffffffffffffffffffffffff": { "balance": "123456789" }
}
}
"#;
let context = test_config();
let node = ZombieNode::new(
context.polkadot_parachain_configuration.path.clone(),
&context,
);
let result = node
.extract_balance_from_genesis_file(&serde_json::from_str(genesis_json).unwrap())
.unwrap();
let result_map: std::collections::HashMap<_, _> = result.into_iter().collect();
assert_eq!(
result_map.get("5FLneRcWAfk3X3tg6PuGyLNGAquPAZez5gpqvyuf3yUK8VaV"),
Some(&1_000_000_000_000_000_000u128)
);
assert_eq!(
result_map.get("5C4hrfjw9DjXZTzV3MwzrrAr9P1MLDHajjSidz9bR544LEq1"),
Some(&1_000_000_000_000_000_000u128)
);
assert_eq!(
result_map.get("5HrN7fHLXWcFiXPwwtq2EkSGns9eMmoUQnbVKweNz3VVr6N4"),
Some(&123_456_789u128)
);
}
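// A hedged sketch (not part of the diff) of the balance parsing this test
// exercises: genesis alloc balances appear both as decimal strings and as
// 0x-prefixed hex, and 0xDE0B6B3A7640000 equals 10^18, which is why two of
// the entries above map to the same balance. `parse_balance` is illustrative;
// the real logic sits inside extract_balance_from_genesis_file.
fn parse_balance(raw: &str) -> Option<u128> {
    match raw.strip_prefix("0x").or_else(|| raw.strip_prefix("0X")) {
        Some(hex) => u128::from_str_radix(hex, 16).ok(),
        None => raw.parse::<u128>().ok(),
    }
}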
#[test]
fn print_eth_to_polkadot_mappings() {
let eth_addresses = vec![
"0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1",
"0xffffffffffffffffffffffffffffffffffffffff",
"90F8bf6A479f320ead074411a4B0e7944Ea8c9C1",
];
for eth_addr in eth_addresses {
let ss58 = ZombieNode::eth_to_polkadot_address(&eth_addr.parse().unwrap());
println!("Ethereum: {eth_addr} -> Polkadot SS58: {ss58}");
}
}
#[test]
fn test_eth_to_polkadot_address() {
let cases = vec![
(
"0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1",
"5FLneRcWAfk3X3tg6PuGyLNGAquPAZez5gpqvyuf3yUK8VaV",
),
(
"90F8bf6A479f320ead074411a4B0e7944Ea8c9C1",
"5FLneRcWAfk3X3tg6PuGyLNGAquPAZez5gpqvyuf3yUK8VaV",
),
(
"0x0000000000000000000000000000000000000000",
"5C4hrfjw9DjXZTzV3MwzrrAr9P1MLDHajjSidz9bR544LEq1",
),
(
"0xffffffffffffffffffffffffffffffffffffffff",
"5HrN7fHLXWcFiXPwwtq2EkSGns9eMmoUQnbVKweNz3VVr6N4",
),
];
for (eth_addr, expected_ss58) in cases {
let result = ZombieNode::eth_to_polkadot_address(&eth_addr.parse().unwrap());
assert_eq!(
result, expected_ss58,
"Mismatch for Ethereum address {eth_addr}"
);
}
}
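// A hedged sketch (not part of the diff) of the 20-byte -> 32-byte account
// mapping these fixtures assume, before SS58 encoding. pallet-revive's
// AccountId32Mapper pads the Ethereum address with 0xEE bytes; whether
// eth_to_polkadot_address in this crate uses the same padding byte is an
// assumption.
fn eth_to_account_id_bytes(eth: &[u8; 20]) -> [u8; 32] {
    let mut out = [0xEEu8; 32]; // assumed suffix padding
    out[..20].copy_from_slice(eth);
    out
}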
#[test]
fn eth_rpc_version_works() {
// Arrange
let context = test_config();
let node = ZombieNode::new(
let node = ZombienetNode::new(
context.polkadot_parachain_configuration.path.clone(),
&context,
);
@@ -1001,7 +812,7 @@ mod tests {
fn version_works() {
// Arrange
let context = test_config();
let node = ZombieNode::new(
let node = ZombienetNode::new(
context.polkadot_parachain_configuration.path.clone(),
&context,
);
@@ -7,6 +7,10 @@ use alloy::{
transports::TransportResult,
};
// Percentage padding applied to the estimated gas limit (120/100 = 20% padding)
const GAS_ESTIMATE_PADDING_NUMERATOR: u64 = 120;
const GAS_ESTIMATE_PADDING_DENOMINATOR: u64 = 100;
#[derive(Clone, Debug)]
pub struct FallbackGasFiller {
inner: GasFiller,
@@ -56,8 +60,6 @@ where
provider: &P,
tx: &<N as Network>::TransactionRequest,
) -> TransportResult<Self::Fillable> {
// Try to fetch the GasFiller's "fillable" values (gas_price, base_fee, estimate_gas, …)
// If it errors (i.e. the tx would revert under eth_estimateGas), swallow it.
match self.inner.prepare(provider, tx).await {
Ok(fill) => Ok(Some(fill)),
Err(_) => Ok(None),
@@ -70,8 +72,17 @@ where
mut tx: alloy::providers::SendableTx<N>,
) -> TransportResult<SendableTx<N>> {
if let Some(fill) = fillable {
// our inner GasFiller succeeded — use it
self.inner.fill(fill, tx).await
let mut tx = self.inner.fill(fill, tx).await?;
if let Some(builder) = tx.as_mut_builder() {
if let Some(estimated) = builder.gas_limit() {
let padded = estimated
.checked_mul(GAS_ESTIMATE_PADDING_NUMERATOR)
.and_then(|v| v.checked_div(GAS_ESTIMATE_PADDING_DENOMINATOR))
.unwrap_or(u64::MAX);
builder.set_gas_limit(padded);
}
}
Ok(tx)
} else {
if let Some(builder) = tx.as_mut_builder() {
builder.set_gas_limit(self.default_gas_limit);
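// A hedged sketch (not part of the diff) isolating the padding arithmetic
// introduced above so it can be unit-tested: 120/100 adds 20%, and the
// checked operations saturate to u64::MAX instead of overflowing.
fn pad_gas(estimated: u64) -> u64 {
    estimated
        .checked_mul(GAS_ESTIMATE_PADDING_NUMERATOR)
        .and_then(|v| v.checked_div(GAS_ESTIMATE_PADDING_DENOMINATOR))
        .unwrap_or(u64::MAX)
}

#[test]
fn gas_padding_examples() {
    assert_eq!(pad_gas(100_000), 120_000);
    assert_eq!(pad_gas(u64::MAX), u64::MAX); // multiplication overflow saturates
}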
+2 -2
View File
@@ -1,7 +1,7 @@
mod concurrency_limiter;
mod fallback_gas_provider;
mod fallback_gas_filler;
mod provider;
pub use concurrency_limiter::*;
pub use fallback_gas_provider::*;
pub use fallback_gas_filler::*;
pub use provider::*;
+7 -3
View File
@@ -10,7 +10,7 @@ use alloy::{
};
use anyhow::{Context, Result};
use revive_dt_common::futures::{PollingWaitBehavior, poll};
use tracing::debug;
use tracing::{Instrument, debug, info, info_span};
use crate::provider_utils::{ConcurrencyLimiterLayer, FallbackGasFiller};
@@ -44,7 +44,7 @@ where
// requests at any point in time and no more than that. This is done in an effort to stabilize
// the framework from some of the intermittent issues that we've been seeing related to RPC calls.
static GLOBAL_CONCURRENCY_LIMITER_LAYER: LazyLock<ConcurrencyLimiterLayer> =
LazyLock::new(|| ConcurrencyLimiterLayer::new(10));
LazyLock::new(|| ConcurrencyLimiterLayer::new(500));
let client = ClientBuilder::default()
.layer(GLOBAL_CONCURRENCY_LIMITER_LAYER.clone())
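// A hedged sketch (not part of the diff) of the idea behind the
// ConcurrencyLimiterLayer whose cap the diff raises from 10 to 500: a shared
// semaphore bounds how many RPC requests are in flight at once. The real
// layer in crate::provider_utils wraps the transport; this standalone type is
// illustrative only.
#[derive(Clone)]
struct ConcurrencyLimiter {
    permits: std::sync::Arc<tokio::sync::Semaphore>,
}

impl ConcurrencyLimiter {
    fn new(max_in_flight: usize) -> Self {
        Self {
            permits: std::sync::Arc::new(tokio::sync::Semaphore::new(max_in_flight)),
        }
    }

    async fn run<T>(&self, request: impl std::future::Future<Output = T>) -> T {
        // The permit is held for the duration of the request and released on drop.
        let _permit = self.permits.acquire().await.expect("semaphore is never closed");
        request.await
    }
}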
@@ -117,12 +117,16 @@ where
async move {
match provider.get_transaction_receipt(tx_hash).await {
Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)),
Ok(Some(receipt)) => {
info!("Found the transaction receipt");
Ok(ControlFlow::Break(receipt))
}
_ => Ok(ControlFlow::Continue(())),
}
}
},
)
.instrument(info_span!("Polling for receipt", %tx_hash))
.await
.context(format!("Polling for receipt failed for {tx_hash}"))
}
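// A hedged sketch (not part of the diff) of the ControlFlow-based polling
// pattern used above; the real `poll` helper from revive_dt_common also takes
// a PollingWaitBehavior rather than the fixed interval assumed here.
async fn poll_until<T, F, Fut>(interval: std::time::Duration, mut check: F) -> anyhow::Result<T>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = anyhow::Result<std::ops::ControlFlow<T, ()>>>,
{
    loop {
        match check().await? {
            std::ops::ControlFlow::Break(value) => return Ok(value),
            std::ops::ControlFlow::Continue(()) => tokio::time::sleep(interval).await,
        }
    }
}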
+1 -2
View File
@@ -442,8 +442,7 @@ impl Report {
#[derive(Clone, Debug, Serialize, Default)]
pub struct TestCaseReport {
/// Information on the status of the test case and whether it succeeded, failed, or was
/// ignored.
/// Information on the status of the test case and whether it succeeded, failed, or was ignored.
#[serde(skip_serializing_if = "Option::is_none")]
pub status: Option<TestCaseStatus>,
/// Information related to the execution on one of the platforms.
+2 -4
View File
@@ -8,10 +8,8 @@ use anyhow::Context as _;
use indexmap::IndexMap;
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_compiler::{CompilerInput, CompilerOutput};
use revive_dt_format::{
corpus::Corpus,
metadata::{ContractInstance, Metadata},
};
use revive_dt_format::metadata::Metadata;
use revive_dt_format::{corpus::Corpus, metadata::ContractInstance};
use semver::Version;
use tokio::sync::{broadcast, oneshot};
+3 -4
View File
@@ -93,17 +93,16 @@ echo ""
# Run the tool
cargo build --release;
RUST_LOG="info,alloy_pubsub::service=error" ./target/release/retester test \
--platform revive-dev-node-revm-solc \
--platform geth-evm-solc \
--corpus "$CORPUS_FILE" \
--working-directory "$WORKDIR" \
--concurrency.number-of-nodes 10 \
--concurrency.number-of-threads 5 \
--concurrency.number-of-concurrent-tasks 1000 \
--concurrency.ignore-concurrency-limit \
--wallet.additional-keys 100000 \
--kitchensink.path "$SUBSTRATE_NODE_BIN" \
--revive-dev-node.path "$REVIVE_DEV_NODE_BIN" \
--eth-rpc.path "$ETH_RPC_BIN" \
> logs.log \
2> output.log
> logs.log
echo -e "${GREEN}=== Test run completed! ===${NC}"