Compare commits

...

7 Commits

Author SHA1 Message Date
pgherveou 1659164310 save before flight 2025-10-13 13:05:07 +02:00
pgherveou 0a68800856 nit 2025-10-08 18:26:43 +02:00
pgherveou 8303d789cd use 10^6 for gas filler 2025-10-08 15:15:08 +02:00
pgherveou 40bf44fe58 fix 2025-10-08 14:50:50 +02:00
pgherveou ba8ad03290 fix 2025-10-08 14:06:03 +02:00
pgherveou 3dd99f3ac8 Merge branch 'pg/fmt' into pg/ml-runner 2025-10-08 11:42:37 +00:00
pgherveou 6618463c68 fix 2025-10-08 11:40:08 +00:00
9 changed files with 279 additions and 150 deletions
-19
View File
@@ -18,28 +18,9 @@ env:
POLKADOT_VERSION: polkadot-stable2506-2
jobs:
fmt:
runs-on: ubuntu-latest
steps:
- name: Checkout repo
uses: actions/checkout@v4
- name: Setup Rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Install nightly toolchain
run: rustup toolchain install nightly
- name: Install rustfmt for nightly
run: rustup component add --toolchain nightly rustfmt
- name: Cargo fmt
run: cargo +nightly fmt --all -- --check
cache-polkadot:
name: Build and cache Polkadot binaries on ${{ matrix.os }}
runs-on: ${{ matrix.os }}
needs: [fmt]
strategy:
matrix:
os: [ubuntu-24.04, macos-14]
+1 -1
View File
@@ -1,7 +1,7 @@
.PHONY: format clippy test machete
format:
cargo fmt --all -- --check
cargo +nightly fmt --all -- --check
clippy:
cargo clippy --all-features --workspace -- --deny warnings
+33 -14
View File
@@ -354,24 +354,43 @@ where
_: &StepPath,
step: &FunctionCallStep,
) -> Result<usize> {
// Check if this step expects an exception
let expects_exception = step.expected.as_ref().map_or(false, |expected| match expected {
Expected::Expected(exp) => exp.exception,
Expected::ExpectedMany(exps) => exps.iter().any(|exp| exp.exception),
Expected::Calldata(_) => false,
});
let deployment_receipts = self
.handle_function_call_contract_deployment(step)
.await
.context("Failed to deploy contracts for the function call step")?;
let execution_receipt = self
.handle_function_call_execution(step, deployment_receipts)
.await
.context("Failed to handle the function call execution")?;
let tracing_result = self
.handle_function_call_call_frame_tracing(execution_receipt.transaction_hash)
.await
.context("Failed to handle the function call call frame tracing")?;
self.handle_function_call_variable_assignment(step, &tracing_result)
.await
.context("Failed to handle function call variable assignment")?;
self.handle_function_call_assertions(step, &execution_receipt, &tracing_result)
.await
.context("Failed to handle function call assertions")?;
let execution_receipt =
match self.handle_function_call_execution(step, deployment_receipts).await {
Ok(receipt) => Some(receipt),
Err(err) => {
if !expects_exception {
return Err(err).context("Failed to handle the function call execution");
}
tracing::info!("Transaction failed as expected");
None
},
};
if let Some(execution_receipt) = execution_receipt {
let tracing_result = self
.handle_function_call_call_frame_tracing(execution_receipt.transaction_hash)
.await
.context("Failed to handle the function call call frame tracing")?;
self.handle_function_call_variable_assignment(step, &tracing_result)
.await
.context("Failed to handle function call variable assignment")?;
self.handle_function_call_assertions(step, &execution_receipt, &tracing_result)
.await
.context("Failed to handle function call assertions")?;
}
Ok(1)
}
+225 -98
View File
@@ -21,7 +21,7 @@ use std::{
io::{BufRead, BufReader, BufWriter, Write},
path::{Path, PathBuf},
sync::Arc,
time::Instant,
time::{Duration, Instant},
};
use temp_dir::TempDir;
use tokio::sync::Mutex;
@@ -40,6 +40,10 @@ struct MlTestRunnerArgs {
#[arg(long = "cached-passed")]
cached_passed: Option<PathBuf>,
/// File to store tests that have failed (defaults to .<platform>-failed)
#[arg(long = "cached-failed")]
cached_failed: Option<PathBuf>,
/// Stop after the first file failure
#[arg(long = "bail")]
bail: bool,
@@ -62,17 +66,25 @@ struct MlTestRunnerArgs {
/// RPC port to connect to when using existing node
#[arg(long = "rpc-port", default_value = "8545")]
rpc_port: u16,
/// Show verbose output including cached tests and detailed error messages
#[arg(long = "verbose", short = 'v')]
verbose: bool,
}
fn main() -> anyhow::Result<()> {
let subscriber = FmtSubscriber::builder()
.with_env_filter(EnvFilter::from_default_env())
.with_writer(std::io::stderr)
.finish();
tracing::subscriber::set_global_default(subscriber).expect("Failed to set tracing subscriber");
let args = MlTestRunnerArgs::parse();
// Only set up tracing if RUST_LOG is explicitly set or --verbose is passed
if std::env::var("RUST_LOG").is_ok() || args.verbose {
let subscriber = FmtSubscriber::builder()
.with_env_filter(EnvFilter::from_default_env())
.with_writer(std::io::stderr)
.finish();
tracing::subscriber::set_global_default(subscriber)
.expect("Failed to set tracing subscriber");
}
info!("ML test runner starting");
info!("Platform: {:?}", args.platform);
info!("Start platform: {}", args.start_platform);
@@ -84,6 +96,40 @@ fn main() -> anyhow::Result<()> {
.block_on(run(args))
}
/// Wait for HTTP server to be ready by attempting to connect to the specified port
async fn wait_for_http_server(port: u16) -> anyhow::Result<()> {
    const MAX_RETRIES: u32 = 60;
    const RETRY_DELAY: Duration = Duration::from_secs(1);

    // Poll the TCP port once per retry; return as soon as a connection succeeds.
    let mut attempt: u32 = 0;
    loop {
        attempt += 1;
        let err = match tokio::net::TcpStream::connect(format!("127.0.0.1:{}", port)).await {
            Ok(_) => {
                info!("Successfully connected to HTTP server on port {} (attempt {})", port, attempt);
                return Ok(());
            },
            Err(e) => e,
        };
        // Out of retries: surface the last connection error to the caller.
        if attempt == MAX_RETRIES {
            anyhow::bail!(
                "Failed to connect to HTTP server on port {} after {} attempts: {}",
                port,
                MAX_RETRIES,
                e_display(&err)
            );
        }
        // Log a progress line every 10th attempt so the wait is visible but not noisy.
        if attempt % 10 == 0 {
            info!(
                "Still waiting for HTTP server on port {} (attempt {}/{})",
                port, attempt, MAX_RETRIES
            );
        }
        tokio::time::sleep(RETRY_DELAY).await;
    }
}

// Helper kept private to this function's rewrite: forwards the error's Display
// form so the bail! message is byte-identical to the original formatting.
fn e_display(e: &std::io::Error) -> &std::io::Error {
    e
}
async fn run(args: MlTestRunnerArgs) -> anyhow::Result<()> {
let start_time = Instant::now();
@@ -101,6 +147,84 @@ async fn run(args: MlTestRunnerArgs) -> anyhow::Result<()> {
let cached_passed = Arc::new(Mutex::new(cached_passed));
// Set up cached-failed file (defaults to .<platform>-failed)
let cached_failed_path = args
.cached_failed
.clone()
.unwrap_or_else(|| PathBuf::from(format!(".{:?}-failed", args.platform)));
let cached_failed = Arc::new(Mutex::new(HashSet::<String>::new()));
// Get the platform based on CLI args
let platform: &dyn Platform = match args.platform {
PlatformIdentifier::GethEvmSolc => &revive_dt_core::GethEvmSolcPlatform,
PlatformIdentifier::LighthouseGethEvmSolc => &revive_dt_core::LighthouseGethEvmSolcPlatform,
PlatformIdentifier::KitchensinkPolkavmResolc =>
&revive_dt_core::KitchensinkPolkavmResolcPlatform,
PlatformIdentifier::KitchensinkRevmSolc => &revive_dt_core::KitchensinkRevmSolcPlatform,
PlatformIdentifier::ReviveDevNodePolkavmResolc =>
&revive_dt_core::ReviveDevNodePolkavmResolcPlatform,
PlatformIdentifier::ReviveDevNodeRevmSolc => &revive_dt_core::ReviveDevNodeRevmSolcPlatform,
PlatformIdentifier::ZombienetPolkavmResolc =>
&revive_dt_core::ZombienetPolkavmResolcPlatform,
PlatformIdentifier::ZombienetRevmSolc => &revive_dt_core::ZombienetRevmSolcPlatform,
};
let test_context = TestExecutionContext::default();
let context = revive_dt_config::Context::Test(Box::new(test_context));
let node: &'static dyn revive_dt_node_interaction::EthereumNode = if args.start_platform {
info!("Starting blockchain node...");
let node_handle =
platform.new_node(context.clone()).context("Failed to spawn node thread")?;
info!("Waiting for node to start...");
let node = node_handle
.join()
.map_err(|e| anyhow::anyhow!("Node thread panicked: {:?}", e))?
.context("Failed to start node")?;
info!("Node started with ID: {}, connection: {}", node.id(), node.connection_string());
let node = Box::leak(node);
info!("Running pre-transactions...");
node.pre_transactions().await.context("Failed to run pre-transactions")?;
info!("Pre-transactions completed");
node
} else {
info!("Using existing node at port {}", args.rpc_port);
// Wait for the HTTP server to be ready
info!("Waiting for HTTP server to be ready on port {}...", args.rpc_port);
wait_for_http_server(args.rpc_port).await?;
info!("HTTP server is ready");
let existing_node: Box<dyn revive_dt_node_interaction::EthereumNode> = match args.platform {
PlatformIdentifier::GethEvmSolc | PlatformIdentifier::LighthouseGethEvmSolc =>
Box::new(
revive_dt_node::node_implementations::geth::GethNode::new_existing(
&args.private_key,
args.rpc_port,
)
.await?,
),
PlatformIdentifier::KitchensinkPolkavmResolc |
PlatformIdentifier::KitchensinkRevmSolc |
PlatformIdentifier::ReviveDevNodePolkavmResolc |
PlatformIdentifier::ReviveDevNodeRevmSolc |
PlatformIdentifier::ZombienetPolkavmResolc |
PlatformIdentifier::ZombienetRevmSolc => Box::new(
revive_dt_node::node_implementations::substrate::SubstrateNode::new_existing(
&args.private_key,
args.rpc_port,
)
.await?,
),
};
Box::leak(existing_node)
};
let mut passed_files = 0;
let mut failed_files = 0;
let mut skipped_files = 0;
@@ -116,11 +240,14 @@ async fn run(args: MlTestRunnerArgs) -> anyhow::Result<()> {
for test_file in test_files {
let file_display = test_file.display().to_string();
info!("\n\n == Executing test file: {file_display} == \n\n");
// Check if already passed
{
let cache = cached_passed.lock().await;
if cache.contains(&file_display) {
println!("test {} ... {YELLOW}cached{COLOUR_RESET}", file_display);
if args.verbose {
println!("test {file_display} ... {YELLOW}cached{COLOUR_RESET}");
}
skipped_files += 1;
continue;
}
@@ -133,33 +260,54 @@ async fn run(args: MlTestRunnerArgs) -> anyhow::Result<()> {
mf
},
Err(e) => {
println!("test {} ... {RED}FAILED{COLOUR_RESET}", file_display);
println!(" Error loading metadata: {}", e);
failed_files += 1;
failures.push((file_display.clone(), format!("Error loading metadata: {}", e)));
if args.bail {
break;
}
// Skip files without metadata instead of treating them as failures
info!("Skipping {} (no metadata): {}", file_display, e);
skipped_files += 1;
continue;
},
};
info!("Executing test file: {}", file_display);
match execute_test_file(&args, &metadata_file).await {
// Execute test with 20 second timeout
let test_result = tokio::time::timeout(
Duration::from_secs(20),
execute_test_file(&metadata_file, platform, node, &context),
)
.await;
let result = match test_result {
Ok(Ok(_)) => Ok(()),
Ok(Err(e)) => Err(e),
Err(_) => Err(anyhow::anyhow!("Test timed out after 20 seconds")),
};
match result {
Ok(_) => {
println!("test {} ... {GREEN}ok{COLOUR_RESET}", file_display);
info!("Test file passed: {}", file_display);
println!("test {file_display} ... {GREEN}ok{COLOUR_RESET}");
passed_files += 1;
{
// Update cache
if let Some(cache_file) = &args.cached_passed {
let mut cache = cached_passed.lock().await;
cache.insert(file_display);
if let Err(e) = save_cached_passed(cache_file, &cache) {
info!("Failed to save cache: {}", e);
}
}
},
Err(e) => {
println!("test {} ... {RED}FAILED{COLOUR_RESET}", file_display);
println!("test {file_display} ... {RED}FAILED{COLOUR_RESET}");
failed_files += 1;
failures.push((file_display, format!("{:?}", e)));
let error_detail = if args.verbose { format!("{:?}", e) } else { format!("{}", e) };
failures.push((file_display.clone(), error_detail));
// Update cached-failed
{
let mut cache = cached_failed.lock().await;
cache.insert(file_display);
if let Err(e) = save_cached_failed(&cached_failed_path, &cache) {
info!("Failed to save cached-failed: {}", e);
}
}
if args.bail {
info!("Bailing after first failure");
@@ -169,15 +317,9 @@ async fn run(args: MlTestRunnerArgs) -> anyhow::Result<()> {
}
}
if let Some(cache_file) = &args.cached_passed {
let cache = cached_passed.lock().await;
info!("Saving {} cached passed test(s)", cache.len());
save_cached_passed(cache_file, &cache)?;
}
// Print summary
println!();
if !failures.is_empty() {
if !failures.is_empty() && args.verbose {
println!("{BOLD}failures:{BOLD_RESET}");
println!();
for (file, error) in &failures {
@@ -235,12 +377,43 @@ fn discover_test_files(path: &Path) -> anyhow::Result<Vec<PathBuf>> {
_ => anyhow::bail!("Unsupported file extension: {}. Expected .sol or .json", extension),
}
} else if path.is_dir() {
// Walk directory recursively for .sol files
for entry in FilesWithExtensionIterator::new(path)
// First, find all test.json files
let mut test_json_dirs = HashSet::new();
for json_file in FilesWithExtensionIterator::new(path)
.with_allowed_extension("json")
.with_use_cached_fs(true)
{
if json_file.file_name().and_then(|s| s.to_str()) == Some("test.json") {
if let Some(parent) = json_file.parent() {
test_json_dirs.insert(parent.to_path_buf());
}
// Try to parse as corpus file first, then as metadata file
if let Ok(corpus) = Corpus::try_from_path(&json_file) {
// It's a corpus file - enumerate its tests
let metadata_files = corpus.enumerate_tests();
for metadata in metadata_files {
files.push(metadata.metadata_file_path);
}
} else {
// It's a metadata file - use it directly
files.push(json_file);
}
}
}
// Then, find .sol files that are NOT in directories with test.json
for sol_file in FilesWithExtensionIterator::new(path)
.with_allowed_extension("sol")
.with_use_cached_fs(true)
{
files.push(entry);
if let Some(parent) = sol_file.parent() {
if !test_json_dirs.contains(parent) {
files.push(sol_file);
}
} else {
files.push(sol_file);
}
}
} else {
anyhow::bail!("Path is neither a file nor a directory: {}", path.display());
@@ -263,8 +436,10 @@ fn load_metadata_file(path: &Path) -> anyhow::Result<MetadataFile> {
/// Execute all test cases in a metadata file
async fn execute_test_file(
args: &MlTestRunnerArgs,
metadata_file: &MetadataFile,
platform: &dyn Platform,
node: &'static dyn revive_dt_node_interaction::EthereumNode,
context: &revive_dt_config::Context,
) -> anyhow::Result<()> {
if metadata_file.cases.is_empty() {
anyhow::bail!("No test cases found in file");
@@ -272,73 +447,9 @@ async fn execute_test_file(
info!("Processing {} test case(s)", metadata_file.cases.len());
// Get the platform based on CLI args
let platform: &dyn Platform = match args.platform {
PlatformIdentifier::GethEvmSolc => &revive_dt_core::GethEvmSolcPlatform,
PlatformIdentifier::LighthouseGethEvmSolc => &revive_dt_core::LighthouseGethEvmSolcPlatform,
PlatformIdentifier::KitchensinkPolkavmResolc => {
&revive_dt_core::KitchensinkPolkavmResolcPlatform
},
PlatformIdentifier::KitchensinkRevmSolc => &revive_dt_core::KitchensinkRevmSolcPlatform,
PlatformIdentifier::ReviveDevNodePolkavmResolc => {
&revive_dt_core::ReviveDevNodePolkavmResolcPlatform
},
PlatformIdentifier::ReviveDevNodeRevmSolc => &revive_dt_core::ReviveDevNodeRevmSolcPlatform,
PlatformIdentifier::ZombienetPolkavmResolc => &revive_dt_core::ZombienetPolkavmResolcPlatform,
PlatformIdentifier::ZombienetRevmSolc => &revive_dt_core::ZombienetRevmSolcPlatform,
};
let temp_dir = TempDir::new()?;
info!("Created temporary directory: {}", temp_dir.path().display());
let test_context = TestExecutionContext::default();
let context = revive_dt_config::Context::Test(Box::new(test_context));
let node: &'static dyn revive_dt_node_interaction::EthereumNode = if args.start_platform {
info!("Starting blockchain node...");
let node_handle =
platform.new_node(context.clone()).context("Failed to spawn node thread")?;
info!("Waiting for node to start...");
let node = node_handle
.join()
.map_err(|e| anyhow::anyhow!("Node thread panicked: {:?}", e))?
.context("Failed to start node")?;
info!("Node started with ID: {}, connection: {}", node.id(), node.connection_string());
let node = Box::leak(node);
info!("Running pre-transactions...");
node.pre_transactions().await.context("Failed to run pre-transactions")?;
info!("Pre-transactions completed");
node
} else {
info!("Using existing node");
let existing_node: Box<dyn revive_dt_node_interaction::EthereumNode> = match args.platform {
PlatformIdentifier::GethEvmSolc | PlatformIdentifier::LighthouseGethEvmSolc => Box::new(
revive_dt_node::node_implementations::geth::GethNode::new_existing(
&args.private_key,
args.rpc_port,
)
.await?,
),
PlatformIdentifier::KitchensinkPolkavmResolc
| PlatformIdentifier::KitchensinkRevmSolc
| PlatformIdentifier::ReviveDevNodePolkavmResolc
| PlatformIdentifier::ReviveDevNodeRevmSolc
| PlatformIdentifier::ZombienetPolkavmResolc
| PlatformIdentifier::ZombienetRevmSolc => Box::new(
revive_dt_node::node_implementations::substrate::SubstrateNode::new_existing(
&args.private_key,
args.rpc_port,
)
.await?,
),
};
Box::leak(existing_node)
};
info!("Initializing cached compiler");
let cached_compiler = CachedCompiler::new(temp_dir.path().join("compilation_cache"), false)
.await
@@ -467,7 +578,7 @@ async fn build_test_definition<'a>(
};
if let Err((reason, _)) = test_definition.check_compatibility() {
println!(" Skipping case {}: {}", case_idx, reason);
info!("Skipping case {}: {}", case_idx, reason);
return Ok(None);
}
@@ -510,3 +621,19 @@ fn save_cached_passed(path: &Path, cache: &HashSet<String>) -> anyhow::Result<()
writer.flush()?;
Ok(())
}
/// Save cached failed tests to file
fn save_cached_failed(path: &Path, cache: &HashSet<String>) -> anyhow::Result<()> {
    // Sort entries first so the file contents are deterministic across runs.
    let mut entries: Vec<&String> = cache.iter().collect();
    entries.sort();

    let file = File::create(path).context("Failed to create cached-failed file")?;
    let mut writer = BufWriter::new(file);
    for entry in &entries {
        writeln!(writer, "{}", entry)?;
    }
    // Flush explicitly: Drop would swallow any write error silently.
    writer.flush()?;
    Ok(())
}
+2 -2
View File
@@ -94,8 +94,8 @@ impl GethNode {
const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress";
const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet";
const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(5 * 60);
const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60);
const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(10);
const TRACE_POLLING_DURATION: Duration = Duration::from_secs(10);
pub fn new(
context: impl AsRef<WorkingDirectoryConfiguration>
@@ -116,7 +116,7 @@ impl LighthouseGethNode {
const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress";
const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet";
const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(5 * 60);
const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(30);
const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60);
const VALIDATOR_MNEMONIC: &str = "giant issue aisle success illegal bike spike question tent bar rely arctic volcano long crawl hungry vocal artwork sniff fantasy very lucky have athlete";
@@ -428,16 +428,20 @@ impl EthereumNode for ZombieNode {
transaction: alloy::rpc::types::TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
Box::pin(async move {
let receipt = self
let pending = self
.provider()
.await
.context("Failed to create provider for transaction submission")?
.send_transaction(transaction)
.await
.context("Failed to submit transaction to proxy")?
.get_receipt()
.await
.context("Failed to fetch transaction receipt from proxy")?;
.context("Failed to submit transaction to proxy")?;
let receipt =
tokio::time::timeout(std::time::Duration::from_secs(120), pending.get_receipt())
.await
.context("Timeout waiting for transaction receipt")?
.context("Failed to fetch transaction receipt from proxy")?;
Ok(receipt)
})
}
@@ -27,7 +27,7 @@ impl FallbackGasFiller {
impl Default for FallbackGasFiller {
fn default() -> Self {
FallbackGasFiller::new(25_000_000, 1_000_000_000, 1_000_000_000)
FallbackGasFiller::new(10_000_000, 1_000_000_000, 1_000_000_000)
}
}
@@ -51,12 +51,9 @@ where
provider: &P,
tx: &<N as Network>::TransactionRequest,
) -> TransportResult<Self::Fillable> {
// Try to fetch GasFillers fillable (gas_price, base_fee, estimate_gas, …)
// If it errors (i.e. tx would revert under eth_estimateGas), swallow it.
match self.inner.prepare(provider, tx).await {
Ok(fill) => Ok(Some(fill)),
Err(_) => Ok(None),
}
// Try to fetch GasFiller's "fillable" (gas_price, base_fee, estimate_gas, …)
// Propagate errors so caller can handle them appropriately
self.inner.prepare(provider, tx).await.map(Some)
}
async fn fill(
+4 -3
View File
@@ -108,9 +108,10 @@ where
.await
.context(format!("Transaction inclusion watching timeout for {tx_hash}"))?;
poll(Duration::from_secs(60), PollingWaitBehavior::Constant(Duration::from_secs(3)), || {
let provider = provider.clone();
debug!(%tx_hash, "Transaction included, polling for receipt");
poll(Duration::from_secs(30), PollingWaitBehavior::Constant(Duration::from_secs(3)), || {
let provider = provider.clone();
async move {
match provider.get_transaction_receipt(tx_hash).await {
Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)),
@@ -119,5 +120,5 @@ where
}
})
.await
.context(format!("Polling for receipt failed for {tx_hash}"))
.context(format!("Polling for receipt timed out for {tx_hash}"))
}