Compare commits


1 commit

Author SHA1 Message Date
Omar Abdulla 4ad3084fe1 Cache the chainspec 2025-12-03 19:37:01 +03:00
16 changed files with 131 additions and 535 deletions
@@ -1,105 +0,0 @@
name: "Run Revive Differential Tests"
description: "Builds and runs revive-differential-tests (retester) from this repo against the caller's Polkadot SDK."
inputs:
# Setup arguments & environment
polkadot-sdk-path:
description: "The path of the polkadot-sdk that should be compiled for the tests to run against."
required: false
default: "."
type: string
cargo-command:
description: "The cargo command to use in compilations and running of tests (e.g., forklift cargo)."
required: false
default: "cargo"
type: string
revive-differential-tests-ref:
description: "The branch, tag or SHA to checkout for the revive-differential-tests."
required: false
default: "main"
type: string
resolc-version:
description: "The version of resolc to install and use in tests."
required: false
default: "0.5.0"
type: string
use-compilation-caches:
description: "Controls if the compilation caches will be used for the test run or not."
required: false
default: true
type: boolean
# Test Execution Arguments
platform:
description: "The identifier of the platform to run the tests on (e.g., geth-evm-solc, revive-dev-node-revm-solc)"
required: true
type: string
runs:
using: "composite"
steps:
- name: Checkout the Differential Tests Repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
repository: paritytech/revive-differential-tests
ref: ${{ inputs['revive-differential-tests-ref'] }}
path: revive-differential-tests
submodules: recursive
- name: Installing the Latest Resolc
shell: bash
if: ${{ runner.os == 'Linux' && runner.arch == 'X64' }}
run: |
VERSION="${{ inputs['resolc-version'] }}"
ASSET_URL="https://github.com/paritytech/revive/releases/download/v$VERSION/resolc-x86_64-unknown-linux-musl"
echo "Downloading resolc v$VERSION from $ASSET_URL"
curl -Lsf --show-error -o resolc "$ASSET_URL"
chmod +x resolc
./resolc --version
- name: Installing Retester
shell: bash
run: ${{ inputs['cargo-command'] }} install --locked --path revive-differential-tests/crates/core
- name: Creating a workdir for retester
shell: bash
run: mkdir workdir
- name: Downloading & Initializing the compilation caches
shell: bash
if: ${{ inputs['use-compilation-caches'] == true }}
run: |
curl -fL --retry 3 --retry-all-errors --connect-timeout 10 -o cache.tar.gz "https://github.com/paritytech/revive-differential-tests/releases/download/compilation-caches-v1.1/cache.tar.gz"
tar -zxf cache.tar.gz -C ./workdir > /dev/null 2>&1
- name: Building the dependencies from the Polkadot SDK
shell: bash
run: ${{ inputs['cargo-command'] }} build --locked --profile release -p pallet-revive-eth-rpc -p revive-dev-node --manifest-path ${{ inputs['polkadot-sdk-path'] }}/Cargo.toml
- name: Running the Differential Tests
shell: bash
run: |
${{ inputs['cargo-command'] }} run --locked --manifest-path revive-differential-tests/Cargo.toml -- test \
--test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/simple \
--test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/complex \
--test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/translated_semantic_tests \
--platform ${{ inputs['platform'] }} \
--concurrency.number-of-nodes 10 \
--concurrency.number-of-threads 10 \
--concurrency.number-of-concurrent-tasks 100 \
--working-directory ./workdir \
--revive-dev-node.consensus manual-seal-200 \
--revive-dev-node.path ${{ inputs['polkadot-sdk-path'] }}/target/release/revive-dev-node \
--eth-rpc.path ${{ inputs['polkadot-sdk-path'] }}/target/release/eth-rpc \
--resolc.path ./resolc
- name: Creating a markdown report of the test execution
shell: bash
if: ${{ always() }}
run: |
mv ./workdir/*.json report.json
python3 revive-differential-tests/scripts/process-differential-tests-report.py report.json ${{ inputs['platform'] }}
- name: Upload the Report to the CI
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
if: ${{ always() }}
with:
name: report-${{ inputs['platform'] }}.md
path: report.md
- name: Posting the report as a comment on the PR
uses: marocchino/sticky-pull-request-comment@773744901bac0e8cbb5a0dc842800d45e9b2b405
if: ${{ always() }}
with:
header: diff-tests-report-${{ inputs['platform'] }}
path: report.md
-17
@@ -375,23 +375,6 @@ pub struct BenchmarkingContext {
#[arg(short = 'r', long = "default-repetition-count", default_value_t = 1000)]
pub default_repetition_count: usize,
- /// This transaction controls whether the benchmarking driver should await for transactions to
- /// be included in a block before moving on to the next transaction in the sequence or not.
- ///
- /// This behavior is useful in certain cases and not so useful in others. For example, in some
- /// repetition block if there's some kind of relationship between txs n and n+1 (for example a
- /// mint then a transfer) then you would want to wait for the minting to happen and then move on
- /// to the transfers. On the other hand, if there's no relationship between the transactions n
- /// and n+1 (e.g., mint and another mint of a different token) then awaiting the first mint to
- /// be included in a block might not seem necessary.
- ///
- /// By default, this behavior is set to false to allow the benchmarking framework to saturate
- /// the node's mempool as quickly as possible. However, as explained above, there are cases
- /// where it's needed and certain workloads where failure to provide this argument would lead to
- /// inaccurate results.
- #[arg(long)]
- pub await_transaction_inclusion: bool,
/// Configuration parameters for the corpus files to use.
#[clap(flatten, next_help_heading = "Corpus Configuration")]
pub corpus_configuration: CorpusConfiguration,
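For context, the removed flag gated the benchmarking driver roughly as sketched below. This is an illustrative reconstruction from the deleted documentation, not the retester code; `submit` and `wait_for_receipt` are hypothetical stand-ins for the real transaction submission and receipt lookup.

```rust
// Illustrative sketch: how a flag like `await_transaction_inclusion` gates
// waiting for a receipt before moving on to the next step in a sequence.
struct Receipt {
    success: bool,
    hash: [u8; 32],
}

// Hypothetical stand-in: submit the step's transaction and return its hash.
async fn submit(_step: u32) -> Result<[u8; 32], String> {
    Ok([0u8; 32])
}

// Hypothetical stand-in: block until the transaction is mined.
async fn wait_for_receipt(hash: [u8; 32]) -> Result<Receipt, String> {
    Ok(Receipt { success: true, hash })
}

async fn run_steps(steps: &[u32], await_inclusion: bool) -> Result<(), String> {
    for step in steps {
        let hash = submit(*step).await?;
        if await_inclusion {
            // Dependent workloads (e.g. a mint followed by a transfer) need the
            // previous transaction mined before the next one is submitted.
            let receipt = wait_for_receipt(hash).await?;
            if !receipt.success {
                return Err(format!("transaction {:x?} failed", receipt.hash));
            }
        }
        // Otherwise keep submitting to saturate the node's mempool as quickly
        // as possible, which is the behavior the framework defaults to.
    }
    Ok(())
}
```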
@@ -1,5 +1,6 @@
use std::{
collections::HashMap,
+ ops::ControlFlow,
sync::{
Arc,
atomic::{AtomicUsize, Ordering},
@@ -12,7 +13,6 @@ use alloy::{
json_abi::JsonAbi,
network::{Ethereum, TransactionBuilder},
primitives::{Address, TxHash, U256},
- providers::Provider,
rpc::types::{
TransactionReceipt, TransactionRequest,
trace::geth::{
@@ -22,9 +22,12 @@ use alloy::{
},
};
use anyhow::{Context as _, Result, bail};
- use futures::{FutureExt as _, TryFutureExt};
+ use futures::TryFutureExt;
use indexmap::IndexMap;
- use revive_dt_common::types::PrivateKeyAllocator;
+ use revive_dt_common::{
+ futures::{PollingWaitBehavior, poll},
+ types::PrivateKeyAllocator,
+ };
use revive_dt_format::{
metadata::{ContractInstance, ContractPathAndIdent},
steps::{
@@ -34,7 +37,7 @@ use revive_dt_format::{
traits::{ResolutionContext, ResolverApi},
};
use tokio::sync::{Mutex, OnceCell, mpsc::UnboundedSender};
- use tracing::{Span, debug, error, field::display, info, instrument};
+ use tracing::{Instrument, Span, debug, error, field::display, info, info_span, instrument};
use crate::{
differential_benchmarks::{ExecutionState, WatcherEvent},
@@ -70,10 +73,6 @@ pub struct Driver<'a, I> {
/// The number of steps that were executed on the driver.
steps_executed: usize,
- /// This function controls if the driver should wait for transactions to be included in a block
- /// or not before proceeding forward.
- await_transaction_inclusion: bool,
/// This is the queue of steps that are to be executed by the driver for this test case. Each
/// time `execute_step` is called one of the steps is executed.
steps_iterator: I,
@@ -90,7 +89,6 @@ where
private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
cached_compiler: &CachedCompiler<'a>,
watcher_tx: UnboundedSender<WatcherEvent>,
- await_transaction_inclusion: bool,
steps: I,
) -> Result<Self> {
let mut this = Driver {
@@ -106,7 +104,6 @@ where
execution_state: ExecutionState::empty(),
steps_executed: 0,
steps_iterator: steps,
- await_transaction_inclusion,
watcher_tx,
};
this.init_execution_state(cached_compiler)
@@ -169,7 +166,7 @@ where
code,
);
let receipt = self
- .execute_transaction(tx, None, Duration::from_secs(5 * 60))
+ .execute_transaction(tx, None)
.and_then(|(_, receipt_fut)| receipt_fut)
.await
.inspect_err(|err| {
@@ -368,30 +365,7 @@ where
let tx = step
.as_transaction(self.resolver.as_ref(), self.default_resolution_context())
.await?;
- let (tx_hash, receipt_future) = self
- .execute_transaction(tx.clone(), Some(step_path), Duration::from_secs(30 * 60))
- .await?;
- if self.await_transaction_inclusion {
- let receipt = receipt_future
- .await
- .context("Failed while waiting for transaction inclusion in block")?;
- if !receipt.status() {
- error!(
- ?tx,
- tx.hash = %receipt.transaction_hash,
- ?receipt,
- "Encountered a failing benchmark transaction"
- );
- bail!(
- "Encountered a failing transaction in benchmarks: {}",
- receipt.transaction_hash
- )
- }
- }
- Ok(tx_hash)
+ Ok(self.execute_transaction(tx, Some(step_path)).await?.0)
}
}
}
@@ -492,7 +466,6 @@ where
.collect::<Vec<_>>();
steps.into_iter()
},
- await_transaction_inclusion: self.await_transaction_inclusion,
watcher_tx: self.watcher_tx.clone(),
})
.map(|driver| driver.execute_all());
@@ -659,7 +632,7 @@ where
};
let receipt = match self
- .execute_transaction(tx, step_path, Duration::from_secs(5 * 60))
+ .execute_transaction(tx, step_path)
.and_then(|(_, receipt_fut)| receipt_fut)
.await
{
@@ -704,33 +677,18 @@ where
#[instrument(
level = "info",
skip_all,
- fields(
- driver_id = self.driver_id,
- transaction = ?transaction,
- transaction_hash = tracing::field::Empty
- ),
- err(Debug)
+ fields(driver_id = self.driver_id, transaction_hash = tracing::field::Empty)
)]
async fn execute_transaction(
&self,
transaction: TransactionRequest,
step_path: Option<&StepPath>,
- receipt_wait_duration: Duration,
) -> anyhow::Result<(TxHash, impl Future<Output = Result<TransactionReceipt>>)> {
let node = self.platform_information.node;
- let provider = node.provider().await.context("Creating provider failed")?;
- let pending_transaction_builder = provider
- .send_transaction(transaction)
+ let transaction_hash = node
+ .submit_transaction(transaction)
.await
.context("Failed to submit transaction")?;
- let transaction_hash = *pending_transaction_builder.tx_hash();
- let receipt_future = pending_transaction_builder
- .with_timeout(Some(receipt_wait_duration))
- .with_required_confirmations(2)
- .get_receipt()
- .map(|res| res.context("Failed to get the receipt of the transaction"));
Span::current().record("transaction_hash", display(transaction_hash));
info!("Submitted transaction");
@@ -743,7 +701,28 @@ where
.context("Failed to send the transaction hash to the watcher")?;
};
- Ok((transaction_hash, receipt_future))
+ Ok((transaction_hash, async move {
+ info!("Starting to poll for transaction receipt");
+ poll(
+ Duration::from_secs(30 * 60),
+ PollingWaitBehavior::Constant(Duration::from_secs(1)),
+ || {
+ async move {
+ match node.get_receipt(transaction_hash).await {
+ Ok(receipt) => {
+ info!("Polling succeeded, receipt found");
+ Ok(ControlFlow::Break(receipt))
+ }
+ Err(_) => Ok(ControlFlow::Continue(())),
+ }
+ }
+ .instrument(info_span!("Polling for receipt"))
+ },
+ )
+ .instrument(info_span!("Polling for receipt", %transaction_hash))
+ .await
+ .inspect(|_| info!("Found the transaction receipt"))
+ }))
}
// endregion:Transaction Execution
}
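The replacement receipt path above polls `node.get_receipt` on a fixed interval and stops via `ControlFlow::Break`. Below is a minimal sketch of that polling pattern under the assumption that the helper has roughly this shape; the real `poll` and `PollingWaitBehavior` live in `revive_dt_common::futures` and additionally wrap each attempt in a tracing span. The sketch uses `tokio` for the sleep.

```rust
use std::{
    ops::ControlFlow,
    time::{Duration, Instant},
};

// Hypothetical stand-in for the polling helper: run `check` once per
// `interval` until it breaks with a value or the overall `timeout` elapses.
async fn poll_until<T, F, Fut>(
    timeout: Duration,
    interval: Duration,
    mut check: F,
) -> Result<T, String>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = ControlFlow<T>>,
{
    let started = Instant::now();
    loop {
        // Break as soon as the condition is met (e.g. a receipt was found).
        if let ControlFlow::Break(value) = check().await {
            return Ok(value);
        }
        if started.elapsed() >= timeout {
            return Err("timed out waiting for the transaction receipt".into());
        }
        tokio::time::sleep(interval).await;
    }
}

// Usage mirroring the diff, with `get_receipt` as a hypothetical async lookup:
// let receipt = poll_until(
//     Duration::from_secs(30 * 60),
//     Duration::from_secs(1),
//     || async {
//         get_receipt(tx_hash)
//             .await
//             .map_or(ControlFlow::Continue(()), ControlFlow::Break)
//     },
// )
// .await?;
```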
@@ -160,7 +160,6 @@ pub async fn handle_differential_benchmarks(
private_key_allocator,
cached_compiler.as_ref(),
watcher_tx.clone(),
- context.await_transaction_inclusion,
test_definition
.case
.steps_iterator_for_benchmarks(context.default_repetition_count)
@@ -139,18 +139,23 @@ impl Watcher {
break;
}
+ info!(
+ block_number = block.ethereum_block_information.block_number,
+ block_tx_count = block.ethereum_block_information.transaction_hashes.len(),
+ remaining_transactions = watch_for_transaction_hashes.read().await.len(),
+ "Observed a block"
+ );
// Remove all of the transaction hashes observed in this block from the txs we
// are currently watching for.
let mut watch_for_transaction_hashes =
watch_for_transaction_hashes.write().await;
- let mut relevant_transactions_observed = 0;
for tx_hash in block.ethereum_block_information.transaction_hashes.iter() {
let Some((step_path, submission_time)) =
watch_for_transaction_hashes.remove(tx_hash)
else {
continue;
};
- relevant_transactions_observed += 1;
let transaction_information = TransactionInformation {
transaction_hash: *tx_hash,
submission_timestamp: submission_time
@@ -167,14 +172,6 @@ impl Watcher {
)
.expect("Can't fail")
}
- info!(
- block_number = block.ethereum_block_information.block_number,
- block_tx_count = block.ethereum_block_information.transaction_hashes.len(),
- relevant_transactions_observed,
- remaining_transactions = watch_for_transaction_hashes.len(),
- "Observed a block"
- );
}
info!("Watcher's Block Watching Task Finished");
+4 -14
@@ -91,8 +91,7 @@ impl Platform for GethEvmSolcPlatform {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
- let use_fallback_gas_filler = matches!(context, Context::Test(..));
- let node = GethNode::new(context, use_fallback_gas_filler);
+ let node = GethNode::new(context);
let node = spawn_node::<GethNode>(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
@@ -146,8 +145,7 @@ impl Platform for LighthouseGethEvmSolcPlatform {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
- let use_fallback_gas_filler = matches!(context, Context::Test(..));
- let node = LighthouseGethNode::new(context, use_fallback_gas_filler);
+ let node = LighthouseGethNode::new(context);
let node = spawn_node::<LighthouseGethNode>(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
@@ -208,14 +206,12 @@ impl Platform for ReviveDevNodePolkavmResolcPlatform {
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
- let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node = SubstrateNode::new(
revive_dev_node_path,
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
Some(revive_dev_node_consensus),
context,
&eth_rpc_connection_strings,
- use_fallback_gas_filler,
);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
@@ -278,14 +274,12 @@ impl Platform for ReviveDevNodeRevmSolcPlatform {
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
- let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node = SubstrateNode::new(
revive_dev_node_path,
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
Some(revive_dev_node_consensus),
context,
&eth_rpc_connection_strings,
- use_fallback_gas_filler,
);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
@@ -344,9 +338,7 @@ impl Platform for ZombienetPolkavmResolcPlatform {
.clone();
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
- let use_fallback_gas_filler = matches!(context, Context::Test(..));
- let node =
- ZombienetNode::new(polkadot_parachain_path, context, use_fallback_gas_filler);
+ let node = ZombienetNode::new(polkadot_parachain_path, context);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
@@ -403,9 +395,7 @@ impl Platform for ZombienetRevmSolcPlatform {
.clone();
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
- let use_fallback_gas_filler = matches!(context, Context::Test(..));
- let node =
- ZombienetNode::new(polkadot_parachain_path, context, use_fallback_gas_filler);
+ let node = ZombienetNode::new(polkadot_parachain_path, context);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
+6 -88
@@ -2,10 +2,9 @@ mod differential_benchmarks;
mod differential_tests; mod differential_tests;
mod helpers; mod helpers;
use anyhow::{Context as _, bail}; use anyhow::Context as _;
use clap::Parser; use clap::Parser;
use revive_dt_common::types::ParsedTestSpecifier; use revive_dt_report::ReportAggregator;
use revive_dt_report::{ReportAggregator, TestCaseStatus};
use schemars::schema_for; use schemars::schema_for;
use tracing::{info, level_filters::LevelFilter}; use tracing::{info, level_filters::LevelFilter};
use tracing_subscriber::{EnvFilter, FmtSubscriber}; use tracing_subscriber::{EnvFilter, FmtSubscriber};
@@ -58,47 +57,8 @@ fn main() -> anyhow::Result<()> {
let differential_tests_handling_task = let differential_tests_handling_task =
handle_differential_tests(*context, reporter); handle_differential_tests(*context, reporter);
let (_, report) = futures::future::try_join( futures::future::try_join(differential_tests_handling_task, report_aggregator_task)
differential_tests_handling_task, .await?;
report_aggregator_task,
)
.await?;
// Error out if there are any failing tests.
let failures = report
.execution_information
.into_iter()
.flat_map(|(metadata_file_path, metadata_file_report)| {
metadata_file_report.case_reports.into_iter().flat_map(
move |(case_idx, case_report)| {
let metadata_file_path = metadata_file_path.clone();
case_report.mode_execution_reports.into_iter().filter_map(
move |(mode, execution_report)| {
if let Some(TestCaseStatus::Failed { reason }) =
execution_report.status
{
let parsed_test_specifier =
ParsedTestSpecifier::CaseWithMode {
metadata_file_path: metadata_file_path
.clone()
.into_inner(),
case_idx: case_idx.into_inner(),
mode,
};
Some((parsed_test_specifier, reason))
} else {
None
}
},
)
},
)
})
.collect::<Vec<_>>();
if !failures.is_empty() {
bail!("Some tests failed: {failures:#?}")
}
Ok(()) Ok(())
}), }),
@@ -111,48 +71,12 @@ fn main() -> anyhow::Result<()> {
let differential_benchmarks_handling_task = let differential_benchmarks_handling_task =
handle_differential_benchmarks(*context, reporter); handle_differential_benchmarks(*context, reporter);
let (_, report) = futures::future::try_join( futures::future::try_join(
differential_benchmarks_handling_task, differential_benchmarks_handling_task,
report_aggregator_task, report_aggregator_task,
) )
.await?; .await?;
// Error out if there are any failing tests.
let failures = report
.execution_information
.into_iter()
.flat_map(|(metadata_file_path, metadata_file_report)| {
metadata_file_report.case_reports.into_iter().flat_map(
move |(case_idx, case_report)| {
let metadata_file_path = metadata_file_path.clone();
case_report.mode_execution_reports.into_iter().filter_map(
move |(mode, execution_report)| {
if let Some(TestCaseStatus::Failed { reason }) =
execution_report.status
{
let parsed_test_specifier =
ParsedTestSpecifier::CaseWithMode {
metadata_file_path: metadata_file_path
.clone()
.into_inner(),
case_idx: case_idx.into_inner(),
mode,
};
Some((parsed_test_specifier, reason))
} else {
None
}
},
)
},
)
})
.collect::<Vec<_>>();
if !failures.is_empty() {
bail!("Some tests failed: {failures:#?}")
}
Ok(()) Ok(())
}), }),
Context::ExportGenesis(ref export_genesis_context) => { Context::ExportGenesis(ref export_genesis_context) => {
@@ -161,17 +85,11 @@ fn main() -> anyhow::Result<()> {
let genesis_json = serde_json::to_string_pretty(&genesis) let genesis_json = serde_json::to_string_pretty(&genesis)
.context("Failed to serialize the genesis to JSON")?; .context("Failed to serialize the genesis to JSON")?;
println!("{genesis_json}"); println!("{genesis_json}");
Ok(()) Ok(())
} }
Context::ExportJsonSchema => { Context::ExportJsonSchema => {
let schema = schema_for!(Metadata); let schema = schema_for!(Metadata);
println!( println!("{}", serde_json::to_string_pretty(&schema).unwrap());
"{}",
serde_json::to_string_pretty(&schema)
.context("Failed to export the JSON schema")?
);
Ok(()) Ok(())
} }
} }
+2 -6
@@ -76,7 +76,6 @@ pub struct GethNode {
wallet: Arc<EthereumWallet>, wallet: Arc<EthereumWallet>,
nonce_manager: CachedNonceManager, nonce_manager: CachedNonceManager,
provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>, provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
use_fallback_gas_filler: bool,
} }
impl GethNode { impl GethNode {
@@ -101,7 +100,6 @@ impl GethNode {
+ AsRef<WalletConfiguration> + AsRef<WalletConfiguration>
+ AsRef<GethConfiguration> + AsRef<GethConfiguration>
+ Clone, + Clone,
use_fallback_gas_filler: bool,
) -> Self { ) -> Self {
let working_directory_configuration = let working_directory_configuration =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context); AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
@@ -128,7 +126,6 @@ impl GethNode {
wallet: wallet.clone(), wallet: wallet.clone(),
nonce_manager: Default::default(), nonce_manager: Default::default(),
provider: Default::default(), provider: Default::default(),
use_fallback_gas_filler,
} }
} }
@@ -249,8 +246,7 @@ impl GethNode {
.get_or_try_init(|| async move { .get_or_try_init(|| async move {
construct_concurrency_limited_provider::<Ethereum, _>( construct_concurrency_limited_provider::<Ethereum, _>(
self.connection_string.as_str(), self.connection_string.as_str(),
FallbackGasFiller::default() FallbackGasFiller::default(),
.with_use_fallback_gas_filler(self.use_fallback_gas_filler),
ChainIdFiller::new(Some(CHAIN_ID)), ChainIdFiller::new(Some(CHAIN_ID)),
NonceFiller::new(self.nonce_manager.clone()), NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(), self.wallet.clone(),
@@ -746,7 +742,7 @@ mod tests {
fn new_node() -> (TestExecutionContext, GethNode) { fn new_node() -> (TestExecutionContext, GethNode) {
let context = test_config(); let context = test_config();
let mut node = GethNode::new(&context, true); let mut node = GethNode::new(&context);
node.init(context.genesis_configuration.genesis().unwrap().clone()) node.init(context.genesis_configuration.genesis().unwrap().clone())
.expect("Failed to initialize the node") .expect("Failed to initialize the node")
.spawn_process() .spawn_process()
@@ -106,8 +106,6 @@ pub struct LighthouseGethNode {
persistent_http_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>, persistent_http_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
persistent_ws_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>, persistent_ws_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
use_fallback_gas_filler: bool,
} }
impl LighthouseGethNode { impl LighthouseGethNode {
@@ -129,7 +127,6 @@ impl LighthouseGethNode {
+ AsRef<WalletConfiguration> + AsRef<WalletConfiguration>
+ AsRef<KurtosisConfiguration> + AsRef<KurtosisConfiguration>
+ Clone, + Clone,
use_fallback_gas_filler: bool,
) -> Self { ) -> Self {
let working_directory_configuration = let working_directory_configuration =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context); AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
@@ -179,7 +176,6 @@ impl LighthouseGethNode {
nonce_manager: Default::default(), nonce_manager: Default::default(),
persistent_http_provider: OnceCell::const_new(), persistent_http_provider: OnceCell::const_new(),
persistent_ws_provider: OnceCell::const_new(), persistent_ws_provider: OnceCell::const_new(),
use_fallback_gas_filler,
} }
} }
@@ -378,8 +374,7 @@ impl LighthouseGethNode {
.get_or_try_init(|| async move { .get_or_try_init(|| async move {
construct_concurrency_limited_provider::<Ethereum, _>( construct_concurrency_limited_provider::<Ethereum, _>(
self.ws_connection_string.as_str(), self.ws_connection_string.as_str(),
FallbackGasFiller::default() FallbackGasFiller::default(),
.with_use_fallback_gas_filler(self.use_fallback_gas_filler),
ChainIdFiller::new(Some(CHAIN_ID)), ChainIdFiller::new(Some(CHAIN_ID)),
NonceFiller::new(self.nonce_manager.clone()), NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(), self.wallet.clone(),
@@ -1157,7 +1152,7 @@ mod tests {
let _guard = NODE_START_MUTEX.lock().unwrap(); let _guard = NODE_START_MUTEX.lock().unwrap();
let context = test_config(); let context = test_config();
let mut node = LighthouseGethNode::new(&context, true); let mut node = LighthouseGethNode::new(&context);
node.init(context.genesis_configuration.genesis().unwrap().clone()) node.init(context.genesis_configuration.genesis().unwrap().clone())
.expect("Failed to initialize the node") .expect("Failed to initialize the node")
.spawn_process() .spawn_process()
@@ -79,7 +79,6 @@ pub struct SubstrateNode {
nonce_manager: CachedNonceManager, nonce_manager: CachedNonceManager,
provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>, provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
consensus: Option<String>, consensus: Option<String>,
use_fallback_gas_filler: bool,
} }
impl SubstrateNode { impl SubstrateNode {
@@ -106,7 +105,6 @@ impl SubstrateNode {
+ AsRef<EthRpcConfiguration> + AsRef<EthRpcConfiguration>
+ AsRef<WalletConfiguration>, + AsRef<WalletConfiguration>,
existing_connection_strings: &[String], existing_connection_strings: &[String],
use_fallback_gas_filler: bool,
) -> Self { ) -> Self {
let working_directory_path = let working_directory_path =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context).as_path(); AsRef::<WorkingDirectoryConfiguration>::as_ref(&context).as_path();
@@ -139,7 +137,6 @@ impl SubstrateNode {
nonce_manager: Default::default(), nonce_manager: Default::default(),
provider: Default::default(), provider: Default::default(),
consensus, consensus,
use_fallback_gas_filler,
} }
} }
@@ -327,12 +324,7 @@ impl SubstrateNode {
.get_or_try_init(|| async move { .get_or_try_init(|| async move {
construct_concurrency_limited_provider::<Ethereum, _>( construct_concurrency_limited_provider::<Ethereum, _>(
self.rpc_url.as_str(), self.rpc_url.as_str(),
FallbackGasFiller::new( FallbackGasFiller::new(u64::MAX, 50_000_000_000, 1_000_000_000),
u64::MAX,
50_000_000_000,
1_000_000_000,
self.use_fallback_gas_filler,
),
ChainIdFiller::new(Some(CHAIN_ID)), ChainIdFiller::new(Some(CHAIN_ID)),
NonceFiller::new(self.nonce_manager.clone()), NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(), self.wallet.clone(),
@@ -833,7 +825,6 @@ mod tests {
None, None,
&context, &context,
&[], &[],
true,
); );
node.init(context.genesis_configuration.genesis().unwrap().clone()) node.init(context.genesis_configuration.genesis().unwrap().clone())
.expect("Failed to initialize the node") .expect("Failed to initialize the node")
@@ -905,7 +896,6 @@ mod tests {
None, None,
&context, &context,
&[], &[],
true,
); );
// Call `init()` // Call `init()`
@@ -114,8 +114,6 @@ pub struct ZombienetNode {
nonce_manager: CachedNonceManager, nonce_manager: CachedNonceManager,
provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>, provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
use_fallback_gas_filler: bool,
} }
impl ZombienetNode { impl ZombienetNode {
@@ -139,7 +137,6 @@ impl ZombienetNode {
context: impl AsRef<WorkingDirectoryConfiguration> context: impl AsRef<WorkingDirectoryConfiguration>
+ AsRef<EthRpcConfiguration> + AsRef<EthRpcConfiguration>
+ AsRef<WalletConfiguration>, + AsRef<WalletConfiguration>,
use_fallback_gas_filler: bool,
) -> Self { ) -> Self {
let eth_proxy_binary = AsRef::<EthRpcConfiguration>::as_ref(&context) let eth_proxy_binary = AsRef::<EthRpcConfiguration>::as_ref(&context)
.path .path
@@ -167,7 +164,6 @@ impl ZombienetNode {
connection_string: String::new(), connection_string: String::new(),
node_rpc_port: None, node_rpc_port: None,
provider: Default::default(), provider: Default::default(),
use_fallback_gas_filler,
} }
} }
@@ -334,12 +330,7 @@ impl ZombienetNode {
.get_or_try_init(|| async move { .get_or_try_init(|| async move {
construct_concurrency_limited_provider::<Ethereum, _>( construct_concurrency_limited_provider::<Ethereum, _>(
self.connection_string.as_str(), self.connection_string.as_str(),
FallbackGasFiller::new( FallbackGasFiller::new(u64::MAX, 5_000_000_000, 1_000_000_000),
u64::MAX,
5_000_000_000,
1_000_000_000,
self.use_fallback_gas_filler,
),
ChainIdFiller::default(), // TODO: use CHAIN_ID constant ChainIdFiller::default(), // TODO: use CHAIN_ID constant
NonceFiller::new(self.nonce_manager.clone()), NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(), self.wallet.clone(),
@@ -832,7 +823,6 @@ mod tests {
let mut node = ZombienetNode::new( let mut node = ZombienetNode::new(
context.polkadot_parachain_configuration.path.clone(), context.polkadot_parachain_configuration.path.clone(),
&context, &context,
true,
); );
let genesis = context.genesis_configuration.genesis().unwrap().clone(); let genesis = context.genesis_configuration.genesis().unwrap().clone();
node.init(genesis).unwrap(); node.init(genesis).unwrap();
@@ -946,7 +936,6 @@ mod tests {
let node = ZombienetNode::new( let node = ZombienetNode::new(
context.polkadot_parachain_configuration.path.clone(), context.polkadot_parachain_configuration.path.clone(),
&context, &context,
true,
); );
// Act // Act
@@ -967,7 +956,6 @@ mod tests {
let node = ZombienetNode::new( let node = ZombienetNode::new(
context.polkadot_parachain_configuration.path.clone(), context.polkadot_parachain_configuration.path.clone(),
&context, &context,
true,
); );
// Act // Act
@@ -4,7 +4,7 @@ use alloy::{
Provider, SendableTx,
fillers::{GasFiller, TxFiller},
},
- transports::{TransportError, TransportResult},
+ transports::TransportResult,
};
// Percentage padding applied to estimated gas (e.g. 120 = 20% padding)
@@ -17,7 +17,6 @@ pub struct FallbackGasFiller {
default_gas_limit: u64,
default_max_fee_per_gas: u128,
default_priority_fee: u128,
- use_fallback_gas_filler: bool,
}
impl FallbackGasFiller {
@@ -25,41 +24,19 @@ impl FallbackGasFiller {
default_gas_limit: u64,
default_max_fee_per_gas: u128,
default_priority_fee: u128,
- use_fallback_gas_filler: bool,
) -> Self {
Self {
inner: GasFiller,
default_gas_limit,
default_max_fee_per_gas,
default_priority_fee,
- use_fallback_gas_filler,
}
}
- pub fn with_default_gas_limit(mut self, default_gas_limit: u64) -> Self {
- self.default_gas_limit = default_gas_limit;
- self
- }
- pub fn with_default_max_fee_per_gas(mut self, default_max_fee_per_gas: u128) -> Self {
- self.default_max_fee_per_gas = default_max_fee_per_gas;
- self
- }
- pub fn with_default_priority_fee(mut self, default_priority_fee: u128) -> Self {
- self.default_priority_fee = default_priority_fee;
- self
- }
- pub fn with_use_fallback_gas_filler(mut self, use_fallback_gas_filler: bool) -> Self {
- self.use_fallback_gas_filler = use_fallback_gas_filler;
- self
- }
}
impl Default for FallbackGasFiller {
fn default() -> Self {
- FallbackGasFiller::new(25_000_000, 1_000_000_000, 1_000_000_000, true)
+ FallbackGasFiller::new(25_000_000, 1_000_000_000, 1_000_000_000)
}
}
@@ -87,12 +64,7 @@ where
Ok(fill) => Ok(Some(fill)),
Err(err) => {
tracing::debug!(error = ?err, "Gas Provider Estimation Failed, using fallback");
- if !self.use_fallback_gas_filler {
- Err(err)
- } else {
- Ok(None)
- }
+ Ok(None)
}
}
}
@@ -114,17 +86,13 @@ where
}
}
Ok(tx)
- } else if self.use_fallback_gas_filler {
+ } else {
if let Some(builder) = tx.as_mut_builder() {
builder.set_gas_limit(self.default_gas_limit);
builder.set_max_fee_per_gas(self.default_max_fee_per_gas);
builder.set_max_priority_fee_per_gas(self.default_priority_fee);
}
Ok(tx)
- } else {
- Err(TransportError::UnsupportedFeature(
- "Fallback gas filler is disabled and we're attempting to do a gas estimate on a failing transaction",
- ))
}
}
}
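With the toggle gone, the filler's behavior reduces to: use the estimation when it succeeds, otherwise apply the configured defaults. A simplified sketch of that decision path is shown below, with plain types standing in for the alloy filler machinery.

```rust
// Illustrative sketch of the fallback decision after this change: a failed
// estimation no longer re-raises the error, the configured defaults are
// always applied instead. Not the actual alloy TxFiller implementation.
struct Fallback {
    gas_limit: u64,
    max_fee_per_gas: u128,
    priority_fee: u128,
}

struct Tx {
    gas_limit: Option<u64>,
    max_fee_per_gas: Option<u128>,
    priority_fee: Option<u128>,
}

impl Fallback {
    fn fill(&self, estimated: Result<(u64, u128, u128), String>, tx: &mut Tx) {
        match estimated {
            // Estimation succeeded: use the node-provided values.
            Ok((gas, max_fee, prio)) => {
                tx.gas_limit = Some(gas);
                tx.max_fee_per_gas = Some(max_fee);
                tx.priority_fee = Some(prio);
            }
            // Before this commit a disabled filler would return the error
            // here; now the defaults are used unconditionally.
            Err(_) => {
                tx.gas_limit = Some(self.gas_limit);
                tx.max_fee_per_gas = Some(self.max_fee_per_gas);
                tx.priority_fee = Some(self.priority_fee);
            }
        }
    }
}
```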
+3 -3
@@ -51,7 +51,7 @@ impl ReportAggregator {
}
}
- pub fn into_task(mut self) -> (Reporter, impl Future<Output = Result<Report>>) {
+ pub fn into_task(mut self) -> (Reporter, impl Future<Output = Result<()>>) {
let reporter = self
.runner_tx
.take()
@@ -60,7 +60,7 @@
(reporter, async move { self.aggregate().await })
}
- async fn aggregate(mut self) -> Result<Report> {
+ async fn aggregate(mut self) -> Result<()> {
debug!("Starting to aggregate report");
while let Some(event) = self.runner_rx.recv().await {
@@ -152,7 +152,7 @@
format!("Failed to serialize report JSON to {}", file_path.display())
})?;
- Ok(self.report)
+ Ok(())
}
fn handle_subscribe_to_events_event(&self, event: SubscribeToEventsEvent) {
+29 -98
@@ -28,7 +28,7 @@ from __future__ import annotations
import json import json
import sys import sys
import csv import csv
from typing import List, Mapping, TypedDict, no_type_check from typing import List, Mapping, TypedDict
class EthereumMinedBlockInformation(TypedDict): class EthereumMinedBlockInformation(TypedDict):
@@ -69,43 +69,7 @@ class MinedBlockInformation(TypedDict):
"""Block-level information for a mined block with both EVM and optional Substrate fields.""" """Block-level information for a mined block with both EVM and optional Substrate fields."""
ethereum_block_information: EthereumMinedBlockInformation ethereum_block_information: EthereumMinedBlockInformation
substrate_block_information: SubstrateMinedBlockInformation | None substrate_block_information: SubstrateMinedBlockInformation
def substrate_block_information_ref_time(
block: SubstrateMinedBlockInformation | None,
) -> int | None:
if block is None:
return None
else:
return block["ref_time"]
def substrate_block_information_max_ref_time(
block: SubstrateMinedBlockInformation | None,
) -> int | None:
if block is None:
return None
else:
return block["max_ref_time"]
def substrate_block_information_proof_size(
block: SubstrateMinedBlockInformation | None,
) -> int | None:
if block is None:
return None
else:
return block["proof_size"]
def substrate_block_information_max_proof_size(
block: SubstrateMinedBlockInformation | None,
) -> int | None:
if block is None:
return None
else:
return block["max_proof_size"]
class Metric(TypedDict): class Metric(TypedDict):
@@ -136,19 +100,8 @@ class Metrics(TypedDict):
transaction_per_second: Metric transaction_per_second: Metric
gas_per_second: Metric gas_per_second: Metric
gas_block_fullness: Metric gas_block_fullness: Metric
ref_time_block_fullness: Metric | None ref_time_block_fullness: Metric
proof_size_block_fullness: Metric | None proof_size_block_fullness: Metric
@no_type_check
def metrics_raw_item(
metrics: Metrics, name: str, target: str, index: int
) -> int | None:
l: list[int] = metrics.get(name, dict()).get("raw", dict()).get(target, dict())
try:
return l[index]
except:
return None
class ExecutionReport(TypedDict): class ExecutionReport(TypedDict):
@@ -191,15 +144,12 @@ BlockInformation = TypedDict(
"Transaction Count": int, "Transaction Count": int,
"TPS": int | None, "TPS": int | None,
"GPS": int | None, "GPS": int | None,
"Gas Mined": int, "Ref Time": int,
"Block Gas Limit": int, "Max Ref Time": int,
"Block Fullness Gas": float, "Block Fullness Ref Time": int,
"Ref Time": int | None, "Proof Size": int,
"Max Ref Time": int | None, "Max Proof Size": int,
"Block Fullness Ref Time": int | None, "Block Fullness Proof Size": int,
"Proof Size": int | None,
"Max Proof Size": int | None,
"Block Fullness Proof Size": int | None,
}, },
) )
"""A typed dictionary used to hold all of the block information""" """A typed dictionary used to hold all of the block information"""
@@ -225,7 +175,7 @@ def main() -> None:
report: ReportRoot = load_report(report_path) report: ReportRoot = load_report(report_path)
# TODO: Remove this in the future, but for now, the target is fixed. # TODO: Remove this in the future, but for now, the target is fixed.
target: str = sys.argv[2] target: str = "revive-dev-node-revm-solc"
csv_writer = csv.writer(sys.stdout) csv_writer = csv.writer(sys.stdout)
@@ -238,12 +188,6 @@ def main() -> None:
resolved_blocks: list[BlockInformation] = [] resolved_blocks: list[BlockInformation] = []
for i, block_information in enumerate(blocks_information): for i, block_information in enumerate(blocks_information):
mined_gas: int = block_information["ethereum_block_information"][
"mined_gas"
]
block_gas_limit: int = block_information[
"ethereum_block_information"
]["block_gas_limit"]
resolved_blocks.append( resolved_blocks.append(
{ {
"Block Number": block_information[ "Block Number": block_information[
@@ -272,37 +216,24 @@ def main() -> None:
"raw" "raw"
][target][i - 1] ][target][i - 1]
), ),
"Gas Mined": block_information[ "Ref Time": block_information[
"ethereum_block_information" "substrate_block_information"
]["mined_gas"], ]["ref_time"],
"Block Gas Limit": block_information[ "Max Ref Time": block_information[
"ethereum_block_information" "substrate_block_information"
]["block_gas_limit"], ]["max_ref_time"],
"Block Fullness Gas": mined_gas / block_gas_limit, "Block Fullness Ref Time": execution_report["metrics"][
"Ref Time": substrate_block_information_ref_time( "ref_time_block_fullness"
block_information["substrate_block_information"] ]["raw"][target][i],
), "Proof Size": block_information[
"Max Ref Time": substrate_block_information_max_ref_time( "substrate_block_information"
block_information["substrate_block_information"] ]["proof_size"],
), "Max Proof Size": block_information[
"Block Fullness Ref Time": metrics_raw_item( "substrate_block_information"
execution_report["metrics"], ]["max_proof_size"],
"ref_time_block_fullness", "Block Fullness Proof Size": execution_report["metrics"][
target, "proof_size_block_fullness"
i, ]["raw"][target][i],
),
"Proof Size": substrate_block_information_proof_size(
block_information["substrate_block_information"]
),
"Max Proof Size": substrate_block_information_max_proof_size(
block_information["substrate_block_information"]
),
"Block Fullness Proof Size": metrics_raw_item(
execution_report["metrics"],
"proof_size_block_fullness",
target,
i,
),
} }
) )
+36 -69
@@ -5,54 +5,51 @@ CI. The full models used in the JSON report can be found in the revive different
the models used in this script are just a partial reproduction of the full report models. the models used in this script are just a partial reproduction of the full report models.
""" """
import json, typing, io, sys from typing import TypedDict, Literal, Union
import json, io
class Report(typing.TypedDict): class Report(TypedDict):
context: "Context" context: "Context"
execution_information: dict["MetadataFilePathString", "MetadataFileReport"] execution_information: dict[
"MetadataFilePathString",
dict["ModeString", dict["CaseIdxString", "CaseReport"]],
]
class MetadataFileReport(typing.TypedDict): class Context(TypedDict):
case_reports: dict["CaseIdxString", "CaseReport"]
class CaseReport(typing.TypedDict):
mode_execution_reports: dict["ModeString", "ExecutionReport"]
class ExecutionReport(typing.TypedDict):
status: "TestCaseStatus"
class Context(typing.TypedDict):
Test: "TestContext" Test: "TestContext"
class TestContext(typing.TypedDict): class TestContext(TypedDict):
corpus_configuration: "CorpusConfiguration" corpus_configuration: "CorpusConfiguration"
class CorpusConfiguration(typing.TypedDict): class CorpusConfiguration(TypedDict):
test_specifiers: list["TestSpecifier"] test_specifiers: list["TestSpecifier"]
class CaseStatusSuccess(typing.TypedDict): class CaseReport(TypedDict):
status: typing.Literal["Succeeded"] status: "CaseStatus"
class CaseStatusSuccess(TypedDict):
status: Literal["Succeeded"]
steps_executed: int steps_executed: int
class CaseStatusFailure(typing.TypedDict): class CaseStatusFailure(TypedDict):
status: typing.Literal["Failed"] status: Literal["Failed"]
reason: str reason: str
class CaseStatusIgnored(typing.TypedDict): class CaseStatusIgnored(TypedDict):
status: typing.Literal["Ignored"] status: Literal["Ignored"]
reason: str reason: str
TestCaseStatus = typing.Union[CaseStatusSuccess, CaseStatusFailure, CaseStatusIgnored] CaseStatus = Union[CaseStatusSuccess, CaseStatusFailure, CaseStatusIgnored]
"""A union type of all of the possible statuses that could be reported for a case.""" """A union type of all of the possible statuses that could be reported for a case."""
TestSpecifier = str TestSpecifier = str
@@ -67,12 +64,6 @@ MetadataFilePathString = str
CaseIdxString = str CaseIdxString = str
"""The index of a case as a string. For example '0'""" """The index of a case as a string. For example '0'"""
PlatformString = typing.Union[
typing.Literal["revive-dev-node-revm-solc"],
typing.Literal["revive-dev-node-polkavm-resolc"],
]
"""A string of the platform on which the test was run"""
def path_relative_to_resolc_compiler_test_directory(path: str) -> str: def path_relative_to_resolc_compiler_test_directory(path: str) -> str:
""" """
@@ -87,22 +78,12 @@ def path_relative_to_resolc_compiler_test_directory(path: str) -> str:
def main() -> None: def main() -> None:
with open(sys.argv[1], "r") as file: with open("report.json", "r") as file:
report: Report = json.load(file) report: Report = json.load(file)
# Getting the platform string and resolving it into a simpler version of
# itself.
platform_identifier: PlatformString = typing.cast(PlatformString, sys.argv[2])
if platform_identifier == "revive-dev-node-polkavm-resolc":
platform: str = "PolkaVM"
elif platform_identifier == "revive-dev-node-revm-solc":
platform: str = "REVM"
else:
platform: str = platform_identifier
# Starting the markdown document and adding information to it as we go. # Starting the markdown document and adding information to it as we go.
markdown_document: io.TextIOWrapper = open("report.md", "w") markdown_document: io.TextIOWrapper = open("report.md", "w")
print(f"# Differential Tests Results ({platform})", file=markdown_document) print("# Differential Tests Results", file=markdown_document)
# Getting all of the test specifiers from the report and making them relative to the tests dir. # Getting all of the test specifiers from the report and making them relative to the tests dir.
test_specifiers: list[str] = list( test_specifiers: list[str] = list(
@@ -113,7 +94,7 @@ def main() -> None:
) )
print("## Specified Tests", file=markdown_document) print("## Specified Tests", file=markdown_document)
for test_specifier in test_specifiers: for test_specifier in test_specifiers:
print(f"* ``{test_specifier}``", file=markdown_document) print(f"* `{test_specifier}`", file=markdown_document)
# Counting the total number of test cases, successes, failures, and ignored tests # Counting the total number of test cases, successes, failures, and ignored tests
total_number_of_cases: int = 0 total_number_of_cases: int = 0
@@ -121,13 +102,9 @@ def main() -> None:
total_number_of_failures: int = 0 total_number_of_failures: int = 0
total_number_of_ignores: int = 0 total_number_of_ignores: int = 0
for _, mode_to_case_mapping in report["execution_information"].items(): for _, mode_to_case_mapping in report["execution_information"].items():
for _, case_idx_to_report_mapping in mode_to_case_mapping[ for _, case_idx_to_report_mapping in mode_to_case_mapping.items():
"case_reports" for _, case_report in case_idx_to_report_mapping.items():
].items(): status: CaseStatus = case_report["status"]
for _, execution_report in case_idx_to_report_mapping[
"mode_execution_reports"
].items():
status: TestCaseStatus = execution_report["status"]
total_number_of_cases += 1 total_number_of_cases += 1
if status["status"] == "Succeeded": if status["status"] == "Succeeded":
@@ -167,13 +144,9 @@ def main() -> None:
for metadata_file_path, mode_to_case_mapping in report[ for metadata_file_path, mode_to_case_mapping in report[
"execution_information" "execution_information"
].items(): ].items():
for case_idx_string, case_idx_to_report_mapping in mode_to_case_mapping[ for mode_string, case_idx_to_report_mapping in mode_to_case_mapping.items():
"case_reports" for case_idx_string, case_report in case_idx_to_report_mapping.items():
].items(): status: CaseStatus = case_report["status"]
for mode_string, execution_report in case_idx_to_report_mapping[
"mode_execution_reports"
].items():
status: TestCaseStatus = execution_report["status"]
metadata_file_path: str = ( metadata_file_path: str = (
path_relative_to_resolc_compiler_test_directory(metadata_file_path) path_relative_to_resolc_compiler_test_directory(metadata_file_path)
) )
@@ -210,13 +183,9 @@ def main() -> None:
for metadata_file_path, mode_to_case_mapping in report[ for metadata_file_path, mode_to_case_mapping in report[
"execution_information" "execution_information"
].items(): ].items():
for case_idx_string, case_idx_to_report_mapping in mode_to_case_mapping[ for mode_string, case_idx_to_report_mapping in mode_to_case_mapping.items():
"case_reports" for case_idx_string, case_report in case_idx_to_report_mapping.items():
].items(): status: CaseStatus = case_report["status"]
for mode_string, execution_report in case_idx_to_report_mapping[
"mode_execution_reports"
].items():
status: TestCaseStatus = execution_report["status"]
metadata_file_path: str = ( metadata_file_path: str = (
path_relative_to_resolc_compiler_test_directory(metadata_file_path) path_relative_to_resolc_compiler_test_directory(metadata_file_path)
) )
@@ -225,9 +194,7 @@ def main() -> None:
if status["status"] != "Failed": if status["status"] != "Failed":
continue continue
failure_reason: str = ( failure_reason: str = status["reason"].replace("\n", " ")
status["reason"].replace("\n", " ").replace("|", " ")
)
note: str = "" note: str = ""
modes_where_this_case_succeeded: set[ModeString] = ( modes_where_this_case_succeeded: set[ModeString] = (
@@ -245,7 +212,7 @@ def main() -> None:
f"{metadata_file_path}::{case_idx_string}::{mode_string}" f"{metadata_file_path}::{case_idx_string}::{mode_string}"
) )
print( print(
f"| ``{test_specifier}`` | ``{failure_reason}`` | {note} |", f"| `{test_specifier}` | `{failure_reason}` | {note} |",
file=markdown_document, file=markdown_document,
) )
print("\n\n</details>", file=markdown_document) print("\n\n</details>", file=markdown_document)