Compare commits

...

6 Commits

Author SHA1 Message Date
Omar Abdulla 61ec3de56a Update the FallbackGasFiller implementation 2026-01-12 03:44:43 +03:00
Omar 08c1572870 Added a CI action for running tests (#219)
* Add a CI action for running tests

* Update the CI action fixing incorrect matrix usage
2026-01-06 14:27:20 +00:00
Omar cd6b7969ac Update tests commit hash (#218) 2025-12-05 07:47:48 +00:00
Omar 78ac7ee381 Fix the Fallback Gas Limiter (#217)
* Add code to disable the fallback gas filler

* Allow benchmarks driver to await tx receipts

* Improve the transaction submission logic

* Update Python Script to process Geth benchmarks
2025-12-04 13:19:48 +00:00
Omar 3edaebdcae Cache the chainspec (#216) 2025-12-03 16:37:44 +00:00
Omar 66feb36b4e Update the number of cached blocks (#215)
* Update the commit hash of the tests

* Update the number of cached blocks in revive-dev-node
2025-11-25 12:06:07 +00:00
16 changed files with 620 additions and 191 deletions
@@ -0,0 +1,105 @@
name: "Run Revive Differential Tests"
description: "Builds and runs revive-differential-tests (retester) from this repo against the caller's Polkadot SDK."

# NOTE: composite-action inputs are always strings. The `type:` key is only
# valid for reusable-workflow (`workflow_call`) inputs and is rejected by
# action.yml validation, so it must not be used here. Boolean-like inputs are
# therefore declared and compared as the strings 'true'/'false'.
inputs:
  # Setup arguments & environment
  polkadot-sdk-path:
    description: "The path of the polkadot-sdk that should be compiled for the tests to run against."
    required: false
    default: "."
  cargo-command:
    description: "The cargo command to use in compilations and running of tests (e.g., forklift cargo)."
    required: false
    default: "cargo"
  revive-differential-tests-ref:
    description: "The branch, tag or SHA to checkout for the revive-differential-tests."
    required: false
    default: "main"
  resolc-version:
    description: "The version of resolc to install and use in tests."
    required: false
    default: "0.5.0"
  use-compilation-caches:
    description: "Controls if the compilation caches will be used for the test run or not ('true'/'false')."
    required: false
    default: "true"
  # Test Execution Arguments
  platform:
    description: "The identifier of the platform to run the tests on (e.g., geth-evm-solc, revive-dev-node-revm-solc)"
    required: true

runs:
  using: "composite"
  steps:
    - name: Checkout the Differential Tests Repository
      uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        repository: paritytech/revive-differential-tests
        ref: ${{ inputs['revive-differential-tests-ref'] }}
        path: revive-differential-tests
        submodules: recursive

    - name: Installing the Latest Resolc
      shell: bash
      # The published resolc binary is only built for x86_64 Linux (musl).
      if: ${{ runner.os == 'Linux' && runner.arch == 'X64' }}
      run: |
        VERSION="${{ inputs['resolc-version'] }}"
        ASSET_URL="https://github.com/paritytech/revive/releases/download/v$VERSION/resolc-x86_64-unknown-linux-musl"
        echo "Downloading resolc v$VERSION from $ASSET_URL"
        curl -Lsf --show-error -o resolc "$ASSET_URL"
        chmod +x resolc
        ./resolc --version

    - name: Installing Retester
      shell: bash
      run: ${{ inputs['cargo-command'] }} install --locked --path revive-differential-tests/crates/core

    - name: Creating a workdir for retester
      shell: bash
      run: mkdir workdir

    - name: Downloading & Initializing the compilation caches
      shell: bash
      # Inputs are strings, so the comparison must be against the string
      # 'true' — comparing against the boolean `true` is always false.
      if: ${{ inputs['use-compilation-caches'] == 'true' }}
      run: |
        curl -fL --retry 3 --retry-all-errors --connect-timeout 10 -o cache.tar.gz "https://github.com/paritytech/revive-differential-tests/releases/download/compilation-caches-v1.1/cache.tar.gz"
        tar -zxf cache.tar.gz -C ./workdir > /dev/null 2>&1

    - name: Building the dependencies from the Polkadot SDK
      shell: bash
      run: ${{ inputs['cargo-command'] }} build --locked --profile release -p pallet-revive-eth-rpc -p revive-dev-node --manifest-path ${{ inputs['polkadot-sdk-path'] }}/Cargo.toml

    - name: Running the Differential Tests
      shell: bash
      run: |
        ${{ inputs['cargo-command'] }} run --locked --manifest-path revive-differential-tests/Cargo.toml -- test \
          --test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/simple \
          --test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/complex \
          --test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/translated_semantic_tests \
          --platform ${{ inputs['platform'] }} \
          --concurrency.number-of-nodes 10 \
          --concurrency.number-of-threads 10 \
          --concurrency.number-of-concurrent-tasks 100 \
          --working-directory ./workdir \
          --revive-dev-node.consensus manual-seal-200 \
          --revive-dev-node.path ${{ inputs['polkadot-sdk-path'] }}/target/release/revive-dev-node \
          --eth-rpc.path ${{ inputs['polkadot-sdk-path'] }}/target/release/eth-rpc \
          --resolc.path ./resolc

    - name: Creating a markdown report of the test execution
      shell: bash
      if: ${{ always() }}
      run: |
        mv ./workdir/*.json report.json
        python3 revive-differential-tests/scripts/process-differential-tests-report.py report.json ${{ inputs['platform'] }}

    - name: Upload the Report to the CI
      uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
      if: ${{ always() }}
      with:
        name: report-${{ inputs['platform'] }}.md
        path: report.md

    - name: Posting the report as a comment on the PR
      uses: marocchino/sticky-pull-request-comment@773744901bac0e8cbb5a0dc842800d45e9b2b405
      if: ${{ always() }}
      with:
        header: diff-tests-report-${{ inputs['platform'] }}
        path: report.md
+17
View File
@@ -375,6 +375,23 @@ pub struct BenchmarkingContext {
#[arg(short = 'r', long = "default-repetition-count", default_value_t = 1000)]
pub default_repetition_count: usize,
/// This argument controls whether the benchmarking driver should wait for transactions to
/// be included in a block before moving on to the next transaction in the sequence or not.
///
/// This behavior is useful in certain cases and not so useful in others. For example, within a
/// repetition block, if there's some kind of relationship between txs n and n+1 (for example a
/// mint then a transfer) then you would want to wait for the minting to happen and then move on
/// to the transfers. On the other hand, if there's no relationship between the transactions n
/// and n+1 (e.g., mint and another mint of a different token) then awaiting the first mint to
/// be included in a block might not seem necessary.
///
/// By default, this behavior is set to false to allow the benchmarking framework to saturate
/// the node's mempool as quickly as possible. However, as explained above, there are cases
/// where it's needed and certain workloads where failure to provide this argument would lead to
/// inaccurate results.
#[arg(long)]
pub await_transaction_inclusion: bool,
/// Configuration parameters for the corpus files to use.
#[clap(flatten, next_help_heading = "Corpus Configuration")]
pub corpus_configuration: CorpusConfiguration,
@@ -1,6 +1,5 @@
use std::{
collections::HashMap,
ops::ControlFlow,
sync::{
Arc,
atomic::{AtomicUsize, Ordering},
@@ -13,6 +12,7 @@ use alloy::{
json_abi::JsonAbi,
network::{Ethereum, TransactionBuilder},
primitives::{Address, TxHash, U256},
providers::Provider,
rpc::types::{
TransactionReceipt, TransactionRequest,
trace::geth::{
@@ -22,12 +22,9 @@ use alloy::{
},
};
use anyhow::{Context as _, Result, bail};
use futures::TryFutureExt;
use futures::{FutureExt as _, TryFutureExt};
use indexmap::IndexMap;
use revive_dt_common::{
futures::{PollingWaitBehavior, poll},
types::PrivateKeyAllocator,
};
use revive_dt_common::types::PrivateKeyAllocator;
use revive_dt_format::{
metadata::{ContractInstance, ContractPathAndIdent},
steps::{
@@ -37,7 +34,7 @@ use revive_dt_format::{
traits::{ResolutionContext, ResolverApi},
};
use tokio::sync::{Mutex, OnceCell, mpsc::UnboundedSender};
use tracing::{Instrument, Span, debug, error, field::display, info, info_span, instrument};
use tracing::{Span, debug, error, field::display, info, instrument};
use crate::{
differential_benchmarks::{ExecutionState, WatcherEvent},
@@ -73,6 +70,10 @@ pub struct Driver<'a, I> {
/// The number of steps that were executed on the driver.
steps_executed: usize,
/// This flag controls whether the driver should wait for transactions to be included in a block
/// or not before proceeding forward.
await_transaction_inclusion: bool,
/// This is the queue of steps that are to be executed by the driver for this test case. Each
/// time `execute_step` is called one of the steps is executed.
steps_iterator: I,
@@ -89,6 +90,7 @@ where
private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
cached_compiler: &CachedCompiler<'a>,
watcher_tx: UnboundedSender<WatcherEvent>,
await_transaction_inclusion: bool,
steps: I,
) -> Result<Self> {
let mut this = Driver {
@@ -104,6 +106,7 @@ where
execution_state: ExecutionState::empty(),
steps_executed: 0,
steps_iterator: steps,
await_transaction_inclusion,
watcher_tx,
};
this.init_execution_state(cached_compiler)
@@ -166,7 +169,7 @@ where
code,
);
let receipt = self
.execute_transaction(tx, None)
.execute_transaction(tx, None, Duration::from_secs(5 * 60))
.and_then(|(_, receipt_fut)| receipt_fut)
.await
.inspect_err(|err| {
@@ -365,7 +368,30 @@ where
let tx = step
.as_transaction(self.resolver.as_ref(), self.default_resolution_context())
.await?;
Ok(self.execute_transaction(tx, Some(step_path)).await?.0)
let (tx_hash, receipt_future) = self
.execute_transaction(tx.clone(), Some(step_path), Duration::from_secs(30 * 60))
.await?;
if self.await_transaction_inclusion {
let receipt = receipt_future
.await
.context("Failed while waiting for transaction inclusion in block")?;
if !receipt.status() {
error!(
?tx,
tx.hash = %receipt.transaction_hash,
?receipt,
"Encountered a failing benchmark transaction"
);
bail!(
"Encountered a failing transaction in benchmarks: {}",
receipt.transaction_hash
)
}
}
Ok(tx_hash)
}
}
}
@@ -466,6 +492,7 @@ where
.collect::<Vec<_>>();
steps.into_iter()
},
await_transaction_inclusion: self.await_transaction_inclusion,
watcher_tx: self.watcher_tx.clone(),
})
.map(|driver| driver.execute_all());
@@ -632,7 +659,7 @@ where
};
let receipt = match self
.execute_transaction(tx, step_path)
.execute_transaction(tx, step_path, Duration::from_secs(5 * 60))
.and_then(|(_, receipt_fut)| receipt_fut)
.await
{
@@ -677,18 +704,33 @@ where
#[instrument(
level = "info",
skip_all,
fields(driver_id = self.driver_id, transaction_hash = tracing::field::Empty)
fields(
driver_id = self.driver_id,
transaction = ?transaction,
transaction_hash = tracing::field::Empty
),
err(Debug)
)]
async fn execute_transaction(
&self,
transaction: TransactionRequest,
step_path: Option<&StepPath>,
receipt_wait_duration: Duration,
) -> anyhow::Result<(TxHash, impl Future<Output = Result<TransactionReceipt>>)> {
let node = self.platform_information.node;
let transaction_hash = node
.submit_transaction(transaction)
let provider = node.provider().await.context("Creating provider failed")?;
let pending_transaction_builder = provider
.send_transaction(transaction)
.await
.context("Failed to submit transaction")?;
let transaction_hash = *pending_transaction_builder.tx_hash();
let receipt_future = pending_transaction_builder
.with_timeout(Some(receipt_wait_duration))
.with_required_confirmations(2)
.get_receipt()
.map(|res| res.context("Failed to get the receipt of the transaction"));
Span::current().record("transaction_hash", display(transaction_hash));
info!("Submitted transaction");
@@ -701,28 +743,7 @@ where
.context("Failed to send the transaction hash to the watcher")?;
};
Ok((transaction_hash, async move {
info!("Starting to poll for transaction receipt");
poll(
Duration::from_secs(30 * 60),
PollingWaitBehavior::Constant(Duration::from_secs(1)),
|| {
async move {
match node.get_receipt(transaction_hash).await {
Ok(receipt) => {
info!("Polling succeeded, receipt found");
Ok(ControlFlow::Break(receipt))
}
Err(_) => Ok(ControlFlow::Continue(())),
}
}
.instrument(info_span!("Polling for receipt"))
},
)
.instrument(info_span!("Polling for receipt", %transaction_hash))
.await
.inspect(|_| info!("Found the transaction receipt"))
}))
Ok((transaction_hash, receipt_future))
}
// endregion:Transaction Execution
}
@@ -160,6 +160,7 @@ pub async fn handle_differential_benchmarks(
private_key_allocator,
cached_compiler.as_ref(),
watcher_tx.clone(),
context.await_transaction_inclusion,
test_definition
.case
.steps_iterator_for_benchmarks(context.default_repetition_count)
@@ -139,23 +139,18 @@ impl Watcher {
break;
}
info!(
block_number = block.ethereum_block_information.block_number,
block_tx_count = block.ethereum_block_information.transaction_hashes.len(),
remaining_transactions = watch_for_transaction_hashes.read().await.len(),
"Observed a block"
);
// Remove all of the transaction hashes observed in this block from the txs we
// are currently watching for.
let mut watch_for_transaction_hashes =
watch_for_transaction_hashes.write().await;
let mut relevant_transactions_observed = 0;
for tx_hash in block.ethereum_block_information.transaction_hashes.iter() {
let Some((step_path, submission_time)) =
watch_for_transaction_hashes.remove(tx_hash)
else {
continue;
};
relevant_transactions_observed += 1;
let transaction_information = TransactionInformation {
transaction_hash: *tx_hash,
submission_timestamp: submission_time
@@ -172,6 +167,14 @@ impl Watcher {
)
.expect("Can't fail")
}
info!(
block_number = block.ethereum_block_information.block_number,
block_tx_count = block.ethereum_block_information.transaction_hashes.len(),
relevant_transactions_observed,
remaining_transactions = watch_for_transaction_hashes.len(),
"Observed a block"
);
}
info!("Watcher's Block Watching Task Finished");
+14 -4
View File
@@ -91,7 +91,8 @@ impl Platform for GethEvmSolcPlatform {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let node = GethNode::new(context);
let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node = GethNode::new(context, use_fallback_gas_filler);
let node = spawn_node::<GethNode>(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
@@ -145,7 +146,8 @@ impl Platform for LighthouseGethEvmSolcPlatform {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let node = LighthouseGethNode::new(context);
let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node = LighthouseGethNode::new(context, use_fallback_gas_filler);
let node = spawn_node::<LighthouseGethNode>(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
@@ -206,12 +208,14 @@ impl Platform for ReviveDevNodePolkavmResolcPlatform {
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node = SubstrateNode::new(
revive_dev_node_path,
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
Some(revive_dev_node_consensus),
context,
&eth_rpc_connection_strings,
use_fallback_gas_filler,
);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
@@ -274,12 +278,14 @@ impl Platform for ReviveDevNodeRevmSolcPlatform {
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node = SubstrateNode::new(
revive_dev_node_path,
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
Some(revive_dev_node_consensus),
context,
&eth_rpc_connection_strings,
use_fallback_gas_filler,
);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
@@ -338,7 +344,9 @@ impl Platform for ZombienetPolkavmResolcPlatform {
.clone();
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let node = ZombienetNode::new(polkadot_parachain_path, context);
let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node =
ZombienetNode::new(polkadot_parachain_path, context, use_fallback_gas_filler);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
@@ -395,7 +403,9 @@ impl Platform for ZombienetRevmSolcPlatform {
.clone();
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let node = ZombienetNode::new(polkadot_parachain_path, context);
let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node =
ZombienetNode::new(polkadot_parachain_path, context, use_fallback_gas_filler);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
+37 -6
View File
@@ -2,9 +2,9 @@ mod differential_benchmarks;
mod differential_tests;
mod helpers;
use anyhow::Context as _;
use anyhow::{Context as _, bail};
use clap::Parser;
use revive_dt_report::ReportAggregator;
use revive_dt_report::{ReportAggregator, TestCaseStatus};
use schemars::schema_for;
use tracing::{info, level_filters::LevelFilter};
use tracing_subscriber::{EnvFilter, FmtSubscriber};
@@ -57,8 +57,22 @@ fn main() -> anyhow::Result<()> {
let differential_tests_handling_task =
handle_differential_tests(*context, reporter);
futures::future::try_join(differential_tests_handling_task, report_aggregator_task)
.await?;
let (_, report) = futures::future::try_join(
differential_tests_handling_task,
report_aggregator_task,
)
.await?;
let contains_failure = report
.execution_information
.values()
.flat_map(|values| values.case_reports.values())
.flat_map(|values| values.mode_execution_reports.values())
.any(|report| matches!(report.status, Some(TestCaseStatus::Failed { .. })));
if contains_failure {
bail!("Some tests failed")
}
Ok(())
}),
@@ -71,12 +85,23 @@ fn main() -> anyhow::Result<()> {
let differential_benchmarks_handling_task =
handle_differential_benchmarks(*context, reporter);
futures::future::try_join(
let (_, report) = futures::future::try_join(
differential_benchmarks_handling_task,
report_aggregator_task,
)
.await?;
let contains_failure = report
.execution_information
.values()
.flat_map(|values| values.case_reports.values())
.flat_map(|values| values.mode_execution_reports.values())
.any(|report| matches!(report.status, Some(TestCaseStatus::Failed { .. })));
if contains_failure {
bail!("Some benchmarks failed")
}
Ok(())
}),
Context::ExportGenesis(ref export_genesis_context) => {
@@ -85,11 +110,17 @@ fn main() -> anyhow::Result<()> {
let genesis_json = serde_json::to_string_pretty(&genesis)
.context("Failed to serialize the genesis to JSON")?;
println!("{genesis_json}");
Ok(())
}
Context::ExportJsonSchema => {
let schema = schema_for!(Metadata);
println!("{}", serde_json::to_string_pretty(&schema).unwrap());
println!(
"{}",
serde_json::to_string_pretty(&schema)
.context("Failed to export the JSON schema")?
);
Ok(())
}
}
+6 -2
View File
@@ -76,6 +76,7 @@ pub struct GethNode {
wallet: Arc<EthereumWallet>,
nonce_manager: CachedNonceManager,
provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
use_fallback_gas_filler: bool,
}
impl GethNode {
@@ -100,6 +101,7 @@ impl GethNode {
+ AsRef<WalletConfiguration>
+ AsRef<GethConfiguration>
+ Clone,
use_fallback_gas_filler: bool,
) -> Self {
let working_directory_configuration =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
@@ -126,6 +128,7 @@ impl GethNode {
wallet: wallet.clone(),
nonce_manager: Default::default(),
provider: Default::default(),
use_fallback_gas_filler,
}
}
@@ -246,7 +249,8 @@ impl GethNode {
.get_or_try_init(|| async move {
construct_concurrency_limited_provider::<Ethereum, _>(
self.connection_string.as_str(),
FallbackGasFiller::default(),
FallbackGasFiller::default()
.with_fallback_mechanism(self.use_fallback_gas_filler),
ChainIdFiller::new(Some(CHAIN_ID)),
NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(),
@@ -742,7 +746,7 @@ mod tests {
fn new_node() -> (TestExecutionContext, GethNode) {
let context = test_config();
let mut node = GethNode::new(&context);
let mut node = GethNode::new(&context, true);
node.init(context.genesis_configuration.genesis().unwrap().clone())
.expect("Failed to initialize the node")
.spawn_process()
@@ -106,6 +106,8 @@ pub struct LighthouseGethNode {
persistent_http_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
persistent_ws_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
use_fallback_gas_filler: bool,
}
impl LighthouseGethNode {
@@ -127,6 +129,7 @@ impl LighthouseGethNode {
+ AsRef<WalletConfiguration>
+ AsRef<KurtosisConfiguration>
+ Clone,
use_fallback_gas_filler: bool,
) -> Self {
let working_directory_configuration =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
@@ -176,6 +179,7 @@ impl LighthouseGethNode {
nonce_manager: Default::default(),
persistent_http_provider: OnceCell::const_new(),
persistent_ws_provider: OnceCell::const_new(),
use_fallback_gas_filler,
}
}
@@ -374,7 +378,8 @@ impl LighthouseGethNode {
.get_or_try_init(|| async move {
construct_concurrency_limited_provider::<Ethereum, _>(
self.ws_connection_string.as_str(),
FallbackGasFiller::default(),
FallbackGasFiller::default()
.with_fallback_mechanism(self.use_fallback_gas_filler),
ChainIdFiller::new(Some(CHAIN_ID)),
NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(),
@@ -1152,7 +1157,7 @@ mod tests {
let _guard = NODE_START_MUTEX.lock().unwrap();
let context = test_config();
let mut node = LighthouseGethNode::new(&context);
let mut node = LighthouseGethNode::new(&context, true);
node.init(context.genesis_configuration.genesis().unwrap().clone())
.expect("Failed to initialize the node")
.spawn_process()
@@ -4,7 +4,7 @@ use std::{
pin::Pin,
process::{Command, Stdio},
sync::{
Arc,
Arc, Mutex,
atomic::{AtomicU32, Ordering},
},
time::Duration,
@@ -32,7 +32,7 @@ use futures::{FutureExt, Stream, StreamExt};
use revive_common::EVMVersion;
use revive_dt_common::fs::clear_directory;
use revive_dt_format::traits::ResolverApi;
use serde_json::json;
use serde_json::{Value, json};
use sp_core::crypto::Ss58Codec;
use sp_runtime::AccountId32;
@@ -47,7 +47,7 @@ use tracing::{instrument, trace};
use crate::{
Node,
constants::{CHAIN_ID, INITIAL_BALANCE},
constants::INITIAL_BALANCE,
helpers::{Process, ProcessReadinessWaitBehavior},
provider_utils::{
ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider,
@@ -57,6 +57,9 @@ use crate::{
static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
/// The number of blocks that should be cached by the revive-dev-node and the eth-rpc.
const NUMBER_OF_CACHED_BLOCKS: u32 = 100_000;
/// A node implementation for Substrate based chains. Currently, this supports either substrate
/// or the revive-dev-node which is done by changing the path and some of the other arguments passed
/// to the command.
@@ -76,6 +79,7 @@ pub struct SubstrateNode {
nonce_manager: CachedNonceManager,
provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
consensus: Option<String>,
use_fallback_gas_filler: bool,
}
impl SubstrateNode {
@@ -102,6 +106,7 @@ impl SubstrateNode {
+ AsRef<EthRpcConfiguration>
+ AsRef<WalletConfiguration>,
existing_connection_strings: &[String],
use_fallback_gas_filler: bool,
) -> Self {
let working_directory_path =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context).as_path();
@@ -134,10 +139,13 @@ impl SubstrateNode {
nonce_manager: Default::default(),
provider: Default::default(),
consensus,
use_fallback_gas_filler,
}
}
fn init(&mut self, _: Genesis) -> anyhow::Result<&mut Self> {
static CHAINSPEC_MUTEX: Mutex<Option<Value>> = Mutex::new(None);
if !self.rpc_url.is_empty() {
return Ok(self);
}
@@ -156,12 +164,22 @@ impl SubstrateNode {
let template_chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE);
trace!("Creating the node genesis");
let chainspec_json = Self::node_genesis(
&self.node_binary,
&self.export_chainspec_command,
&self.wallet,
)
.context("Failed to prepare the chainspec command")?;
let chainspec_json = {
let mut chainspec_mutex = CHAINSPEC_MUTEX.lock().expect("Poisoned");
match chainspec_mutex.as_ref() {
Some(chainspec_json) => chainspec_json.clone(),
None => {
let chainspec_json = Self::node_genesis(
&self.node_binary,
&self.export_chainspec_command,
&self.wallet,
)
.context("Failed to prepare the chainspec command")?;
*chainspec_mutex = Some(chainspec_json.clone());
chainspec_json
}
}
};
trace!("Writing the node genesis");
serde_json::to_writer_pretty(
@@ -212,6 +230,8 @@ impl SubstrateNode {
.arg(u32::MAX.to_string())
.arg("--pool-kbytes")
.arg(u32::MAX.to_string())
.arg("--state-pruning")
.arg(NUMBER_OF_CACHED_BLOCKS.to_string())
.env("RUST_LOG", Self::SUBSTRATE_LOG_ENV)
.stdout(stdout_file)
.stderr(stderr_file);
@@ -252,9 +272,9 @@ impl SubstrateNode {
.arg("--rpc-max-connections")
.arg(u32::MAX.to_string())
.arg("--index-last-n-blocks")
.arg(1_000u32.to_string())
.arg(NUMBER_OF_CACHED_BLOCKS.to_string())
.arg("--cache-size")
.arg(1_000u32.to_string())
.arg(NUMBER_OF_CACHED_BLOCKS.to_string())
.env("RUST_LOG", Self::PROXY_LOG_ENV)
.stdout(stdout_file)
.stderr(stderr_file);
@@ -307,8 +327,9 @@ impl SubstrateNode {
.get_or_try_init(|| async move {
construct_concurrency_limited_provider::<Ethereum, _>(
self.rpc_url.as_str(),
FallbackGasFiller::new(u64::MAX, 50_000_000_000, 1_000_000_000),
ChainIdFiller::new(Some(CHAIN_ID)),
FallbackGasFiller::default()
.with_fallback_mechanism(self.use_fallback_gas_filler),
ChainIdFiller::default(),
NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(),
)
@@ -808,6 +829,7 @@ mod tests {
None,
&context,
&[],
true,
);
node.init(context.genesis_configuration.genesis().unwrap().clone())
.expect("Failed to initialize the node")
@@ -879,6 +901,7 @@ mod tests {
None,
&context,
&[],
true,
);
// Call `init()`
@@ -114,6 +114,8 @@ pub struct ZombienetNode {
nonce_manager: CachedNonceManager,
provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
use_fallback_gas_filler: bool,
}
impl ZombienetNode {
@@ -137,6 +139,7 @@ impl ZombienetNode {
context: impl AsRef<WorkingDirectoryConfiguration>
+ AsRef<EthRpcConfiguration>
+ AsRef<WalletConfiguration>,
use_fallback_gas_filler: bool,
) -> Self {
let eth_proxy_binary = AsRef::<EthRpcConfiguration>::as_ref(&context)
.path
@@ -164,6 +167,7 @@ impl ZombienetNode {
connection_string: String::new(),
node_rpc_port: None,
provider: Default::default(),
use_fallback_gas_filler,
}
}
@@ -330,7 +334,8 @@ impl ZombienetNode {
.get_or_try_init(|| async move {
construct_concurrency_limited_provider::<Ethereum, _>(
self.connection_string.as_str(),
FallbackGasFiller::new(u64::MAX, 5_000_000_000, 1_000_000_000),
FallbackGasFiller::default()
.with_fallback_mechanism(self.use_fallback_gas_filler),
ChainIdFiller::default(), // TODO: use CHAIN_ID constant
NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(),
@@ -823,6 +828,7 @@ mod tests {
let mut node = ZombienetNode::new(
context.polkadot_parachain_configuration.path.clone(),
&context,
true,
);
let genesis = context.genesis_configuration.genesis().unwrap().clone();
node.init(genesis).unwrap();
@@ -936,6 +942,7 @@ mod tests {
let node = ZombienetNode::new(
context.polkadot_parachain_configuration.path.clone(),
&context,
true,
);
// Act
@@ -956,6 +963,7 @@ mod tests {
let node = ZombienetNode::new(
context.polkadot_parachain_configuration.path.clone(),
&context,
true,
);
// Act
@@ -1,42 +1,71 @@
use std::{borrow::Cow, fmt::Display};
use alloy::{
eips::BlockNumberOrTag,
network::{Network, TransactionBuilder},
providers::{
Provider, SendableTx,
fillers::{GasFiller, TxFiller},
ext::DebugApi,
fillers::{GasFillable, GasFiller, TxFiller},
},
transports::TransportResult,
rpc::types::trace::geth::{
GethDebugBuiltInTracerType, GethDebugTracerType, GethDebugTracingCallOptions,
GethDebugTracingOptions,
},
transports::{RpcError, TransportResult},
};
// Percentage padding applied to estimated gas (e.g. 120 = 20% padding)
const GAS_ESTIMATE_PADDING_NUMERATOR: u64 = 120;
const GAS_ESTIMATE_PADDING_DENOMINATOR: u64 = 100;
#[derive(Clone, Debug)]
/// An implementation of [`GasFiller`] with a fallback mechanism for reverting transactions.
///
/// This struct provides a fallback mechanism for alloy's [`GasFiller`] which kicks in when a
/// transaction's dry run fails due to it reverting allowing us to get gas estimates even for
/// failing transactions. In this codebase, this is very important since the MatterLabs tests
/// expect some transactions in the test suite revert. Since we're expected to run a number of
/// assertions on these reverting transactions we must commit them to the ledger.
///
/// Therefore, this struct does the following:
///
/// 1. It first attempts to estimate the gas through the mechanism implemented in the [`GasFiller`].
/// 2. If it fails, then we perform a debug trace of the transaction to find out how much gas the
/// transaction needs until it reverts.
/// 3. We fill in these values (either the success or failure case) into the transaction.
///
/// The fallback mechanism of this filler can be completely disabled if we don't want it to be used.
/// In that case, this gas filler will act in an identical way to alloy's [`GasFiller`].
///
/// We then fill in these values into the transaction.
///
/// The previous implementation of this fallback gas filler relied on making use of default values
/// for the gas limit in order to be able to submit the reverting transactions to the network. But,
/// it introduced a number of issues that we weren't anticipating at the time when it was built.
#[derive(Clone, Copy, Debug)]
pub struct FallbackGasFiller {
/// The inner [`GasFiller`] which we pass all of the calls to in the happy path.
inner: GasFiller,
default_gas_limit: u64,
default_max_fee_per_gas: u128,
default_priority_fee: u128,
/// A [`bool`] that controls if the fallback mechanism is enabled or not.
enable_fallback_mechanism: bool,
}
impl FallbackGasFiller {
pub fn new(
default_gas_limit: u64,
default_max_fee_per_gas: u128,
default_priority_fee: u128,
) -> Self {
pub fn new() -> Self {
Self {
inner: GasFiller,
default_gas_limit,
default_max_fee_per_gas,
default_priority_fee,
inner: Default::default(),
enable_fallback_mechanism: true,
}
}
}
impl Default for FallbackGasFiller {
fn default() -> Self {
FallbackGasFiller::new(25_000_000, 1_000_000_000, 1_000_000_000)
pub fn with_fallback_mechanism(mut self, enable: bool) -> Self {
self.enable_fallback_mechanism = enable;
self
}
pub fn with_fallback_mechanism_enabled(self) -> Self {
self.with_fallback_mechanism(true)
}
pub fn with_fallback_mechanism_disabled(self) -> Self {
self.with_fallback_mechanism(false)
}
}
@@ -44,27 +73,89 @@ impl<N> TxFiller<N> for FallbackGasFiller
where
N: Network,
{
type Fillable = Option<<GasFiller as TxFiller<N>>::Fillable>;
type Fillable = <GasFiller as TxFiller<N>>::Fillable;
fn status(
&self,
tx: &<N as Network>::TransactionRequest,
) -> alloy::providers::fillers::FillerControlFlow {
<GasFiller as TxFiller<N>>::status(&self.inner, tx)
TxFiller::<N>::status(&self.inner, tx)
}
fn fill_sync(&self, _: &mut alloy::providers::SendableTx<N>) {}
fn fill_sync(&self, _: &mut SendableTx<N>) {}
async fn prepare<P: Provider<N>>(
&self,
provider: &P,
tx: &<N as Network>::TransactionRequest,
) -> TransportResult<Self::Fillable> {
match self.inner.prepare(provider, tx).await {
Ok(fill) => Ok(Some(fill)),
Err(err) => {
tracing::debug!(error = ?err, "Gas Provider Estimation Failed, using fallback");
Ok(None)
match (
self.inner.prepare(provider, tx).await,
self.enable_fallback_mechanism,
) {
// Return the result as-is if either the call succeeds, or if the call fails and the
// fallback mechanism is disabled.
(rtn @ Ok(..), ..) | (rtn @ Err(..), false) => rtn,
(Err(..), true) => {
// Perform a trace of the transaction.
let trace = provider
.debug_trace_call(
tx.clone(),
BlockNumberOrTag::Latest.into(),
GethDebugTracingCallOptions {
tracing_options: GethDebugTracingOptions {
tracer: Some(GethDebugTracerType::BuiltInTracer(
GethDebugBuiltInTracerType::CallTracer,
)),
..Default::default()
},
state_overrides: Default::default(),
block_overrides: Default::default(),
},
)
.await?
.try_into_call_frame()
.map_err(|err| {
RpcError::LocalUsageError(
FallbackGasFillerError::new(format!(
"Expected a callframe trace, but got: {err:?}"
))
.boxed(),
)
})?;
let gas_used = u64::try_from(trace.gas_used).map_err(|_| {
RpcError::LocalUsageError(
FallbackGasFillerError::new(
"Transaction trace returned a value of gas used that exceeds u64",
)
.boxed(),
)
})?;
let gas_limit = gas_used.saturating_mul(120) / 100;
if let Some(gas_price) = tx.gas_price() {
return Ok(GasFillable::Legacy {
gas_limit,
gas_price,
});
}
let estimate = if let (Some(max_fee_per_gas), Some(max_priority_fee_per_gas)) =
(tx.max_fee_per_gas(), tx.max_priority_fee_per_gas())
{
alloy::eips::eip1559::Eip1559Estimation {
max_fee_per_gas,
max_priority_fee_per_gas,
}
} else {
provider.estimate_eip1559_fees().await?
};
Ok(GasFillable::Eip1559 {
gas_limit,
estimate,
})
}
}
}
@@ -72,27 +163,35 @@ where
async fn fill(
&self,
fillable: Self::Fillable,
mut tx: alloy::providers::SendableTx<N>,
tx: SendableTx<N>,
) -> TransportResult<SendableTx<N>> {
if let Some(fill) = fillable {
let mut tx = self.inner.fill(fill, tx).await?;
if let Some(builder) = tx.as_mut_builder() {
if let Some(estimated) = builder.gas_limit() {
let padded = estimated
.checked_mul(GAS_ESTIMATE_PADDING_NUMERATOR)
.and_then(|v| v.checked_div(GAS_ESTIMATE_PADDING_DENOMINATOR))
.unwrap_or(u64::MAX);
builder.set_gas_limit(padded);
}
}
Ok(tx)
} else {
if let Some(builder) = tx.as_mut_builder() {
builder.set_gas_limit(self.default_gas_limit);
builder.set_max_fee_per_gas(self.default_max_fee_per_gas);
builder.set_max_priority_fee_per_gas(self.default_priority_fee);
}
Ok(tx)
}
self.inner.fill(fillable, tx).await
}
}
// The default filler simply delegates to `new()`; any fallback-mechanism
// toggling is applied afterwards via the `with_fallback_mechanism*` builders.
impl Default for FallbackGasFiller {
    fn default() -> Self {
        Self::new()
    }
}
/// Internal error raised when the fallback gas-estimation path cannot
/// interpret the data returned by the node (e.g. an unexpected trace shape).
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct FallbackGasFillerError(Cow<'static, str>);

impl FallbackGasFillerError {
    /// Wraps the given message (owned or static) into an error value.
    pub fn new(message: impl Into<Cow<'static, str>>) -> Self {
        Self(message.into())
    }

    /// Moves the error onto the heap, as the RPC error variants expect a box.
    pub fn boxed(self) -> Box<Self> {
        Box::new(self)
    }
}

impl Display for FallbackGasFillerError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl std::error::Error for FallbackGasFillerError {}
+3 -3
View File
@@ -51,7 +51,7 @@ impl ReportAggregator {
}
}
pub fn into_task(mut self) -> (Reporter, impl Future<Output = Result<()>>) {
pub fn into_task(mut self) -> (Reporter, impl Future<Output = Result<Report>>) {
let reporter = self
.runner_tx
.take()
@@ -60,7 +60,7 @@ impl ReportAggregator {
(reporter, async move { self.aggregate().await })
}
async fn aggregate(mut self) -> Result<()> {
async fn aggregate(mut self) -> Result<Report> {
debug!("Starting to aggregate report");
while let Some(event) = self.runner_rx.recv().await {
@@ -152,7 +152,7 @@ impl ReportAggregator {
format!("Failed to serialize report JSON to {}", file_path.display())
})?;
Ok(())
Ok(self.report)
}
fn handle_subscribe_to_events_event(&self, event: SubscribeToEventsEvent) {
+98 -29
View File
@@ -28,7 +28,7 @@ from __future__ import annotations
import json
import sys
import csv
from typing import List, Mapping, TypedDict
from typing import List, Mapping, TypedDict, no_type_check
class EthereumMinedBlockInformation(TypedDict):
@@ -69,7 +69,43 @@ class MinedBlockInformation(TypedDict):
"""Block-level information for a mined block with both EVM and optional Substrate fields."""
ethereum_block_information: EthereumMinedBlockInformation
substrate_block_information: SubstrateMinedBlockInformation
substrate_block_information: SubstrateMinedBlockInformation | None
def substrate_block_information_ref_time(
    block: SubstrateMinedBlockInformation | None,
) -> int | None:
    """Return the block's ``ref_time``, or ``None`` when no Substrate data exists."""
    return None if block is None else block["ref_time"]
def substrate_block_information_max_ref_time(
    block: SubstrateMinedBlockInformation | None,
) -> int | None:
    """Return the block's ``max_ref_time``, or ``None`` when no Substrate data exists."""
    return None if block is None else block["max_ref_time"]
def substrate_block_information_proof_size(
    block: SubstrateMinedBlockInformation | None,
) -> int | None:
    """Return the block's ``proof_size``, or ``None`` when no Substrate data exists."""
    return None if block is None else block["proof_size"]
def substrate_block_information_max_proof_size(
    block: SubstrateMinedBlockInformation | None,
) -> int | None:
    """Return the block's ``max_proof_size``, or ``None`` when no Substrate data exists."""
    return None if block is None else block["max_proof_size"]
class Metric(TypedDict):
@@ -100,8 +136,19 @@ class Metrics(TypedDict):
transaction_per_second: Metric
gas_per_second: Metric
gas_block_fullness: Metric
ref_time_block_fullness: Metric
proof_size_block_fullness: Metric
ref_time_block_fullness: Metric | None
proof_size_block_fullness: Metric | None
@no_type_check
def metrics_raw_item(
    metrics: Metrics, name: str, target: str, index: int
) -> int | None:
    """Return ``metrics[name]["raw"][target][index]``, or ``None`` if absent.

    Each missing level of the nested mapping falls through to an empty dict,
    and a missing element at ``index`` yields ``None`` instead of raising.
    Negative indices behave like normal Python list indexing.
    """
    raw_values = metrics.get(name, {}).get("raw", {}).get(target, {})
    try:
        return raw_values[index]
    except (IndexError, KeyError):
        # IndexError: `index` out of range for the metric's value list.
        # KeyError: a level was missing, so the lookup hit an empty dict.
        # (Previously a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt.)
        return None
class ExecutionReport(TypedDict):
@@ -144,12 +191,15 @@ BlockInformation = TypedDict(
"Transaction Count": int,
"TPS": int | None,
"GPS": int | None,
"Ref Time": int,
"Max Ref Time": int,
"Block Fullness Ref Time": int,
"Proof Size": int,
"Max Proof Size": int,
"Block Fullness Proof Size": int,
"Gas Mined": int,
"Block Gas Limit": int,
"Block Fullness Gas": float,
"Ref Time": int | None,
"Max Ref Time": int | None,
"Block Fullness Ref Time": int | None,
"Proof Size": int | None,
"Max Proof Size": int | None,
"Block Fullness Proof Size": int | None,
},
)
"""A typed dictionary used to hold all of the block information"""
@@ -175,7 +225,7 @@ def main() -> None:
report: ReportRoot = load_report(report_path)
# TODO: Remove this in the future, but for now, the target is fixed.
target: str = "revive-dev-node-revm-solc"
target: str = sys.argv[2]
csv_writer = csv.writer(sys.stdout)
@@ -188,6 +238,12 @@ def main() -> None:
resolved_blocks: list[BlockInformation] = []
for i, block_information in enumerate(blocks_information):
mined_gas: int = block_information["ethereum_block_information"][
"mined_gas"
]
block_gas_limit: int = block_information[
"ethereum_block_information"
]["block_gas_limit"]
resolved_blocks.append(
{
"Block Number": block_information[
@@ -216,24 +272,37 @@ def main() -> None:
"raw"
][target][i - 1]
),
"Ref Time": block_information[
"substrate_block_information"
]["ref_time"],
"Max Ref Time": block_information[
"substrate_block_information"
]["max_ref_time"],
"Block Fullness Ref Time": execution_report["metrics"][
"ref_time_block_fullness"
]["raw"][target][i],
"Proof Size": block_information[
"substrate_block_information"
]["proof_size"],
"Max Proof Size": block_information[
"substrate_block_information"
]["max_proof_size"],
"Block Fullness Proof Size": execution_report["metrics"][
"proof_size_block_fullness"
]["raw"][target][i],
"Gas Mined": block_information[
"ethereum_block_information"
]["mined_gas"],
"Block Gas Limit": block_information[
"ethereum_block_information"
]["block_gas_limit"],
"Block Fullness Gas": mined_gas / block_gas_limit,
"Ref Time": substrate_block_information_ref_time(
block_information["substrate_block_information"]
),
"Max Ref Time": substrate_block_information_max_ref_time(
block_information["substrate_block_information"]
),
"Block Fullness Ref Time": metrics_raw_item(
execution_report["metrics"],
"ref_time_block_fullness",
target,
i,
),
"Proof Size": substrate_block_information_proof_size(
block_information["substrate_block_information"]
),
"Max Proof Size": substrate_block_information_max_proof_size(
block_information["substrate_block_information"]
),
"Block Fullness Proof Size": metrics_raw_item(
execution_report["metrics"],
"proof_size_block_fullness",
target,
i,
),
}
)
+69 -36
View File
@@ -5,51 +5,54 @@ CI. The full models used in the JSON report can be found in the revive different
the models used in this script are just a partial reproduction of the full report models.
"""
from typing import TypedDict, Literal, Union
import json, io
import json, typing, io, sys
class Report(TypedDict):
class Report(typing.TypedDict):
context: "Context"
execution_information: dict[
"MetadataFilePathString",
dict["ModeString", dict["CaseIdxString", "CaseReport"]],
]
execution_information: dict["MetadataFilePathString", "MetadataFileReport"]
class Context(TypedDict):
class MetadataFileReport(typing.TypedDict):
case_reports: dict["CaseIdxString", "CaseReport"]
class CaseReport(typing.TypedDict):
mode_execution_reports: dict["ModeString", "ExecutionReport"]
class ExecutionReport(typing.TypedDict):
status: "TestCaseStatus"
class Context(typing.TypedDict):
Test: "TestContext"
class TestContext(TypedDict):
class TestContext(typing.TypedDict):
corpus_configuration: "CorpusConfiguration"
class CorpusConfiguration(TypedDict):
class CorpusConfiguration(typing.TypedDict):
test_specifiers: list["TestSpecifier"]
class CaseReport(TypedDict):
status: "CaseStatus"
class CaseStatusSuccess(TypedDict):
status: Literal["Succeeded"]
class CaseStatusSuccess(typing.TypedDict):
status: typing.Literal["Succeeded"]
steps_executed: int
class CaseStatusFailure(TypedDict):
status: Literal["Failed"]
class CaseStatusFailure(typing.TypedDict):
status: typing.Literal["Failed"]
reason: str
class CaseStatusIgnored(TypedDict):
status: Literal["Ignored"]
class CaseStatusIgnored(typing.TypedDict):
status: typing.Literal["Ignored"]
reason: str
CaseStatus = Union[CaseStatusSuccess, CaseStatusFailure, CaseStatusIgnored]
TestCaseStatus = typing.Union[CaseStatusSuccess, CaseStatusFailure, CaseStatusIgnored]
"""A union type of all of the possible statuses that could be reported for a case."""
TestSpecifier = str
@@ -64,6 +67,12 @@ MetadataFilePathString = str
CaseIdxString = str
"""The index of a case as a string. For example '0'"""
PlatformString = typing.Union[
typing.Literal["revive-dev-node-revm-solc"],
typing.Literal["revive-dev-node-polkavm-resolc"],
]
"""A string of the platform on which the test was run"""
def path_relative_to_resolc_compiler_test_directory(path: str) -> str:
"""
@@ -78,12 +87,22 @@ def path_relative_to_resolc_compiler_test_directory(path: str) -> str:
def main() -> None:
with open("report.json", "r") as file:
with open(sys.argv[1], "r") as file:
report: Report = json.load(file)
# Getting the platform string and resolving it into a simpler version of
# itself.
platform_identifier: PlatformString = typing.cast(PlatformString, sys.argv[2])
if platform_identifier == "revive-dev-node-polkavm-resolc":
platform: str = "PolkaVM"
elif platform_identifier == "revive-dev-node-revm-solc":
platform: str = "REVM"
else:
platform: str = platform_identifier
# Starting the markdown document and adding information to it as we go.
markdown_document: io.TextIOWrapper = open("report.md", "w")
print("# Differential Tests Results", file=markdown_document)
print(f"# Differential Tests Results ({platform})", file=markdown_document)
# Getting all of the test specifiers from the report and making them relative to the tests dir.
test_specifiers: list[str] = list(
@@ -94,7 +113,7 @@ def main() -> None:
)
print("## Specified Tests", file=markdown_document)
for test_specifier in test_specifiers:
print(f"* `{test_specifier}`", file=markdown_document)
print(f"* ``{test_specifier}``", file=markdown_document)
# Counting the total number of test cases, successes, failures, and ignored tests
total_number_of_cases: int = 0
@@ -102,9 +121,13 @@ def main() -> None:
total_number_of_failures: int = 0
total_number_of_ignores: int = 0
for _, mode_to_case_mapping in report["execution_information"].items():
for _, case_idx_to_report_mapping in mode_to_case_mapping.items():
for _, case_report in case_idx_to_report_mapping.items():
status: CaseStatus = case_report["status"]
for _, case_idx_to_report_mapping in mode_to_case_mapping[
"case_reports"
].items():
for _, execution_report in case_idx_to_report_mapping[
"mode_execution_reports"
].items():
status: TestCaseStatus = execution_report["status"]
total_number_of_cases += 1
if status["status"] == "Succeeded":
@@ -144,9 +167,13 @@ def main() -> None:
for metadata_file_path, mode_to_case_mapping in report[
"execution_information"
].items():
for mode_string, case_idx_to_report_mapping in mode_to_case_mapping.items():
for case_idx_string, case_report in case_idx_to_report_mapping.items():
status: CaseStatus = case_report["status"]
for case_idx_string, case_idx_to_report_mapping in mode_to_case_mapping[
"case_reports"
].items():
for mode_string, execution_report in case_idx_to_report_mapping[
"mode_execution_reports"
].items():
status: TestCaseStatus = execution_report["status"]
metadata_file_path: str = (
path_relative_to_resolc_compiler_test_directory(metadata_file_path)
)
@@ -183,9 +210,13 @@ def main() -> None:
for metadata_file_path, mode_to_case_mapping in report[
"execution_information"
].items():
for mode_string, case_idx_to_report_mapping in mode_to_case_mapping.items():
for case_idx_string, case_report in case_idx_to_report_mapping.items():
status: CaseStatus = case_report["status"]
for case_idx_string, case_idx_to_report_mapping in mode_to_case_mapping[
"case_reports"
].items():
for mode_string, execution_report in case_idx_to_report_mapping[
"mode_execution_reports"
].items():
status: TestCaseStatus = execution_report["status"]
metadata_file_path: str = (
path_relative_to_resolc_compiler_test_directory(metadata_file_path)
)
@@ -194,7 +225,9 @@ def main() -> None:
if status["status"] != "Failed":
continue
failure_reason: str = status["reason"].replace("\n", " ")
failure_reason: str = (
status["reason"].replace("\n", " ").replace("|", " ")
)
note: str = ""
modes_where_this_case_succeeded: set[ModeString] = (
@@ -212,7 +245,7 @@ def main() -> None:
f"{metadata_file_path}::{case_idx_string}::{mode_string}"
)
print(
f"| `{test_specifier}` | `{failure_reason}` | {note} |",
f"| ``{test_specifier}`` | ``{failure_reason}`` | {note} |",
file=markdown_document,
)
print("\n\n</details>", file=markdown_document)