Compare commits

...

15 Commits

Author SHA1 Message Date
Marian Radu cec992f80a Update Cargo.lock 2026-01-19 17:03:38 +02:00
Marian Radu b53550e43e Merge branch 'main' into bugfix/skip-contracts-that-have-no-byte-code 2026-01-19 16:57:57 +02:00
Marian Radu 9491263857 Add PVM heap-size and stack-size configuration parameters for resolc (#226)
* Update revive compiler dependencies

* Inject polkavm settings into resolc standard JSON input

* Add PVM heap-size and stack-size configuration for resolc
2026-01-19 10:05:37 +00:00
Marian Radu b41c0e61c0 Skip contracts that have no bytecode (e.g., abstract contracts) 2026-01-17 11:01:59 +02:00
Omar 9d1c71756f Update Report Processor (#227)
* Add a report processing tool

* Add expectations tests to the CI action

* Fix an issue with CI

* Fix CI

* Fix the path of the workdir in CI

* Fix CI issue with the paths

* Update the format of the expectations file

* Update report processor to only include failures
2026-01-16 16:21:36 +00:00
Omar 8b0a0c3518 Update retester CI to check expectations (#225)
* Add a report processing tool

* Add expectations tests to the CI action

* Fix an issue with CI

* Fix CI

* Fix the path of the workdir in CI

* Fix CI issue with the paths

* Update the format of the expectations file
2026-01-15 15:32:44 +00:00
Omar 94b04c0189 Change the input for the polkadot-omni-node to be a path to chain-spec (#223)
* Change the input for the polkadot-omni-node to be a path to chain-spec

* Remove unneeded dependency
2026-01-14 11:59:21 +00:00
Omar 2d3602aaed Add a retry layer to all providers (#224)
* Add a `ReceiptRetryLayer` for providers

* Fix the retry layer

* Rename the retry layer

* Remove outdated polling function

* Remove unneeded dependencies
2026-01-13 19:39:47 +00:00
Omar d38e6d419d Add support for the polkadot-omni-node (#222)
* Add configuration for the polkadot-omni-node

* Add support for the polkadot-omni-node

* Add CI inputs for polkadot-omni-node
2026-01-12 02:49:53 +00:00
Omar 62478ee2f9 Update the FallbackGasFiller implementation (#221) 2026-01-12 01:53:15 +00:00
Marian Radu dda369c8b5 Disable recursion limit when parsing resolc compilation output. (#220) 2026-01-09 16:22:58 +00:00
Omar 08c1572870 Added a CI action for running tests (#219)
* Add a CI action for running tests

* Update the CI action fixing incorrect matrix usage
2026-01-06 14:27:20 +00:00
Omar cd6b7969ac Update tests commit hash (#218) 2025-12-05 07:47:48 +00:00
Omar 78ac7ee381 Fix the Fallback Gas Limiter (#217)
* Add code to disable the fallback gas filler

* Allow benchmarks driver to await tx receipts

* Improve the transaction submission logic

* Update Python Script to process Geth benchmarks
2025-12-04 13:19:48 +00:00
Omar 3edaebdcae Cache the chainspec (#216) 2025-12-03 16:37:44 +00:00
35 changed files with 3687 additions and 1752 deletions
@@ -0,0 +1,141 @@
name: "Run Revive Differential Tests"
description: "Builds and runs revive-differential-tests (retester) from this repo against the caller's Polkadot SDK."
inputs:
# Setup arguments & environment
polkadot-sdk-path:
description: "The path of the polkadot-sdk that should be compiled for the tests to run against."
required: false
default: "."
type: string
cargo-command:
description: "The cargo command to use in compilations and running of tests (e.g., forklift cargo)."
required: false
default: "cargo"
type: string
revive-differential-tests-ref:
description: "The branch, tag or SHA to checkout for the revive-differential-tests."
required: false
default: "main"
type: string
resolc-version:
description: "The version of resolc to install and use in tests."
required: false
default: "0.5.0"
type: string
use-compilation-caches:
description: "Controls if the compilation caches will be used for the test run or not."
required: false
default: true
type: boolean
# Test Execution Arguments
platform:
description: "The identifier of the platform to run the tests on (e.g., geth-evm-solc, revive-dev-node-revm-solc)"
required: true
type: string
polkadot-omnichain-node-chain-spec-path:
description: "The path of the chain-spec of the chain we're spawning'. This is only required if the polkadot-omni-node is one of the selected platforms."
required: false
type: string
polkadot-omnichain-node-parachain-id:
description: "The id of the parachain to spawn with the polkadot-omni-node. This is only required if the polkadot-omni-node is one of the selected platforms."
type: number
required: false
expectations-file-path:
description: "Path to the expectations file to use to compare against."
type: string
required: false
runs:
using: "composite"
steps:
- name: Checkout the Differential Tests Repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
repository: paritytech/revive-differential-tests
ref: ${{ inputs['revive-differential-tests-ref'] }}
path: revive-differential-tests
submodules: recursive
- name: Installing Resolc
shell: bash
if: ${{ runner.os == 'Linux' && runner.arch == 'X64' }}
run: |
VERSION="${{ inputs['resolc-version'] }}"
ASSET_URL="https://github.com/paritytech/revive/releases/download/v$VERSION/resolc-x86_64-unknown-linux-musl"
echo "Downloading resolc v$VERSION from $ASSET_URL"
curl -Lsf --show-error -o resolc "$ASSET_URL"
chmod +x resolc
./resolc --version
- name: Installing Retester
shell: bash
run: ${{ inputs['cargo-command'] }} install --locked --path revive-differential-tests/crates/core
- name: Creating a workdir for retester
shell: bash
run: mkdir workdir
- name: Downloading & Initializing the compilation caches
shell: bash
if: ${{ inputs['use-compilation-caches'] == 'true' }}
run: |
curl -fL --retry 3 --retry-all-errors --connect-timeout 10 -o cache.tar.gz "https://github.com/paritytech/revive-differential-tests/releases/download/compilation-caches-v1.1/cache.tar.gz"
tar -zxf cache.tar.gz -C ./workdir > /dev/null 2>&1
- name: Building the dependencies from the Polkadot SDK
shell: bash
run: |
${{ inputs['cargo-command'] }} build --locked --profile release -p pallet-revive-eth-rpc -p revive-dev-node --manifest-path ${{ inputs['polkadot-sdk-path'] }}/Cargo.toml
${{ inputs['cargo-command'] }} build --locked --profile release --bin polkadot-omni-node --manifest-path ${{ inputs['polkadot-sdk-path'] }}/Cargo.toml
- name: Installing retester
shell: bash
run: ${{ inputs['cargo-command'] }} install --path ./revive-differential-tests/crates/core
- name: Installing report-processor
shell: bash
run: ${{ inputs['cargo-command'] }} install --path ./revive-differential-tests/crates/report-processor
- name: Running the Differential Tests
shell: bash
run: |
OMNI_ARGS=()
if [[ -n "${{ inputs['polkadot-omnichain-node-parachain-id'] }}" ]]; then
OMNI_ARGS+=(
--polkadot-omni-node.parachain-id
"${{ inputs['polkadot-omnichain-node-parachain-id'] }}"
)
fi
if [[ -n "${{ inputs['polkadot-omnichain-node-chain-spec-path'] }}" ]]; then
OMNI_ARGS+=(
--polkadot-omni-node.chain-spec-path
"${{ inputs['polkadot-omnichain-node-chain-spec-path'] }}"
)
fi
retester test \
--test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/simple \
--test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/complex \
--test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/translated_semantic_tests \
--platform ${{ inputs['platform'] }} \
--report.file-name report.json \
--concurrency.number-of-nodes 10 \
--concurrency.number-of-threads 10 \
--concurrency.number-of-concurrent-tasks 100 \
--working-directory ./workdir \
--revive-dev-node.consensus manual-seal-200 \
--revive-dev-node.path ${{ inputs['polkadot-sdk-path'] }}/target/release/revive-dev-node \
--eth-rpc.path ${{ inputs['polkadot-sdk-path'] }}/target/release/eth-rpc \
--polkadot-omni-node.path ${{ inputs['polkadot-sdk-path'] }}/target/release/polkadot-omni-node \
--resolc.path ./resolc \
"${OMNI_ARGS[@]}" || true
- name: Generate the expectation file
shell: bash
run: report-processor generate-expectations-file --report-path ./workdir/report.json --output-path ./workdir/expectations.json --remove-prefix ./revive-differential-tests/resolc-compiler-tests
- name: Upload the Report to the CI
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
with:
name: ${{ inputs['platform'] }}-report.json
path: ./workdir/report.json
- name: Upload the Expectations File to the CI
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
with:
name: ${{ inputs['platform'] }}.json
path: ./workdir/expectations.json
- name: Check Expectations
shell: bash
if: ${{ inputs['expectations-file-path'] != '' }}
run: report-processor compare-expectation-files --base-expectation-path ${{ inputs['expectations-file-path'] }} --other-expectation-path ./workdir/expectations.json
+1 -1
@@ -10,7 +10,7 @@ node_modules
 *.log
 profile.json.gz
-workdir
+workdir*
 !/schema.json
 !/dev-genesis.json
Generated
+1268 -1098
File diff suppressed because it is too large
+11 -25
@@ -21,7 +21,9 @@ revive-dt-node-interaction = { version = "0.1.0", path = "crates/node-interactio
 revive-dt-node-pool = { version = "0.1.0", path = "crates/node-pool" }
 revive-dt-report = { version = "0.1.0", path = "crates/report" }
 revive-dt-solc-binaries = { version = "0.1.0", path = "crates/solc-binaries" }
+revive-dt-report-processor = { version = "0.1.0", path = "crates/report-processor" }
+alloy = { version = "1.4.1", features = ["full", "genesis", "json-rpc"] }
 ansi_term = "0.12.1"
 anyhow = "1.0"
 bson = { version = "2.15.0" }
@@ -72,36 +74,20 @@ indexmap = { version = "2.10.0", default-features = false }
 itertools = { version = "0.14.0" }
 # revive compiler
-revive-solc-json-interface = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
-revive-common = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
-revive-differential = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
+revive-solc-json-interface = { version = "0.5.0" }
+revive-common = { version = "0.3.0" }
+revive-differential = { version = "0.3.0" }
 zombienet-sdk = { git = "https://github.com/paritytech/zombienet-sdk.git", rev = "891f6554354ce466abd496366dbf8b4f82141241" }
-[workspace.dependencies.alloy]
-version = "1.0.37"
-default-features = false
-features = [
-    "json-abi",
-    "providers",
-    "provider-ws",
-    "provider-ipc",
-    "provider-http",
-    "provider-debug-api",
-    "reqwest",
-    "rpc-types",
-    "signer-local",
-    "std",
-    "network",
-    "serde",
-    "rpc-types-eth",
-    "genesis",
-    "sol-types",
-]
 [profile.bench]
 inherits = "release"
-lto = true
 codegen-units = 1
+lto = true
+[profile.production]
+inherits = "release"
+codegen-units = 1
+lto = true
 [workspace.lints.clippy]
-1
@@ -19,7 +19,6 @@ semver = { workspace = true }
 serde = { workspace = true }
 schemars = { workspace = true }
 strum = { workspace = true }
-tokio = { workspace = true, default-features = false, features = ["time"] }
 [lints]
 workspace = true
-3
@@ -1,3 +0,0 @@
mod poll;
pub use poll::*;
-72
@@ -1,72 +0,0 @@
use std::ops::ControlFlow;
use std::time::Duration;
use anyhow::{Context as _, Result, anyhow};
const EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION: Duration = Duration::from_secs(60);
/// A function that polls for a fallible future for some period of time and errors if it fails to
/// get a result after polling.
///
/// Given a future that returns a [`Result<ControlFlow<O, ()>>`], this function calls the future
/// repeatedly (with some wait period) until the future returns a [`ControlFlow::Break`] or until it
/// returns an [`Err`] in which case the function stops polling and returns the error.
///
/// If the future keeps returning [`ControlFlow::Continue`] and fails to return a [`Break`] within
/// the permitted polling duration then this function returns an [`Err`]
///
/// [`Break`]: ControlFlow::Break
/// [`Continue`]: ControlFlow::Continue
pub async fn poll<F, O>(
polling_duration: Duration,
polling_wait_behavior: PollingWaitBehavior,
mut future: impl FnMut() -> F,
) -> Result<O>
where
F: Future<Output = Result<ControlFlow<O, ()>>>,
{
let mut retries = 0;
let mut total_wait_duration = Duration::ZERO;
let max_allowed_wait_duration = polling_duration;
loop {
if total_wait_duration >= max_allowed_wait_duration {
break Err(anyhow!(
"Polling failed after {} retries and a total of {:?} of wait time",
retries,
total_wait_duration
));
}
match future()
.await
.context("Polled future returned an error during polling loop")?
{
ControlFlow::Continue(()) => {
let next_wait_duration = match polling_wait_behavior {
PollingWaitBehavior::Constant(duration) => duration,
PollingWaitBehavior::ExponentialBackoff => {
Duration::from_secs(2u64.pow(retries))
.min(EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION)
}
};
let next_wait_duration =
next_wait_duration.min(max_allowed_wait_duration - total_wait_duration);
total_wait_duration += next_wait_duration;
retries += 1;
tokio::time::sleep(next_wait_duration).await;
}
ControlFlow::Break(output) => {
break Ok(output);
}
}
}
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
pub enum PollingWaitBehavior {
Constant(Duration),
#[default]
ExponentialBackoff,
}
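For context on what the callers of this module lose, here is roughly how the removed poll helper was driven. This is a minimal sketch, not code from this repository: poll and PollingWaitBehavior are the items defined in the deleted file above, and the check_receipt probe is a hypothetical stand-in.

use std::ops::ControlFlow;
use std::time::Duration;
use anyhow::Result;

// Hypothetical probe: return Break(value) once a result is available,
// Continue(()) to keep polling.
async fn check_receipt() -> Result<ControlFlow<String, ()>> {
    Ok(ControlFlow::Continue(()))
}

// Poll for up to five minutes, backing off exponentially between attempts.
async fn wait_for_receipt() -> Result<String> {
    poll(
        Duration::from_secs(5 * 60),
        PollingWaitBehavior::ExponentialBackoff,
        check_receipt,
    )
    .await
}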
-1
@@ -3,7 +3,6 @@
 pub mod cached_fs;
 pub mod fs;
-pub mod futures;
 pub mod iterators;
 pub mod macros;
 pub mod types;
+8
@@ -39,6 +39,12 @@ pub enum PlatformIdentifier {
     ZombienetPolkavmResolc,
     /// A zombienet based Substrate/Polkadot node with the REVM backend with the solc compiler.
     ZombienetRevmSolc,
+    /// A polkadot-omni-chain based node with a custom runtime with the PolkaVM backend and the
+    /// resolc compiler.
+    PolkadotOmniNodePolkavmResolc,
+    /// A polkadot-omni-chain based node with a custom runtime with the REVM backend and the solc
+    /// compiler.
+    PolkadotOmniNodeRevmSolc,
 }
 /// An enum of the platform identifiers of all of the platforms supported by this framework.
@@ -95,6 +101,8 @@ pub enum NodeIdentifier {
     ReviveDevNode,
     /// A zombienet spawned nodes
     Zombienet,
+    /// The polkadot-omni-node.
+    PolkadotOmniNode,
 }
 /// An enum representing the identifiers of the supported VMs.
+12
@@ -23,6 +23,18 @@ pub struct Mode {
     pub version: Option<semver::VersionReq>,
 }
+impl Ord for Mode {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.to_string().cmp(&other.to_string())
+    }
+}
+impl PartialOrd for Mode {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
 impl Display for Mode {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         self.pipeline.fmt(f)?;
@@ -1,10 +1,15 @@
-use std::{fmt::Display, path::PathBuf, str::FromStr};
+use std::{
+    fmt::Display,
+    path::{Path, PathBuf},
+    str::FromStr,
+};
 use anyhow::{Context as _, bail};
+use serde::{Deserialize, Serialize};
 use crate::types::Mode;
-#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub enum ParsedTestSpecifier {
     /// All of the test cases in the file should be ran across all of the specified modes
     FileOrDirectory {
@@ -34,6 +39,22 @@ pub enum ParsedTestSpecifier {
     },
 }
+impl ParsedTestSpecifier {
+    pub fn metadata_path(&self) -> &Path {
+        match self {
+            ParsedTestSpecifier::FileOrDirectory {
+                metadata_or_directory_file_path: metadata_file_path,
+            }
+            | ParsedTestSpecifier::Case {
+                metadata_file_path, ..
+            }
+            | ParsedTestSpecifier::CaseWithMode {
+                metadata_file_path, ..
+            } => metadata_file_path,
+        }
+    }
+}
 impl Display for ParsedTestSpecifier {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         match self {
@@ -131,3 +152,22 @@ impl TryFrom<&str> for ParsedTestSpecifier {
         value.parse()
     }
 }
+impl Serialize for ParsedTestSpecifier {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        self.to_string().serialize(serializer)
+    }
+}
+impl<'de> Deserialize<'de> for ParsedTestSpecifier {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        let string = String::deserialize(deserializer)?;
+        string.parse().map_err(serde::de::Error::custom)
+    }
+}
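Because the Serialize and Deserialize impls above delegate to Display and FromStr, a specifier survives a JSON round-trip as a plain, human-readable string. A minimal sketch of the expected behavior, assuming a bare metadata path parses as a FileOrDirectory specifier per the variants above (the path itself is illustrative):

fn main() -> anyhow::Result<()> {
    // Round-trip: specifier -> JSON string -> specifier.
    let spec: ParsedTestSpecifier = "tests/simple/metadata.json".try_into()?;
    let json = serde_json::to_string(&spec)?;
    // Serializes as a bare JSON string rather than a structured object.
    let back: ParsedTestSpecifier = serde_json::from_str(&json)?;
    assert_eq!(spec, back);
    Ok(())
}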
+85 -33
@@ -12,9 +12,13 @@ use dashmap::DashMap;
 use revive_dt_common::types::VersionOrRequirement;
 use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
 use revive_solc_json_interface::{
-    SolcStandardJsonInput, SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings,
-    SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsSelection,
-    SolcStandardJsonOutput,
+    PolkaVMDefaultHeapMemorySize, PolkaVMDefaultStackMemorySize, SolcStandardJsonInput,
+    SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings,
+    SolcStandardJsonInputSettingsLibraries, SolcStandardJsonInputSettingsMetadata,
+    SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsPolkaVM,
+    SolcStandardJsonInputSettingsPolkaVMMemory, SolcStandardJsonInputSettingsSelection,
+    SolcStandardJsonOutput, standard_json::input::settings::optimizer::Optimizer,
+    standard_json::input::settings::optimizer::details::Details,
 };
 use tracing::{Span, field::display};
@@ -25,6 +29,7 @@ use crate::{
 use alloy::json_abi::JsonAbi;
 use anyhow::{Context as _, Result};
 use semver::Version;
+use std::collections::BTreeSet;
 use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
 /// A wrapper around the `resolc` binary, emitting PVM-compatible bytecode.
@@ -37,6 +42,10 @@ struct ResolcInner {
     solc: Solc,
     /// Path to the `resolc` executable
     resolc_path: PathBuf,
+    /// The PVM heap size in bytes.
+    pvm_heap_size: u32,
+    /// The PVM stack size in bytes.
+    pvm_stack_size: u32,
 }
 impl Resolc {
@@ -63,10 +72,35 @@ impl Resolc {
             Self(Arc::new(ResolcInner {
                 solc,
                 resolc_path: resolc_configuration.path.clone(),
+                pvm_heap_size: resolc_configuration
+                    .heap_size
+                    .unwrap_or(PolkaVMDefaultHeapMemorySize),
+                pvm_stack_size: resolc_configuration
+                    .stack_size
+                    .unwrap_or(PolkaVMDefaultStackMemorySize),
             }))
         })
         .clone())
     }
+    fn polkavm_settings(&self) -> SolcStandardJsonInputSettingsPolkaVM {
+        SolcStandardJsonInputSettingsPolkaVM::new(
+            Some(SolcStandardJsonInputSettingsPolkaVMMemory::new(
+                Some(self.0.pvm_heap_size),
+                Some(self.0.pvm_stack_size),
+            )),
+            false,
+        )
+    }
+    fn inject_polkavm_settings(&self, input: &SolcStandardJsonInput) -> Result<serde_json::Value> {
+        let mut input_value = serde_json::to_value(input)
+            .context("Failed to serialize Standard JSON input for resolc")?;
+        if let Some(settings) = input_value.get_mut("settings") {
+            settings["polkavm"] = serde_json::to_value(self.polkavm_settings()).unwrap();
+        }
+        Ok(input_value)
+    }
 }
 impl SolidityCompiler for Resolc {
@@ -121,8 +155,8 @@ impl SolidityCompiler for Resolc {
                 .collect(),
             settings: SolcStandardJsonInputSettings {
                 evm_version,
-                libraries: Some(
-                    libraries
+                libraries: SolcStandardJsonInputSettingsLibraries {
+                    inner: libraries
                         .into_iter()
                         .map(|(source_code, libraries_map)| {
                             (
@@ -136,23 +170,29 @@ impl SolidityCompiler for Resolc {
                             )
                         })
                        .collect(),
-                ),
-                remappings: None,
-                output_selection: Some(SolcStandardJsonInputSettingsSelection::new_required()),
+                },
+                remappings: BTreeSet::<String>::new(),
+                output_selection: SolcStandardJsonInputSettingsSelection::new_required(),
                 via_ir: Some(true),
                 optimizer: SolcStandardJsonInputSettingsOptimizer::new(
                     optimization
                         .unwrap_or(ModeOptimizerSetting::M0)
                         .optimizations_enabled(),
-                    None,
-                    &Version::new(0, 0, 0),
+                    Optimizer::default_mode(),
+                    Details::disabled(&Version::new(0, 0, 0)),
+                    false,
                 ),
-                metadata: None,
-                polkavm: None,
+                polkavm: self.polkavm_settings(),
+                metadata: SolcStandardJsonInputSettingsMetadata::default(),
+                detect_missing_libraries: false,
             },
         };
-        Span::current().record("json_in", display(serde_json::to_string(&input).unwrap()));
+        // Manually inject polkavm settings since it's marked skip_serializing in the upstream crate
+        let std_input_json = self.inject_polkavm_settings(&input)?;
+        Span::current().record(
+            "json_in",
+            display(serde_json::to_string(&std_input_json).unwrap()),
+        );
         let path = &self.0.resolc_path;
         let mut command = AsyncCommand::new(path);
@@ -181,8 +221,9 @@ impl SolidityCompiler for Resolc {
             .with_context(|| format!("Failed to spawn resolc at {}", path.display()))?;
         let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped");
-        let serialized_input = serde_json::to_vec(&input)
+        let serialized_input = serde_json::to_vec(&std_input_json)
             .context("Failed to serialize Standard JSON input for resolc")?;
         stdin_pipe
             .write_all(&serialized_input)
             .await
@@ -208,14 +249,18 @@ impl SolidityCompiler for Resolc {
             anyhow::bail!("Compilation failed with an error: {message}");
         }
-        let parsed = serde_json::from_slice::<SolcStandardJsonOutput>(&stdout)
-            .map_err(|e| {
-                anyhow::anyhow!(
-                    "failed to parse resolc JSON output: {e}\nstderr: {}",
-                    String::from_utf8_lossy(&stderr)
-                )
-            })
-            .context("Failed to parse resolc standard JSON output")?;
+        let parsed: SolcStandardJsonOutput = {
+            let mut deserializer = serde_json::Deserializer::from_slice(&stdout);
+            deserializer.disable_recursion_limit();
+            serde::de::Deserialize::deserialize(&mut deserializer)
+                .map_err(|e| {
+                    anyhow::anyhow!(
+                        "failed to parse resolc JSON output: {e}\nstderr: {}",
+                        String::from_utf8_lossy(&stderr)
+                    )
+                })
+                .context("Failed to parse resolc standard JSON output")?
+        };
         tracing::debug!(
             output = %serde_json::to_string(&parsed).unwrap(),
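The recursion-limit change above (from #220) follows serde_json's documented escape hatch for deeply nested output. A minimal standalone sketch of the pattern, targeting a plain serde_json::Value rather than the crate's SolcStandardJsonOutput; note that disable_recursion_limit requires serde_json's unbounded_depth feature, and truly unbounded input can then overflow the stack unless paired with something like serde_stacker:

use anyhow::{Context as _, Result};

// Parse deeply nested JSON without serde_json's default 128-level recursion cap.
fn parse_deep_json(bytes: &[u8]) -> Result<serde_json::Value> {
    let mut deserializer = serde_json::Deserializer::from_slice(bytes);
    deserializer.disable_recursion_limit();
    serde::de::Deserialize::deserialize(&mut deserializer)
        .context("Failed to parse deeply nested JSON")
}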
@@ -224,7 +269,7 @@ impl SolidityCompiler for Resolc {
         // Detecting if the compiler output contained errors and reporting them through logs and
         // errors instead of returning the compiler output that might contain errors.
-        for error in parsed.errors.iter().flatten() {
+        for error in parsed.errors.iter() {
             if error.severity == "error" {
                 tracing::error!(
                     ?error,
@@ -236,12 +281,12 @@ impl SolidityCompiler for Resolc {
             }
         }
-        let Some(contracts) = parsed.contracts else {
+        if parsed.contracts.is_empty() {
             anyhow::bail!("Unexpected error - resolc output doesn't have a contracts section");
-        };
+        }
         let mut compiler_output = CompilerOutput::default();
-        for (source_path, contracts) in contracts.into_iter() {
+        for (source_path, contracts) in parsed.contracts.into_iter() {
             let src_for_msg = source_path.clone();
             let source_path = PathBuf::from(source_path)
                 .canonicalize()
@@ -249,15 +294,22 @@ impl SolidityCompiler for Resolc {
             let map = compiler_output.contracts.entry(source_path).or_default();
             for (contract_name, contract_information) in contracts.into_iter() {
-                let bytecode = contract_information
+                let Some(bytecode) = contract_information
                     .evm
                     .and_then(|evm| evm.bytecode.clone())
-                    .context("Unexpected - Contract compiled with resolc has no bytecode")?;
+                else {
+                    tracing::debug!(
+                        "Skipping abstract or interface contract {} - no bytecode",
+                        contract_name
+                    );
+                    continue;
+                };
                 let abi = {
-                    let metadata = contract_information
-                        .metadata
-                        .as_ref()
-                        .context("No metadata found for the contract")?;
+                    let metadata = &contract_information.metadata;
+                    if metadata.is_null() {
+                        anyhow::bail!("No metadata found for the contract");
+                    }
                     let solc_metadata_str = match metadata {
                         serde_json::Value::String(solc_metadata_str) => {
                             solc_metadata_str.as_str()
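The inject_polkavm_settings helper earlier in this file works around a field the upstream input type marks skip_serializing: the typed input is first converted to a loose serde_json::Value, then the extra object is spliced in under settings.polkavm. A self-contained sketch of that splice using only serde_json; every key and value here is illustrative:

use anyhow::{Context as _, Result};
use serde_json::{Value, json};

// Serialize a typed input, then splice an extra object in under settings.polkavm.
fn inject_extra_settings<T: serde::Serialize>(input: &T, polkavm: Value) -> Result<Value> {
    let mut value = serde_json::to_value(input).context("Failed to serialize input")?;
    if let Some(settings) = value.get_mut("settings") {
        // Indexed assignment on a JSON object inserts the key if it is absent.
        settings["polkavm"] = polkavm;
    }
    Ok(value)
}

fn main() -> Result<()> {
    // Illustrative input; the real code passes the compiler's standard JSON input type.
    let input = json!({ "language": "Solidity", "settings": { "viaIR": true } });
    let patched = inject_extra_settings(&input, json!({ "memory": { "heap": 65536 } }))?;
    assert_eq!(patched["settings"]["polkavm"]["memory"]["heap"], 65536);
    Ok(())
}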
+122
@@ -143,6 +143,17 @@ impl AsRef<ReviveDevNodeConfiguration> for Context {
     }
 }
+impl AsRef<PolkadotOmnichainNodeConfiguration> for Context {
+    fn as_ref(&self) -> &PolkadotOmnichainNodeConfiguration {
+        match self {
+            Self::Test(context) => context.as_ref().as_ref(),
+            Self::Benchmark(context) => context.as_ref().as_ref(),
+            Self::ExportGenesis(context) => context.as_ref().as_ref(),
+            Self::ExportJsonSchema => unreachable!(),
+        }
+    }
+}
 impl AsRef<EthRpcConfiguration> for Context {
     fn as_ref(&self) -> &EthRpcConfiguration {
         match self {
@@ -228,6 +239,7 @@ pub struct TestExecutionContext {
     #[arg(
         short = 'p',
         long = "platform",
+        id = "platforms",
         default_values = ["geth-evm-solc", "revive-dev-node-polkavm-resolc"]
     )]
     pub platforms: Vec<PlatformIdentifier>,
@@ -277,6 +289,10 @@ pub struct TestExecutionContext {
     #[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
     pub revive_dev_node_configuration: ReviveDevNodeConfiguration,
+    /// Configuration parameters for the Polkadot Omnichain Node.
+    #[clap(flatten, next_help_heading = "Polkadot Omnichain Node Configuration")]
+    pub polkadot_omnichain_node_configuration: PolkadotOmnichainNodeConfiguration,
     /// Configuration parameters for the Eth Rpc.
     #[clap(flatten, next_help_heading = "Eth RPC Configuration")]
     pub eth_rpc_configuration: EthRpcConfiguration,
@@ -375,6 +391,23 @@ pub struct BenchmarkingContext {
     #[arg(short = 'r', long = "default-repetition-count", default_value_t = 1000)]
     pub default_repetition_count: usize,
+    /// This transaction controls whether the benchmarking driver should await for transactions to
+    /// be included in a block before moving on to the next transaction in the sequence or not.
+    ///
+    /// This behavior is useful in certain cases and not so useful in others. For example, in some
+    /// repetition block if there's some kind of relationship between txs n and n+1 (for example a
+    /// mint then a transfer) then you would want to wait for the minting to happen and then move on
+    /// to the transfers. On the other hand, if there's no relationship between the transactions n
+    /// and n+1 (e.g., mint and another mint of a different token) then awaiting the first mint to
+    /// be included in a block might not seem necessary.
+    ///
+    /// By default, this behavior is set to false to allow the benchmarking framework to saturate
+    /// the node's mempool as quickly as possible. However, as explained above, there are cases
+    /// where it's needed and certain workloads where failure to provide this argument would lead to
+    /// inaccurate results.
+    #[arg(long)]
+    pub await_transaction_inclusion: bool,
     /// Configuration parameters for the corpus files to use.
     #[clap(flatten, next_help_heading = "Corpus Configuration")]
     pub corpus_configuration: CorpusConfiguration,
@@ -403,6 +436,10 @@ pub struct BenchmarkingContext {
     #[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
     pub revive_dev_node_configuration: ReviveDevNodeConfiguration,
+    /// Configuration parameters for the Polkadot Omnichain Node.
+    #[clap(flatten, next_help_heading = "Polkadot Omnichain Node Configuration")]
+    pub polkadot_omnichain_node_configuration: PolkadotOmnichainNodeConfiguration,
     /// Configuration parameters for the Eth Rpc.
     #[clap(flatten, next_help_heading = "Eth RPC Configuration")]
     pub eth_rpc_configuration: EthRpcConfiguration,
@@ -481,6 +518,10 @@ pub struct ExportGenesisContext {
     #[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
     pub revive_dev_node_configuration: ReviveDevNodeConfiguration,
+    /// Configuration parameters for the Polkadot Omnichain Node.
+    #[clap(flatten, next_help_heading = "Polkadot Omnichain Node Configuration")]
+    pub polkadot_omnichain_node_configuration: PolkadotOmnichainNodeConfiguration,
     /// Configuration parameters for the wallet.
     #[clap(flatten, next_help_heading = "Wallet Configuration")]
     pub wallet_configuration: WalletConfiguration,
@@ -540,6 +581,12 @@ impl AsRef<ReviveDevNodeConfiguration> for TestExecutionContext {
     }
 }
+impl AsRef<PolkadotOmnichainNodeConfiguration> for TestExecutionContext {
+    fn as_ref(&self) -> &PolkadotOmnichainNodeConfiguration {
+        &self.polkadot_omnichain_node_configuration
+    }
+}
 impl AsRef<EthRpcConfiguration> for TestExecutionContext {
     fn as_ref(&self) -> &EthRpcConfiguration {
         &self.eth_rpc_configuration
@@ -636,6 +683,12 @@ impl AsRef<ReviveDevNodeConfiguration> for BenchmarkingContext {
     }
 }
+impl AsRef<PolkadotOmnichainNodeConfiguration> for BenchmarkingContext {
+    fn as_ref(&self) -> &PolkadotOmnichainNodeConfiguration {
+        &self.polkadot_omnichain_node_configuration
+    }
+}
 impl AsRef<EthRpcConfiguration> for BenchmarkingContext {
     fn as_ref(&self) -> &EthRpcConfiguration {
         &self.eth_rpc_configuration
@@ -696,6 +749,12 @@ impl AsRef<ReviveDevNodeConfiguration> for ExportGenesisContext {
     }
 }
+impl AsRef<PolkadotOmnichainNodeConfiguration> for ExportGenesisContext {
+    fn as_ref(&self) -> &PolkadotOmnichainNodeConfiguration {
+        &self.polkadot_omnichain_node_configuration
+    }
+}
 impl AsRef<WalletConfiguration> for ExportGenesisContext {
     fn as_ref(&self) -> &WalletConfiguration {
         &self.wallet_configuration
@@ -741,6 +800,17 @@ pub struct ResolcConfiguration {
     /// provided in the user's $PATH.
     #[clap(id = "resolc.path", long = "resolc.path", default_value = "resolc")]
     pub path: PathBuf,
+    /// Specifies the PVM heap size in bytes.
+    ///
+    /// If unspecified, the revive compiler default is used
+    #[clap(id = "resolc.heap-size", long = "resolc.heap-size")]
+    pub heap_size: Option<u32>,
+    /// Specifies the PVM stack size in bytes.
+    ///
+    /// If unspecified, the revive compiler default is used
+    #[clap(id = "resolc.stack-size", long = "resolc.stack-size")]
+    pub stack_size: Option<u32>,
 }
 /// A set of configuration parameters for Polkadot Parachain.
@@ -852,6 +922,54 @@ pub struct ReviveDevNodeConfiguration {
     pub existing_rpc_url: Vec<String>,
 }
+/// A set of configuration parameters for the polkadot-omni-node.
+#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
+pub struct PolkadotOmnichainNodeConfiguration {
+    /// Specifies the path of the polkadot-omni-node to be used by the tool.
+    ///
+    /// If this is not specified, then the tool assumes that it should use the polkadot-omni-node
+    /// binary that's provided in the user's $PATH.
+    #[clap(
+        id = "polkadot-omni-node.path",
+        long = "polkadot-omni-node.path",
+        default_value = "polkadot-omni-node"
+    )]
+    pub path: PathBuf,
+    /// The amount of time to wait upon startup before considering that the node timed out.
+    #[clap(
+        id = "polkadot-omni-node.start-timeout-ms",
+        long = "polkadot-omni-node.start-timeout-ms",
+        default_value = "90000",
+        value_parser = parse_duration
+    )]
+    pub start_timeout_ms: Duration,
+    /// Defines how often blocks will be sealed by the node in milliseconds.
+    #[clap(
+        id = "polkadot-omni-node.block-time-ms",
+        long = "polkadot-omni-node.block-time-ms",
+        default_value = "200",
+        value_parser = parse_duration
+    )]
+    pub block_time: Duration,
+    /// The path of the chainspec of the chain that we're spawning
+    #[clap(
+        id = "polkadot-omni-node.chain-spec-path",
+        long = "polkadot-omni-node.chain-spec-path"
+    )]
+    pub chain_spec_path: Option<PathBuf>,
+    /// The ID of the parachain that the polkadot-omni-node will spawn. This argument is required if
+    /// the polkadot-omni-node is one of the selected platforms for running the tests or benchmarks.
+    #[clap(
+        id = "polkadot-omni-node.parachain-id",
+        long = "polkadot-omni-node.parachain-id"
+    )]
+    pub parachain_id: Option<usize>,
+}
 /// A set of configuration parameters for the ETH RPC.
 #[derive(Clone, Debug, Parser, Serialize, Deserialize)]
 pub struct EthRpcConfiguration {
@@ -1006,6 +1124,10 @@ pub struct ReportConfiguration {
     /// Controls if the compiler output is included in the final report.
     #[clap(long = "report.include-compiler-output")]
     pub include_compiler_output: bool,
+    /// The filename to use for the report.
+    #[clap(long = "report.file-name")]
+    pub file_name: Option<String>,
 }
 #[derive(Clone, Debug, Parser, Serialize, Deserialize)]
@@ -1,6 +1,5 @@
 use std::{
     collections::HashMap,
-    ops::ControlFlow,
     sync::{
         Arc,
         atomic::{AtomicUsize, Ordering},
@@ -13,6 +12,7 @@ use alloy::{
     json_abi::JsonAbi,
     network::{Ethereum, TransactionBuilder},
     primitives::{Address, TxHash, U256},
+    providers::Provider,
     rpc::types::{
         TransactionReceipt, TransactionRequest,
         trace::geth::{
@@ -22,12 +22,9 @@ use alloy::{
     },
 };
 use anyhow::{Context as _, Result, bail};
-use futures::TryFutureExt;
+use futures::{FutureExt as _, TryFutureExt};
 use indexmap::IndexMap;
-use revive_dt_common::{
-    futures::{PollingWaitBehavior, poll},
-    types::PrivateKeyAllocator,
-};
+use revive_dt_common::types::PrivateKeyAllocator;
 use revive_dt_format::{
     metadata::{ContractInstance, ContractPathAndIdent},
     steps::{
@@ -37,7 +34,7 @@ use revive_dt_format::{
     traits::{ResolutionContext, ResolverApi},
 };
 use tokio::sync::{Mutex, OnceCell, mpsc::UnboundedSender};
-use tracing::{Instrument, Span, debug, error, field::display, info, info_span, instrument};
+use tracing::{Span, debug, error, field::display, info, instrument};
 use crate::{
     differential_benchmarks::{ExecutionState, WatcherEvent},
@@ -73,6 +70,10 @@ pub struct Driver<'a, I> {
     /// The number of steps that were executed on the driver.
     steps_executed: usize,
+    /// This function controls if the driver should wait for transactions to be included in a block
+    /// or not before proceeding forward.
+    await_transaction_inclusion: bool,
     /// This is the queue of steps that are to be executed by the driver for this test case. Each
     /// time `execute_step` is called one of the steps is executed.
     steps_iterator: I,
@@ -89,6 +90,7 @@ where
         private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
         cached_compiler: &CachedCompiler<'a>,
         watcher_tx: UnboundedSender<WatcherEvent>,
+        await_transaction_inclusion: bool,
        steps: I,
     ) -> Result<Self> {
         let mut this = Driver {
@@ -104,6 +106,7 @@ where
             execution_state: ExecutionState::empty(),
             steps_executed: 0,
             steps_iterator: steps,
+            await_transaction_inclusion,
             watcher_tx,
         };
         this.init_execution_state(cached_compiler)
@@ -166,7 +169,7 @@ where
             code,
         );
         let receipt = self
-            .execute_transaction(tx, None)
+            .execute_transaction(tx, None, Duration::from_secs(5 * 60))
            .and_then(|(_, receipt_fut)| receipt_fut)
            .await
            .inspect_err(|err| {
@@ -365,7 +368,30 @@ where
         let tx = step
             .as_transaction(self.resolver.as_ref(), self.default_resolution_context())
             .await?;
-        Ok(self.execute_transaction(tx, Some(step_path)).await?.0)
+        let (tx_hash, receipt_future) = self
+            .execute_transaction(tx.clone(), Some(step_path), Duration::from_secs(30 * 60))
+            .await?;
+        if self.await_transaction_inclusion {
+            let receipt = receipt_future
+                .await
+                .context("Failed while waiting for transaction inclusion in block")?;
+            if !receipt.status() {
+                error!(
+                    ?tx,
+                    tx.hash = %receipt.transaction_hash,
+                    ?receipt,
+                    "Encountered a failing benchmark transaction"
+                );
+                bail!(
+                    "Encountered a failing transaction in benchmarks: {}",
+                    receipt.transaction_hash
+                )
+            }
+        }
+        Ok(tx_hash)
     }
     }
 }
@@ -466,6 +492,7 @@ where
                 .collect::<Vec<_>>();
             steps.into_iter()
         },
+        await_transaction_inclusion: self.await_transaction_inclusion,
         watcher_tx: self.watcher_tx.clone(),
     })
     .map(|driver| driver.execute_all());
@@ -632,7 +659,7 @@ where
         };
         let receipt = match self
-            .execute_transaction(tx, step_path)
+            .execute_transaction(tx, step_path, Duration::from_secs(5 * 60))
            .and_then(|(_, receipt_fut)| receipt_fut)
            .await
         {
@@ -677,18 +704,33 @@ where
     #[instrument(
         level = "info",
         skip_all,
-        fields(driver_id = self.driver_id, transaction_hash = tracing::field::Empty)
+        fields(
+            driver_id = self.driver_id,
+            transaction = ?transaction,
+            transaction_hash = tracing::field::Empty
+        ),
+        err(Debug)
     )]
     async fn execute_transaction(
         &self,
         transaction: TransactionRequest,
         step_path: Option<&StepPath>,
+        receipt_wait_duration: Duration,
     ) -> anyhow::Result<(TxHash, impl Future<Output = Result<TransactionReceipt>>)> {
         let node = self.platform_information.node;
-        let transaction_hash = node
-            .submit_transaction(transaction)
+        let provider = node.provider().await.context("Creating provider failed")?;
+        let pending_transaction_builder = provider
+            .send_transaction(transaction)
            .await
            .context("Failed to submit transaction")?;
+        let transaction_hash = *pending_transaction_builder.tx_hash();
+        let receipt_future = pending_transaction_builder
+            .with_timeout(Some(receipt_wait_duration))
+            .with_required_confirmations(2)
+            .get_receipt()
+            .map(|res| res.context("Failed to get the receipt of the transaction"));
         Span::current().record("transaction_hash", display(transaction_hash));
         info!("Submitted transaction");
@@ -701,28 +743,7 @@ where
             .context("Failed to send the transaction hash to the watcher")?;
         };
-        Ok((transaction_hash, async move {
-            info!("Starting to poll for transaction receipt");
-            poll(
-                Duration::from_secs(30 * 60),
-                PollingWaitBehavior::Constant(Duration::from_secs(1)),
-                || {
-                    async move {
-                        match node.get_receipt(transaction_hash).await {
-                            Ok(receipt) => {
-                                info!("Polling succeeded, receipt found");
-                                Ok(ControlFlow::Break(receipt))
-                            }
-                            Err(_) => Ok(ControlFlow::Continue(())),
-                        }
-                    }
-                    .instrument(info_span!("Polling for receipt"))
-                },
-            )
-            .instrument(info_span!("Polling for receipt", %transaction_hash))
-            .await
-            .inspect(|_| info!("Found the transaction receipt"))
-        }))
+        Ok((transaction_hash, receipt_future))
     }
     // endregion:Transaction Execution
 }
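The rewrite above drops the hand-rolled polling loop in favor of alloy's pending-transaction machinery. A minimal sketch of that flow against a generic alloy provider; the endpoint URL is a placeholder, and the snippet assumes the node can sign for the sender (as dev nodes with unlocked accounts do) or that a wallet filler is installed:

use std::time::Duration;
use alloy::providers::{Provider, ProviderBuilder};
use alloy::rpc::types::TransactionRequest;
use anyhow::{Context as _, Result};

// Submit a transaction, then await its receipt with a timeout and confirmation count.
async fn submit_and_wait(tx: TransactionRequest) -> Result<()> {
    // Placeholder endpoint; the driver obtains its provider from the node pool instead.
    let provider = ProviderBuilder::new()
        .connect("http://localhost:8545")
        .await
        .context("Failed to connect to the node")?;
    let pending = provider
        .send_transaction(tx)
        .await
        .context("Failed to submit transaction")?;
    let receipt = pending
        .with_timeout(Some(Duration::from_secs(30 * 60)))
        .with_required_confirmations(2)
        .get_receipt()
        .await
        .context("Failed to get the receipt of the transaction")?;
    println!("included in block {:?}", receipt.block_number);
    Ok(())
}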
@@ -160,6 +160,7 @@
     private_key_allocator,
     cached_compiler.as_ref(),
     watcher_tx.clone(),
+    context.await_transaction_inclusion,
     test_definition
         .case
         .steps_iterator_for_benchmarks(context.default_repetition_count)
@@ -139,23 +139,18 @@
                 break;
             }
-            info!(
-                block_number = block.ethereum_block_information.block_number,
-                block_tx_count = block.ethereum_block_information.transaction_hashes.len(),
-                remaining_transactions = watch_for_transaction_hashes.read().await.len(),
-                "Observed a block"
-            );
             // Remove all of the transaction hashes observed in this block from the txs we
             // are currently watching for.
             let mut watch_for_transaction_hashes =
                 watch_for_transaction_hashes.write().await;
+            let mut relevant_transactions_observed = 0;
             for tx_hash in block.ethereum_block_information.transaction_hashes.iter() {
                 let Some((step_path, submission_time)) =
                     watch_for_transaction_hashes.remove(tx_hash)
                 else {
                     continue;
                 };
+                relevant_transactions_observed += 1;
                 let transaction_information = TransactionInformation {
                     transaction_hash: *tx_hash,
                     submission_timestamp: submission_time
@@ -172,6 +167,14 @@
                 )
                 .expect("Can't fail")
             }
+            info!(
+                block_number = block.ethereum_block_information.block_number,
+                block_tx_count = block.ethereum_block_information.transaction_hashes.len(),
+                relevant_transactions_observed,
+                remaining_transactions = watch_for_transaction_hashes.len(),
+                "Observed a block"
+            );
         }
         info!("Watcher's Block Watching Task Finished");
@@ -409,7 +409,6 @@
     .handle_function_call_execution(step, deployment_receipts)
     .await
     .context("Failed to handle the function call execution")?;
-tracing::Span::current().record("block_number", execution_receipt.block_number);
 let tracing_result = self
     .handle_function_call_call_frame_tracing(execution_receipt.transaction_hash)
     .await
+152 -7
@@ -14,9 +14,12 @@ use revive_dt_common::types::*;
use revive_dt_compiler::{SolidityCompiler, revive_resolc::Resolc, solc::Solc}; use revive_dt_compiler::{SolidityCompiler, revive_resolc::Resolc, solc::Solc};
use revive_dt_config::*; use revive_dt_config::*;
use revive_dt_node::{ use revive_dt_node::{
Node, node_implementations::geth::GethNode, Node,
node_implementations::lighthouse_geth::LighthouseGethNode, node_implementations::{
node_implementations::substrate::SubstrateNode, node_implementations::zombienet::ZombienetNode, geth::GethNode, lighthouse_geth::LighthouseGethNode,
polkadot_omni_node::PolkadotOmnichainNode, substrate::SubstrateNode,
zombienet::ZombienetNode,
},
}; };
use revive_dt_node_interaction::EthereumNode; use revive_dt_node_interaction::EthereumNode;
use tracing::info; use tracing::info;
@@ -91,7 +94,8 @@ impl Platform for GethEvmSolcPlatform {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context); let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let genesis = genesis_configuration.genesis()?.clone(); let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || { Ok(thread::spawn(move || {
let node = GethNode::new(context); let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node = GethNode::new(context, use_fallback_gas_filler);
let node = spawn_node::<GethNode>(node, genesis)?; let node = spawn_node::<GethNode>(node, genesis)?;
Ok(Box::new(node) as Box<_>) Ok(Box::new(node) as Box<_>)
})) }))
@@ -145,7 +149,8 @@ impl Platform for LighthouseGethEvmSolcPlatform {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context); let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let genesis = genesis_configuration.genesis()?.clone(); let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || { Ok(thread::spawn(move || {
let node = LighthouseGethNode::new(context); let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node = LighthouseGethNode::new(context, use_fallback_gas_filler);
let node = spawn_node::<LighthouseGethNode>(node, genesis)?; let node = spawn_node::<LighthouseGethNode>(node, genesis)?;
Ok(Box::new(node) as Box<_>) Ok(Box::new(node) as Box<_>)
})) }))
@@ -206,12 +211,14 @@ impl Platform for ReviveDevNodePolkavmResolcPlatform {
let genesis = genesis_configuration.genesis()?.clone(); let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || { Ok(thread::spawn(move || {
let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node = SubstrateNode::new( let node = SubstrateNode::new(
revive_dev_node_path, revive_dev_node_path,
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND, SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
Some(revive_dev_node_consensus), Some(revive_dev_node_consensus),
context, context,
&eth_rpc_connection_strings, &eth_rpc_connection_strings,
use_fallback_gas_filler,
); );
let node = spawn_node(node, genesis)?; let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>) Ok(Box::new(node) as Box<_>)
@@ -274,12 +281,14 @@ impl Platform for ReviveDevNodeRevmSolcPlatform {
let genesis = genesis_configuration.genesis()?.clone(); let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || { Ok(thread::spawn(move || {
let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node = SubstrateNode::new( let node = SubstrateNode::new(
revive_dev_node_path, revive_dev_node_path,
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND, SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
Some(revive_dev_node_consensus), Some(revive_dev_node_consensus),
context, context,
&eth_rpc_connection_strings, &eth_rpc_connection_strings,
use_fallback_gas_filler,
); );
let node = spawn_node(node, genesis)?; let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>) Ok(Box::new(node) as Box<_>)
@@ -338,7 +347,9 @@ impl Platform for ZombienetPolkavmResolcPlatform {
.clone(); .clone();
let genesis = genesis_configuration.genesis()?.clone(); let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || { Ok(thread::spawn(move || {
let node = ZombienetNode::new(polkadot_parachain_path, context); let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node =
ZombienetNode::new(polkadot_parachain_path, context, use_fallback_gas_filler);
let node = spawn_node(node, genesis)?; let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>) Ok(Box::new(node) as Box<_>)
})) }))
@@ -395,7 +406,9 @@ impl Platform for ZombienetRevmSolcPlatform {
.clone(); .clone();
let genesis = genesis_configuration.genesis()?.clone(); let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || { Ok(thread::spawn(move || {
let node = ZombienetNode::new(polkadot_parachain_path, context); let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node =
ZombienetNode::new(polkadot_parachain_path, context, use_fallback_gas_filler);
let node = spawn_node(node, genesis)?; let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>) Ok(Box::new(node) as Box<_>)
})) }))
@@ -422,6 +435,126 @@ impl Platform for ZombienetRevmSolcPlatform {
} }
} }
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct PolkadotOmniNodePolkavmResolcPlatform;
impl Platform for PolkadotOmniNodePolkavmResolcPlatform {
fn platform_identifier(&self) -> PlatformIdentifier {
PlatformIdentifier::PolkadotOmniNodePolkavmResolc
}
fn node_identifier(&self) -> NodeIdentifier {
NodeIdentifier::PolkadotOmniNode
}
fn vm_identifier(&self) -> VmIdentifier {
VmIdentifier::PolkaVM
}
fn compiler_identifier(&self) -> CompilerIdentifier {
CompilerIdentifier::Resolc
}
fn new_node(
&self,
context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node = PolkadotOmnichainNode::new(context, use_fallback_gas_filler);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
}
fn new_compiler(
&self,
context: Context,
version: Option<VersionOrRequirement>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
Box::pin(async move {
let compiler = Resolc::new(context, version).await;
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
})
}
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
let polkadot_omnichain_node_configuration =
AsRef::<PolkadotOmnichainNodeConfiguration>::as_ref(&context);
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
PolkadotOmnichainNode::node_genesis(
&wallet,
polkadot_omnichain_node_configuration
.chain_spec_path
.as_ref()
.context("No WASM runtime path found in the polkadot-omni-node configuration")?,
)
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct PolkadotOmniNodeRevmSolcPlatform;
impl Platform for PolkadotOmniNodeRevmSolcPlatform {
fn platform_identifier(&self) -> PlatformIdentifier {
PlatformIdentifier::PolkadotOmniNodeRevmSolc
}
fn node_identifier(&self) -> NodeIdentifier {
NodeIdentifier::PolkadotOmniNode
}
fn vm_identifier(&self) -> VmIdentifier {
VmIdentifier::Evm
}
fn compiler_identifier(&self) -> CompilerIdentifier {
CompilerIdentifier::Solc
}
fn new_node(
&self,
context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node = PolkadotOmnichainNode::new(context, use_fallback_gas_filler);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
}
fn new_compiler(
&self,
context: Context,
version: Option<VersionOrRequirement>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
Box::pin(async move {
let compiler = Solc::new(context, version).await;
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
})
}
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
let polkadot_omnichain_node_configuration =
AsRef::<PolkadotOmnichainNodeConfiguration>::as_ref(&context);
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
PolkadotOmnichainNode::node_genesis(
&wallet,
polkadot_omnichain_node_configuration
.chain_spec_path
.as_ref()
.context("No WASM runtime path found in the polkadot-omni-node configuration")?,
)
}
}
impl From<PlatformIdentifier> for Box<dyn Platform> { impl From<PlatformIdentifier> for Box<dyn Platform> {
fn from(value: PlatformIdentifier) -> Self { fn from(value: PlatformIdentifier) -> Self {
match value { match value {
@@ -439,6 +572,12 @@ impl From<PlatformIdentifier> for Box<dyn Platform> {
Box::new(ZombienetPolkavmResolcPlatform) as Box<_> Box::new(ZombienetPolkavmResolcPlatform) as Box<_>
} }
PlatformIdentifier::ZombienetRevmSolc => Box::new(ZombienetRevmSolcPlatform) as Box<_>, PlatformIdentifier::ZombienetRevmSolc => Box::new(ZombienetRevmSolcPlatform) as Box<_>,
PlatformIdentifier::PolkadotOmniNodePolkavmResolc => {
Box::new(PolkadotOmniNodePolkavmResolcPlatform) as Box<_>
}
PlatformIdentifier::PolkadotOmniNodeRevmSolc => {
Box::new(PolkadotOmniNodeRevmSolcPlatform) as Box<_>
}
} }
} }
} }
@@ -460,6 +599,12 @@ impl From<PlatformIdentifier> for &dyn Platform {
&ZombienetPolkavmResolcPlatform as &dyn Platform &ZombienetPolkavmResolcPlatform as &dyn Platform
} }
PlatformIdentifier::ZombienetRevmSolc => &ZombienetRevmSolcPlatform as &dyn Platform, PlatformIdentifier::ZombienetRevmSolc => &ZombienetRevmSolcPlatform as &dyn Platform,
PlatformIdentifier::PolkadotOmniNodePolkavmResolc => {
&PolkadotOmniNodePolkavmResolcPlatform as &dyn Platform
}
PlatformIdentifier::PolkadotOmniNodeRevmSolc => {
&PolkadotOmniNodeRevmSolcPlatform as &dyn Platform
}
} }
} }
} }
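These two `From` implementations are the only dispatch points from a parsed identifier to a platform object. A minimal usage sketch (the `PlatformIdentifier` and `Platform` types are the ones shown in this diff; the helper name is hypothetical):

// Hypothetical helper, assuming the types in the diff above.
fn platform_for(identifier: PlatformIdentifier) -> Box<dyn Platform> {
    identifier.into()
}
// e.g. platform_for(PlatformIdentifier::PolkadotOmniNodeRevmSolc).node_identifier()
// yields NodeIdentifier::PolkadotOmniNode.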
+37 -6
@@ -2,9 +2,9 @@ mod differential_benchmarks;
 mod differential_tests;
 mod helpers;
-use anyhow::Context as _;
+use anyhow::{Context as _, bail};
 use clap::Parser;
-use revive_dt_report::ReportAggregator;
+use revive_dt_report::{ReportAggregator, TestCaseStatus};
 use schemars::schema_for;
 use tracing::{info, level_filters::LevelFilter};
 use tracing_subscriber::{EnvFilter, FmtSubscriber};
@@ -57,8 +57,22 @@ fn main() -> anyhow::Result<()> {
                let differential_tests_handling_task =
                    handle_differential_tests(*context, reporter);
-               futures::future::try_join(differential_tests_handling_task, report_aggregator_task)
-                   .await?;
+               let (_, report) = futures::future::try_join(
+                   differential_tests_handling_task,
+                   report_aggregator_task,
+               )
+               .await?;
+
+               let contains_failure = report
+                   .execution_information
+                   .values()
+                   .flat_map(|values| values.case_reports.values())
+                   .flat_map(|values| values.mode_execution_reports.values())
+                   .any(|report| matches!(report.status, Some(TestCaseStatus::Failed { .. })));
+               if contains_failure {
+                   bail!("Some tests failed")
+               }

                Ok(())
            }),
@@ -71,12 +85,23 @@ fn main() -> anyhow::Result<()> {
                let differential_benchmarks_handling_task =
                    handle_differential_benchmarks(*context, reporter);
-               futures::future::try_join(
+               let (_, report) = futures::future::try_join(
                    differential_benchmarks_handling_task,
                    report_aggregator_task,
                )
                .await?;
+
+               let contains_failure = report
+                   .execution_information
+                   .values()
+                   .flat_map(|values| values.case_reports.values())
+                   .flat_map(|values| values.mode_execution_reports.values())
+                   .any(|report| matches!(report.status, Some(TestCaseStatus::Failed { .. })));
+               if contains_failure {
+                   bail!("Some benchmarks failed")
+               }

                Ok(())
            }),
            Context::ExportGenesis(ref export_genesis_context) => {
@@ -85,11 +110,17 @@ fn main() -> anyhow::Result<()> {
                let genesis_json = serde_json::to_string_pretty(&genesis)
                    .context("Failed to serialize the genesis to JSON")?;
                println!("{genesis_json}");

                Ok(())
            }
            Context::ExportJsonSchema => {
                let schema = schema_for!(Metadata);
-               println!("{}", serde_json::to_string_pretty(&schema).unwrap());
+               println!(
+                   "{}",
+                   serde_json::to_string_pretty(&schema)
+                       .context("Failed to export the JSON schema")?
+               );

                Ok(())
            }
        }
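The failure check is duplicated verbatim between the tests and benchmarks arms; it could be expressed once as a helper. A sketch, assuming the aggregated report type (here called `Report`) and the field shapes visible above:

// Sketch only; `Report` is an assumed name for the aggregated report type.
fn report_contains_failure(report: &Report) -> bool {
    report
        .execution_information
        .values()
        .flat_map(|platform| platform.case_reports.values())
        .flat_map(|case| case.mode_execution_reports.values())
        .any(|mode| matches!(mode.status, Some(TestCaseStatus::Failed { .. })))
}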
+19 -94
@@ -3,7 +3,6 @@
 use std::{
     fs::{File, create_dir_all, remove_dir_all},
     io::Read,
-    ops::ControlFlow,
     path::PathBuf,
     pin::Pin,
     process::{Command, Stdio},
@@ -35,12 +34,9 @@ use anyhow::Context as _;
 use futures::{FutureExt, Stream, StreamExt};
 use revive_common::EVMVersion;
 use tokio::sync::OnceCell;
-use tracing::{Instrument, error, instrument};
+use tracing::{error, instrument};

-use revive_dt_common::{
-    fs::clear_directory,
-    futures::{PollingWaitBehavior, poll},
-};
+use revive_dt_common::fs::clear_directory;
 use revive_dt_config::*;
 use revive_dt_format::traits::ResolverApi;
 use revive_dt_node_interaction::EthereumNode;
@@ -76,6 +72,7 @@ pub struct GethNode {
     wallet: Arc<EthereumWallet>,
     nonce_manager: CachedNonceManager,
     provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
+    use_fallback_gas_filler: bool,
 }

 impl GethNode {
@@ -89,17 +86,12 @@ impl GethNode {
     const READY_MARKER: &str = "IPC endpoint opened";
     const ERROR_MARKER: &str = "Fatal:";
-    const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress";
-    const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet";
-    const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(5 * 60);
-    const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60);

     pub fn new(
         context: impl AsRef<WorkingDirectoryConfiguration>
             + AsRef<WalletConfiguration>
             + AsRef<GethConfiguration>
             + Clone,
+        use_fallback_gas_filler: bool,
     ) -> Self {
         let working_directory_configuration =
             AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
@@ -126,6 +118,7 @@
             wallet: wallet.clone(),
             nonce_manager: Default::default(),
             provider: Default::default(),
+            use_fallback_gas_filler,
         }
     }
@@ -246,7 +239,8 @@
             .get_or_try_init(|| async move {
                 construct_concurrency_limited_provider::<Ethereum, _>(
                     self.connection_string.as_str(),
-                    FallbackGasFiller::default(),
+                    FallbackGasFiller::default()
+                        .with_fallback_mechanism(self.use_fallback_gas_filler),
                     ChainIdFiller::new(Some(CHAIN_ID)),
                     NonceFiller::new(self.nonce_manager.clone()),
                     self.wallet.clone(),
@@ -337,62 +331,15 @@ impl EthereumNode for GethNode {
         transaction: TransactionRequest,
     ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
         Box::pin(async move {
-            let provider = self
-                .provider()
-                .await
-                .context("Failed to create provider for transaction submission")?;
-            let pending_transaction = provider
-                .send_transaction(transaction)
-                .await
-                .inspect_err(
-                    |err| error!(%err, "Encountered an error when submitting the transaction"),
-                )
-                .context("Failed to submit transaction to geth node")?;
-            let transaction_hash = *pending_transaction.tx_hash();
-
-            // The following is a fix for the "transaction indexing is in progress" error that we used
-            // to get. You can find more information on this in the following GH issue in geth
-            // https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on,
-            // before we can get the receipt of the transaction it needs to have been indexed by the
-            // node's indexer. Just because the transaction has been confirmed it doesn't mean that it
-            // has been indexed. When we call alloy's `get_receipt` it checks if the transaction was
-            // confirmed. If it has been, then it will call `eth_getTransactionReceipt` method which
-            // _might_ return the above error if the tx has not yet been indexed yet. So, we need to
-            // implement a retry mechanism for the receipt to keep retrying to get it until it
-            // eventually works, but we only do that if the error we get back is the "transaction
-            // indexing is in progress" error or if the receipt is None.
-            //
-            // Getting the transaction indexed and taking a receipt can take a long time especially when
-            // a lot of transactions are being submitted to the node. Thus, while initially we only
-            // allowed for 60 seconds of waiting with a 1 second delay in polling, we need to allow for
-            // a larger wait time. Therefore, in here we allow for 5 minutes of waiting with exponential
-            // backoff each time we attempt to get the receipt and find that it's not available.
-            poll(
-                Self::RECEIPT_POLLING_DURATION,
-                PollingWaitBehavior::Constant(Duration::from_millis(200)),
-                move || {
-                    let provider = provider.clone();
-                    async move {
-                        match provider.get_transaction_receipt(transaction_hash).await {
-                            Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)),
-                            Ok(None) => Ok(ControlFlow::Continue(())),
-                            Err(error) => {
-                                let error_string = error.to_string();
-                                match error_string.contains(Self::TRANSACTION_INDEXING_ERROR) {
-                                    true => Ok(ControlFlow::Continue(())),
-                                    false => Err(error.into()),
-                                }
-                            }
-                        }
-                    }
-                },
-            )
-            .instrument(tracing::info_span!(
-                "Awaiting transaction receipt",
-                ?transaction_hash
-            ))
-            .await
+            self.provider()
+                .await
+                .context("Failed to create provider for transaction submission")?
+                .send_transaction(transaction)
+                .await
+                .context("Encountered an error when submitting a transaction")?
+                .get_receipt()
+                .await
+                .context("Failed to get the receipt for the transaction")
        })
    }
@@ -403,34 +350,12 @@
         trace_options: GethDebugTracingOptions,
     ) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> {
         Box::pin(async move {
-            let provider = self
-                .provider()
-                .await
-                .context("Failed to create provider for tracing")?;
-            poll(
-                Self::TRACE_POLLING_DURATION,
-                PollingWaitBehavior::Constant(Duration::from_millis(200)),
-                move || {
-                    let provider = provider.clone();
-                    let trace_options = trace_options.clone();
-                    async move {
-                        match provider
-                            .debug_trace_transaction(tx_hash, trace_options)
-                            .await
-                        {
-                            Ok(trace) => Ok(ControlFlow::Break(trace)),
-                            Err(error) => {
-                                let error_string = error.to_string();
-                                match error_string.contains(Self::TRANSACTION_TRACING_ERROR) {
-                                    true => Ok(ControlFlow::Continue(())),
-                                    false => Err(error.into()),
-                                }
-                            }
-                        }
-                    }
-                },
-            )
-            .await
+            self.provider()
+                .await
+                .context("Failed to create provider for tracing")?
+                .debug_trace_transaction(tx_hash, trace_options)
+                .await
+                .context("Failed to get the transaction trace")
        })
    }
@@ -742,7 +667,7 @@ mod tests {
     fn new_node() -> (TestExecutionContext, GethNode) {
         let context = test_config();
-        let mut node = GethNode::new(&context);
+        let mut node = GethNode::new(&context, true);
         node.init(context.genesis_configuration.genesis().unwrap().clone())
             .expect("Failed to initialize the node")
             .spawn_process()
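The deleted code above leaned on a generic, time-bounded polling helper from `revive_dt_common`. For readers without that crate at hand, a self-contained sketch of the pattern (the name and signature are assumptions based on the call sites, not the crate's actual API):

use std::ops::ControlFlow;
use std::time::{Duration, Instant};

// Poll `attempt` until it breaks with a value, an error occurs, or `timeout` elapses.
async fn poll_until<T, F, Fut>(timeout: Duration, delay: Duration, mut attempt: F) -> anyhow::Result<T>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = anyhow::Result<ControlFlow<T>>>,
{
    let deadline = Instant::now() + timeout;
    loop {
        match attempt().await? {
            ControlFlow::Break(value) => return Ok(value),
            ControlFlow::Continue(()) if Instant::now() < deadline => {
                tokio::time::sleep(delay).await
            }
            ControlFlow::Continue(()) => anyhow::bail!("polling timed out"),
        }
    }
}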
@@ -12,7 +12,6 @@ use std::{
     collections::{BTreeMap, HashSet},
     fs::{File, create_dir_all},
     io::Read,
-    ops::ControlFlow,
     path::PathBuf,
     pin::Pin,
     process::{Command, Stdio},
@@ -48,12 +47,9 @@ use revive_common::EVMVersion;
 use serde::{Deserialize, Deserializer, Serialize, Serializer};
 use serde_with::serde_as;
 use tokio::sync::OnceCell;
-use tracing::{Instrument, info, instrument};
+use tracing::{info, instrument};

-use revive_dt_common::{
-    fs::clear_directory,
-    futures::{PollingWaitBehavior, poll},
-};
+use revive_dt_common::fs::clear_directory;
 use revive_dt_config::*;
 use revive_dt_format::traits::ResolverApi;
 use revive_dt_node_interaction::EthereumNode;
@@ -106,6 +102,8 @@ pub struct LighthouseGethNode {
     persistent_http_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
     persistent_ws_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
+
+    use_fallback_gas_filler: bool,
 }

 impl LighthouseGethNode {
@@ -114,12 +112,6 @@ impl LighthouseGethNode {
     const CONFIG_FILE_NAME: &str = "config.yaml";

-    const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress";
-    const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet";
-    const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(5 * 60);
-    const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60);
-
     const VALIDATOR_MNEMONIC: &str = "giant issue aisle success illegal bike spike question tent bar rely arctic volcano long crawl hungry vocal artwork sniff fantasy very lucky have athlete";

     pub fn new(
@@ -127,6 +119,7 @@ impl LighthouseGethNode {
             + AsRef<WalletConfiguration>
             + AsRef<KurtosisConfiguration>
             + Clone,
+        use_fallback_gas_filler: bool,
     ) -> Self {
         let working_directory_configuration =
             AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
@@ -176,6 +169,7 @@
             nonce_manager: Default::default(),
             persistent_http_provider: OnceCell::const_new(),
             persistent_ws_provider: OnceCell::const_new(),
+            use_fallback_gas_filler,
         }
     }
@@ -374,7 +368,8 @@
             .get_or_try_init(|| async move {
                 construct_concurrency_limited_provider::<Ethereum, _>(
                     self.ws_connection_string.as_str(),
-                    FallbackGasFiller::default(),
+                    FallbackGasFiller::default()
+                        .with_fallback_mechanism(self.use_fallback_gas_filler),
                     ChainIdFiller::new(Some(CHAIN_ID)),
                     NonceFiller::new(self.nonce_manager.clone()),
                     self.wallet.clone(),
@@ -476,73 +471,6 @@
         Ok(())
     }

-    fn internal_execute_transaction<'a>(
-        transaction: TransactionRequest,
-        provider: FillProvider<
-            impl TxFiller<Ethereum> + 'a,
-            impl Provider<Ethereum> + Clone + 'a,
-            Ethereum,
-        >,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + 'a>> {
-        Box::pin(async move {
-            let pending_transaction = provider
-                .send_transaction(transaction)
-                .await
-                .inspect_err(|err| {
-                    tracing::error!(
-                        %err,
-                        "Encountered an error when submitting the transaction"
-                    )
-                })
-                .context("Failed to submit transaction to geth node")?;
-            let transaction_hash = *pending_transaction.tx_hash();
-
-            // The following is a fix for the "transaction indexing is in progress" error that we
-            // used to get. You can find more information on this in the following GH issue in geth
-            // https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on,
-            // before we can get the receipt of the transaction it needs to have been indexed by the
-            // node's indexer. Just because the transaction has been confirmed it doesn't mean that
-            // it has been indexed. When we call alloy's `get_receipt` it checks if the transaction
-            // was confirmed. If it has been, then it will call `eth_getTransactionReceipt` method
-            // which _might_ return the above error if the tx has not yet been indexed yet. So, we
-            // need to implement a retry mechanism for the receipt to keep retrying to get it until
-            // it eventually works, but we only do that if the error we get back is the "transaction
-            // indexing is in progress" error or if the receipt is None.
-            //
-            // Getting the transaction indexed and taking a receipt can take a long time especially
-            // when a lot of transactions are being submitted to the node. Thus, while initially we
-            // only allowed for 60 seconds of waiting with a 1 second delay in polling, we need to
-            // allow for a larger wait time. Therefore, in here we allow for 5 minutes of waiting
-            // with exponential backoff each time we attempt to get the receipt and find that it's
-            // not available.
-            poll(
-                Self::RECEIPT_POLLING_DURATION,
-                PollingWaitBehavior::Constant(Duration::from_millis(500)),
-                move || {
-                    let provider = provider.clone();
-                    async move {
-                        match provider.get_transaction_receipt(transaction_hash).await {
-                            Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)),
-                            Ok(None) => Ok(ControlFlow::Continue(())),
-                            Err(error) => {
-                                let error_string = error.to_string();
-                                match error_string.contains(Self::TRANSACTION_INDEXING_ERROR) {
-                                    true => Ok(ControlFlow::Continue(())),
-                                    false => Err(error.into()),
-                                }
-                            }
-                        }
-                    }
-                },
-            )
-            .instrument(tracing::info_span!(
-                "Awaiting transaction receipt",
-                ?transaction_hash
-            ))
-            .await
-        })
-    }
-
     pub fn node_genesis(mut genesis: Genesis, wallet: &EthereumWallet) -> Genesis {
         for signer_address in NetworkWallet::<Ethereum>::signer_addresses(&wallet) {
             genesis
@@ -621,11 +549,15 @@ impl EthereumNode for LighthouseGethNode {
         transaction: TransactionRequest,
     ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
         Box::pin(async move {
-            let provider = self
-                .http_provider()
-                .await
-                .context("Failed to create provider for transaction execution")?;
-            Self::internal_execute_transaction(transaction, provider).await
+            self.provider()
+                .await
+                .context("Failed to create provider for transaction submission")?
+                .send_transaction(transaction)
+                .await
+                .context("Encountered an error when submitting a transaction")?
+                .get_receipt()
+                .await
+                .context("Failed to get the receipt for the transaction")
        })
    }
@@ -636,35 +568,12 @@
         trace_options: GethDebugTracingOptions,
     ) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> {
         Box::pin(async move {
-            let provider = Arc::new(
-                self.http_provider()
-                    .await
-                    .context("Failed to create provider for tracing")?,
-            );
-            poll(
-                Self::TRACE_POLLING_DURATION,
-                PollingWaitBehavior::Constant(Duration::from_millis(200)),
-                move || {
-                    let provider = provider.clone();
-                    let trace_options = trace_options.clone();
-                    async move {
-                        match provider
-                            .debug_trace_transaction(tx_hash, trace_options)
-                            .await
-                        {
-                            Ok(trace) => Ok(ControlFlow::Break(trace)),
-                            Err(error) => {
-                                let error_string = error.to_string();
-                                match error_string.contains(Self::TRANSACTION_TRACING_ERROR) {
-                                    true => Ok(ControlFlow::Continue(())),
-                                    false => Err(error.into()),
-                                }
-                            }
-                        }
-                    }
-                },
-            )
-            .await
+            self.provider()
+                .await
+                .context("Failed to create provider for tracing")?
+                .debug_trace_transaction(tx_hash, trace_options)
+                .await
+                .context("Failed to get the transaction trace")
        })
    }
@@ -1152,7 +1061,7 @@ mod tests {
         let _guard = NODE_START_MUTEX.lock().unwrap();
         let context = test_config();
-        let mut node = LighthouseGethNode::new(&context);
+        let mut node = LighthouseGethNode::new(&context, true);
         node.init(context.genesis_configuration.genesis().unwrap().clone())
             .expect("Failed to initialize the node")
             .spawn_process()
@@ -1,4 +1,5 @@
 pub mod geth;
 pub mod lighthouse_geth;
+pub mod polkadot_omni_node;
 pub mod substrate;
 pub mod zombienet;
@@ -0,0 +1,791 @@
use std::{
fs::{File, create_dir_all, remove_dir_all},
path::{Path, PathBuf},
pin::Pin,
process::{Command, Stdio},
sync::{
Arc,
atomic::{AtomicU32, Ordering},
},
time::Duration,
};
use alloy::{
eips::BlockNumberOrTag,
genesis::Genesis,
network::{Ethereum, EthereumWallet, NetworkWallet},
primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
providers::{
Provider,
ext::DebugApi,
fillers::{CachedNonceManager, ChainIdFiller, NonceFiller},
},
rpc::types::{
EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
trace::geth::{
DiffMode, GethDebugTracingOptions, GethTrace, PreStateConfig, PreStateFrame,
},
},
};
use anyhow::Context as _;
use futures::{FutureExt, Stream, StreamExt};
use revive_common::EVMVersion;
use revive_dt_common::fs::clear_directory;
use revive_dt_format::traits::ResolverApi;
use serde_json::json;
use sp_core::crypto::Ss58Codec;
use sp_runtime::AccountId32;
use revive_dt_config::*;
use revive_dt_node_interaction::EthereumNode;
use revive_dt_report::{
EthereumMinedBlockInformation, MinedBlockInformation, SubstrateMinedBlockInformation,
};
use subxt::{OnlineClient, SubstrateConfig};
use tokio::sync::OnceCell;
use tracing::{instrument, trace};
use crate::{
Node,
constants::INITIAL_BALANCE,
helpers::{Process, ProcessReadinessWaitBehavior},
provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider},
};
static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
/// The number of blocks that should be cached by the polkadot-omni-node and the eth-rpc.
const NUMBER_OF_CACHED_BLOCKS: u32 = 100_000;
/// A node implementation for the polkadot-omni-node.
#[derive(Debug)]
pub struct PolkadotOmnichainNode {
/// The id of the node.
id: u32,
/// The path of the polkadot-omni-chain node binary.
polkadot_omnichain_node_binary_path: PathBuf,
/// The path of the eth-rpc binary.
eth_rpc_binary_path: PathBuf,
/// The path of the chain spec that this node will be spawned with.
chain_spec_path: Option<PathBuf>,
/// The path of the base directory which contains all of the stored data for this node.
base_directory_path: PathBuf,
/// The path of the logs directory which contains all of the stored logs.
logs_directory_path: PathBuf,
/// Defines the amount of time to wait before considering that the node start has timed out.
node_start_timeout: Duration,
/// The id of the parachain that this node will be spawning.
parachain_id: Option<usize>,
/// The block time.
block_time: Duration,
/// The node's process.
polkadot_omnichain_node_process: Option<Process>,
/// The eth-rpc's process.
eth_rpc_process: Option<Process>,
/// The URL of the eth-rpc.
rpc_url: String,
/// The wallet object that's used to sign any transaction submitted through this node.
wallet: Arc<EthereumWallet>,
/// The nonce manager used to populate nonces for all transactions submitted through this node.
nonce_manager: CachedNonceManager,
/// The provider used for all RPC interactions with the RPC of this node.
provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
/// A boolean that controls if the fallback gas filler should be used or not.
use_fallback_gas_filler: bool,
}
impl PolkadotOmnichainNode {
const BASE_DIRECTORY: &str = "polkadot-omni-node";
const LOGS_DIRECTORY: &str = "logs";
const POLKADOT_OMNICHAIN_NODE_READY_MARKER: &str = "Running JSON-RPC server";
const ETH_RPC_READY_MARKER: &str = "Running JSON-RPC server";
const CHAIN_SPEC_JSON_FILE: &str = "template_chainspec.json";
const BASE_POLKADOT_OMNICHAIN_NODE_RPC_PORT: u16 = 9944;
const BASE_ETH_RPC_PORT: u16 = 8545;
const POLKADOT_OMNICHAIN_NODE_LOG_ENV: &str =
"error,evm=debug,sc_rpc_server=info,runtime::revive=debug";
const RPC_LOG_ENV: &str = "info,eth-rpc=debug";
pub fn new(
context: impl AsRef<WorkingDirectoryConfiguration>
+ AsRef<EthRpcConfiguration>
+ AsRef<WalletConfiguration>
+ AsRef<PolkadotOmnichainNodeConfiguration>,
use_fallback_gas_filler: bool,
) -> Self {
let polkadot_omnichain_node_configuration =
AsRef::<PolkadotOmnichainNodeConfiguration>::as_ref(&context);
let working_directory_path =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context).as_path();
let eth_rpc_path = AsRef::<EthRpcConfiguration>::as_ref(&context)
.path
.as_path();
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
let base_directory = working_directory_path
.join(Self::BASE_DIRECTORY)
.join(id.to_string());
let logs_directory = base_directory.join(Self::LOGS_DIRECTORY);
Self {
id,
polkadot_omnichain_node_binary_path: polkadot_omnichain_node_configuration
.path
.to_path_buf(),
eth_rpc_binary_path: eth_rpc_path.to_path_buf(),
chain_spec_path: polkadot_omnichain_node_configuration
.chain_spec_path
.clone(),
base_directory_path: base_directory,
logs_directory_path: logs_directory,
parachain_id: polkadot_omnichain_node_configuration.parachain_id,
block_time: polkadot_omnichain_node_configuration.block_time,
polkadot_omnichain_node_process: Default::default(),
eth_rpc_process: Default::default(),
rpc_url: Default::default(),
wallet,
nonce_manager: Default::default(),
provider: Default::default(),
use_fallback_gas_filler,
node_start_timeout: polkadot_omnichain_node_configuration.start_timeout_ms,
}
}
fn init(&mut self, _: Genesis) -> anyhow::Result<&mut Self> {
trace!("Removing the various directories");
let _ = remove_dir_all(self.base_directory_path.as_path());
let _ = clear_directory(&self.base_directory_path);
let _ = clear_directory(&self.logs_directory_path);
trace!("Creating the various directories");
create_dir_all(&self.base_directory_path)
.context("Failed to create base directory for polkadot-omni-node node")?;
create_dir_all(&self.logs_directory_path)
.context("Failed to create logs directory for polkadot-omni-node node")?;
let template_chainspec_path = self.base_directory_path.join(Self::CHAIN_SPEC_JSON_FILE);
let chainspec_json = Self::node_genesis(
&self.wallet,
self.chain_spec_path
.as_ref()
.context("No runtime path provided")?,
)
.context("Failed to prepare the chainspec command")?;
serde_json::to_writer_pretty(
std::fs::File::create(&template_chainspec_path)
.context("Failed to create polkadot-omni-node template chainspec file")?,
&chainspec_json,
)
.context("Failed to write polkadot-omni-node template chainspec JSON")?;
Ok(self)
}
fn spawn_process(&mut self) -> anyhow::Result<()> {
// Error out if the chain spec path or the parachain id are not set, which means that the
// arguments we require were not provided.
self.chain_spec_path
.as_ref()
.context("No chain spec path provided")?;
self.parachain_id
.as_ref()
.context("No argument provided for the parachain-id")?;
let polkadot_omnichain_node_rpc_port =
Self::BASE_POLKADOT_OMNICHAIN_NODE_RPC_PORT + self.id as u16;
let eth_rpc_port = Self::BASE_ETH_RPC_PORT + self.id as u16;
let chainspec_path = self.base_directory_path.join(Self::CHAIN_SPEC_JSON_FILE);
self.rpc_url = format!("http://127.0.0.1:{eth_rpc_port}");
let polkadot_omnichain_node_process = Process::new(
"node",
self.logs_directory_path.as_path(),
self.polkadot_omnichain_node_binary_path.as_path(),
|command, stdout_file, stderr_file| {
command
.arg("--log")
.arg(Self::POLKADOT_OMNICHAIN_NODE_LOG_ENV)
.arg("--dev-block-time")
.arg(self.block_time.as_millis().to_string())
.arg("--rpc-port")
.arg(polkadot_omnichain_node_rpc_port.to_string())
.arg("--base-path")
.arg(self.base_directory_path.as_path())
.arg("--no-prometheus")
.arg("--no-hardware-benchmarks")
.arg("--authoring")
.arg("slot-based")
.arg("--chain")
.arg(chainspec_path)
.arg("--name")
.arg(format!("polkadot-omni-node-{}", self.id))
.arg("--rpc-methods")
.arg("unsafe")
.arg("--rpc-cors")
.arg("all")
.arg("--rpc-max-connections")
.arg(u32::MAX.to_string())
.arg("--pool-limit")
.arg(u32::MAX.to_string())
.arg("--pool-kbytes")
.arg(u32::MAX.to_string())
.arg("--state-pruning")
.arg(NUMBER_OF_CACHED_BLOCKS.to_string())
.env("RUST_LOG", Self::POLKADOT_OMNICHAIN_NODE_LOG_ENV)
.stdout(stdout_file)
.stderr(stderr_file);
},
ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
max_wait_duration: self.node_start_timeout,
check_function: Box::new(|_, stderr_line| match stderr_line {
Some(line) => Ok(line.contains(Self::POLKADOT_OMNICHAIN_NODE_READY_MARKER)),
None => Ok(false),
}),
},
);
match polkadot_omnichain_node_process {
Ok(process) => self.polkadot_omnichain_node_process = Some(process),
Err(err) => {
tracing::error!(
?err,
"Failed to start polkadot-omni-node, shutting down gracefully"
);
self.shutdown().context(
"Failed to gracefully shutdown after polkadot-omni-node start error",
)?;
return Err(err);
}
}
let eth_rpc_process = Process::new(
"eth-rpc",
self.logs_directory_path.as_path(),
self.eth_rpc_binary_path.as_path(),
|command, stdout_file, stderr_file| {
command
.arg("--dev")
.arg("--rpc-port")
.arg(eth_rpc_port.to_string())
.arg("--node-rpc-url")
.arg(format!("ws://127.0.0.1:{polkadot_omnichain_node_rpc_port}"))
.arg("--rpc-max-connections")
.arg(u32::MAX.to_string())
.arg("--index-last-n-blocks")
.arg(NUMBER_OF_CACHED_BLOCKS.to_string())
.arg("--cache-size")
.arg(NUMBER_OF_CACHED_BLOCKS.to_string())
.env("RUST_LOG", Self::RPC_LOG_ENV)
.stdout(stdout_file)
.stderr(stderr_file);
},
ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
max_wait_duration: Duration::from_secs(30),
check_function: Box::new(|_, stderr_line| match stderr_line {
Some(line) => Ok(line.contains(Self::ETH_RPC_READY_MARKER)),
None => Ok(false),
}),
},
);
match eth_rpc_process {
Ok(process) => self.eth_rpc_process = Some(process),
Err(err) => {
tracing::error!(?err, "Failed to start eth-rpc, shutting down gracefully");
self.shutdown()
.context("Failed to gracefully shutdown after eth-rpc start error")?;
return Err(err);
}
}
Ok(())
}
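// Worked example of the port scheme used in `spawn_process` (derived from the
// constants above, not new behavior): node id 0 listens on 9944 (node RPC) and
// 8545 (eth-rpc), node id 1 on 9945/8546, and so on, so concurrently spawned
// nodes never collide.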
fn eth_to_substrate_address(address: &Address) -> String {
let eth_bytes = address.0.0;
let mut padded = [0xEEu8; 32];
padded[..20].copy_from_slice(&eth_bytes);
let account_id = AccountId32::from(padded);
account_id.to_ss58check()
}
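// Illustration (not part of the diff): the mapping above follows the
// pallet-revive convention of padding the 20-byte Ethereum address with 0xEE
// bytes up to the 32 bytes of an AccountId32 before SS58-encoding it, e.g.
//
//     0xAAAA..AAAA (20 bytes)  ->  SS58(0xAAAA..AAAA followed by 12 x 0xEE)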
pub fn eth_rpc_version(&self) -> anyhow::Result<String> {
let output = Command::new(&self.eth_rpc_binary_path)
.arg("--version")
.stdin(Stdio::null())
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn()?
.wait_with_output()?
.stdout;
Ok(String::from_utf8_lossy(&output).trim().to_string())
}
async fn provider(&self) -> anyhow::Result<ConcreteProvider<Ethereum, Arc<EthereumWallet>>> {
self.provider
.get_or_try_init(|| async move {
construct_concurrency_limited_provider::<Ethereum, _>(
self.rpc_url.as_str(),
FallbackGasFiller::default()
.with_fallback_mechanism(self.use_fallback_gas_filler),
ChainIdFiller::default(),
NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(),
)
.await
.context("Failed to construct the provider")
})
.await
.cloned()
}
pub fn node_genesis(
wallet: &EthereumWallet,
chain_spec_path: &Path,
) -> anyhow::Result<serde_json::Value> {
let unmodified_chainspec_file =
File::open(chain_spec_path).context("Failed to open the unmodified chainspec file")?;
let mut chainspec_json =
serde_json::from_reader::<_, serde_json::Value>(&unmodified_chainspec_file)
.context("Failed to read the unmodified chainspec JSON")?;
let existing_chainspec_balances =
chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"]
.as_array_mut()
.context("Chainspec JSON is missing the balances array")?;
for address in NetworkWallet::<Ethereum>::signer_addresses(wallet) {
let substrate_address = Self::eth_to_substrate_address(&address);
let balance = INITIAL_BALANCE;
existing_chainspec_balances.push(json!((substrate_address, balance)));
}
Ok(chainspec_json)
}
}
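// For illustration (not part of the diff): `node_genesis` leaves the chain spec
// untouched except for appending one entry per signer, shaped like
// `["<ss58 address>", INITIAL_BALANCE]`, to
// genesis.runtimeGenesis.patch.balances.balances.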
impl EthereumNode for PolkadotOmnichainNode {
fn pre_transactions(&mut self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + '_>> {
Box::pin(async move { Ok(()) })
}
fn id(&self) -> usize {
self.id as _
}
fn connection_string(&self) -> &str {
&self.rpc_url
}
fn submit_transaction(
&self,
transaction: TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TxHash>> + '_>> {
Box::pin(async move {
let provider = self
.provider()
.await
.context("Failed to create the provider for transaction submission")?;
let pending_transaction = provider
.send_transaction(transaction)
.await
.context("Failed to submit the transaction through the provider")?;
Ok(*pending_transaction.tx_hash())
})
}
fn get_receipt(
&self,
tx_hash: TxHash,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
Box::pin(async move {
self.provider()
.await
.context("Failed to create provider for getting the receipt")?
.get_transaction_receipt(tx_hash)
.await
.context("Failed to get the receipt of the transaction")?
.context("Failed to get the receipt of the transaction")
})
}
fn execute_transaction(
&self,
transaction: TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
Box::pin(async move {
self.provider()
.await
.context("Failed to create provider for transaction submission")?
.send_transaction(transaction)
.await
.context("Encountered an error when submitting a transaction")?
.get_receipt()
.await
.context("Failed to get the receipt for the transaction")
})
}
fn trace_transaction(
&self,
tx_hash: TxHash,
trace_options: GethDebugTracingOptions,
) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> {
Box::pin(async move {
self.provider()
.await
.context("Failed to create provider for debug tracing")?
.debug_trace_transaction(tx_hash, trace_options)
.await
.context("Failed to obtain debug trace from eth-proxy")
})
}
fn state_diff(
&self,
tx_hash: TxHash,
) -> Pin<Box<dyn Future<Output = anyhow::Result<DiffMode>> + '_>> {
Box::pin(async move {
let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig {
diff_mode: Some(true),
disable_code: None,
disable_storage: None,
});
match self
.trace_transaction(tx_hash, trace_options)
.await?
.try_into_pre_state_frame()?
{
PreStateFrame::Diff(diff) => Ok(diff),
_ => anyhow::bail!("expected a diff mode trace"),
}
})
}
fn balance_of(
&self,
address: Address,
) -> Pin<Box<dyn Future<Output = anyhow::Result<U256>> + '_>> {
Box::pin(async move {
self.provider()
.await
.context("Failed to get the eth-rpc provider")?
.get_balance(address)
.await
.map_err(Into::into)
})
}
fn latest_state_proof(
&self,
address: Address,
keys: Vec<StorageKey>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<EIP1186AccountProofResponse>> + '_>> {
Box::pin(async move {
self.provider()
.await
.context("Failed to get the eth-rpc provider")?
.get_proof(address, keys)
.latest()
.await
.map_err(Into::into)
})
}
fn resolver(
&self,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Arc<dyn ResolverApi + '_>>> + '_>> {
Box::pin(async move {
let id = self.id;
let provider = self.provider().await?;
Ok(Arc::new(PolkadotOmnichainNodeResolver { id, provider }) as Arc<dyn ResolverApi>)
})
}
fn evm_version(&self) -> EVMVersion {
EVMVersion::Cancun
}
fn subscribe_to_full_blocks_information(
&self,
) -> Pin<
Box<
dyn Future<Output = anyhow::Result<Pin<Box<dyn Stream<Item = MinedBlockInformation>>>>>
+ '_,
>,
> {
#[subxt::subxt(runtime_metadata_path = "../../assets/revive_metadata.scale")]
pub mod revive {}
Box::pin(async move {
let polkadot_omnichain_node_rpc_port =
Self::BASE_POLKADOT_OMNICHAIN_NODE_RPC_PORT + self.id as u16;
let polkadot_omnichain_node_rpc_url =
format!("ws://127.0.0.1:{polkadot_omnichain_node_rpc_port}");
let api = OnlineClient::<SubstrateConfig>::from_url(polkadot_omnichain_node_rpc_url)
.await
.context("Failed to create subxt rpc client")?;
let provider = self.provider().await.context("Failed to create provider")?;
let block_stream = api
.blocks()
.subscribe_all()
.await
.context("Failed to subscribe to blocks")?;
let mined_block_information_stream = block_stream.filter_map(move |block| {
let api = api.clone();
let provider = provider.clone();
async move {
let substrate_block = block.ok()?;
let revive_block = provider
.get_block_by_number(
BlockNumberOrTag::Number(substrate_block.number() as _),
)
.await
.expect("TODO: Remove")
.expect("TODO: Remove");
let used = api
.storage()
.at(substrate_block.reference())
.fetch_or_default(&revive::storage().system().block_weight())
.await
.expect("TODO: Remove");
let block_ref_time = (used.normal.ref_time as u128)
+ (used.operational.ref_time as u128)
+ (used.mandatory.ref_time as u128);
let block_proof_size = (used.normal.proof_size as u128)
+ (used.operational.proof_size as u128)
+ (used.mandatory.proof_size as u128);
let limits = api
.constants()
.at(&revive::constants().system().block_weights())
.expect("TODO: Remove");
let max_ref_time = limits.max_block.ref_time;
let max_proof_size = limits.max_block.proof_size;
Some(MinedBlockInformation {
ethereum_block_information: EthereumMinedBlockInformation {
block_number: revive_block.number(),
block_timestamp: revive_block.header.timestamp,
mined_gas: revive_block.header.gas_used as _,
block_gas_limit: revive_block.header.gas_limit as _,
transaction_hashes: revive_block
.transactions
.into_hashes()
.as_hashes()
.expect("Must be hashes")
.to_vec(),
},
substrate_block_information: Some(SubstrateMinedBlockInformation {
ref_time: block_ref_time,
max_ref_time,
proof_size: block_proof_size,
max_proof_size,
}),
tx_counts: Default::default(),
})
}
});
Ok(Box::pin(mined_block_information_stream)
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
})
}
fn provider(
&self,
) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::providers::DynProvider<Ethereum>>> + '_>>
{
Box::pin(
self.provider()
.map(|provider| provider.map(|provider| provider.erased())),
)
}
}
pub struct PolkadotOmnichainNodeResolver {
id: u32,
provider: ConcreteProvider<Ethereum, Arc<EthereumWallet>>,
}
impl ResolverApi for PolkadotOmnichainNodeResolver {
#[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
fn chain_id(
&self,
) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::primitives::ChainId>> + '_>> {
Box::pin(async move { self.provider.get_chain_id().await.map_err(Into::into) })
}
#[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
fn transaction_gas_price(
&self,
tx_hash: TxHash,
) -> Pin<Box<dyn Future<Output = anyhow::Result<u128>> + '_>> {
Box::pin(async move {
self.provider
.get_transaction_receipt(tx_hash)
.await?
.context("Failed to get the transaction receipt")
.map(|receipt| receipt.effective_gas_price)
})
}
#[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
fn block_gas_limit(
&self,
number: BlockNumberOrTag,
) -> Pin<Box<dyn Future<Output = anyhow::Result<u128>> + '_>> {
Box::pin(async move {
self.provider
.get_block_by_number(number)
.await
.context("Failed to get the eth-rpc block")?
.context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
.map(|block| block.header.gas_limit as _)
})
}
#[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
fn block_coinbase(
&self,
number: BlockNumberOrTag,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Address>> + '_>> {
Box::pin(async move {
self.provider
.get_block_by_number(number)
.await
.context("Failed to get the eth-rpc block")?
.context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
.map(|block| block.header.beneficiary)
})
}
#[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
fn block_difficulty(
&self,
number: BlockNumberOrTag,
) -> Pin<Box<dyn Future<Output = anyhow::Result<U256>> + '_>> {
Box::pin(async move {
self.provider
.get_block_by_number(number)
.await
.context("Failed to get the eth-rpc block")?
.context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
.map(|block| U256::from_be_bytes(block.header.mix_hash.0))
})
}
#[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
fn block_base_fee(
&self,
number: BlockNumberOrTag,
) -> Pin<Box<dyn Future<Output = anyhow::Result<u64>> + '_>> {
Box::pin(async move {
self.provider
.get_block_by_number(number)
.await
.context("Failed to get the eth-rpc block")?
.context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
.and_then(|block| {
block
.header
.base_fee_per_gas
.context("Failed to get the base fee per gas")
})
})
}
#[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
fn block_hash(
&self,
number: BlockNumberOrTag,
) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockHash>> + '_>> {
Box::pin(async move {
self.provider
.get_block_by_number(number)
.await
.context("Failed to get the eth-rpc block")?
.context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
.map(|block| block.header.hash)
})
}
#[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
fn block_timestamp(
&self,
number: BlockNumberOrTag,
) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockTimestamp>> + '_>> {
Box::pin(async move {
self.provider
.get_block_by_number(number)
.await
.context("Failed to get the eth-rpc block")?
.context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
.map(|block| block.header.timestamp)
})
}
#[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
fn last_block_number(&self) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockNumber>> + '_>> {
Box::pin(async move { self.provider.get_block_number().await.map_err(Into::into) })
}
}
impl Node for PolkadotOmnichainNode {
fn shutdown(&mut self) -> anyhow::Result<()> {
drop(self.polkadot_omnichain_node_process.take());
drop(self.eth_rpc_process.take());
// Remove the node's database so that subsequent runs do not run on the same database. We
// ignore the error just in case the directory didn't exist in the first place and therefore
// there's nothing to be deleted.
let _ = remove_dir_all(self.base_directory_path.join("data"));
Ok(())
}
fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> {
self.init(genesis)?.spawn_process()
}
fn version(&self) -> anyhow::Result<String> {
let output = Command::new(&self.polkadot_omnichain_node_binary_path)
.arg("--version")
.stdin(Stdio::null())
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn()
.context("Failed to spawn substrate --version")?
.wait_with_output()
.context("Failed to wait for substrate --version")?
.stdout;
Ok(String::from_utf8_lossy(&output).into())
}
}
impl Drop for PolkadotOmnichainNode {
fn drop(&mut self) {
self.shutdown().expect("Failed to shutdown")
}
}
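A typical lifecycle for this node, pieced together from the methods above (a sketch assuming a `context` value that satisfies the four configuration bounds of `new`, a `genesis` value, a transaction request `tx`, and an async caller; this is not code from the diff):

let mut node = PolkadotOmnichainNode::new(&context, /* use_fallback_gas_filler */ true);
node.spawn(genesis)?;                              // runs init() and spawn_process()
let receipt = node.execute_transaction(tx).await?; // submit and await the receipt
node.shutdown()?;                                  // also happens on Drop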
@@ -4,7 +4,7 @@ use std::{
     pin::Pin,
     process::{Command, Stdio},
     sync::{
-        Arc,
+        Arc, Mutex,
         atomic::{AtomicU32, Ordering},
     },
     time::Duration,
@@ -32,7 +32,7 @@ use futures::{FutureExt, Stream, StreamExt};
 use revive_common::EVMVersion;
 use revive_dt_common::fs::clear_directory;
 use revive_dt_format::traits::ResolverApi;
-use serde_json::json;
+use serde_json::{Value, json};
 use sp_core::crypto::Ss58Codec;
 use sp_runtime::AccountId32;
@@ -47,12 +47,9 @@ use tracing::{instrument, trace};
 use crate::{
     Node,
-    constants::{CHAIN_ID, INITIAL_BALANCE},
+    constants::INITIAL_BALANCE,
     helpers::{Process, ProcessReadinessWaitBehavior},
-    provider_utils::{
-        ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider,
-        execute_transaction,
-    },
+    provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider},
 };

 static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
@@ -79,6 +76,7 @@ pub struct SubstrateNode {
     nonce_manager: CachedNonceManager,
     provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
     consensus: Option<String>,
+    use_fallback_gas_filler: bool,
 }

 impl SubstrateNode {
@@ -105,6 +103,7 @@ impl SubstrateNode {
             + AsRef<EthRpcConfiguration>
             + AsRef<WalletConfiguration>,
         existing_connection_strings: &[String],
+        use_fallback_gas_filler: bool,
     ) -> Self {
         let working_directory_path =
             AsRef::<WorkingDirectoryConfiguration>::as_ref(&context).as_path();
@@ -137,10 +136,13 @@ impl SubstrateNode {
             nonce_manager: Default::default(),
             provider: Default::default(),
             consensus,
+            use_fallback_gas_filler,
         }
     }

     fn init(&mut self, _: Genesis) -> anyhow::Result<&mut Self> {
+        static CHAINSPEC_MUTEX: Mutex<Option<Value>> = Mutex::new(None);
+
         if !self.rpc_url.is_empty() {
             return Ok(self);
         }
@@ -159,12 +161,22 @@
         let template_chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE);

         trace!("Creating the node genesis");
-        let chainspec_json = Self::node_genesis(
-            &self.node_binary,
-            &self.export_chainspec_command,
-            &self.wallet,
-        )
-        .context("Failed to prepare the chainspec command")?;
+        let chainspec_json = {
+            let mut chainspec_mutex = CHAINSPEC_MUTEX.lock().expect("Poisoned");
+            match chainspec_mutex.as_ref() {
+                Some(chainspec_json) => chainspec_json.clone(),
+                None => {
+                    let chainspec_json = Self::node_genesis(
+                        &self.node_binary,
+                        &self.export_chainspec_command,
+                        &self.wallet,
+                    )
+                    .context("Failed to prepare the chainspec command")?;
+                    *chainspec_mutex = Some(chainspec_json.clone());
+                    chainspec_json
+                }
+            }
+        };

         trace!("Writing the node genesis");
         serde_json::to_writer_pretty(
@@ -312,8 +324,9 @@
             .get_or_try_init(|| async move {
                 construct_concurrency_limited_provider::<Ethereum, _>(
                     self.rpc_url.as_str(),
-                    FallbackGasFiller::new(u64::MAX, 50_000_000_000, 1_000_000_000),
-                    ChainIdFiller::new(Some(CHAIN_ID)),
+                    FallbackGasFiller::default()
+                        .with_fallback_mechanism(self.use_fallback_gas_filler),
+                    ChainIdFiller::default(),
                     NonceFiller::new(self.nonce_manager.clone()),
                     self.wallet.clone(),
                 )
@@ -418,11 +431,15 @@ impl EthereumNode for SubstrateNode {
         transaction: TransactionRequest,
     ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
         Box::pin(async move {
-            let provider = self
-                .provider()
-                .await
-                .context("Failed to create the provider")?;
-            execute_transaction(provider, transaction).await
+            self.provider()
+                .await
+                .context("Failed to create provider for transaction submission")?
+                .send_transaction(transaction)
+                .await
+                .context("Encountered an error when submitting a transaction")?
+                .get_receipt()
+                .await
+                .context("Failed to get the receipt for the transaction")
        })
    }
@@ -813,6 +830,7 @@ mod tests {
             None,
             &context,
             &[],
+            true,
         );
         node.init(context.genesis_configuration.genesis().unwrap().clone())
             .expect("Failed to initialize the node")
@@ -884,6 +902,7 @@ mod tests {
             None,
             &context,
             &[],
+            true,
        );

        // Call `init()`
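The `CHAINSPEC_MUTEX` change above memoizes the exported chain spec so that only the first node pays the cost of shelling out to the node binary. The pattern in isolation (a sketch, not the project's code):

use std::sync::Mutex;

fn cached_chainspec(compute: impl FnOnce() -> serde_json::Value) -> serde_json::Value {
    // Process-wide cache: computed at most once, cloned on every later call.
    static CACHE: Mutex<Option<serde_json::Value>> = Mutex::new(None);
    let mut guard = CACHE.lock().expect("Poisoned");
    match guard.as_ref() {
        Some(value) => value.clone(),
        None => {
            let value = compute();
            *guard = Some(value.clone());
            value
        }
    }
}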
@@ -76,10 +76,7 @@ use crate::{
     Node,
     constants::INITIAL_BALANCE,
     helpers::{Process, ProcessReadinessWaitBehavior},
-    provider_utils::{
-        ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider,
-        execute_transaction,
-    },
+    provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider},
 };

 static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
@@ -114,6 +111,8 @@ pub struct ZombienetNode {
     nonce_manager: CachedNonceManager,
     provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
+
+    use_fallback_gas_filler: bool,
 }

 impl ZombienetNode {
@@ -137,6 +136,7 @@ impl ZombienetNode {
         context: impl AsRef<WorkingDirectoryConfiguration>
             + AsRef<EthRpcConfiguration>
             + AsRef<WalletConfiguration>,
+        use_fallback_gas_filler: bool,
     ) -> Self {
         let eth_proxy_binary = AsRef::<EthRpcConfiguration>::as_ref(&context)
             .path
@@ -164,6 +164,7 @@ impl ZombienetNode {
             connection_string: String::new(),
             node_rpc_port: None,
             provider: Default::default(),
+            use_fallback_gas_filler,
         }
     }
@@ -330,7 +331,8 @@
             .get_or_try_init(|| async move {
                 construct_concurrency_limited_provider::<Ethereum, _>(
                     self.connection_string.as_str(),
-                    FallbackGasFiller::new(u64::MAX, 5_000_000_000, 1_000_000_000),
+                    FallbackGasFiller::default()
+                        .with_fallback_mechanism(self.use_fallback_gas_filler),
                     ChainIdFiller::default(), // TODO: use CHAIN_ID constant
                     NonceFiller::new(self.nonce_manager.clone()),
                     self.wallet.clone(),
@@ -428,14 +430,18 @@ impl EthereumNode for ZombienetNode {
     fn execute_transaction(
         &self,
-        transaction: alloy::rpc::types::TransactionRequest,
+        transaction: TransactionRequest,
     ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
         Box::pin(async move {
-            let provider = self
-                .provider()
-                .await
-                .context("Failed to create the provider")?;
-            execute_transaction(provider, transaction).await
+            self.provider()
+                .await
+                .context("Failed to create provider for transaction submission")?
+                .send_transaction(transaction)
+                .await
+                .context("Encountered an error when submitting a transaction")?
+                .get_receipt()
+                .await
+                .context("Failed to get the receipt for the transaction")
        })
    }
@@ -823,6 +829,7 @@ mod tests {
         let mut node = ZombienetNode::new(
             context.polkadot_parachain_configuration.path.clone(),
             &context,
+            true,
         );
         let genesis = context.genesis_configuration.genesis().unwrap().clone();
         node.init(genesis).unwrap();
@@ -936,6 +943,7 @@ mod tests {
         let node = ZombienetNode::new(
             context.polkadot_parachain_configuration.path.clone(),
             &context,
+            true,
        );

        // Act
@@ -956,6 +964,7 @@
        let node = ZombienetNode::new(
            context.polkadot_parachain_configuration.path.clone(),
            &context,
+            true,
        );

        // Act
@@ -1,42 +1,69 @@
use alloy::{ use alloy::{
eips::BlockNumberOrTag,
network::{Network, TransactionBuilder}, network::{Network, TransactionBuilder},
providers::{ providers::{
Provider, SendableTx, Provider, SendableTx,
fillers::{GasFiller, TxFiller}, ext::DebugApi,
fillers::{GasFillable, GasFiller, TxFiller},
}, },
transports::TransportResult, rpc::types::trace::geth::{
GethDebugBuiltInTracerType, GethDebugTracerType, GethDebugTracingCallOptions,
GethDebugTracingOptions,
},
transports::{RpcError, TransportResult},
}; };
// Percentage padding applied to estimated gas (e.g. 120 = 20% padding) /// An implementation of [`GasFiller`] with a fallback mechanism for reverting transactions.
const GAS_ESTIMATE_PADDING_NUMERATOR: u64 = 120; ///
const GAS_ESTIMATE_PADDING_DENOMINATOR: u64 = 100; /// This struct provides a fallback mechanism for alloy's [`GasFiller`] which kicks in when a
/// transaction's dry run fails due to it reverting allowing us to get gas estimates even for
#[derive(Clone, Debug)] /// failing transactions. In this codebase, this is very important since the MatterLabs tests
/// expect some transactions in the test suite revert. Since we're expected to run a number of
/// assertions on these reverting transactions we must commit them to the ledger.
///
/// Therefore, this struct does the following:
///
/// 1. It first attempts to estimate the gas through the mechanism implemented in the [`GasFiller`].
/// 2. If it fails, then we perform a debug trace of the transaction to find out how much gas the
/// transaction needs until it reverts.
/// 3. We fill in these values (either the success or failure case) into the transaction.
///
/// The fallback mechanism of this filler can be completely disabled if we don't want it to be used.
/// In that case, this gas filler will act in an identical way to alloy's [`GasFiller`].
///
/// We then fill in these values into the transaction.
///
/// The previous implementation of this fallback gas filler relied on making use of default values
/// for the gas limit in order to be able to submit the reverting transactions to the network. But,
/// it introduced a number of issues that we weren't anticipating at the time when it was built.
#[derive(Clone, Copy, Debug)]
pub struct FallbackGasFiller { pub struct FallbackGasFiller {
/// The inner [`GasFiller`] which we pass all of the calls to in the happy path.
inner: GasFiller, inner: GasFiller,
default_gas_limit: u64,
default_max_fee_per_gas: u128, /// A [`bool`] that controls if the fallback mechanism is enabled or not.
default_priority_fee: u128, enable_fallback_mechanism: bool,
} }
impl FallbackGasFiller { impl FallbackGasFiller {
pub fn new( pub fn new() -> Self {
default_gas_limit: u64,
default_max_fee_per_gas: u128,
default_priority_fee: u128,
) -> Self {
Self { Self {
inner: GasFiller, inner: Default::default(),
default_gas_limit, enable_fallback_mechanism: true,
default_max_fee_per_gas,
default_priority_fee,
} }
} }
}
impl Default for FallbackGasFiller { pub fn with_fallback_mechanism(mut self, enable: bool) -> Self {
fn default() -> Self { self.enable_fallback_mechanism = enable;
FallbackGasFiller::new(25_000_000, 1_000_000_000, 1_000_000_000) self
}
pub fn with_fallback_mechanism_enabled(self) -> Self {
self.with_fallback_mechanism(true)
}
pub fn with_fallback_mechanism_disabled(self) -> Self {
self.with_fallback_mechanism(false)
} }
} }
@@ -44,27 +71,84 @@ impl<N> TxFiller<N> for FallbackGasFiller
 where
     N: Network,
 {
-    type Fillable = Option<<GasFiller as TxFiller<N>>::Fillable>;
+    type Fillable = <GasFiller as TxFiller<N>>::Fillable;

     fn status(
         &self,
         tx: &<N as Network>::TransactionRequest,
     ) -> alloy::providers::fillers::FillerControlFlow {
-        <GasFiller as TxFiller<N>>::status(&self.inner, tx)
+        TxFiller::<N>::status(&self.inner, tx)
     }

-    fn fill_sync(&self, _: &mut alloy::providers::SendableTx<N>) {}
+    fn fill_sync(&self, _: &mut SendableTx<N>) {}

     async fn prepare<P: Provider<N>>(
         &self,
         provider: &P,
         tx: &<N as Network>::TransactionRequest,
     ) -> TransportResult<Self::Fillable> {
-        match self.inner.prepare(provider, tx).await {
-            Ok(fill) => Ok(Some(fill)),
-            Err(err) => {
-                tracing::debug!(error = ?err, "Gas Provider Estimation Failed, using fallback");
-                Ok(None)
+        match (
+            self.inner.prepare(provider, tx).await,
+            self.enable_fallback_mechanism,
+        ) {
+            // Return the same thing if either this call succeeds, or if the call fails and the
+            // fallback mechanism is disabled.
+            (rtn @ Ok(..), ..) | (rtn @ Err(..), false) => rtn,
+            (Err(..), true) => {
+                // Perform a trace of the transaction.
+                let trace = provider
+                    .debug_trace_call(
+                        tx.clone(),
+                        BlockNumberOrTag::Latest.into(),
+                        GethDebugTracingCallOptions {
+                            tracing_options: GethDebugTracingOptions {
+                                tracer: Some(GethDebugTracerType::BuiltInTracer(
+                                    GethDebugBuiltInTracerType::CallTracer,
+                                )),
+                                ..Default::default()
+                            },
+                            state_overrides: Default::default(),
+                            block_overrides: Default::default(),
+                            tx_index: Default::default(),
+                        },
+                    )
+                    .await?
+                    .try_into_call_frame()
+                    .map_err(|err| {
+                        RpcError::local_usage_str(
+                            format!("Expected a callframe trace, but got: {err:?}").as_str(),
+                        )
+                    })?;
+
+                let gas_used = u64::try_from(trace.gas_used).map_err(|_| {
+                    RpcError::local_usage_str(
+                        "Transaction trace returned a value of gas used that exceeds u64",
+                    )
+                })?;
+                let gas_limit = gas_used.saturating_mul(2);
+
+                if let Some(gas_price) = tx.gas_price() {
+                    return Ok(GasFillable::Legacy {
+                        gas_limit,
+                        gas_price,
+                    });
+                }
+
+                let estimate = if let (Some(max_fee_per_gas), Some(max_priority_fee_per_gas)) =
+                    (tx.max_fee_per_gas(), tx.max_priority_fee_per_gas())
+                {
+                    alloy::eips::eip1559::Eip1559Estimation {
+                        max_fee_per_gas,
+                        max_priority_fee_per_gas,
+                    }
+                } else {
+                    provider.estimate_eip1559_fees().await?
+                };
+
+                Ok(GasFillable::Eip1559 {
+                    gas_limit,
+                    estimate,
+                })
             }
         }
     }
@@ -72,27 +156,14 @@ where
     async fn fill(
         &self,
         fillable: Self::Fillable,
-        mut tx: alloy::providers::SendableTx<N>,
+        tx: SendableTx<N>,
     ) -> TransportResult<SendableTx<N>> {
-        if let Some(fill) = fillable {
-            let mut tx = self.inner.fill(fill, tx).await?;
-            if let Some(builder) = tx.as_mut_builder() {
-                if let Some(estimated) = builder.gas_limit() {
-                    let padded = estimated
-                        .checked_mul(GAS_ESTIMATE_PADDING_NUMERATOR)
-                        .and_then(|v| v.checked_div(GAS_ESTIMATE_PADDING_DENOMINATOR))
-                        .unwrap_or(u64::MAX);
-                    builder.set_gas_limit(padded);
-                }
-            }
-            Ok(tx)
-        } else {
-            if let Some(builder) = tx.as_mut_builder() {
-                builder.set_gas_limit(self.default_gas_limit);
-                builder.set_max_fee_per_gas(self.default_max_fee_per_gas);
-                builder.set_max_priority_fee_per_gas(self.default_priority_fee);
-            }
-            Ok(tx)
-        }
+        self.inner.fill(fillable, tx).await
+    }
+}
+
+impl Default for FallbackGasFiller {
+    fn default() -> Self {
+        Self::new()
     }
 }
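For orientation, a hypothetical wiring sketch, not taken from this diff, of how the filler could be stacked with the other fillers named in the `ConcreteProvider` alias further down; this assumes a recent alloy `ProviderBuilder` API, and `wallet` and `rpc_url` are placeholders:

    // Hypothetical sketch: compose the custom gas filler with alloy's stock fillers.
    let provider = ProviderBuilder::default()
        .filler(FallbackGasFiller::new())
        .filler(ChainIdFiller::default())
        .filler(NonceFiller::default())
        .wallet(wallet)
        .connect(rpc_url)
        .await?;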
+2
@@ -1,7 +1,9 @@
 mod concurrency_limiter;
 mod fallback_gas_filler;
 mod provider;
+mod receipt_retry_layer;

 pub use concurrency_limiter::*;
 pub use fallback_gas_filler::*;
 pub use provider::*;
+pub use receipt_retry_layer::*;
+5 -73
@@ -1,18 +1,16 @@
-use std::{ops::ControlFlow, sync::LazyLock, time::Duration};
+use std::sync::LazyLock;

 use alloy::{
-    network::{Ethereum, Network, NetworkWallet, TransactionBuilder4844},
+    network::{Network, NetworkWallet, TransactionBuilder4844},
     providers::{
-        Identity, PendingTransactionBuilder, Provider, ProviderBuilder, RootProvider,
+        Identity, ProviderBuilder, RootProvider,
         fillers::{ChainIdFiller, FillProvider, JoinFill, NonceFiller, TxFiller, WalletFiller},
     },
     rpc::client::ClientBuilder,
 };
 use anyhow::{Context, Result};
-use revive_dt_common::futures::{PollingWaitBehavior, poll};
-use tracing::{Instrument, debug, info, info_span};

-use crate::provider_utils::{ConcurrencyLimiterLayer, FallbackGasFiller};
+use crate::provider_utils::{ConcurrencyLimiterLayer, FallbackGasFiller, RetryLayer};
 pub type ConcreteProvider<N, W> = FillProvider<
     JoinFill<
@@ -48,6 +46,7 @@ where
     let client = ClientBuilder::default()
         .layer(GLOBAL_CONCURRENCY_LIMITER_LAYER.clone())
+        .layer(RetryLayer::default())
         .connect(rpc_url)
         .await
         .context("Failed to construct the RPC client")?;
@@ -63,70 +62,3 @@ where
     Ok(provider)
 }
-
-pub async fn execute_transaction<N, W>(
-    provider: ConcreteProvider<N, W>,
-    transaction: N::TransactionRequest,
-) -> Result<N::ReceiptResponse>
-where
-    N: Network<
-        TransactionRequest: TransactionBuilder4844,
-        TxEnvelope = <Ethereum as Network>::TxEnvelope,
-    >,
-    W: NetworkWallet<N>,
-    Identity: TxFiller<N>,
-    FallbackGasFiller: TxFiller<N>,
-    ChainIdFiller: TxFiller<N>,
-    NonceFiller: TxFiller<N>,
-    WalletFiller<W>: TxFiller<N>,
-{
-    let sendable_transaction = provider
-        .fill(transaction)
-        .await
-        .context("Failed to fill transaction")?;
-    let transaction_envelope = sendable_transaction
-        .try_into_envelope()
-        .context("Failed to convert transaction into an envelope")?;
-    let tx_hash = *transaction_envelope.tx_hash();
-
-    let mut pending_transaction = match provider.send_tx_envelope(transaction_envelope).await {
-        Ok(pending_transaction) => pending_transaction,
-        Err(error) => {
-            let error_string = error.to_string();
-            if error_string.contains("Transaction Already Imported") {
-                PendingTransactionBuilder::<N>::new(provider.root().clone(), tx_hash)
-            } else {
-                return Err(error).context(format!("Failed to submit transaction {tx_hash}"));
-            }
-        }
-    };
-    debug!(%tx_hash, "Submitted Transaction");
-
-    pending_transaction.set_timeout(Some(Duration::from_secs(120)));
-    let tx_hash = pending_transaction.watch().await.context(format!(
-        "Transaction inclusion watching timeout for {tx_hash}"
-    ))?;
-
-    poll(
-        Duration::from_secs(60),
-        PollingWaitBehavior::Constant(Duration::from_secs(3)),
-        || {
-            let provider = provider.clone();
-            async move {
-                match provider.get_transaction_receipt(tx_hash).await {
-                    Ok(Some(receipt)) => {
-                        info!("Found the transaction receipt");
-                        Ok(ControlFlow::Break(receipt))
-                    }
-                    _ => Ok(ControlFlow::Continue(())),
-                }
-            }
-        },
-    )
-    .instrument(info_span!("Polling for receipt", %tx_hash))
-    .await
-    .context(format!("Polling for receipt failed for {tx_hash}"))
-}
@@ -0,0 +1,158 @@
use std::time::Duration;
use alloy::{
network::{AnyNetwork, Network},
rpc::json_rpc::{RequestPacket, ResponsePacket},
transports::{TransportError, TransportErrorKind, TransportFut},
};
use tokio::time::{interval, timeout};
use tower::{Layer, Service};
/// A layer that allows for automatic retries when getting a transaction receipt.
///
/// There are certain cases where getting the receipt of a committed transaction might fail. In Geth
/// this can happen if the transaction has been committed to the ledger but has not yet been
/// indexed; in the Substrate and revive stack it can also happen for other reasons.
///
/// Therefore, just because the first attempt to get the receipt (after transaction confirmation)
/// has failed, it doesn't mean that it will continue to fail. This layer can be added to any alloy
/// provider to let the provider retry getting the receipt for some period of time before declaring
/// a timeout: it polls for the receipt for the `polling_duration` with an interval of
/// `polling_interval` between each poll, and if it was not able to get the receipt successfully by
/// the end of the `polling_duration` then this is considered to be a timeout.
///
/// Additionally, this layer allows for retries of other RPC methods, such as all tracing methods.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct RetryLayer {
/// The amount of time to keep polling for the receipt before considering it a timeout.
polling_duration: Duration,
/// The interval of time to wait between each poll for the receipt.
polling_interval: Duration,
}
impl RetryLayer {
pub fn new(polling_duration: Duration, polling_interval: Duration) -> Self {
Self {
polling_duration,
polling_interval,
}
}
pub fn with_polling_duration(mut self, polling_duration: Duration) -> Self {
self.polling_duration = polling_duration;
self
}
pub fn with_polling_interval(mut self, polling_interval: Duration) -> Self {
self.polling_interval = polling_interval;
self
}
}
impl Default for RetryLayer {
fn default() -> Self {
Self {
polling_duration: Duration::from_secs(90),
polling_interval: Duration::from_millis(500),
}
}
}
impl<S> Layer<S> for RetryLayer {
type Service = RetryService<S>;
fn layer(&self, inner: S) -> Self::Service {
RetryService {
service: inner,
polling_duration: self.polling_duration,
polling_interval: self.polling_interval,
}
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct RetryService<S> {
/// The internal service.
service: S,
/// The amount of time to keep polling for the receipt before considering it a timeout.
polling_duration: Duration,
/// The interval of time to wait between each poll for the receipt.
polling_interval: Duration,
}
impl<S> Service<RequestPacket> for RetryService<S>
where
S: Service<RequestPacket, Future = TransportFut<'static>, Error = TransportError>
+ Send
+ 'static
+ Clone,
{
type Response = ResponsePacket;
type Error = TransportError;
type Future = TransportFut<'static>;
fn poll_ready(
&mut self,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Result<(), Self::Error>> {
self.service.poll_ready(cx)
}
#[allow(clippy::nonminimal_bool)]
fn call(&mut self, req: RequestPacket) -> Self::Future {
type ReceiptOutput = <AnyNetwork as Network>::ReceiptResponse;
let mut service = self.service.clone();
let polling_interval = self.polling_interval;
let polling_duration = self.polling_duration;
Box::pin(async move {
let request = req.as_single().ok_or_else(|| {
TransportErrorKind::custom_str("Retry layer doesn't support batch requests")
})?;
let method = request.method();
let requires_retries = method == "eth_getTransactionReceipt"
|| (method.contains("debug") && method.contains("trace"));
if !requires_retries {
return service.call(req).await;
}
timeout(polling_duration, async {
let mut interval = interval(polling_interval);
loop {
interval.tick().await;
let Ok(resp) = service.call(req.clone()).await else {
continue;
};
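// The request was validated to be a single (non-batch) request above, so the
// response packet must be a single response as well.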
let response = resp.as_single().expect("Can't fail");
if response.is_error() {
continue;
}
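// Receipt requests only count as successful once the body actually deserializes
// into a receipt; any other retried method is accepted as soon as the response
// is not an error.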
if method == "eth_getTransactionReceipt"
&& response
.payload()
.clone()
.deserialize_success::<ReceiptOutput>()
.ok()
.and_then(|resp| resp.try_into_success().ok())
.is_some()
|| method != "eth_getTransactionReceipt"
{
return resp;
} else {
continue;
}
}
})
.await
.map_err(|_| TransportErrorKind::custom_str("Timeout when retrying request"))
})
}
}
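A small usage sketch that mirrors the `ClientBuilder` wiring shown in the provider changes above, installing the layer with custom timings (`rpc_url` is a placeholder):

    let client = ClientBuilder::default()
        .layer(RetryLayer::new(
            Duration::from_secs(120),   // keep polling for up to two minutes
            Duration::from_millis(250), // wait 250ms between polls
        ))
        .connect(rpc_url)
        .await?;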
+25
@@ -0,0 +1,25 @@
[package]
name = "revive-dt-report-processor"
description = "revive differential testing report processor utility"
version.workspace = true
authors.workspace = true
license.workspace = true
edition.workspace = true
repository.workspace = true
rust-version.workspace = true
[[bin]]
name = "report-processor"
path = "src/main.rs"
[dependencies]
revive-dt-report = { workspace = true }
revive-dt-common = { workspace = true }
anyhow = { workspace = true }
clap = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
[lints]
workspace = true
+329
@@ -0,0 +1,329 @@
use std::{
borrow::Cow,
collections::{BTreeMap, BTreeSet},
fmt::Display,
fs::{File, OpenOptions},
ops::{Deref, DerefMut},
path::{Path, PathBuf},
str::FromStr,
};
use anyhow::{Context as _, Error, Result, bail};
use clap::Parser;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use revive_dt_common::types::{Mode, ParsedTestSpecifier};
use revive_dt_report::{Report, TestCaseStatus};
fn main() -> Result<()> {
let cli = Cli::try_parse().context("Failed to parse the CLI arguments")?;
match cli {
Cli::GenerateExpectationsFile {
report_path,
output_path: output_file,
remove_prefix,
} => {
let remove_prefix = remove_prefix
.into_iter()
.map(|path| path.canonicalize().context("Failed to canonicalize path"))
.collect::<Result<Vec<_>>>()?;
let expectations = report_path
.execution_information
.iter()
.flat_map(|(metadata_file_path, metadata_file_report)| {
metadata_file_report
.case_reports
.iter()
.map(move |(case_idx, case_report)| {
(metadata_file_path, case_idx, case_report)
})
})
.flat_map(|(metadata_file_path, case_idx, case_report)| {
case_report.mode_execution_reports.iter().map(
move |(mode, execution_report)| {
(
metadata_file_path,
case_idx,
mode,
execution_report.status.as_ref(),
)
},
)
})
.filter_map(|(metadata_file_path, case_idx, mode, status)| {
status.map(|status| (metadata_file_path, case_idx, mode, status))
})
.map(|(metadata_file_path, case_idx, mode, status)| {
(
TestSpecifier {
metadata_file_path: Cow::Borrowed(
remove_prefix
.iter()
.filter_map(|prefix| {
metadata_file_path.as_inner().strip_prefix(prefix).ok()
})
.next()
.unwrap_or(metadata_file_path.as_inner()),
),
case_idx: case_idx.into_inner(),
mode: Cow::Borrowed(mode),
},
Status::from(status),
)
})
.filter(|(_, status)| *status == Status::Failed)
.collect::<Expectations>();
let output_file = OpenOptions::new()
.truncate(true)
.create(true)
.write(true)
.open(output_file)
.context("Failed to create the output file")?;
serde_json::to_writer_pretty(output_file, &expectations)
.context("Failed to write the expectations to file")?;
}
Cli::CompareExpectationFiles {
base_expectation_path,
other_expectation_path,
} => {
let keys = base_expectation_path
.keys()
.chain(other_expectation_path.keys())
.collect::<BTreeSet<_>>();
for key in keys {
let base_status = base_expectation_path.get(key).context(format!(
"Entry not found in the base expectations: \"{}\"",
key
))?;
let other_status = other_expectation_path.get(key).context(format!(
"Entry not found in the other expectations: \"{}\"",
key
))?;
if base_status != other_status {
bail!(
"Expectations for entry \"{}\" have changed. They were {:?} and now they are {:?}",
key,
base_status,
other_status
)
}
}
}
};
Ok(())
}
type Expectations<'a> = BTreeMap<TestSpecifier<'a>, Status>;
/// A tool that's used to process the reports generated by the retester binary in various ways.
#[derive(Clone, Debug, Parser)]
#[command(name = "retester", term_width = 100)]
pub enum Cli {
/// Generates an expectation file out of a given report.
GenerateExpectationsFile {
/// The path of the report's JSON file to generate the expectations file for.
#[clap(long)]
report_path: JsonFile<Report>,
/// The path of the output file to generate.
///
/// Note that we expect that:
/// 1. The provided path points to a JSON file.
/// 2. The ancestors of the provided path already exist such that no directory creations
///    are required.
#[clap(long)]
output_path: PathBuf,
/// Prefix paths to remove from the paths in the final expectations file.
#[clap(long)]
remove_prefix: Vec<PathBuf>,
},
/// Compares two expectation files to ensure that they match each other.
CompareExpectationFiles {
/// The path of the base expectation file.
#[clap(long)]
base_expectation_path: JsonFile<Expectations<'static>>,
/// The path of the other expectation file.
#[clap(long)]
other_expectation_path: JsonFile<Expectations<'static>>,
},
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub enum Status {
Succeeded,
Failed,
Ignored,
}
impl From<TestCaseStatus> for Status {
fn from(value: TestCaseStatus) -> Self {
match value {
TestCaseStatus::Succeeded { .. } => Self::Succeeded,
TestCaseStatus::Failed { .. } => Self::Failed,
TestCaseStatus::Ignored { .. } => Self::Ignored,
}
}
}
impl<'a> From<&'a TestCaseStatus> for Status {
fn from(value: &'a TestCaseStatus) -> Self {
match value {
TestCaseStatus::Succeeded { .. } => Self::Succeeded,
TestCaseStatus::Failed { .. } => Self::Failed,
TestCaseStatus::Ignored { .. } => Self::Ignored,
}
}
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct JsonFile<T> {
path: PathBuf,
content: Box<T>,
}
impl<T> Deref for JsonFile<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.content
}
}
impl<T> DerefMut for JsonFile<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.content
}
}
impl<T> FromStr for JsonFile<T>
where
T: DeserializeOwned,
{
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let path = PathBuf::from(s);
let file = File::open(&path).context("Failed to open the file")?;
serde_json::from_reader(&file)
.map(|content| Self { path, content })
.context(format!(
"Failed to deserialize file's content as {}",
std::any::type_name::<T>()
))
}
}
impl<T> Display for JsonFile<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Display::fmt(&self.path.display(), f)
}
}
impl<T> From<JsonFile<T>> for String {
fn from(value: JsonFile<T>) -> Self {
value.to_string()
}
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct TestSpecifier<'a> {
pub metadata_file_path: Cow<'a, Path>,
pub case_idx: usize,
pub mode: Cow<'a, Mode>,
}
impl<'a> Display for TestSpecifier<'a> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}::{}::{}",
self.metadata_file_path.display(),
self.case_idx,
self.mode
)
}
}
impl<'a> From<TestSpecifier<'a>> for ParsedTestSpecifier {
fn from(
TestSpecifier {
metadata_file_path,
case_idx,
mode,
}: TestSpecifier,
) -> Self {
Self::CaseWithMode {
metadata_file_path: metadata_file_path.to_path_buf(),
case_idx,
mode: mode.into_owned(),
}
}
}
impl TryFrom<ParsedTestSpecifier> for TestSpecifier<'static> {
type Error = Error;
fn try_from(value: ParsedTestSpecifier) -> Result<Self> {
let ParsedTestSpecifier::CaseWithMode {
metadata_file_path,
case_idx,
mode,
} = value
else {
bail!("Expected a full test case specifier")
};
Ok(Self {
metadata_file_path: Cow::Owned(metadata_file_path),
case_idx,
mode: Cow::Owned(mode),
})
}
}
impl<'a> Serialize for TestSpecifier<'a> {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
self.to_string().serialize(serializer)
}
}
impl<'d, 'a> Deserialize<'d> for TestSpecifier<'a> {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: serde::Deserializer<'d>,
{
let string = String::deserialize(deserializer)?;
let mut splitted = string.split("::");
let (Some(metadata_file_path), Some(case_idx), Some(mode), None) = (
splitted.next(),
splitted.next(),
splitted.next(),
splitted.next(),
) else {
return Err(serde::de::Error::custom(
"Test specifier doesn't contain the components required",
));
};
let metadata_file_path = PathBuf::from(metadata_file_path);
let case_idx = usize::from_str(case_idx)
.map_err(|_| serde::de::Error::custom("Case idx is not a usize"))?;
let mode = Mode::from_str(mode).map_err(|_| serde::de::Error::custom("Invalid mode"))?;
Ok(Self {
metadata_file_path: Cow::Owned(metadata_file_path),
case_idx,
mode: Cow::Owned(mode),
})
}
}
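For reference, given the `Serialize` and `Deserialize` implementations above, an expectations file is a flat JSON object mapping `metadata_file_path::case_idx::mode` specifier strings to a `Status`, and since the generator keeps only failures, every value is `Failed`. The paths and mode strings below are invented for illustration:

    {
      "tests/solidity/simple/example.json::0::M3": "Failed",
      "tests/solidity/simple/example.json::2::M0": "Failed"
    }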
+40 -32
@@ -36,6 +36,8 @@ pub struct ReportAggregator {
     runner_tx: Option<UnboundedSender<RunnerEvent>>,
     runner_rx: UnboundedReceiver<RunnerEvent>,
     listener_tx: Sender<ReporterEvent>,
+    /* Context */
+    file_name: Option<String>,
 }

 impl ReportAggregator {
@@ -43,6 +45,11 @@ impl ReportAggregator {
         let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>();
         let (listener_tx, _) = channel::<ReporterEvent>(0xFFFF);
         Self {
+            file_name: match context {
+                Context::Test(ref context) => context.report_configuration.file_name.clone(),
+                Context::Benchmark(ref context) => context.report_configuration.file_name.clone(),
+                Context::ExportJsonSchema | Context::ExportGenesis(..) => None,
+            },
             report: Report::new(context),
             remaining_cases: Default::default(),
             runner_tx: Some(runner_tx),
@@ -51,7 +58,7 @@ impl ReportAggregator {
         }
     }

-    pub fn into_task(mut self) -> (Reporter, impl Future<Output = Result<()>>) {
+    pub fn into_task(mut self) -> (Reporter, impl Future<Output = Result<Report>>) {
         let reporter = self
             .runner_tx
             .take()
@@ -60,7 +67,7 @@ impl ReportAggregator {
         (reporter, async move { self.aggregate().await })
     }

-    async fn aggregate(mut self) -> Result<()> {
+    async fn aggregate(mut self) -> Result<Report> {
         debug!("Starting to aggregate report");

         while let Some(event) = self.runner_rx.recv().await {
@@ -121,7 +128,7 @@ impl ReportAggregator {
         self.handle_completion(CompletionEvent {});
         debug!("Report aggregation completed");

-        let file_name = {
+        let default_file_name = {
             let current_timestamp = SystemTime::now()
                 .duration_since(UNIX_EPOCH)
                 .context("System clock is before UNIX_EPOCH; cannot compute report timestamp")?
@@ -130,6 +137,7 @@ impl ReportAggregator {
             file_name.push_str(".json");
             file_name
         };
+        let file_name = self.file_name.unwrap_or(default_file_name);
         let file_path = self
             .report
             .context
@@ -152,7 +160,7 @@ impl ReportAggregator {
             format!("Failed to serialize report JSON to {}", file_path.display())
         })?;

-        Ok(())
+        Ok(self.report)
     }

     fn handle_subscribe_to_events_event(&self, event: SubscribeToEventsEvent) {
@@ -562,7 +570,7 @@ pub struct Report {
     /// The list of metadata files that were found by the tool.
     pub metadata_files: BTreeSet<MetadataFilePath>,
     /// Metrics from the execution.
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub metrics: Option<Metrics>,
     /// Information relating to each test case.
     pub execution_information: BTreeMap<MetadataFilePath, MetadataFileReport>,
@@ -582,7 +590,7 @@ impl Report {
 #[derive(Clone, Debug, Serialize, Deserialize, Default)]
 pub struct MetadataFileReport {
     /// Metrics from the execution.
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub metrics: Option<Metrics>,
     /// The report of each case keyed by the case idx.
     pub case_reports: BTreeMap<CaseIdx, CaseReport>,
@@ -592,7 +600,7 @@ pub struct MetadataFileReport {
 #[derive(Clone, Debug, Serialize, Deserialize, Default)]
 pub struct CaseReport {
     /// Metrics from the execution.
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub metrics: Option<Metrics>,
     /// The [`ExecutionReport`] for each one of the [`Mode`]s.
     #[serde_as(as = "HashMap<DisplayFromStr, _>")]
@@ -602,31 +610,31 @@ pub struct CaseReport {
 #[derive(Clone, Debug, Serialize, Deserialize, Default)]
 pub struct ExecutionReport {
     /// Information on the status of the test case and whether it succeeded, failed, or was ignored.
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub status: Option<TestCaseStatus>,
     /// Metrics from the execution.
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub metrics: Option<Metrics>,
     /// Information related to the execution on one of the platforms.
-    #[serde(skip_serializing_if = "BTreeMap::is_empty")]
+    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
     pub platform_execution: PlatformKeyedInformation<Option<ExecutionInformation>>,
     /// Information on the compiled contracts.
-    #[serde(skip_serializing_if = "BTreeMap::is_empty")]
+    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
     pub compiled_contracts: BTreeMap<PathBuf, BTreeMap<String, ContractInformation>>,
     /// The addresses of the deployed contracts.
-    #[serde(skip_serializing_if = "BTreeMap::is_empty")]
+    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
     pub contract_addresses: BTreeMap<ContractInstance, PlatformKeyedInformation<Vec<Address>>>,
     /// Information on the mined blocks as part of this execution.
-    #[serde(skip_serializing_if = "BTreeMap::is_empty")]
+    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
     pub mined_block_information: PlatformKeyedInformation<Vec<MinedBlockInformation>>,
     /// Information tracked for each step that was executed.
-    #[serde(skip_serializing_if = "BTreeMap::is_empty")]
+    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
     pub steps: BTreeMap<StepPath, StepReport>,
 }

 /// Information related to the status of the test. Could be that the test succeeded, failed, or that
 /// it was ignored.
-#[derive(Clone, Debug, Serialize, Deserialize)]
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
 #[serde(tag = "status")]
 pub enum TestCaseStatus {
     /// The test case succeeded.
@@ -664,19 +672,19 @@ pub struct TestCaseNodeInformation {
 #[derive(Clone, Debug, Default, Serialize, Deserialize)]
 pub struct ExecutionInformation {
     /// Information related to the node assigned to this test case.
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub node: Option<TestCaseNodeInformation>,
     /// Information on the pre-link compiled contracts.
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub pre_link_compilation_status: Option<CompilationStatus>,
     /// Information on the post-link compiled contracts.
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub post_link_compilation_status: Option<CompilationStatus>,
     /// Information on the deployed libraries.
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub deployed_libraries: Option<BTreeMap<ContractInstance, Address>>,
     /// Information on the deployed contracts.
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub deployed_contracts: Option<BTreeMap<ContractInstance, Address>>,
 }

@@ -695,11 +703,11 @@ pub enum CompilationStatus {
         /// The input provided to the compiler to compile the contracts. This is only included if
         /// the appropriate flag is set in the CLI context and if the contracts were not cached and
         /// the compiler was invoked.
-        #[serde(skip_serializing_if = "Option::is_none")]
+        #[serde(default, skip_serializing_if = "Option::is_none")]
         compiler_input: Option<CompilerInput>,
         /// The output of the compiler. This is only included if the appropriate flag is set in the
         /// CLI contexts.
-        #[serde(skip_serializing_if = "Option::is_none")]
+        #[serde(default, skip_serializing_if = "Option::is_none")]
         compiler_output: Option<CompilerOutput>,
     },
     /// The compilation failed.
@@ -707,15 +715,15 @@ pub enum CompilationStatus {
         /// The failure reason.
         reason: String,
         /// The version of the compiler used to compile the contracts.
-        #[serde(skip_serializing_if = "Option::is_none")]
+        #[serde(default, skip_serializing_if = "Option::is_none")]
         compiler_version: Option<Version>,
         /// The path of the compiler used to compile the contracts.
-        #[serde(skip_serializing_if = "Option::is_none")]
+        #[serde(default, skip_serializing_if = "Option::is_none")]
         compiler_path: Option<PathBuf>,
         /// The input provided to the compiler to compile the contracts. This is only included if
         /// the appropriate flag is set in the CLI context and if the contracts were not cached and
         /// the compiler was invoked.
-        #[serde(skip_serializing_if = "Option::is_none")]
+        #[serde(default, skip_serializing_if = "Option::is_none")]
         compiler_input: Option<CompilerInput>,
     },
 }

@@ -743,24 +751,24 @@ pub struct Metrics {
     pub gas_per_second: Metric<u64>,
     /* Block Fullness */
     pub gas_block_fullness: Metric<u64>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub ref_time_block_fullness: Option<Metric<u64>>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub proof_size_block_fullness: Option<Metric<u64>>,
 }

 /// The data that we store for a given metric (e.g., TPS).
 #[derive(Clone, Debug, Default, Serialize, Deserialize)]
 pub struct Metric<T> {
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub minimum: Option<PlatformKeyedInformation<T>>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub maximum: Option<PlatformKeyedInformation<T>>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub mean: Option<PlatformKeyedInformation<T>>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub median: Option<PlatformKeyedInformation<T>>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub raw: Option<PlatformKeyedInformation<Vec<T>>>,
 }
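The recurring change in this file pairs `default` with `skip_serializing_if` so that a report whose optional fields were omitted during serialization can be deserialized again, which the report processor relies on when reading reports back in. A self-contained sketch of why `default` matters, using a map-typed field (serde treats missing `Option` fields as `None` anyway, so there the attribute is belt-and-braces; the struct below is invented for illustration):

    use std::collections::BTreeMap;

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize)]
    struct Example {
        // Omitted from the JSON when empty; without `default`, deserializing that
        // JSON fails with a "missing field" error.
        #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
        steps: BTreeMap<String, u64>,
    }

    fn main() {
        let json = serde_json::to_string(&Example { steps: BTreeMap::new() }).unwrap();
        assert_eq!(json, "{}");
        let back: Example = serde_json::from_str(&json).unwrap();
        assert!(back.steps.is_empty());
    }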
+98 -29
@@ -28,7 +28,7 @@ from __future__ import annotations
 import json
 import sys
 import csv
-from typing import List, Mapping, TypedDict
+from typing import List, Mapping, TypedDict, no_type_check


 class EthereumMinedBlockInformation(TypedDict):
@@ -69,7 +69,43 @@ class MinedBlockInformation(TypedDict):
     """Block-level information for a mined block with both EVM and optional Substrate fields."""

     ethereum_block_information: EthereumMinedBlockInformation
-    substrate_block_information: SubstrateMinedBlockInformation
+    substrate_block_information: SubstrateMinedBlockInformation | None
+
+
+def substrate_block_information_ref_time(
+    block: SubstrateMinedBlockInformation | None,
+) -> int | None:
+    if block is None:
+        return None
+    else:
+        return block["ref_time"]
+
+
+def substrate_block_information_max_ref_time(
+    block: SubstrateMinedBlockInformation | None,
+) -> int | None:
+    if block is None:
+        return None
+    else:
+        return block["max_ref_time"]
+
+
+def substrate_block_information_proof_size(
+    block: SubstrateMinedBlockInformation | None,
+) -> int | None:
+    if block is None:
+        return None
+    else:
+        return block["proof_size"]
+
+
+def substrate_block_information_max_proof_size(
+    block: SubstrateMinedBlockInformation | None,
+) -> int | None:
+    if block is None:
+        return None
+    else:
+        return block["max_proof_size"]


 class Metric(TypedDict):
@@ -100,8 +136,19 @@ class Metrics(TypedDict):
     transaction_per_second: Metric
     gas_per_second: Metric
     gas_block_fullness: Metric
-    ref_time_block_fullness: Metric
-    proof_size_block_fullness: Metric
+    ref_time_block_fullness: Metric | None
+    proof_size_block_fullness: Metric | None
+
+
+@no_type_check
+def metrics_raw_item(
+    metrics: Metrics, name: str, target: str, index: int
+) -> int | None:
+    l: list[int] = metrics.get(name, dict()).get("raw", dict()).get(target, dict())
+    try:
+        return l[index]
+    except:
+        return None


 class ExecutionReport(TypedDict):
@@ -144,12 +191,15 @@ BlockInformation = TypedDict(
         "Transaction Count": int,
         "TPS": int | None,
         "GPS": int | None,
-        "Ref Time": int,
-        "Max Ref Time": int,
-        "Block Fullness Ref Time": int,
-        "Proof Size": int,
-        "Max Proof Size": int,
-        "Block Fullness Proof Size": int,
+        "Gas Mined": int,
+        "Block Gas Limit": int,
+        "Block Fullness Gas": float,
+        "Ref Time": int | None,
+        "Max Ref Time": int | None,
+        "Block Fullness Ref Time": int | None,
+        "Proof Size": int | None,
+        "Max Proof Size": int | None,
+        "Block Fullness Proof Size": int | None,
     },
 )
 """A typed dictionary used to hold all of the block information"""
@@ -175,7 +225,7 @@ def main() -> None:
     report: ReportRoot = load_report(report_path)

     # TODO: Remove this in the future, but for now, the target is fixed.
-    target: str = "revive-dev-node-revm-solc"
+    target: str = sys.argv[2]

     csv_writer = csv.writer(sys.stdout)
@@ -188,6 +238,12 @@ def main() -> None:
     resolved_blocks: list[BlockInformation] = []
     for i, block_information in enumerate(blocks_information):
+        mined_gas: int = block_information["ethereum_block_information"][
+            "mined_gas"
+        ]
+        block_gas_limit: int = block_information[
+            "ethereum_block_information"
+        ]["block_gas_limit"]
         resolved_blocks.append(
             {
                 "Block Number": block_information[
@@ -216,24 +272,37 @@
                         "raw"
                     ][target][i - 1]
                 ),
-                "Ref Time": block_information[
-                    "substrate_block_information"
-                ]["ref_time"],
-                "Max Ref Time": block_information[
-                    "substrate_block_information"
-                ]["max_ref_time"],
-                "Block Fullness Ref Time": execution_report["metrics"][
-                    "ref_time_block_fullness"
-                ]["raw"][target][i],
-                "Proof Size": block_information[
-                    "substrate_block_information"
-                ]["proof_size"],
-                "Max Proof Size": block_information[
-                    "substrate_block_information"
-                ]["max_proof_size"],
-                "Block Fullness Proof Size": execution_report["metrics"][
-                    "proof_size_block_fullness"
-                ]["raw"][target][i],
+                "Gas Mined": block_information[
+                    "ethereum_block_information"
+                ]["mined_gas"],
+                "Block Gas Limit": block_information[
+                    "ethereum_block_information"
+                ]["block_gas_limit"],
+                "Block Fullness Gas": mined_gas / block_gas_limit,
+                "Ref Time": substrate_block_information_ref_time(
+                    block_information["substrate_block_information"]
+                ),
+                "Max Ref Time": substrate_block_information_max_ref_time(
+                    block_information["substrate_block_information"]
+                ),
+                "Block Fullness Ref Time": metrics_raw_item(
+                    execution_report["metrics"],
+                    "ref_time_block_fullness",
+                    target,
+                    i,
+                ),
+                "Proof Size": substrate_block_information_proof_size(
+                    block_information["substrate_block_information"]
+                ),
+                "Max Proof Size": substrate_block_information_max_proof_size(
+                    block_information["substrate_block_information"]
+                ),
+                "Block Fullness Proof Size": metrics_raw_item(
+                    execution_report["metrics"],
+                    "proof_size_block_fullness",
+                    target,
+                    i,
+                ),
             }
         )
+69 -36
@@ -5,51 +5,54 @@ CI. The full models used in the JSON report can be found in the revive different
 the models used in this script are just a partial reproduction of the full report models.
 """

-from typing import TypedDict, Literal, Union
-import json, io
+import json, typing, io, sys


-class Report(TypedDict):
+class Report(typing.TypedDict):
     context: "Context"
-    execution_information: dict[
-        "MetadataFilePathString",
-        dict["ModeString", dict["CaseIdxString", "CaseReport"]],
-    ]
+    execution_information: dict["MetadataFilePathString", "MetadataFileReport"]


-class Context(TypedDict):
+class MetadataFileReport(typing.TypedDict):
+    case_reports: dict["CaseIdxString", "CaseReport"]
+
+
+class CaseReport(typing.TypedDict):
+    mode_execution_reports: dict["ModeString", "ExecutionReport"]
+
+
+class ExecutionReport(typing.TypedDict):
+    status: "TestCaseStatus"
+
+
+class Context(typing.TypedDict):
     Test: "TestContext"


-class TestContext(TypedDict):
+class TestContext(typing.TypedDict):
     corpus_configuration: "CorpusConfiguration"


-class CorpusConfiguration(TypedDict):
+class CorpusConfiguration(typing.TypedDict):
     test_specifiers: list["TestSpecifier"]


-class CaseReport(TypedDict):
-    status: "CaseStatus"
-
-
-class CaseStatusSuccess(TypedDict):
-    status: Literal["Succeeded"]
+class CaseStatusSuccess(typing.TypedDict):
+    status: typing.Literal["Succeeded"]
     steps_executed: int


-class CaseStatusFailure(TypedDict):
-    status: Literal["Failed"]
+class CaseStatusFailure(typing.TypedDict):
+    status: typing.Literal["Failed"]
     reason: str


-class CaseStatusIgnored(TypedDict):
-    status: Literal["Ignored"]
+class CaseStatusIgnored(typing.TypedDict):
+    status: typing.Literal["Ignored"]
     reason: str


-CaseStatus = Union[CaseStatusSuccess, CaseStatusFailure, CaseStatusIgnored]
+TestCaseStatus = typing.Union[CaseStatusSuccess, CaseStatusFailure, CaseStatusIgnored]
 """A union type of all of the possible statuses that could be reported for a case."""

 TestSpecifier = str
@@ -64,6 +67,12 @@ MetadataFilePathString = str
 CaseIdxString = str
 """The index of a case as a string. For example '0'"""

+PlatformString = typing.Union[
+    typing.Literal["revive-dev-node-revm-solc"],
+    typing.Literal["revive-dev-node-polkavm-resolc"],
+]
+"""A string of the platform on which the test was run"""
+

 def path_relative_to_resolc_compiler_test_directory(path: str) -> str:
     """
@@ -78,12 +87,22 @@ def path_relative_to_resolc_compiler_test_directory(path: str) -> str:
 def main() -> None:
-    with open("report.json", "r") as file:
+    with open(sys.argv[1], "r") as file:
         report: Report = json.load(file)

+    # Getting the platform string and resolving it into a simpler version of
+    # itself.
+    platform_identifier: PlatformString = typing.cast(PlatformString, sys.argv[2])
+    if platform_identifier == "revive-dev-node-polkavm-resolc":
+        platform: str = "PolkaVM"
+    elif platform_identifier == "revive-dev-node-revm-solc":
+        platform: str = "REVM"
+    else:
+        platform: str = platform_identifier
+
     # Starting the markdown document and adding information to it as we go.
     markdown_document: io.TextIOWrapper = open("report.md", "w")
-    print("# Differential Tests Results", file=markdown_document)
+    print(f"# Differential Tests Results ({platform})", file=markdown_document)

     # Getting all of the test specifiers from the report and making them relative to the tests dir.
     test_specifiers: list[str] = list(
@@ -94,7 +113,7 @@ def main() -> None:
     )
     print("## Specified Tests", file=markdown_document)
     for test_specifier in test_specifiers:
-        print(f"* `{test_specifier}`", file=markdown_document)
+        print(f"* ``{test_specifier}``", file=markdown_document)

     # Counting the total number of test cases, successes, failures, and ignored tests
     total_number_of_cases: int = 0
@@ -102,9 +121,13 @@ def main() -> None:
     total_number_of_failures: int = 0
     total_number_of_ignores: int = 0
     for _, mode_to_case_mapping in report["execution_information"].items():
-        for _, case_idx_to_report_mapping in mode_to_case_mapping.items():
-            for _, case_report in case_idx_to_report_mapping.items():
-                status: CaseStatus = case_report["status"]
+        for _, case_idx_to_report_mapping in mode_to_case_mapping[
+            "case_reports"
+        ].items():
+            for _, execution_report in case_idx_to_report_mapping[
+                "mode_execution_reports"
+            ].items():
+                status: TestCaseStatus = execution_report["status"]

                 total_number_of_cases += 1
                 if status["status"] == "Succeeded":
@@ -144,9 +167,13 @@ def main() -> None:
     for metadata_file_path, mode_to_case_mapping in report[
         "execution_information"
     ].items():
-        for mode_string, case_idx_to_report_mapping in mode_to_case_mapping.items():
-            for case_idx_string, case_report in case_idx_to_report_mapping.items():
-                status: CaseStatus = case_report["status"]
+        for case_idx_string, case_idx_to_report_mapping in mode_to_case_mapping[
+            "case_reports"
+        ].items():
+            for mode_string, execution_report in case_idx_to_report_mapping[
+                "mode_execution_reports"
+            ].items():
+                status: TestCaseStatus = execution_report["status"]
                 metadata_file_path: str = (
                     path_relative_to_resolc_compiler_test_directory(metadata_file_path)
                 )
@@ -183,9 +210,13 @@ def main() -> None:
     for metadata_file_path, mode_to_case_mapping in report[
         "execution_information"
     ].items():
-        for mode_string, case_idx_to_report_mapping in mode_to_case_mapping.items():
-            for case_idx_string, case_report in case_idx_to_report_mapping.items():
-                status: CaseStatus = case_report["status"]
+        for case_idx_string, case_idx_to_report_mapping in mode_to_case_mapping[
+            "case_reports"
+        ].items():
+            for mode_string, execution_report in case_idx_to_report_mapping[
+                "mode_execution_reports"
+            ].items():
+                status: TestCaseStatus = execution_report["status"]
                 metadata_file_path: str = (
                     path_relative_to_resolc_compiler_test_directory(metadata_file_path)
                 )
@@ -194,7 +225,9 @@ def main() -> None:
                 if status["status"] != "Failed":
                     continue

-                failure_reason: str = status["reason"].replace("\n", " ")
+                failure_reason: str = (
+                    status["reason"].replace("\n", " ").replace("|", " ")
+                )
                 note: str = ""
                 modes_where_this_case_succeeded: set[ModeString] = (
@@ -212,7 +245,7 @@ def main() -> None:
                     f"{metadata_file_path}::{case_idx_string}::{mode_string}"
                 )
                 print(
-                    f"| `{test_specifier}` | `{failure_reason}` | {note} |",
+                    f"| ``{test_specifier}`` | ``{failure_reason}`` | {note} |",
                     file=markdown_document,
                 )

         print("\n\n</details>", file=markdown_document)