mirror of https://github.com/pezkuwichain/revive-differential-tests.git
synced 2026-04-21 23:48:01 +00:00

Added a CI action for running tests (#219)

* Add a CI action for running tests
* Update the CI action, fixing incorrect matrix usage
@@ -0,0 +1,105 @@
name: "Run Revive Differential Tests"
description: "Builds and runs revive-differential-tests (retester) from this repo against the caller's Polkadot SDK."

inputs:
  # Setup arguments & environment
  polkadot-sdk-path:
    description: "The path of the polkadot-sdk that should be compiled for the tests to run against."
    required: false
    default: "."
    type: string
  cargo-command:
    description: "The cargo command to use in compilations and running of tests (e.g., forklift cargo)."
    required: false
    default: "cargo"
    type: string
  revive-differential-tests-ref:
    description: "The branch, tag or SHA to checkout for the revive-differential-tests."
    required: false
    default: "main"
    type: string
  resolc-version:
    description: "The version of resolc to install and use in tests."
    required: false
    default: "0.5.0"
    type: string
  use-compilation-caches:
    description: "Controls if the compilation caches will be used for the test run or not."
    required: false
    default: true
    type: boolean
  # Test Execution Arguments
  platform:
    description: "The identifier of the platform to run the tests on (e.g., geth-evm-solc, revive-dev-node-revm-solc)"
    required: true
    type: string

runs:
  using: "composite"
  steps:
    - name: Checkout the Differential Tests Repository
      uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        repository: paritytech/revive-differential-tests
        ref: ${{ inputs['revive-differential-tests-ref'] }}
        path: revive-differential-tests
        submodules: recursive
    - name: Installing the Latest Resolc
      shell: bash
      if: ${{ runner.os == 'Linux' && runner.arch == 'X64' }}
      run: |
        VERSION="${{ inputs['resolc-version'] }}"
        ASSET_URL="https://github.com/paritytech/revive/releases/download/v$VERSION/resolc-x86_64-unknown-linux-musl"
        echo "Downloading resolc v$VERSION from $ASSET_URL"
        curl -Lsf --show-error -o resolc "$ASSET_URL"
        chmod +x resolc
        ./resolc --version
    - name: Installing Retester
      shell: bash
      run: ${{ inputs['cargo-command'] }} install --locked --path revive-differential-tests/crates/core
    - name: Creating a workdir for retester
      shell: bash
      run: mkdir workdir
    - name: Downloading & Initializing the compilation caches
      shell: bash
      if: ${{ inputs['use-compilation-caches'] == true }}
      run: |
        curl -fL --retry 3 --retry-all-errors --connect-timeout 10 -o cache.tar.gz "https://github.com/paritytech/revive-differential-tests/releases/download/compilation-caches-v1.1/cache.tar.gz"
        tar -zxf cache.tar.gz -C ./workdir > /dev/null 2>&1
    - name: Building the dependencies from the Polkadot SDK
      shell: bash
      run: ${{ inputs['cargo-command'] }} build --locked --profile release -p pallet-revive-eth-rpc -p revive-dev-node --manifest-path ${{ inputs['polkadot-sdk-path'] }}/Cargo.toml
    - name: Running the Differential Tests
      shell: bash
      run: |
        ${{ inputs['cargo-command'] }} run --locked --manifest-path revive-differential-tests/Cargo.toml -- test \
          --test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/simple \
          --test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/complex \
          --test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/translated_semantic_tests \
          --platform ${{ inputs['platform'] }} \
          --concurrency.number-of-nodes 10 \
          --concurrency.number-of-threads 10 \
          --concurrency.number-of-concurrent-tasks 100 \
          --working-directory ./workdir \
          --revive-dev-node.consensus manual-seal-200 \
          --revive-dev-node.path ${{ inputs['polkadot-sdk-path'] }}/target/release/revive-dev-node \
          --eth-rpc.path ${{ inputs['polkadot-sdk-path'] }}/target/release/eth-rpc \
          --resolc.path ./resolc
    - name: Creating a markdown report of the test execution
      shell: bash
      if: ${{ always() }}
      run: |
        mv ./workdir/*.json report.json
        python3 revive-differential-tests/scripts/process-differential-tests-report.py report.json ${{ inputs['platform'] }}
    - name: Upload the Report to the CI
      uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
      if: ${{ always() }}
      with:
        name: report-${{ inputs['platform'] }}.md
        path: report.md
    - name: Posting the report as a comment on the PR
      uses: marocchino/sticky-pull-request-comment@773744901bac0e8cbb5a0dc842800d45e9b2b405
      if: ${{ always() }}
      with:
        header: diff-tests-report-${{ inputs['platform'] }}
        path: report.md
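
For orientation, here is a minimal sketch of how a downstream Polkadot SDK workflow might consume the composite action above. The action path, the checkout layout, and the matrix values are illustrative assumptions rather than part of this commit; the two platform identifiers are the examples given in the action's platform input description.

# Illustrative caller workflow. Assumptions: the action's path within this
# repository and the surrounding job layout are not taken from this commit.
name: Differential Tests
on: [pull_request]
jobs:
  differential-tests:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        # One job per platform identifier accepted by the action's `platform` input.
        platform: [geth-evm-solc, revive-dev-node-revm-solc]
    steps:
      - name: Checkout the Polkadot SDK
        uses: actions/checkout@v4
      - name: Run the differential tests
        # Path assumed for illustration; point this at wherever the action lives in this repository.
        uses: paritytech/revive-differential-tests/.github/actions/run-differential-tests@main
        with:
          polkadot-sdk-path: "."
          platform: ${{ matrix.platform }}
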
+88 -6
@@ -2,9 +2,10 @@ mod differential_benchmarks;
 mod differential_tests;
 mod helpers;
 
-use anyhow::Context as _;
+use anyhow::{Context as _, bail};
 use clap::Parser;
-use revive_dt_report::ReportAggregator;
+use revive_dt_common::types::ParsedTestSpecifier;
+use revive_dt_report::{ReportAggregator, TestCaseStatus};
 use schemars::schema_for;
 use tracing::{info, level_filters::LevelFilter};
 use tracing_subscriber::{EnvFilter, FmtSubscriber};
@@ -57,8 +58,47 @@ fn main() -> anyhow::Result<()> {
             let differential_tests_handling_task =
                 handle_differential_tests(*context, reporter);
 
-            futures::future::try_join(differential_tests_handling_task, report_aggregator_task)
-                .await?;
+            let (_, report) = futures::future::try_join(
+                differential_tests_handling_task,
+                report_aggregator_task,
+            )
+            .await?;
+
+            // Error out if there are any failing tests.
+            let failures = report
+                .execution_information
+                .into_iter()
+                .flat_map(|(metadata_file_path, metadata_file_report)| {
+                    metadata_file_report.case_reports.into_iter().flat_map(
+                        move |(case_idx, case_report)| {
+                            let metadata_file_path = metadata_file_path.clone();
+                            case_report.mode_execution_reports.into_iter().filter_map(
+                                move |(mode, execution_report)| {
+                                    if let Some(TestCaseStatus::Failed { reason }) =
+                                        execution_report.status
+                                    {
+                                        let parsed_test_specifier =
+                                            ParsedTestSpecifier::CaseWithMode {
+                                                metadata_file_path: metadata_file_path
+                                                    .clone()
+                                                    .into_inner(),
+                                                case_idx: case_idx.into_inner(),
+                                                mode,
+                                            };
+                                        Some((parsed_test_specifier, reason))
+                                    } else {
+                                        None
+                                    }
+                                },
+                            )
+                        },
+                    )
+                })
+                .collect::<Vec<_>>();
+
+            if !failures.is_empty() {
+                bail!("Some tests failed: {failures:#?}")
+            }
 
             Ok(())
         }),
@@ -71,12 +111,48 @@ fn main() -> anyhow::Result<()> {
             let differential_benchmarks_handling_task =
                 handle_differential_benchmarks(*context, reporter);
 
-            futures::future::try_join(
+            let (_, report) = futures::future::try_join(
                 differential_benchmarks_handling_task,
                 report_aggregator_task,
             )
             .await?;
+
+            // Error out if there are any failing tests.
+            let failures = report
+                .execution_information
+                .into_iter()
+                .flat_map(|(metadata_file_path, metadata_file_report)| {
+                    metadata_file_report.case_reports.into_iter().flat_map(
+                        move |(case_idx, case_report)| {
+                            let metadata_file_path = metadata_file_path.clone();
+                            case_report.mode_execution_reports.into_iter().filter_map(
+                                move |(mode, execution_report)| {
+                                    if let Some(TestCaseStatus::Failed { reason }) =
+                                        execution_report.status
+                                    {
+                                        let parsed_test_specifier =
+                                            ParsedTestSpecifier::CaseWithMode {
+                                                metadata_file_path: metadata_file_path
+                                                    .clone()
+                                                    .into_inner(),
+                                                case_idx: case_idx.into_inner(),
+                                                mode,
+                                            };
+                                        Some((parsed_test_specifier, reason))
+                                    } else {
+                                        None
+                                    }
+                                },
+                            )
+                        },
+                    )
+                })
+                .collect::<Vec<_>>();
+
+            if !failures.is_empty() {
+                bail!("Some tests failed: {failures:#?}")
+            }
 
             Ok(())
         }),
         Context::ExportGenesis(ref export_genesis_context) => {
@@ -85,11 +161,17 @@ fn main() -> anyhow::Result<()> {
            let genesis_json = serde_json::to_string_pretty(&genesis)
                .context("Failed to serialize the genesis to JSON")?;
            println!("{genesis_json}");
 
            Ok(())
        }
        Context::ExportJsonSchema => {
            let schema = schema_for!(Metadata);
-            println!("{}", serde_json::to_string_pretty(&schema).unwrap());
+            println!(
+                "{}",
+                serde_json::to_string_pretty(&schema)
+                    .context("Failed to export the JSON schema")?
+            );
+
+            Ok(())
        }
    }
@@ -51,7 +51,7 @@ impl ReportAggregator {
         }
     }
 
-    pub fn into_task(mut self) -> (Reporter, impl Future<Output = Result<()>>) {
+    pub fn into_task(mut self) -> (Reporter, impl Future<Output = Result<Report>>) {
         let reporter = self
             .runner_tx
             .take()
@@ -60,7 +60,7 @@ impl ReportAggregator {
         (reporter, async move { self.aggregate().await })
     }
 
-    async fn aggregate(mut self) -> Result<()> {
+    async fn aggregate(mut self) -> Result<Report> {
         debug!("Starting to aggregate report");
 
         while let Some(event) = self.runner_rx.recv().await {
@@ -152,7 +152,7 @@ impl ReportAggregator {
             format!("Failed to serialize report JSON to {}", file_path.display())
         })?;
 
-        Ok(())
+        Ok(self.report)
     }
 
     fn handle_subscribe_to_events_event(&self, event: SubscribeToEventsEvent) {
@@ -5,51 +5,54 @@ CI. The full models used in the JSON report can be found in the revive differential
 the models used in this script are just a partial reproduction of the full report models.
 """
 
-from typing import TypedDict, Literal, Union
-
-import json, io
+import json, typing, io, sys
 
 
-class Report(TypedDict):
+class Report(typing.TypedDict):
     context: "Context"
-    execution_information: dict[
-        "MetadataFilePathString",
-        dict["ModeString", dict["CaseIdxString", "CaseReport"]],
-    ]
+    execution_information: dict["MetadataFilePathString", "MetadataFileReport"]
 
 
-class Context(TypedDict):
+class MetadataFileReport(typing.TypedDict):
+    case_reports: dict["CaseIdxString", "CaseReport"]
+
+
+class CaseReport(typing.TypedDict):
+    mode_execution_reports: dict["ModeString", "ExecutionReport"]
+
+
+class ExecutionReport(typing.TypedDict):
+    status: "TestCaseStatus"
+
+
+class Context(typing.TypedDict):
     Test: "TestContext"
 
 
-class TestContext(TypedDict):
+class TestContext(typing.TypedDict):
     corpus_configuration: "CorpusConfiguration"
 
 
-class CorpusConfiguration(TypedDict):
+class CorpusConfiguration(typing.TypedDict):
     test_specifiers: list["TestSpecifier"]
 
 
-class CaseReport(TypedDict):
-    status: "CaseStatus"
-
-
-class CaseStatusSuccess(TypedDict):
-    status: Literal["Succeeded"]
+class CaseStatusSuccess(typing.TypedDict):
+    status: typing.Literal["Succeeded"]
     steps_executed: int
 
 
-class CaseStatusFailure(TypedDict):
-    status: Literal["Failed"]
+class CaseStatusFailure(typing.TypedDict):
+    status: typing.Literal["Failed"]
     reason: str
 
 
-class CaseStatusIgnored(TypedDict):
-    status: Literal["Ignored"]
+class CaseStatusIgnored(typing.TypedDict):
+    status: typing.Literal["Ignored"]
     reason: str
 
 
-CaseStatus = Union[CaseStatusSuccess, CaseStatusFailure, CaseStatusIgnored]
+TestCaseStatus = typing.Union[CaseStatusSuccess, CaseStatusFailure, CaseStatusIgnored]
 """A union type of all of the possible statuses that could be reported for a case."""
 
 TestSpecifier = str
@@ -64,6 +67,12 @@ MetadataFilePathString = str
 CaseIdxString = str
 """The index of a case as a string. For example '0'"""
 
+PlatformString = typing.Union[
+    typing.Literal["revive-dev-node-revm-solc"],
+    typing.Literal["revive-dev-node-polkavm-resolc"],
+]
+"""A string of the platform on which the test was run"""
+
 
 def path_relative_to_resolc_compiler_test_directory(path: str) -> str:
     """
@@ -78,12 +87,22 @@ def path_relative_to_resolc_compiler_test_directory(path: str) -> str:
 
 
 def main() -> None:
-    with open("report.json", "r") as file:
+    with open(sys.argv[1], "r") as file:
         report: Report = json.load(file)
 
+    # Getting the platform string and resolving it into a simpler version of
+    # itself.
+    platform_identifier: PlatformString = typing.cast(PlatformString, sys.argv[2])
+    if platform_identifier == "revive-dev-node-polkavm-resolc":
+        platform: str = "PolkaVM"
+    elif platform_identifier == "revive-dev-node-revm-solc":
+        platform: str = "REVM"
+    else:
+        platform: str = platform_identifier
+
     # Starting the markdown document and adding information to it as we go.
     markdown_document: io.TextIOWrapper = open("report.md", "w")
-    print("# Differential Tests Results", file=markdown_document)
+    print(f"# Differential Tests Results ({platform})", file=markdown_document)
 
     # Getting all of the test specifiers from the report and making them relative to the tests dir.
     test_specifiers: list[str] = list(
@@ -94,7 +113,7 @@ def main() -> None:
     )
     print("## Specified Tests", file=markdown_document)
     for test_specifier in test_specifiers:
-        print(f"* `{test_specifier}`", file=markdown_document)
+        print(f"* ``{test_specifier}``", file=markdown_document)
 
     # Counting the total number of test cases, successes, failures, and ignored tests
     total_number_of_cases: int = 0
@@ -102,9 +121,13 @@ def main() -> None:
     total_number_of_failures: int = 0
     total_number_of_ignores: int = 0
     for _, mode_to_case_mapping in report["execution_information"].items():
-        for _, case_idx_to_report_mapping in mode_to_case_mapping.items():
-            for _, case_report in case_idx_to_report_mapping.items():
-                status: CaseStatus = case_report["status"]
+        for _, case_idx_to_report_mapping in mode_to_case_mapping[
+            "case_reports"
+        ].items():
+            for _, execution_report in case_idx_to_report_mapping[
+                "mode_execution_reports"
+            ].items():
+                status: TestCaseStatus = execution_report["status"]
 
                 total_number_of_cases += 1
                 if status["status"] == "Succeeded":
@@ -144,9 +167,13 @@ def main() -> None:
     for metadata_file_path, mode_to_case_mapping in report[
         "execution_information"
     ].items():
-        for mode_string, case_idx_to_report_mapping in mode_to_case_mapping.items():
-            for case_idx_string, case_report in case_idx_to_report_mapping.items():
-                status: CaseStatus = case_report["status"]
+        for case_idx_string, case_idx_to_report_mapping in mode_to_case_mapping[
+            "case_reports"
+        ].items():
+            for mode_string, execution_report in case_idx_to_report_mapping[
+                "mode_execution_reports"
+            ].items():
+                status: TestCaseStatus = execution_report["status"]
                 metadata_file_path: str = (
                     path_relative_to_resolc_compiler_test_directory(metadata_file_path)
                 )
@@ -183,9 +210,13 @@ def main() -> None:
     for metadata_file_path, mode_to_case_mapping in report[
         "execution_information"
     ].items():
-        for mode_string, case_idx_to_report_mapping in mode_to_case_mapping.items():
-            for case_idx_string, case_report in case_idx_to_report_mapping.items():
-                status: CaseStatus = case_report["status"]
+        for case_idx_string, case_idx_to_report_mapping in mode_to_case_mapping[
+            "case_reports"
+        ].items():
+            for mode_string, execution_report in case_idx_to_report_mapping[
+                "mode_execution_reports"
+            ].items():
+                status: TestCaseStatus = execution_report["status"]
                 metadata_file_path: str = (
                     path_relative_to_resolc_compiler_test_directory(metadata_file_path)
                 )
@@ -194,7 +225,9 @@ def main() -> None:
                 if status["status"] != "Failed":
                     continue
 
-                failure_reason: str = status["reason"].replace("\n", " ")
+                failure_reason: str = (
+                    status["reason"].replace("\n", " ").replace("|", " ")
+                )
 
                 note: str = ""
                 modes_where_this_case_succeeded: set[ModeString] = (
@@ -212,7 +245,7 @@ def main() -> None:
                         f"{metadata_file_path}::{case_idx_string}::{mode_string}"
                     )
                     print(
-                        f"| `{test_specifier}` | `{failure_reason}` | {note} |",
+                        f"| ``{test_specifier}`` | ``{failure_reason}`` | {note} |",
                         file=markdown_document,
                     )
     print("\n\n</details>", file=markdown_document)