mirror of
https://github.com/pezkuwichain/pezkuwi-subxt.git
synced 2026-04-30 23:37:56 +00:00
Update benchmarking macros (#3934)
The current benchmarking macro returns a closure that captures the
benchmarked code.
This can cause issues when the benchmarked code has complex lifetime
requirements.
This PR updates the existing macro by injecting the recording parameter
and invoking the start/stop methods around the benchmarked block
instead of returning a closure.
Another added benefit is that you can now also write this kind of code:
```rust
let v;
#[block]
{ v = func.call(); }
dbg!(v); // or assert something on v
```
[Weights compare
link](https://weights.tasty.limo/compare?unit=weight&ignore_errors=true&threshold=10&method=asymptotic&repo=polkadot-sdk&old=pg/fix-weights&new=pg/bench_update&path_pattern=substrate/frame/**/src/weights.rs,polkadot/runtime/*/src/weights/**/*.rs,polkadot/bridges/modules/*/src/weights.rs,cumulus/**/weights/*.rs,cumulus/**/weights/xcm/*.rs,cumulus/**/src/weights.rs)
---------
Co-authored-by: command-bot <>
Co-authored-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>
Co-authored-by: Alexander Theißen <alex.theissen@me.com>
This commit is contained in:
@@ -262,15 +262,11 @@ mod benchmarks {
|
||||
let components = <SelectedBenchmark as BenchmarkingSetup<Test>>::components(&selected);
|
||||
assert_eq!(components, vec![(BenchmarkParameter::b, 1, 1000)]);
|
||||
|
||||
let closure = <SelectedBenchmark as BenchmarkingSetup<Test>>::instance(
|
||||
&selected,
|
||||
&[(BenchmarkParameter::b, 1)],
|
||||
true,
|
||||
)
|
||||
.expect("failed to create closure");
|
||||
|
||||
new_test_ext().execute_with(|| {
|
||||
assert_ok!(closure());
|
||||
assert_ok!(<SelectedBenchmark as BenchmarkingSetup<Test>>::unit_test_instance(
|
||||
&selected,
|
||||
&[(BenchmarkParameter::b, 1)],
|
||||
));
|
||||
});
|
||||
}
|
||||
|
||||
@@ -281,15 +277,11 @@ mod benchmarks {
|
||||
let components = <SelectedBenchmark as BenchmarkingSetup<Test>>::components(&selected);
|
||||
assert_eq!(components, vec![(BenchmarkParameter::b, 1, 1000)]);
|
||||
|
||||
let closure = <SelectedBenchmark as BenchmarkingSetup<Test>>::instance(
|
||||
&selected,
|
||||
&[(BenchmarkParameter::b, 1)],
|
||||
true,
|
||||
)
|
||||
.expect("failed to create closure");
|
||||
|
||||
new_test_ext().execute_with(|| {
|
||||
assert_ok!(closure());
|
||||
assert_ok!(<SelectedBenchmark as BenchmarkingSetup<Test>>::unit_test_instance(
|
||||
&selected,
|
||||
&[(BenchmarkParameter::b, 1)],
|
||||
));
|
||||
});
|
||||
}
|
||||
|
||||
@@ -300,14 +292,12 @@ mod benchmarks {
|
||||
let components = <SelectedBenchmark as BenchmarkingSetup<Test>>::components(&selected);
|
||||
assert_eq!(components, vec![(BenchmarkParameter::x, 1, 10000)]);
|
||||
|
||||
let closure = <SelectedBenchmark as BenchmarkingSetup<Test>>::instance(
|
||||
&selected,
|
||||
&[(BenchmarkParameter::x, 1)],
|
||||
true,
|
||||
)
|
||||
.expect("failed to create closure");
|
||||
|
||||
assert_ok!(closure());
|
||||
new_test_ext().execute_with(|| {
|
||||
assert_ok!(<SelectedBenchmark as BenchmarkingSetup<Test>>::unit_test_instance(
|
||||
&selected,
|
||||
&[(BenchmarkParameter::x, 1)],
|
||||
));
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -315,29 +305,24 @@ mod benchmarks {
|
||||
// Check postcondition for benchmark `set_value` is valid.
|
||||
let selected = SelectedBenchmark::set_value;
|
||||
|
||||
let closure = <SelectedBenchmark as BenchmarkingSetup<Test>>::instance(
|
||||
&selected,
|
||||
&[(BenchmarkParameter::b, 1)],
|
||||
true,
|
||||
)
|
||||
.expect("failed to create closure");
|
||||
|
||||
new_test_ext().execute_with(|| {
|
||||
assert_ok!(closure());
|
||||
assert_ok!(<SelectedBenchmark as BenchmarkingSetup<Test>>::unit_test_instance(
|
||||
&selected,
|
||||
&[(BenchmarkParameter::b, 1)],
|
||||
));
|
||||
});
|
||||
|
||||
// Check postcondition for benchmark `bad_verify` is invalid.
|
||||
let selected = SelectedBenchmark::bad_verify;
|
||||
|
||||
let closure = <SelectedBenchmark as BenchmarkingSetup<Test>>::instance(
|
||||
&selected,
|
||||
&[(BenchmarkParameter::x, 10000)],
|
||||
true,
|
||||
)
|
||||
.expect("failed to create closure");
|
||||
|
||||
new_test_ext().execute_with(|| {
|
||||
assert_err!(closure(), "You forgot to sort!");
|
||||
assert_err!(
|
||||
<SelectedBenchmark as BenchmarkingSetup<Test>>::unit_test_instance(
|
||||
&selected,
|
||||
&[(BenchmarkParameter::x, 10000)],
|
||||
),
|
||||
"You forgot to sort!"
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -345,15 +330,11 @@ mod benchmarks {
|
||||
fn benchmark_override_works() {
|
||||
let selected = SelectedBenchmark::override_benchmark;
|
||||
|
||||
let closure = <SelectedBenchmark as BenchmarkingSetup<Test>>::instance(
|
||||
&selected,
|
||||
&[(BenchmarkParameter::b, 1)],
|
||||
true,
|
||||
)
|
||||
.expect("failed to create closure");
|
||||
|
||||
new_test_ext().execute_with(|| {
|
||||
let result = closure();
|
||||
let result = <SelectedBenchmark as BenchmarkingSetup<Test>>::unit_test_instance(
|
||||
&selected,
|
||||
&[(BenchmarkParameter::b, 1)],
|
||||
);
|
||||
assert!(matches!(result, Err(BenchmarkError::Override(_))));
|
||||
});
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user