Fixes PoV over-estimation (#13766)

* Align log

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Use max instead of sum

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Make comment ordering deterministic

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Don't add PoV overhead when all is ignored

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Update test pallet weights

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Re-run weights on bm2

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Fix test

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Actually use new weights

Botched the merge for this file...

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Update contract weights

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

---------

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>
This commit is contained in:
Oliver Tale-Yazdi
2023-04-13 15:11:58 +02:00
committed by GitHub
parent 46e2d3e5f8
commit ebfe00d590
54 changed files with 5526 additions and 5534 deletions
@@ -278,6 +278,7 @@ fn get_benchmark_data(
used_recorded_proof_size.push(ComponentSlope { name: name.clone(), slope, error });
}
});
used_recorded_proof_size.sort_by(|a, b| a.name.cmp(&b.name));
// We add additional comments showing which storage items were touched.
// We find the worst case proof size, and use that as the final proof size result.
@@ -315,12 +316,12 @@ fn get_benchmark_data(
let mut base_calculated_proof_size = 0;
// Sum up the proof sizes per component
for (_, slope, base) in proof_size_per_components.iter() {
base_calculated_proof_size += base;
base_calculated_proof_size = base_calculated_proof_size.max(*base);
for component in slope.iter() {
let mut found = false;
for used_component in used_calculated_proof_size.iter_mut() {
if used_component.name == component.name {
used_component.slope += component.slope;
used_component.slope = used_component.slope.max(component.slope);
found = true;
break
}
@@ -337,6 +338,7 @@ fn get_benchmark_data(
}
}
}
used_calculated_proof_size.sort_by(|a, b| a.name.cmp(&b.name));
// This puts a marker on any component which is entirely unused in the weight formula.
let components = batch.time_results[0]
@@ -626,7 +628,7 @@ pub(crate) fn process_storage_results(
},
};
// Add the additional trie layer overhead for every new prefix.
if *reads > 0 {
if *reads > 0 && !is_all_ignored {
prefix_result.proof_size += 15 * 33 * additional_trie_layers as u32;
}
storage_per_prefix.entry(prefix.clone()).or_default().push(prefix_result);