Generic Normalize impl for arithmetic and npos-elections (#6374)

* add normalize

* better api for normalize

* Some grumbles

* Update primitives/arithmetic/src/lib.rs

Co-authored-by: Guillaume Thiolliere <gui.thiolliere@gmail.com>

* More great review grumbles

* Way better doc for everything.

* Some improvement

* Update primitives/arithmetic/src/lib.rs

Co-authored-by: Bernhard Schuster <bernhard@ahoi.io>

Co-authored-by: Guillaume Thiolliere <gui.thiolliere@gmail.com>
Co-authored-by: Bernhard Schuster <bernhard@ahoi.io>
This commit is contained in:
Kian Paimani
2020-06-24 15:32:50 +02:00
committed by GitHub
parent b14b472edf
commit e016a49322
15 changed files with 809 additions and 246 deletions
@@ -44,7 +44,9 @@ enum Mode {
}
pub fn new_test_ext(iterations: u32) -> sp_io::TestExternalities {
let mut ext: sp_io::TestExternalities = frame_system::GenesisConfig::default().build_storage::<mock::Test>().map(Into::into)
let mut ext: sp_io::TestExternalities = frame_system::GenesisConfig::default()
.build_storage::<mock::Test>()
.map(Into::into)
.expect("Failed to create test externalities.");
let (offchain, offchain_state) = TestOffchainExt::new();
@@ -70,26 +72,29 @@ fn main() {
loop {
fuzz!(|data: (u32, u32, u32, u32, u32)| {
let (mut num_validators, mut num_nominators, mut edge_per_voter, mut to_elect, mode_u32) = data;
// always run with 5 iterations.
let mut ext = new_test_ext(5);
let mode: Mode = unsafe { std::mem::transmute(mode_u32) };
num_validators = to_range(num_validators, 50, 1000);
num_nominators = to_range(num_nominators, 50, 2000);
edge_per_voter = to_range(edge_per_voter, 1, 16);
to_elect = to_range(to_elect, 20, num_validators);
let do_reduce = true;
println!("+++ instance with params {} / {} / {} / {:?}({}) / {}",
println!("+++ instance with params {} / {} / {} / {} / {:?}({})",
num_nominators,
num_validators,
edge_per_voter,
to_elect,
mode,
mode_u32,
to_elect,
);
ext.execute_with(|| {
// initial setup
init_active_era();
assert_ok!(create_validators_with_nominators_for_era::<Test>(
num_validators,
num_nominators,
@@ -97,11 +102,11 @@ fn main() {
true,
None,
));
<EraElectionStatus<Test>>::put(ElectionStatus::Open(1));
assert!(<Staking<Test>>::create_stakers_snapshot().0);
let origin = RawOrigin::Signed(create_funded_user::<Test>("fuzzer", 0, 100));
println!("++ Chain setup done.");
let origin = RawOrigin::Signed(create_funded_user::<Test>("fuzzer", 0, 100));
// stuff to submit
let (winners, compact, score, size) = match mode {
@@ -141,8 +146,6 @@ fn main() {
}
};
println!("++ Submission ready. Score = {:?}", score);
// must have chosen correct number of winners.
assert_eq!(winners.len() as u32, <Staking<Test>>::validator_count());
@@ -203,7 +203,8 @@ pub fn prepare_submission<T: Trait>(
}
// Convert back to ratio assignment. This takes less space.
let low_accuracy_assignment = sp_npos_elections::assignment_staked_to_ratio(staked);
let low_accuracy_assignment = sp_npos_elections::assignment_staked_to_ratio_normalized(staked)
.map_err(|e| OffchainElectionError::from(e))?;
// convert back to staked to compute the score in the receiver's accuracy. This can be done
// nicer, for now we do it as such since this code is not time-critical. This ensure that the
+2 -5
View File
@@ -201,11 +201,8 @@ pub fn get_weak_solution<T: Trait>(
};
// convert back to ratio assignment. This takes less space.
let low_accuracy_assignment: Vec<Assignment<T::AccountId, OffchainAccuracy>> =
staked_assignments
.into_iter()
.map(|sa| sa.into_assignment(true))
.collect();
let low_accuracy_assignment = assignment_staked_to_ratio_normalized(staked_assignments)
.expect("Failed to normalize");
// re-calculate score based on what the chain will decode.
let score = {
@@ -24,6 +24,10 @@ num-traits = "0.2"
name = "biguint"
path = "src/biguint.rs"
[[bin]]
name = "normalize"
path = "src/normalize.rs"
[[bin]]
name = "per_thing_rational"
path = "src/per_thing_rational.rs"
@@ -0,0 +1,62 @@
// This file is part of Substrate.
// Copyright (C) 2020 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # Running
//! Running this fuzzer can be done with `cargo hfuzz run normalize`. `honggfuzz` CLI options can
//! be used by setting `HFUZZ_RUN_ARGS`, such as `-n 4` to use 4 threads.
//!
//! # Debugging a panic
//! Once a panic is found, it can be debugged with
//! `cargo hfuzz run-debug normalize hfuzz_workspace/normalize/*.fuzz`.
use honggfuzz::fuzz;
use sp_arithmetic::Normalizable;
use std::convert::TryInto;
fn main() {
	// `normalize` is expected to error when `sum(input)` cannot fit in `T` (here `T = u32`,
	// with sums tracked in `u128`) or when the input length cannot fit in `T`.
	let sum_limit = u32::max_value() as u128;
	let len_limit: usize = u32::max_value().try_into().unwrap();

	loop {
		fuzz!(|data: (Vec<u32>, u32)| {
			let (data, norm) = data;
			// trivial case: nothing to normalize.
			if data.len() == 0 { return; }
			// sum of the raw input, computed in u128 so it cannot itself overflow.
			let pre_sum: u128 = data.iter().map(|x| *x as u128).sum();

			let normalized = data.normalize(norm);
			// error cases.
			if pre_sum > sum_limit || data.len() > len_limit {
				assert!(normalized.is_err())
			} else {
				if let Ok(normalized) = normalized {
					// if sum goes beyond u128, panic.
					let sum: u128 = normalized.iter().map(|x| *x as u128).sum();

					// if this function returns Ok(), then it will ALWAYS be accurate.
					assert_eq!(
						sum,
						norm as u128,
						"sums don't match {:?}, {}",
						normalized,
						norm,
					);
				}
			}
		})
	}
}
@@ -114,7 +114,7 @@ fn main() {
}
}
fn assert_per_thing_equal_error<T: PerThing>(a: T, b: T, err: u128) {
fn assert_per_thing_equal_error<P: PerThing>(a: P, b: P, err: u128) {
let a_abs = a.deconstruct().saturated_into::<u128>();
let b_abs = b.deconstruct().saturated_into::<u128>();
let diff = a_abs.max(b_abs) - a_abs.min(b_abs);
+361 -3
View File
@@ -41,10 +41,11 @@ mod fixed_point;
mod rational128;
pub use fixed_point::{FixedPointNumber, FixedPointOperand, FixedI64, FixedI128, FixedU128};
pub use per_things::{PerThing, InnerOf, Percent, PerU16, Permill, Perbill, Perquintill};
pub use per_things::{PerThing, InnerOf, UpperOf, Percent, PerU16, Permill, Perbill, Perquintill};
pub use rational128::Rational128;
use sp_std::cmp::Ordering;
use sp_std::{prelude::*, cmp::Ordering, fmt::Debug, convert::TryInto};
use traits::{BaseArithmetic, One, Zero, SaturatedConversion, Unsigned};
/// Trait for comparing two numbers with a threshold.
///
@@ -85,8 +86,365 @@ where
}
}
/// A collection-like object that is made of values of type `T` and can normalize its individual
/// values around a centric point.
///
/// Note that the order of items in the collection may affect the result.
pub trait Normalizable<T> {
	/// Normalize self around `targeted_sum`.
	///
	/// Only returns `Ok` if the new sum of results is guaranteed to be equal to `targeted_sum`.
	/// Else, returns an error explaining why it failed to do so. The input collection itself is
	/// not mutated; the normalized values are returned as a new `Vec`.
	fn normalize(&self, targeted_sum: T) -> Result<Vec<T>, &'static str>;
}
// Implements `Normalizable` for `Vec<$numeric>` for each listed unsigned integer type by
// delegating to the free-standing `normalize` function.
macro_rules! impl_normalize_for_numeric {
	($($numeric:ty),*) => {
		$(
			impl Normalizable<$numeric> for Vec<$numeric> {
				fn normalize(&self, targeted_sum: $numeric) -> Result<Vec<$numeric>, &'static str> {
					normalize(self.as_ref(), targeted_sum)
				}
			}
		)*
	};
}

impl_normalize_for_numeric!(u8, u16, u32, u64, u128);
impl<P: PerThing> Normalizable<P> for Vec<P> {
	/// Normalize a collection of per-things around `targeted_sum`.
	///
	/// The computation is carried out in the `Upper` type of the per-thing, so that summing the
	/// inner parts cannot overflow (relevant e.g. for `PerU16`, whose inner type equals its
	/// capacity).
	fn normalize(&self, targeted_sum: P) -> Result<Vec<P>, &'static str> {
		// lift every inner value into the upper type.
		let mut upper_values: Vec<UpperOf<P>> = Vec::with_capacity(self.len());
		for per_thing in self.iter() {
			upper_values.push(per_thing.clone().deconstruct().into());
		}

		// normalize in the upper type, then convert each result back into a per-thing.
		let normalized = normalize(upper_values.as_ref(), targeted_sum.deconstruct().into())?;
		let result = normalized
			.into_iter()
			.map(|upper: UpperOf<P>| P::from_parts(upper.saturated_into()))
			.collect();
		Ok(result)
	}
}
/// Normalize `input` so that the sum of all elements reaches `targeted_sum`.
///
/// This implementation is currently in a balanced position between being performant and accurate.
///
/// 1. We prefer storing original indices, and sorting the `input` only once. This will save the
///    cost of sorting per round at the cost of a little bit of memory.
/// 2. The granularity of increment/decrements is determined by the number of elements in `input`
///    and their sum difference with `targeted_sum`, namely `diff = diff(sum(input), target_sum)`.
///    This value is then distributed into `per_round = diff / input.len()` and `leftover = diff %
///    round`. First, per_round is applied to all elements of input, and then we move to leftover,
///    in which case we add/subtract 1 by 1 until `leftover` is depleted.
///
/// When the sum is less than the target, the above approach always holds. In this case, then each
/// individual element is also less than target. Thus, by adding `per_round` to each item, neither
/// of them can overflow the numeric bound of `T`. In fact, neither of them can go beyond
/// `target_sum`*.
///
/// If sum is more than target, there is a small twist. The subtraction of `per_round`
/// from each element might go below zero. In this case, we saturate and add the error to the
/// `leftover` value. This ensures that the result will always stay accurate, yet it might cause the
/// execution to become increasingly slow, since leftovers are applied one by one.
///
/// All in all, the complicated case above is rare to happen in all substrate use cases, hence we
/// opt for it due to its simplicity.
///
/// This function will return an error if the length of `input` cannot fit in `T`, or if
/// `sum(input)` cannot fit inside `T`.
///
/// * This proof is used in the implementation as well.
pub fn normalize<T>(input: &[T], targeted_sum: T) -> Result<Vec<T>, &'static str>
	where T: Clone + Copy + Ord + BaseArithmetic + Unsigned + Debug,
{
	// compute sum and return error if failed.
	let mut sum = T::zero();
	for t in input.iter() {
		sum = sum.checked_add(t).ok_or("sum of input cannot fit in `T`")?;
	}

	// convert count and return error if failed.
	let count = input.len();
	let count_t: T = count.try_into().map_err(|_| "length of `inputs` cannot fit in `T`")?;

	// Nothing to do here.
	if count.is_zero() {
		return Ok(Vec::<T>::new());
	}

	// absolute difference between the current sum and the target.
	let diff = targeted_sum.max(sum) - targeted_sum.min(sum);
	if diff.is_zero() {
		return Ok(input.to_vec());
	}

	// true if values must be increased to reach the target, false if they must be decreased.
	let needs_bump = targeted_sum > sum;
	let per_round = diff / count_t;
	let mut leftover = diff % count_t;

	// sort output once based on diff. This will require more data transfer and saving original
	// index, but we sort only twice instead: once now and once at the very end.
	let mut output_with_idx = input.iter().cloned().enumerate().collect::<Vec<(usize, T)>>();
	output_with_idx.sort_unstable_by_key(|x| x.1);

	if needs_bump {
		// must increase the values a bit. Bump from the min element. Index of minimum is now zero
		// because we did a sort. If at any point the min goes greater or equal the `max_threshold`,
		// we move to the next minimum.
		let mut min_index = 0;
		// at this threshold we move to next index.
		let threshold = targeted_sum / count_t;

		if !per_round.is_zero() {
			for _ in 0..count {
				// safe per the module doc: each element is below `targeted_sum`, so adding
				// `per_round` cannot overflow `T`.
				output_with_idx[min_index].1 = output_with_idx[min_index].1
					.checked_add(&per_round)
					.expect("Proof provided in the module doc; qed.");
				if output_with_idx[min_index].1 >= threshold {
					min_index += 1;
					min_index = min_index % count;
				}
			}
		}

		// continue with the previous min_index
		while !leftover.is_zero() {
			output_with_idx[min_index].1 = output_with_idx[min_index].1
				.checked_add(&T::one())
				.expect("Proof provided in the module doc; qed.");
			if output_with_idx[min_index].1 >= threshold {
				min_index += 1;
				min_index = min_index % count;
			}
			leftover -= One::one()
		}
	} else {
		// must decrease the stakes a bit. decrement from the max element. index of maximum is now
		// last. if at any point the max goes less or equal the `min_threshold`, we move to the next
		// maximum.
		let mut max_index = count - 1;
		// at this threshold we move to next index.
		let threshold = output_with_idx
			.first()
			.expect("length of input is greater than zero; it must have a first; qed")
			.1;

		if !per_round.is_zero() {
			for _ in 0..count {
				output_with_idx[max_index].1 = output_with_idx[max_index].1
					.checked_sub(&per_round)
					.unwrap_or_else(|| {
						// subtraction would underflow: saturate at zero and carry the
						// un-applied remainder over to `leftover` so accuracy is preserved.
						let remainder = per_round - output_with_idx[max_index].1;
						leftover += remainder;
						output_with_idx[max_index].1.saturating_sub(per_round)
					});
				if output_with_idx[max_index].1 <= threshold {
					max_index = max_index.checked_sub(1).unwrap_or(count - 1);
				}
			}
		}

		// continue with the previous max_index
		while !leftover.is_zero() {
			if let Some(next) = output_with_idx[max_index].1.checked_sub(&One::one()) {
				output_with_idx[max_index].1 = next;
				if output_with_idx[max_index].1 <= threshold {
					max_index = max_index.checked_sub(1).unwrap_or(count - 1);
				}
				leftover -= One::one()
			} else {
				// element already zero; skip to the previous element (wrapping).
				max_index = max_index.checked_sub(1).unwrap_or(count - 1);
			}
		}
	}

	// debug-only invariant check: the normalized values must sum exactly to the target.
	debug_assert_eq!(
		output_with_idx.iter().fold(T::zero(), |acc, (_, x)| acc + *x),
		targeted_sum,
		"sum({:?}) != {:?}",
		output_with_idx,
		targeted_sum,
	);

	// sort again based on the original index.
	output_with_idx.sort_unstable_by_key(|x| x.0);
	Ok(output_with_idx.into_iter().map(|(_, t)| t).collect())
}
#[cfg(test)]
mod tests {
mod normalize_tests {
use super::*;
#[test]
fn work_for_all_types() {
macro_rules! test_for {
($type:ty) => {
assert_eq!(
normalize(vec![8 as $type, 9, 7, 10].as_ref(), 40).unwrap(),
vec![10, 10, 10, 10],
);
}
}
// it should work for all types as long as the length of vector can be converted to T.
test_for!(u128);
test_for!(u64);
test_for!(u32);
test_for!(u16);
test_for!(u8);
}
#[test]
fn fails_on_if_input_sum_large() {
assert!(normalize(vec![1u8; 255].as_ref(), 10).is_ok());
assert_eq!(
normalize(vec![1u8; 256].as_ref(), 10),
Err("sum of input cannot fit in `T`"),
);
}
#[test]
fn does_not_fail_on_subtraction_overflow() {
assert_eq!(
normalize(vec![1u8, 100, 100].as_ref(), 10).unwrap(),
vec![1, 9, 0],
);
assert_eq!(
normalize(vec![1u8, 8, 9].as_ref(), 1).unwrap(),
vec![0, 1, 0],
);
}
#[test]
fn works_for_vec() {
assert_eq!(vec![8u32, 9, 7, 10].normalize(40).unwrap(), vec![10u32, 10, 10, 10]);
}
#[test]
fn works_for_per_thing() {
assert_eq!(
vec![
Perbill::from_percent(33),
Perbill::from_percent(33),
Perbill::from_percent(33)
].normalize(Perbill::one()).unwrap(),
vec![
Perbill::from_parts(333333334),
Perbill::from_parts(333333333),
Perbill::from_parts(333333333),
]
);
assert_eq!(
vec![
Perbill::from_percent(20),
Perbill::from_percent(15),
Perbill::from_percent(30)
].normalize(Perbill::one()).unwrap(),
vec![
Perbill::from_parts(316666668),
Perbill::from_parts(383333332),
Perbill::from_parts(300000000),
]
);
}
#[test]
fn can_work_for_peru16() {
// Peru16 is a rather special case; since inner type is exactly the same as capacity, we
// could have a situation where the sum cannot be calculated in the inner type. Calculating
// using the upper type of the per_thing should assure this to be okay.
assert_eq!(
vec![
PerU16::from_percent(40),
PerU16::from_percent(40),
PerU16::from_percent(40),
].normalize(PerU16::one()).unwrap(),
vec![
PerU16::from_parts(21845), // 33%
PerU16::from_parts(21845), // 33%
PerU16::from_parts(21845), // 33%
]
);
}
#[test]
fn normalize_works_all_le() {
assert_eq!(
normalize(vec![8u32, 9, 7, 10].as_ref(), 40).unwrap(),
vec![10, 10, 10, 10],
);
assert_eq!(
normalize(vec![7u32, 7, 7, 7].as_ref(), 40).unwrap(),
vec![10, 10, 10, 10],
);
assert_eq!(
normalize(vec![7u32, 7, 7, 10].as_ref(), 40).unwrap(),
vec![11, 11, 8, 10],
);
assert_eq!(
normalize(vec![7u32, 8, 7, 10].as_ref(), 40).unwrap(),
vec![11, 8, 11, 10],
);
assert_eq!(
normalize(vec![7u32, 7, 8, 10].as_ref(), 40).unwrap(),
vec![11, 11, 8, 10],
);
}
#[test]
fn normalize_works_some_ge() {
assert_eq!(
normalize(vec![8u32, 11, 9, 10].as_ref(), 40).unwrap(),
vec![10, 11, 9, 10],
);
}
#[test]
fn always_inc_min() {
assert_eq!(
normalize(vec![10u32, 7, 10, 10].as_ref(), 40).unwrap(),
vec![10, 10, 10, 10],
);
assert_eq!(
normalize(vec![10u32, 10, 7, 10].as_ref(), 40).unwrap(),
vec![10, 10, 10, 10],
);
assert_eq!(
normalize(vec![10u32, 10, 10, 7].as_ref(), 40).unwrap(),
vec![10, 10, 10, 10],
);
}
#[test]
fn normalize_works_all_ge() {
assert_eq!(
normalize(vec![12u32, 11, 13, 10].as_ref(), 40).unwrap(),
vec![10, 10, 10, 10],
);
assert_eq!(
normalize(vec![13u32, 13, 13, 13].as_ref(), 40).unwrap(),
vec![10, 10, 10, 10],
);
assert_eq!(
normalize(vec![13u32, 13, 13, 10].as_ref(), 40).unwrap(),
vec![12, 9, 9, 10],
);
assert_eq!(
normalize(vec![13u32, 12, 13, 10].as_ref(), 40).unwrap(),
vec![9, 12, 9, 10],
);
assert_eq!(
normalize(vec![13u32, 13, 12, 10].as_ref(), 40).unwrap(),
vec![9, 9, 12, 10],
);
}
}
#[cfg(test)]
mod threshold_compare_tests {
use super::*;
use crate::traits::Saturating;
use sp_std::cmp::Ordering;
@@ -21,24 +21,29 @@ use serde::{Serialize, Deserialize};
use sp_std::{ops, fmt, prelude::*, convert::TryInto};
use codec::{Encode, CompactAs};
use crate::traits::{
SaturatedConversion, UniqueSaturatedInto, Saturating, BaseArithmetic, Bounded, Zero,
SaturatedConversion, UniqueSaturatedInto, Saturating, BaseArithmetic, Bounded, Zero, Unsigned,
};
use sp_debug_derive::RuntimeDebug;
/// Get the inner type of a `PerThing`.
pub type InnerOf<P> = <P as PerThing>::Inner;
/// Get the upper type of a `PerThing`.
pub type UpperOf<P> = <P as PerThing>::Upper;
/// Something that implements a fixed point ratio with an arbitrary granularity `X`, as _parts per
/// `X`_.
pub trait PerThing:
Sized + Saturating + Copy + Default + Eq + PartialEq + Ord + PartialOrd + Bounded + fmt::Debug
{
/// The data type used to build this per-thingy.
type Inner: BaseArithmetic + Copy + fmt::Debug;
type Inner: BaseArithmetic + Unsigned + Copy + fmt::Debug;
/// A data type larger than `Self::Inner`, used to avoid overflow in some computations.
/// It must be able to compute `ACCURACY^2`.
type Upper: BaseArithmetic + Copy + From<Self::Inner> + TryInto<Self::Inner> + fmt::Debug;
type Upper:
BaseArithmetic + Copy + From<Self::Inner> + TryInto<Self::Inner> +
UniqueSaturatedInto<Self::Inner> + Unsigned + fmt::Debug;
/// The accuracy of this type.
const ACCURACY: Self::Inner;
@@ -22,7 +22,7 @@ use codec::HasCompact;
pub use integer_sqrt::IntegerSquareRoot;
pub use num_traits::{
Zero, One, Bounded, CheckedAdd, CheckedSub, CheckedMul, CheckedDiv, CheckedNeg,
CheckedShl, CheckedShr, checked_pow, Signed
CheckedShl, CheckedShr, checked_pow, Signed, Unsigned,
};
use sp_std::ops::{
Add, Sub, Mul, Div, Rem, AddAssign, SubAssign, MulAssign, DivAssign,
@@ -59,8 +59,8 @@ mod bench_closure_and_slice {
}
/// Converts a vector of ratio assignments into ones with absolute budget value.
pub fn assignment_ratio_to_staked_slice<A: IdentifierT, T: PerThing>(
ratio: Vec<Assignment<A, T>>,
pub fn assignment_ratio_to_staked_slice<A: IdentifierT, P: PerThing>(
ratio: Vec<Assignment<A, P>>,
stakes: &[VoteWeight],
) -> Vec<StakedAssignment<A>>
where
@@ -17,37 +17,72 @@
//! Helper methods for npos-elections.
use crate::{Assignment, ExtendedBalance, VoteWeight, IdentifierT, StakedAssignment, WithApprovalOf};
use sp_arithmetic::PerThing;
use crate::{Assignment, ExtendedBalance, VoteWeight, IdentifierT, StakedAssignment, WithApprovalOf, Error};
use sp_arithmetic::{PerThing, InnerOf};
use sp_std::prelude::*;
/// Converts a vector of ratio assignments into ones with absolute budget value.
pub fn assignment_ratio_to_staked<A: IdentifierT, T: PerThing, FS>(
ratio: Vec<Assignment<A, T>>,
///
/// Note that this will NOT attempt at normalizing the result.
pub fn assignment_ratio_to_staked<A: IdentifierT, P: PerThing, FS>(
ratio: Vec<Assignment<A, P>>,
stake_of: FS,
) -> Vec<StakedAssignment<A>>
where
for<'r> FS: Fn(&'r A) -> VoteWeight,
T: sp_std::ops::Mul<ExtendedBalance, Output = ExtendedBalance>,
ExtendedBalance: From<<T as PerThing>::Inner>,
P: sp_std::ops::Mul<ExtendedBalance, Output = ExtendedBalance>,
ExtendedBalance: From<InnerOf<P>>,
{
ratio
.into_iter()
.map(|a| {
let stake = stake_of(&a.who);
a.into_staked(stake.into(), true)
a.into_staked(stake.into())
})
.collect()
}
/// Converts a vector of staked assignments into ones with ratio values.
pub fn assignment_staked_to_ratio<A: IdentifierT, T: PerThing>(
staked: Vec<StakedAssignment<A>>,
) -> Vec<Assignment<A, T>>
/// Same as [`assignment_ratio_to_staked`], but additionally tries to normalize the result.
///
/// Each resulting staked assignment is normalized so that the sum of its distribution equals the
/// voter's stake as reported by `stake_of`. Returns `Error::ArithmeticError` if any normalization
/// fails.
pub fn assignment_ratio_to_staked_normalized<A: IdentifierT, P: PerThing, FS>(
	ratio: Vec<Assignment<A, P>>,
	stake_of: FS,
) -> Result<Vec<StakedAssignment<A>>, Error>
where
	for<'r> FS: Fn(&'r A) -> VoteWeight,
	P: sp_std::ops::Mul<ExtendedBalance, Output = ExtendedBalance>,
	ExtendedBalance: From<InnerOf<P>>,
{
	let mut staked = assignment_ratio_to_staked(ratio, &stake_of);
	// normalize each assignment in place; short-circuit on the first arithmetic error.
	staked.iter_mut().map(|a|
		a.try_normalize(stake_of(&a.who).into()).map_err(|err| Error::ArithmeticError(err))
	).collect::<Result<_, _>>()?;
	Ok(staked)
}
/// Converts a vector of staked assignments into ones with ratio values.
///
/// Note that this will NOT attempt at normalizing the result.
pub fn assignment_staked_to_ratio<A: IdentifierT, P: PerThing>(
	staked: Vec<StakedAssignment<A>>,
) -> Vec<Assignment<A, P>>
where
	ExtendedBalance: From<InnerOf<P>>,
{
	// convert each staked assignment independently; no normalization is performed.
	let mut ratio = Vec::with_capacity(staked.len());
	for staked_assignment in staked {
		ratio.push(staked_assignment.into_assignment());
	}
	ratio
}
/// Same as [`assignment_staked_to_ratio`], but additionally tries to normalize the result.
///
/// Each resulting ratio assignment is normalized so that its distribution sums to exactly 100%.
/// Returns `Error::ArithmeticError` if any normalization fails.
pub fn assignment_staked_to_ratio_normalized<A: IdentifierT, P: PerThing>(
	staked: Vec<StakedAssignment<A>>,
) -> Result<Vec<Assignment<A, P>>, Error>
where
	ExtendedBalance: From<InnerOf<P>>,
{
	let mut ratio = staked.into_iter().map(|a| a.into_assignment()).collect::<Vec<_>>();
	// normalize each assignment in place; short-circuit on the first arithmetic error.
	ratio.iter_mut().map(|a|
		a.try_normalize().map_err(|err| Error::ArithmeticError(err))
	).collect::<Result<_, _>>()?;
	Ok(ratio)
}
/// consumes a vector of winners with backing stake to just winners.
+60 -54
View File
@@ -30,7 +30,7 @@
use sp_std::{prelude::*, collections::btree_map::BTreeMap, fmt::Debug, cmp::Ordering, convert::TryFrom};
use sp_arithmetic::{
PerThing, Rational128, ThresholdOrd,
PerThing, Rational128, ThresholdOrd, InnerOf, Normalizable,
helpers_128bit::multiply_by_rational,
traits::{Zero, Saturating, Bounded, SaturatedConversion},
};
@@ -84,6 +84,8 @@ pub enum Error {
CompactTargetOverflow,
/// One of the index functions returned none.
CompactInvalidIndex,
/// An error occurred in some arithmetic operation.
ArithmeticError(&'static str),
}
/// A type which is used in the API of this crate as a numeric weight of a vote, most often the
@@ -155,16 +157,16 @@ pub struct ElectionResult<AccountId, T: PerThing> {
/// A voter's stake assignment among a set of targets, represented as ratios.
#[derive(Debug, Clone, Default)]
#[cfg_attr(feature = "std", derive(PartialEq, Eq, Encode, Decode))]
pub struct Assignment<AccountId, T: PerThing> {
pub struct Assignment<AccountId, P: PerThing> {
/// Voter's identifier.
pub who: AccountId,
/// The distribution of the voter's stake.
pub distribution: Vec<(AccountId, T)>,
pub distribution: Vec<(AccountId, P)>,
}
impl<AccountId, T: PerThing> Assignment<AccountId, T>
impl<AccountId: IdentifierT, P: PerThing> Assignment<AccountId, P>
where
ExtendedBalance: From<<T as PerThing>::Inner>,
ExtendedBalance: From<InnerOf<P>>,
{
/// Convert from a ratio assignment into one with absolute values aka. [`StakedAssignment`].
///
@@ -173,50 +175,49 @@ where
/// distribution's sum is exactly equal to the total budget, by adding or subtracting the
/// remainder from the last distribution.
///
/// If an edge ratio is [`Bounded::max_value()`], it is dropped. This edge can never mean
/// If an edge ratio is [`Bounded::min_value()`], it is dropped. This edge can never mean
/// anything useful.
pub fn into_staked(self, stake: ExtendedBalance, fill: bool) -> StakedAssignment<AccountId>
pub fn into_staked(self, stake: ExtendedBalance) -> StakedAssignment<AccountId>
where
T: sp_std::ops::Mul<ExtendedBalance, Output = ExtendedBalance>,
P: sp_std::ops::Mul<ExtendedBalance, Output = ExtendedBalance>,
{
let mut sum: ExtendedBalance = Bounded::min_value();
let mut distribution = self
.distribution
let distribution = self.distribution
.into_iter()
.filter_map(|(target, p)| {
// if this ratio is zero, then skip it.
if p == Bounded::min_value() {
if p.is_zero() {
None
} else {
// NOTE: this mul impl will always round to the nearest number, so we might both
// overflow and underflow.
let distribution_stake = p * stake;
// defensive only. We assume that balance cannot exceed extended balance.
sum = sum.saturating_add(distribution_stake);
Some((target, distribution_stake))
}
})
.collect::<Vec<(AccountId, ExtendedBalance)>>();
if fill {
// NOTE: we can do this better.
// https://revs.runtime-revolution.com/getting-100-with-rounded-percentages-273ffa70252b
if let Some(leftover) = stake.checked_sub(sum) {
if let Some(last) = distribution.last_mut() {
last.1 = last.1.saturating_add(leftover);
}
} else if let Some(excess) = sum.checked_sub(stake) {
if let Some(last) = distribution.last_mut() {
last.1 = last.1.saturating_sub(excess);
}
}
}
StakedAssignment {
who: self.who,
distribution,
}
}
	/// Try and normalize this assignment.
	///
	/// If `Ok(())` is returned, then the assignment MUST have been successfully normalized to 100%.
	///
	/// # Errors
	/// Forwards the error of the underlying `normalize` call (e.g. when the sum of ratios cannot
	/// be computed in the upper type); in that case `self` is left unchanged.
	pub fn try_normalize(&mut self) -> Result<(), &'static str> {
		// collect the ratios, normalize them around `P::one()` (100%), then write the corrected
		// values back in place, preserving the target account of each edge.
		self.distribution
			.iter()
			.map(|(_, p)| *p)
			.collect::<Vec<_>>()
			.normalize(P::one())
			.map(|normalized_ratios|
				self.distribution
					.iter_mut()
					.zip(normalized_ratios)
					.for_each(|((_, old), corrected)| { *old = corrected; })
			)
	}
}
/// A voter's stake assignment among a set of targets, represented as absolute values in the scale
@@ -243,42 +244,23 @@ impl<AccountId> StakedAssignment<AccountId> {
///
/// If an edge stake is so small that it cannot be represented in `T`, it is ignored. This edge
/// can never be re-created and does not mean anything useful anymore.
pub fn into_assignment<T: PerThing>(self, fill: bool) -> Assignment<AccountId, T>
pub fn into_assignment<P: PerThing>(self) -> Assignment<AccountId, P>
where
ExtendedBalance: From<<T as PerThing>::Inner>,
ExtendedBalance: From<InnerOf<P>>,
AccountId: IdentifierT,
{
let accuracy: u128 = T::ACCURACY.saturated_into();
let mut sum: u128 = Zero::zero();
let stake = self.distribution.iter().map(|x| x.1).sum();
let mut distribution = self
.distribution
let stake = self.total();
let distribution = self.distribution
.into_iter()
.filter_map(|(target, w)| {
let per_thing = T::from_rational_approximation(w, stake);
let per_thing = P::from_rational_approximation(w, stake);
if per_thing == Bounded::min_value() {
None
} else {
sum += per_thing.clone().deconstruct().saturated_into();
Some((target, per_thing))
}
})
.collect::<Vec<(AccountId, T)>>();
if fill {
if let Some(leftover) = accuracy.checked_sub(sum) {
if let Some(last) = distribution.last_mut() {
last.1 = last.1.saturating_add(
T::from_parts(leftover.saturated_into())
);
}
} else if let Some(excess) = sum.checked_sub(accuracy) {
if let Some(last) = distribution.last_mut() {
last.1 = last.1.saturating_sub(
T::from_parts(excess.saturated_into())
);
}
}
}
.collect::<Vec<(AccountId, P)>>();
Assignment {
who: self.who,
@@ -286,6 +268,30 @@ impl<AccountId> StakedAssignment<AccountId> {
}
}
	/// Try and normalize this assignment.
	///
	/// If `Ok(())` is returned, then the assignment MUST have been successfully normalized to
	/// `stake`.
	///
	/// NOTE: current implementation of `.normalize` is almost safe to `expect()` upon. The only
	/// error case is when the input cannot fit in `T`, or the sum of input cannot fit in `T`.
	/// Sadly, both of these are dependent upon the implementation of `VoteLimit`, i.e. the limit
	/// of edges per voter which is enforced from upstream. Hence, at this crate, we prefer
	/// returning a result and use the name prefix `try_`.
	pub fn try_normalize(&mut self, stake: ExtendedBalance) -> Result<(), &'static str> {
		// collect the weights, normalize them around `stake`, then write the corrected values
		// back in place, preserving the target account of each edge.
		self.distribution
			.iter()
			.map(|(_, ref weight)| *weight)
			.collect::<Vec<_>>()
			.normalize(stake)
			.map(|normalized_weights|
				self.distribution
					.iter_mut()
					.zip(normalized_weights.into_iter())
					.for_each(|((_, weight), corrected)| { *weight = corrected; })
			)
	}
/// Get the total stake of this assignment (aka voter budget).
pub fn total(&self) -> ExtendedBalance {
self.distribution.iter().fold(Zero::zero(), |a, b| a.saturating_add(b.1))
+246 -154
View File
@@ -588,186 +588,278 @@ fn self_votes_should_be_kept() {
);
}
#[test]
fn assignment_convert_works() {
let staked = StakedAssignment {
who: 1 as AccountId,
distribution: vec![
(20, 100 as ExtendedBalance),
(30, 25),
],
};
mod assignment_convert_normalize {
use super::*;
#[test]
fn assignment_convert_works() {
let staked = StakedAssignment {
who: 1 as AccountId,
distribution: vec![
(20, 100 as ExtendedBalance),
(30, 25),
],
};
let assignment = staked.clone().into_assignment(true);
assert_eq!(
assignment,
Assignment {
let assignment = staked.clone().into_assignment();
assert_eq!(
assignment,
Assignment {
who: 1,
distribution: vec![
(20, Perbill::from_percent(80)),
(30, Perbill::from_percent(20)),
]
}
);
assert_eq!(
assignment.into_staked(125),
staked,
);
}
#[test]
fn assignment_convert_will_not_normalize() {
assert_eq!(
Assignment {
who: 1,
distribution: vec![
(2, Perbill::from_percent(33)),
(3, Perbill::from_percent(66)),
]
}.into_staked(100),
StakedAssignment {
who: 1,
distribution: vec![
(2, 33),
(3, 66),
// sum is not 100!
],
},
);
assert_eq!(
StakedAssignment {
who: 1,
distribution: vec![
(2, 333_333_333_333_333),
(3, 333_333_333_333_333),
(4, 666_666_666_666_333),
],
}.into_assignment(),
Assignment {
who: 1,
distribution: vec![
(2, Perbill::from_parts(250000000)),
(3, Perbill::from_parts(250000000)),
(4, Perbill::from_parts(499999999)),
// sum is not 100%!
]
},
)
}
#[test]
fn assignment_can_normalize() {
let mut a = Assignment {
who: 1,
distribution: vec![
(20, Perbill::from_percent(80)),
(30, Perbill::from_percent(20)),
(2, Perbill::from_parts(330000000)),
(3, Perbill::from_parts(660000000)),
// sum is not 100%!
]
};
a.try_normalize().unwrap();
assert_eq!(
a,
Assignment {
who: 1,
distribution: vec![
(2, Perbill::from_parts(340000000)),
(3, Perbill::from_parts(660000000)),
]
},
);
}
#[test]
fn staked_assignment_can_normalize() {
let mut a = StakedAssignment {
who: 1,
distribution: vec![
(2, 33),
(3, 66),
]
};
a.try_normalize(100).unwrap();
assert_eq!(
a,
StakedAssignment {
who: 1,
distribution: vec![
(2, 34),
(3, 66),
]
},
);
}
}
mod score {
use super::*;
#[test]
fn score_comparison_is_lexicographical_no_epsilon() {
let epsilon = Perbill::zero();
// only better in the first parameter, worse in the other two ✅
assert_eq!(
is_score_better([12, 10, 35], [10, 20, 30], epsilon),
true,
);
// worse in the first, better in the other two ❌
assert_eq!(
is_score_better([9, 30, 10], [10, 20, 30], epsilon),
false,
);
// equal in the first, the second one dictates.
assert_eq!(
is_score_better([10, 25, 40], [10, 20, 30], epsilon),
true,
);
// equal in the first two, the last one dictates.
assert_eq!(
is_score_better([10, 20, 40], [10, 20, 30], epsilon),
false,
);
}
#[test]
fn score_comparison_with_epsilon() {
let epsilon = Perbill::from_percent(1);
{
// no more than 1 percent (10) better in the first param.
assert_eq!(
is_score_better([1009, 5000, 100000], [1000, 5000, 100000], epsilon),
false,
);
// now equal, still not better.
assert_eq!(
is_score_better([1010, 5000, 100000], [1000, 5000, 100000], epsilon),
false,
);
// now it is.
assert_eq!(
is_score_better([1011, 5000, 100000], [1000, 5000, 100000], epsilon),
true,
);
}
);
assert_eq!(
assignment.into_staked(125, true),
staked,
);
}
{
// First score is epsilon better, but first score is no longer `ge`. Then this is
// still not a good solution.
assert_eq!(
is_score_better([999, 6000, 100000], [1000, 5000, 100000], epsilon),
false,
);
}
#[test]
fn score_comparison_is_lexicographical_no_epsilon() {
let epsilon = Perbill::zero();
// only better in the fist parameter, worse in the other two ✅
assert_eq!(
is_score_better([12, 10, 35], [10, 20, 30], epsilon),
true,
);
{
// first score is equal or better, but not epsilon. Then second one is the determinant.
assert_eq!(
is_score_better([1005, 5000, 100000], [1000, 5000, 100000], epsilon),
false,
);
// worse in the first, better in the other two ❌
assert_eq!(
is_score_better([9, 30, 10], [10, 20, 30], epsilon),
false,
);
assert_eq!(
is_score_better([1005, 5050, 100000], [1000, 5000, 100000], epsilon),
false,
);
// equal in the first, the second one dictates.
assert_eq!(
is_score_better([10, 25, 40], [10, 20, 30], epsilon),
true,
);
assert_eq!(
is_score_better([1005, 5051, 100000], [1000, 5000, 100000], epsilon),
true,
);
}
// equal in the first two, the last one dictates.
assert_eq!(
is_score_better([10, 20, 40], [10, 20, 30], epsilon),
false,
);
}
{
// first score and second are equal or less than epsilon more, third is determinant.
assert_eq!(
is_score_better([1005, 5025, 100000], [1000, 5000, 100000], epsilon),
false,
);
#[test]
fn score_comparison_with_epsilon() {
let epsilon = Perbill::from_percent(1);
assert_eq!(
is_score_better([1005, 5025, 99_000], [1000, 5000, 100000], epsilon),
false,
);
assert_eq!(
is_score_better([1005, 5025, 98_999], [1000, 5000, 100000], epsilon),
true,
);
}
}
#[test]
fn score_comparison_large_value() {
// some random value taken from eras in kusama.
let initial = [12488167277027543u128, 5559266368032409496, 118749283262079244270992278287436446];
// this claim is 0.04090% better in the third component. It should be accepted as better if
// epsilon is smaller than 5/10_0000
let claim = [12488167277027543u128, 5559266368032409496, 118700736389524721358337889258988054];
{
// no more than 1 percent (10) better in the first param.
assert_eq!(
is_score_better([1009, 5000, 100000], [1000, 5000, 100000], epsilon),
false,
);
// now equal, still not better.
assert_eq!(
is_score_better([1010, 5000, 100000], [1000, 5000, 100000], epsilon),
false,
);
// now it is.
assert_eq!(
is_score_better([1011, 5000, 100000], [1000, 5000, 100000], epsilon),
is_score_better(
claim.clone(),
initial.clone(),
Perbill::from_rational_approximation(1u32, 10_000),
),
true,
);
}
{
// First score score is epsilon better, but first score is no longer `ge`. Then this is
// still not a good solution.
assert_eq!(
is_score_better([999, 6000, 100000], [1000, 5000, 100000], epsilon),
false,
);
}
{
// first score is equal or better, but not epsilon. Then second one is the determinant.
assert_eq!(
is_score_better([1005, 5000, 100000], [1000, 5000, 100000], epsilon),
false,
);
assert_eq!(
is_score_better([1005, 5050, 100000], [1000, 5000, 100000], epsilon),
false,
);
assert_eq!(
is_score_better([1005, 5051, 100000], [1000, 5000, 100000], epsilon),
is_score_better(
claim.clone(),
initial.clone(),
Perbill::from_rational_approximation(2u32, 10_000),
),
true,
);
}
{
// first score and second are equal or less than epsilon more, third is determinant.
assert_eq!(
is_score_better([1005, 5025, 100000], [1000, 5000, 100000], epsilon),
false,
);
assert_eq!(
is_score_better([1005, 5025, 99_000], [1000, 5000, 100000], epsilon),
false,
);
assert_eq!(
is_score_better([1005, 5025, 98_999], [1000, 5000, 100000], epsilon),
is_score_better(
claim.clone(),
initial.clone(),
Perbill::from_rational_approximation(3u32, 10_000),
),
true,
);
assert_eq!(
is_score_better(
claim.clone(),
initial.clone(),
Perbill::from_rational_approximation(4u32, 10_000),
),
true,
);
assert_eq!(
is_score_better(
claim.clone(),
initial.clone(),
Perbill::from_rational_approximation(5u32, 10_000),
),
false,
);
}
}
#[test]
fn score_comparison_large_value() {
	// some random value taken from eras in kusama.
	let initial = [12488167277027543u128, 5559266368032409496, 118749283262079244270992278287436446];
	// this claim is 0.04090% better in the third component. It should be accepted as better if
	// epsilon is smaller than 5/10_000.
	let claim = [12488167277027543u128, 5559266368032409496, 118700736389524721358337889258988054];

	// Sweep epsilon over n/10_000 for n in 1..=5; the claim stops being accepted
	// once epsilon reaches 5/10_000.
	for &(numerator, expected) in [
		(1u32, true),
		(2, true),
		(3, true),
		(4, true),
		(5, false),
	].iter() {
		assert_eq!(
			is_score_better(
				claim.clone(),
				initial.clone(),
				Perbill::from_rational_approximation(numerator, 10_000),
			),
			expected,
		);
	}
}
mod compact {
use codec::{Decode, Encode};
use super::AccountId;
+1 -1
View File
@@ -71,7 +71,7 @@ pub use sp_core::RuntimeDebug;
/// Re-export top-level arithmetic stuff.
pub use sp_arithmetic::{
PerThing, traits::SaturatedConversion, Perquintill, Perbill, Permill, Percent, PerU16,
PerThing, traits::SaturatedConversion, Perquintill, Perbill, Permill, Percent, PerU16, InnerOf,
Rational128, FixedI64, FixedI128, FixedU128, FixedPointNumber, FixedPointOperand,
};
/// Re-export 128 bit helpers.
+1 -1
View File
@@ -38,7 +38,7 @@
/// ```
#[macro_export]
macro_rules! assert_eq_uvec {
( $x:expr, $y:expr ) => {
( $x:expr, $y:expr $(,)? ) => {
$crate::__assert_eq_uvec!($x, $y);
$crate::__assert_eq_uvec!($y, $x);
}