cargo +nightly fmt (#3540)

* cargo +nightly fmt

* add cargo-fmt check to ci

* update ci

* fmt

* fmt

* skip macro

* ignore bridges
This commit is contained in:
Shawn Tabrizi
2021-08-02 12:47:33 +02:00
committed by GitHub
parent 30e3012270
commit ff5d56fb76
350 changed files with 20617 additions and 21266 deletions
+76 -96
View File
@@ -24,15 +24,17 @@
//! f is the maximum number of faulty validators in the system.
//! The data is coded so any f+1 chunks can be used to reconstruct the full data.
use parity_scale_codec::{Encode, Decode};
use polkadot_primitives::v0::{self, Hash as H256, BlakeTwo256, HashT};
use parity_scale_codec::{Decode, Encode};
use polkadot_node_primitives::AvailableData;
use polkadot_primitives::v0::{self, BlakeTwo256, Hash as H256, HashT};
use sp_core::Blake2Hasher;
use trie::{EMPTY_PREFIX, MemoryDB, Trie, TrieMut, trie_types::{TrieDBMut, TrieDB}};
use thiserror::Error;
use trie::{
trie_types::{TrieDB, TrieDBMut},
MemoryDB, Trie, TrieMut, EMPTY_PREFIX,
};
use novelpoly::WrappedShard;
use novelpoly::CodeParams;
use novelpoly::{CodeParams, WrappedShard};
// we are limited to the field order of GF(2^16), which is 65536
const MAX_VALIDATORS: usize = novelpoly::f2e16::FIELD_SIZE;
@@ -63,7 +65,7 @@ pub enum Error {
UnevenLength,
/// Chunk index out of bounds.
#[error("Chunk is out of bounds: {chunk_index} not included in 0..{n_validators}")]
ChunkIndexOutOfBounds{ chunk_index: usize, n_validators: usize },
ChunkIndexOutOfBounds { chunk_index: usize, n_validators: usize },
/// Bad payload in reconstructed bytes.
#[error("Reconstructed payload invalid")]
BadPayload,
@@ -83,8 +85,12 @@ pub enum Error {
/// Obtain a threshold of chunks that should be enough to recover the data.
pub const fn recovery_threshold(n_validators: usize) -> Result<usize, Error> {
if n_validators > MAX_VALIDATORS { return Err(Error::TooManyValidators) }
if n_validators <= 1 { return Err(Error::NotEnoughValidators) }
if n_validators > MAX_VALIDATORS {
return Err(Error::TooManyValidators)
}
if n_validators <= 1 {
return Err(Error::NotEnoughValidators)
}
let needed = n_validators.saturating_sub(1) / 3;
Ok(needed + 1)
@@ -97,51 +103,47 @@ fn code_params(n_validators: usize) -> Result<CodeParams, Error> {
let k_wanted = recovery_threshold(n_wanted)?;
if n_wanted > MAX_VALIDATORS as usize {
return Err(Error::TooManyValidators);
return Err(Error::TooManyValidators)
}
CodeParams::derive_parameters(n_wanted, k_wanted)
.map_err(|e| {
match e {
novelpoly::Error::WantedShardCountTooHigh(_) => Error::TooManyValidators,
novelpoly::Error::WantedShardCountTooLow(_) => Error::NotEnoughValidators,
_ => Error::UnknownCodeParam,
}
})
CodeParams::derive_parameters(n_wanted, k_wanted).map_err(|e| match e {
novelpoly::Error::WantedShardCountTooHigh(_) => Error::TooManyValidators,
novelpoly::Error::WantedShardCountTooLow(_) => Error::NotEnoughValidators,
_ => Error::UnknownCodeParam,
})
}
/// Obtain erasure-coded chunks for v0 `AvailableData`, one for each validator.
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
pub fn obtain_chunks_v0(n_validators: usize, data: &v0::AvailableData)
-> Result<Vec<Vec<u8>>, Error>
{
pub fn obtain_chunks_v0(
n_validators: usize,
data: &v0::AvailableData,
) -> Result<Vec<Vec<u8>>, Error> {
obtain_chunks(n_validators, data)
}
/// Obtain erasure-coded chunks for v1 `AvailableData`, one for each validator.
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
pub fn obtain_chunks_v1(n_validators: usize, data: &AvailableData)
-> Result<Vec<Vec<u8>>, Error>
{
pub fn obtain_chunks_v1(n_validators: usize, data: &AvailableData) -> Result<Vec<Vec<u8>>, Error> {
obtain_chunks(n_validators, data)
}
/// Obtain erasure-coded chunks, one for each validator.
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
fn obtain_chunks<T: Encode>(n_validators: usize, data: &T)
-> Result<Vec<Vec<u8>>, Error>
{
fn obtain_chunks<T: Encode>(n_validators: usize, data: &T) -> Result<Vec<Vec<u8>>, Error> {
let params = code_params(n_validators)?;
let encoded = data.encode();
if encoded.is_empty() {
return Err(Error::BadPayload);
return Err(Error::BadPayload)
}
let shards = params.make_encoder().encode::<WrappedShard>(&encoded[..])
let shards = params
.make_encoder()
.encode::<WrappedShard>(&encoded[..])
.expect("Payload non-empty, shard sizes are uniform, and validator numbers checked; qed");
Ok(shards.into_iter().map(|w: WrappedShard| w.into_inner()).collect())
@@ -154,9 +156,9 @@ fn obtain_chunks<T: Encode>(n_validators: usize, data: &T)
/// are provided, recovery is not possible.
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
pub fn reconstruct_v0<'a, I: 'a>(n_validators: usize, chunks: I)
-> Result<v0::AvailableData, Error>
where I: IntoIterator<Item=(&'a [u8], usize)>
pub fn reconstruct_v0<'a, I: 'a>(n_validators: usize, chunks: I) -> Result<v0::AvailableData, Error>
where
I: IntoIterator<Item = (&'a [u8], usize)>,
{
reconstruct(n_validators, chunks)
}
@@ -168,9 +170,9 @@ pub fn reconstruct_v0<'a, I: 'a>(n_validators: usize, chunks: I)
/// are provided, recovery is not possible.
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
pub fn reconstruct_v1<'a, I: 'a>(n_validators: usize, chunks: I)
-> Result<AvailableData, Error>
where I: IntoIterator<Item=(&'a [u8], usize)>
pub fn reconstruct_v1<'a, I: 'a>(n_validators: usize, chunks: I) -> Result<AvailableData, Error>
where
I: IntoIterator<Item = (&'a [u8], usize)>,
{
reconstruct(n_validators, chunks)
}
@@ -183,42 +185,43 @@ pub fn reconstruct_v1<'a, I: 'a>(n_validators: usize, chunks: I)
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
fn reconstruct<'a, I: 'a, T: Decode>(n_validators: usize, chunks: I) -> Result<T, Error>
where I: IntoIterator<Item=(&'a [u8], usize)>
where
I: IntoIterator<Item = (&'a [u8], usize)>,
{
let params = code_params(n_validators)?;
let mut received_shards: Vec<Option<WrappedShard>> = vec![None; n_validators];
let mut shard_len = None;
for (chunk_data, chunk_idx) in chunks.into_iter().take(n_validators) {
if chunk_idx >= n_validators {
return Err(Error::ChunkIndexOutOfBounds{ chunk_index: chunk_idx, n_validators });
return Err(Error::ChunkIndexOutOfBounds { chunk_index: chunk_idx, n_validators })
}
let shard_len = shard_len.get_or_insert_with(|| chunk_data.len());
if *shard_len % 2 != 0 {
return Err(Error::UnevenLength);
return Err(Error::UnevenLength)
}
if *shard_len != chunk_data.len() || *shard_len == 0 {
return Err(Error::NonUniformChunks);
return Err(Error::NonUniformChunks)
}
received_shards[chunk_idx] = Some(WrappedShard::new(chunk_data.to_vec()));
}
let res = params.make_encoder().reconstruct(received_shards);
let payload_bytes= match res {
let payload_bytes = match res {
Err(e) => match e {
novelpoly::Error::NeedMoreShards { .. } => return Err(Error::NotEnoughChunks),
novelpoly::Error::ParamterMustBePowerOf2 { .. } => return Err(Error::UnevenLength),
novelpoly::Error::WantedShardCountTooHigh(_) => return Err(Error::TooManyValidators),
novelpoly::Error::WantedShardCountTooLow(_) => return Err(Error::NotEnoughValidators),
novelpoly::Error::PayloadSizeIsZero { .. } => return Err(Error::BadPayload),
novelpoly::Error::InconsistentShardLengths { .. } => return Err(Error::NonUniformChunks),
novelpoly::Error::InconsistentShardLengths { .. } =>
return Err(Error::NonUniformChunks),
_ => return Err(Error::UnknownReconstruction),
}
},
Ok(payload_bytes) => payload_bytes,
};
@@ -236,7 +239,9 @@ pub struct Branches<'a, I> {
impl<'a, I: AsRef<[u8]>> Branches<'a, I> {
	/// Get the trie root.
	pub fn root(&self) -> H256 {
		self.root.clone()
	}
}
impl<'a, I: AsRef<[u8]>> Iterator for Branches<'a, I> {
@@ -249,19 +254,18 @@ impl<'a, I: AsRef<[u8]>> Iterator for Branches<'a, I> {
.expect("`Branches` is only created with a valid memorydb that contains all nodes for the trie with given root; qed");
let mut recorder = Recorder::new();
let res = (self.current_pos as u32).using_encoded(|s|
trie.get_with(s, &mut recorder)
);
let res = (self.current_pos as u32).using_encoded(|s| trie.get_with(s, &mut recorder));
match res.expect("all nodes in trie present; qed") {
Some(_) => {
let nodes = recorder.drain().into_iter().map(|r| r.data).collect();
let chunk = self.chunks.get(self.current_pos)
.expect("there is a one-to-one mapping of chunks to valid merkle branches; qed");
let chunk = self.chunks.get(self.current_pos).expect(
"there is a one-to-one mapping of chunks to valid merkle branches; qed",
);
self.current_pos += 1;
Some((nodes, chunk.as_ref()))
}
},
None => None,
}
}
@@ -270,7 +274,8 @@ impl<'a, I: AsRef<[u8]>> Iterator for Branches<'a, I> {
/// Construct a trie from chunks of an erasure-coded value. This returns the root hash and an
/// iterator of merkle proofs, one for each validator.
pub fn branches<'a, I: 'a>(chunks: &'a [I]) -> Branches<'a, I>
where I: AsRef<[u8]>,
where
I: AsRef<[u8]>,
{
let mut trie_storage: MemoryDB<Blake2Hasher> = MemoryDB::default();
let mut root = H256::default();
@@ -287,12 +292,7 @@ pub fn branches<'a, I: 'a>(chunks: &'a [I]) -> Branches<'a, I>
}
}
Branches {
trie_storage,
root,
chunks,
current_pos: 0,
}
Branches { trie_storage, root, chunks, current_pos: 0 }
}
/// Verify a merkle branch, yielding the chunk hash meant to be present at that
@@ -304,9 +304,9 @@ pub fn branch_hash(root: &H256, branch_nodes: &[Vec<u8>], index: usize) -> Resul
}
let trie = TrieDB::new(&trie_storage, &root).map_err(|_| Error::InvalidBranchProof)?;
let res = (index as u32).using_encoded(|key|
let res = (index as u32).using_encoded(|key| {
trie.get_with(key, |raw_hash: &[u8]| H256::decode(&mut &raw_hash[..]))
);
});
match res {
Ok(Some(Ok(hash))) => Ok(hash),
@@ -323,7 +323,7 @@ struct ShardInput<'a, I> {
cur_shard: Option<(&'a [u8], usize)>,
}
impl<'a, I: Iterator<Item=&'a [u8]>> parity_scale_codec::Input for ShardInput<'a, I> {
impl<'a, I: Iterator<Item = &'a [u8]>> parity_scale_codec::Input for ShardInput<'a, I> {
fn remaining_len(&mut self) -> Result<Option<usize>, parity_scale_codec::Error> {
Ok(Some(self.remaining_len))
}
@@ -332,7 +332,9 @@ impl<'a, I: Iterator<Item=&'a [u8]>> parity_scale_codec::Input for ShardInput<'a
let mut read_bytes = 0;
loop {
if read_bytes == into.len() { break }
if read_bytes == into.len() {
break
}
let cur_shard = self.cur_shard.take().or_else(|| self.shards.next().map(|s| (s, 0)));
let (active_shard, mut in_shard) = match cur_shard {
@@ -341,15 +343,14 @@ impl<'a, I: Iterator<Item=&'a [u8]>> parity_scale_codec::Input for ShardInput<'a
};
if in_shard >= active_shard.len() {
continue;
continue
}
let remaining_len_out = into.len() - read_bytes;
let remaining_len_shard = active_shard.len() - in_shard;
let write_len = std::cmp::min(remaining_len_out, remaining_len_shard);
into[read_bytes..][..write_len]
.copy_from_slice(&active_shard[in_shard..][..write_len]);
into[read_bytes..][..write_len].copy_from_slice(&active_shard[in_shard..][..write_len]);
in_shard += write_len;
read_bytes += write_len;
@@ -377,59 +378,38 @@ mod tests {
#[test]
fn round_trip_works() {
	let pov_block = PoVBlock { block_data: BlockData((0..255).collect()) };

	let available_data = AvailableData { pov_block, omitted_validation: Default::default() };
	let chunks = obtain_chunks(10, &available_data).unwrap();

	assert_eq!(chunks.len(), 10);

	// any 4 chunks should work.
	let reconstructed: AvailableData = reconstruct(
		10,
		[(&*chunks[1], 1), (&*chunks[4], 4), (&*chunks[6], 6), (&*chunks[9], 9)]
			.iter()
			.cloned(),
	)
	.unwrap();

	assert_eq!(reconstructed, available_data);
}
#[test]
fn reconstruct_does_not_panic_on_low_validator_count() {
	let reconstructed = reconstruct_v1(1, [].iter().cloned());
	assert_eq!(reconstructed, Err(Error::NotEnoughValidators));
}
#[test]
fn construct_valid_branches() {
let pov_block = PoVBlock {
block_data: BlockData(vec![2; 256]),
};
let pov_block = PoVBlock { block_data: BlockData(vec![2; 256]) };
let available_data = AvailableData {
pov_block,
omitted_validation: Default::default(),
};
let available_data = AvailableData { pov_block, omitted_validation: Default::default() };
let chunks = obtain_chunks(
10,
&available_data,
).unwrap();
let chunks = obtain_chunks(10, &available_data).unwrap();
assert_eq!(chunks.len(), 10);