Fix clippy suggestions. (#179)

* Fix clippy errors.

* Cargo fmt.

* Enable clippy checks.

* Create if does not exist.

* Fix warnings and enable sccache for clippy.

* chmod +x

* Revert and ignore errors.

* Update cancel-workflow-action.

* Fixes.

* Clippy fixes.

* Fix compilation.

* Fix new clippy warnings.

* fmt --all

* Fix the rest.

* fmt --all

* Conditional.

* Bump smallvec.

* Use separate cache dir for clippy to prevent races.

* Remove unused imports in tests

* Remove "useless conversion"

* Move clippy to main workflow to avoid clashes.

* Fix clippy error.

* Fix remaining clippy errors.

* cargo fmt --all

Co-authored-by: Hernando Castano <castano.ha@gmail.com>
This commit is contained in:
Tomasz Drwięga
2020-07-20 19:17:32 +02:00
committed by Bastian Köcher
parent 65852944e3
commit bdf6901ce2
31 changed files with 159 additions and 197 deletions
-1
View File
@@ -19,7 +19,6 @@ use bridge_node_runtime::{
SessionKeys, Signature, SudoConfig, SystemConfig, WASM_BINARY,
};
use grandpa_primitives::AuthorityId as GrandpaId;
use sc_service;
use sp_consensus_aura::sr25519::AuthorityId as AuraId;
use sp_core::{sr25519, Pair, Public};
use sp_runtime::traits::{IdentifyAccount, Verify};
+2 -2
View File
@@ -188,7 +188,7 @@ pub fn new_full(config: Configuration) -> Result<impl AbstractService, ServiceEr
config: grandpa_config,
link: grandpa_link,
network: service.network(),
inherent_data_providers: inherent_data_providers.clone(),
inherent_data_providers,
telemetry_on_connect: Some(service.telemetry_on_connect_stream()),
voting_rule: grandpa::VotingRulesBuilder::default().build(),
prometheus_registry: service.prometheus_registry(),
@@ -218,7 +218,7 @@ pub fn new_light(config: Configuration) -> Result<impl AbstractService, ServiceE
.fetcher()
.ok_or_else(|| "Trying to start light transaction pool without active fetcher")?;
let pool_api = sc_transaction_pool::LightChainApi::new(builder.client().clone(), fetcher.clone());
let pool_api = sc_transaction_pool::LightChainApi::new(builder.client().clone(), fetcher);
let pool = sc_transaction_pool::BasicPool::with_revalidation_type(
builder.config().transaction_pool.clone(),
Arc::new(pool_api),
+1 -5
View File
@@ -193,17 +193,13 @@ pub(crate) fn prepare_ethereum_transaction(
payload: recipient_raw.to_vec(),
};
editor(&mut eth_tx);
eth_tx.sign_by(&signer.into(), Some(chain_id))
eth_tx.sign_by(&signer, Some(chain_id))
}
#[cfg(test)]
mod tests {
use super::*;
use hex_literal::hex;
use sp_bridge_eth_poa::{
signatures::{SecretKey, SignTransaction},
UnsignedTransaction,
};
fn ferdie() -> crate::AccountId {
hex!("1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c").into()
+3 -6
View File
@@ -108,12 +108,11 @@ pub fn genesis_header() -> Header {
gas_limit: 6000000.into(),
difficulty: 131072.into(),
seal: vec![
vec![128].into(),
vec![128],
vec![
184, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
]
.into(),
],
],
}
}
@@ -128,9 +127,7 @@ pub struct PruningStrategy;
impl BridgePruningStrategy for PruningStrategy {
fn pruning_upper_bound(&mut self, _best_number: u64, best_finalized_number: u64) -> u64 {
best_finalized_number
.checked_sub(FINALIZED_HEADERS_TO_KEEP)
.unwrap_or(0)
best_finalized_number.saturating_sub(FINALIZED_HEADERS_TO_KEEP)
}
}
+4
View File
@@ -19,6 +19,10 @@
#![cfg_attr(not(feature = "std"), no_std)]
// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256.
#![recursion_limit = "256"]
// Runtime-generated enums
#![allow(clippy::large_enum_variant)]
// Runtime-generated DecodeLimit::decode_all_With_depth_limit
#![allow(clippy::unnecessary_mut_passed)]
// Make the WASM binary available.
#[cfg(feature = "std")]
+3 -5
View File
@@ -82,10 +82,10 @@ pub fn genesis_header() -> Header {
gas_used: Default::default(),
gas_limit: 0x222222.into(),
difficulty: 0x20000.into(),
seal: vec![vec![0x80].into(), {
seal: vec![vec![0x80], {
let mut vec = vec![0xb8, 0x41];
vec.resize(67, 0);
vec.into()
vec
}],
}
}
@@ -100,9 +100,7 @@ pub struct PruningStrategy;
impl TPruningStrategy for PruningStrategy {
fn pruning_upper_bound(&mut self, _best_number: u64, best_finalized_number: u64) -> u64 {
best_finalized_number
.checked_sub(FINALIZED_HEADERS_TO_KEEP)
.unwrap_or(0)
best_finalized_number.saturating_sub(FINALIZED_HEADERS_TO_KEEP)
}
}
+5 -7
View File
@@ -132,7 +132,7 @@ decl_module! {
match deposit_result {
Ok(_) => (),
Err(ExchangeError::DepositPartiallyFailed) => (),
Err(error) => Err(Error::<T>::from(error))?,
Err(error) => return Err(Error::<T>::from(error).into()),
}
Transfers::<T>::insert(&transfer_id, ())
}
@@ -273,12 +273,10 @@ mod tests {
type Amount = u64;
fn deposit_into(_recipient: Self::Recipient, amount: Self::Amount) -> sp_currency_exchange::Result<()> {
if amount < MAX_DEPOSIT_AMOUNT * 10 {
Ok(())
} else if amount == MAX_DEPOSIT_AMOUNT * 10 {
Err(ExchangeError::DepositPartiallyFailed)
} else {
Err(ExchangeError::DepositFailed)
match amount {
amount if amount < MAX_DEPOSIT_AMOUNT * 10 => Ok(()),
amount if amount == MAX_DEPOSIT_AMOUNT * 10 => Err(ExchangeError::DepositPartiallyFailed),
_ => Err(ExchangeError::DepositFailed),
}
}
}
@@ -35,7 +35,7 @@ pub enum Error {
/// Failed to decode finality proof.
FinalityProofDecode(codec::Error),
/// Failed to verify justification.
JustificationVerify(ClientError),
JustificationVerify(Box<ClientError>),
}
/// Substrate header.
@@ -120,6 +120,7 @@ pub fn verify_substrate_finality_proof(
best_set_id,
&best_set,
)
.map_err(Box::new)
.map_err(Error::JustificationVerify)
.map(|_| ())
}
@@ -246,17 +247,16 @@ mod tests {
/// Number of the example block with justification.
const EXAMPLE_JUSTIFIED_BLOCK_NUMBER: u32 = 8;
/// Hash of the example block with justification.
const EXAMPLE_JUSTIFIED_BLOCK_HASH: &'static str =
"a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f343775";
const EXAMPLE_JUSTIFIED_BLOCK_HASH: &str = "a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f343775";
/// Id of authorities set that have generated example justification. Could be computed by tracking
/// every set change in canonized headers.
const EXAMPLE_AUTHORITIES_SET_ID: u64 = 0;
/// Encoded authorities set that has generated example justification. Could be fetched from `ScheduledChange`
/// digest of the block that has scheduled this set OR by calling `GrandpaApi::grandpa_authorities()` at
/// appropriate block.
const EXAMPLE_AUTHORITIES_SET: &'static str = "1488dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0ee0100000000000000d17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae690100000000000000439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234f01000000000000005e639b43e0052c47447dac87d6fd2b6ec50bdd4d0f614e4299c665249bbd09d901000000000000001dfe3e22cc0d45c70779c1095f7489a8ef3cf52d62fbd8c2fa38c9f1723502b50100000000000000";
const EXAMPLE_AUTHORITIES_SET: &str = "1488dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0ee0100000000000000d17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae690100000000000000439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234f01000000000000005e639b43e0052c47447dac87d6fd2b6ec50bdd4d0f614e4299c665249bbd09d901000000000000001dfe3e22cc0d45c70779c1095f7489a8ef3cf52d62fbd8c2fa38c9f1723502b50100000000000000";
/// Example justification. Could be fetched by calling 'chain_getBlock' RPC.
const EXAMPLE_JUSTIFICATION: &'static str = "2600000000000000a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f3437750800000010a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000d66b4ceb57ef8bcbc955071b597c8c5d2adcfdbb009c73f8438d342670fdeca9ac60686cbd58105b10f51d0a64a8e73b2e5829b2eab3248a008c472852130b00439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234fa2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000f5730c14d3cd22b7661e2f5fcb3139dd5fef37f946314a441d01b40ce1200ef70d810525f23fd278b588cd67473c200bda83c338c407b479386aa83798e5970b5e639b43e0052c47447dac87d6fd2b6ec50bdd4d0f614e4299c665249bbd09d9a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000c78d6ec463f476461a695b4791d30e7626d16fdf72d7c252c2cad387495a97e8c2827ed4d5af853d6e05d31cb6fb7438c9481a7e9c6990d60a9bfaf6a6e1930988dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0eea2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f3437750800000052b4fc52d430286b3e2d650aa6e01b6ff4fae8b968893a62be789209eb97ee6e23780d3f5af7042d85bb48f1b202890b22724dfebce138826f66a5e00324320fd17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae6900";
const EXAMPLE_JUSTIFICATION: &str = "2600000000000000a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f3437750800000010a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000d66b4ceb57ef8bcbc955071b597c8c5d2adcfdbb009c73f8438d342670fdeca9ac60686cbd58105b10f51d0a64a8e73b2e5829b2eab3248a008c472852130b00439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234fa2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000f5730c14d3cd22b7661e2f5fcb3139dd5fef37f946314a441d01b40ce1200ef70d810525f23fd278b588cd67473c200bda83c338c407b479386aa83798e5970b5e639b43e0052c47447dac87d6fd2b6ec50bdd4d0f614e4299c665249bbd09d9a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000c78d6ec463f476461a695b4791d30e7626d16fdf72d7c252c2cad387495a97e8c2827ed4d5af853d6e05d31cb6fb7438c9481a7e9c6990d60a9bfaf6a6e1930988dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0eea2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f3437750800000052b4fc52d430286b3e2d650aa6e01b6ff4fae8b968893a62be789209eb97ee6e23780d3f5af7042d85bb48f1b202890b22724dfebce138826f66a5e00324320fd17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae6900";
#[test]
fn substrate_header_parse_fails() {
+7 -15
View File
@@ -99,7 +99,7 @@ pub fn finalize_blocks<S: Storage>(
*hash == header_validators.0.hash || *hash == best_finalized.hash
})
})
.unwrap_or_else(|| CachedFinalityVotes::default()),
.unwrap_or_default(),
best_finalized,
&validators,
id,
@@ -247,7 +247,7 @@ fn empty_steps_signers(header: &Header) -> BTreeSet<Address> {
header
.empty_steps()
.into_iter()
.flat_map(|steps| steps)
.flatten()
.filter_map(|step| empty_step_signer(&step, &header.parent_hash))
.collect::<BTreeSet<_>>()
}
@@ -462,13 +462,9 @@ mod tests {
// when we're inserting header#7 and last finalized header is 0:
// check that votes at #7 are computed correctly without cache
let expected_votes_at_7 = FinalityVotes {
votes: vec![
(ctx.addresses[0].clone(), 3),
(ctx.addresses[1].clone(), 3),
(ctx.addresses[2].clone(), 1),
]
.into_iter()
.collect(),
votes: vec![(ctx.addresses[0], 3), (ctx.addresses[1], 3), (ctx.addresses[2], 1)]
.into_iter()
.collect(),
ancestry: ancestry[..7].iter().cloned().collect(),
};
let id7 = headers[6].compute_id();
@@ -491,9 +487,7 @@ mod tests {
// cached votes at #5
let expected_votes_at_5 = FinalityVotes {
votes: vec![(ctx.addresses[0].clone(), 3), (ctx.addresses[1].clone(), 2)]
.into_iter()
.collect(),
votes: vec![(ctx.addresses[0], 3), (ctx.addresses[1], 2)].into_iter().collect(),
ancestry: ancestry[..5].iter().cloned().collect(),
};
FinalityCache::<TestRuntime>::insert(hashes[4], expected_votes_at_5);
@@ -520,9 +514,7 @@ mod tests {
// when we're inserting header#7 and last finalized header is 3:
// check that votes at #7 are computed correctly with cache
let expected_votes_at_7 = FinalityVotes {
votes: vec![(ctx.addresses[1].clone(), 3), (ctx.addresses[2].clone(), 1)]
.into_iter()
.collect(),
votes: vec![(ctx.addresses[1], 3), (ctx.addresses[2], 1)].into_iter().collect(),
ancestry: ancestry[3..7].iter().cloned().collect(),
};
assert_eq!(
+4 -1
View File
@@ -70,6 +70,9 @@ pub fn import_headers<S: Storage, PS: PruningStrategy>(
Ok((useful, useless))
}
/// A vector of finalized headers and their submitters.
pub type FinalizedHeaders<S> = Vec<(HeaderId, Option<<S as Storage>::Submitter>)>;
/// Imports given header and updates blocks finality (if required).
///
/// Transactions receipts must be provided if `header_import_requires_receipts()`
@@ -84,7 +87,7 @@ pub fn import_header<S: Storage, PS: PruningStrategy>(
submitter: Option<S::Submitter>,
header: Header,
receipts: Option<Vec<Receipt>>,
) -> Result<(HeaderId, Vec<(HeaderId, Option<S::Submitter>)>), Error> {
) -> Result<(HeaderId, FinalizedHeaders<S>), Error> {
// first check that we are able to import this header at all
let (header_id, finalized_id) = is_importable_header(storage, &header)?;
+21 -26
View File
@@ -15,6 +15,8 @@
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
#![cfg_attr(not(feature = "std"), no_std)]
// Runtime-generated enums
#![allow(clippy::large_enum_variant)]
use crate::finality::{CachedFinalityVotes, FinalityVotes};
use codec::{Decode, Encode};
@@ -235,6 +237,7 @@ impl<Submitter> ImportContext<Submitter> {
}
/// Converts import context into header we're going to import.
#[allow(clippy::too_many_arguments)]
pub fn into_import_header(
self,
is_best: bool,
@@ -503,7 +506,7 @@ impl<T: Trait> Module<T> {
}
/// Verify that transaction is included into given finalized block.
pub fn verify_transaction_finalized(block: H256, tx_index: u64, proof: &Vec<RawTransaction>) -> bool {
pub fn verify_transaction_finalized(block: H256, tx_index: u64, proof: &[RawTransaction]) -> bool {
crate::verify_transaction_finalized(&BridgeStorage::<T>::new(), block, tx_index, proof)
}
}
@@ -616,13 +619,8 @@ impl<T: Trait> BridgeStorage<T> {
blocks_at_number: &mut Vec<H256>,
) {
// ensure that unfinalized headers we want to prune do not have scheduled changes
if number > finalized_number {
if blocks_at_number
.iter()
.any(|block| ScheduledChanges::contains_key(block))
{
return;
}
if number > finalized_number && blocks_at_number.iter().any(ScheduledChanges::contains_key) {
return;
}
// physically remove headers and (probably) obsolete validators sets
@@ -678,11 +676,9 @@ impl<T: Trait> Storage for BridgeStorage<T> {
let mut current_id = *parent;
loop {
// if we have reached finalized block' sibling => stop with special signal
if current_id.number == best_finalized.number {
if current_id.hash != best_finalized.hash {
votes.stopped_at_finalized_sibling = true;
return votes;
}
if current_id.number == best_finalized.number && current_id.hash != best_finalized.hash {
votes.stopped_at_finalized_sibling = true;
return votes;
}
// if we have reached target header => stop
@@ -834,6 +830,7 @@ impl<T: Trait> Storage for BridgeStorage<T> {
}
/// Initialize storage.
#[cfg(any(feature = "std", feature = "runtime-benchmarks"))]
pub(crate) fn initialize_storage<T: Trait>(
initial_header: &Header,
initial_difficulty: U256,
@@ -885,7 +882,7 @@ pub fn verify_transaction_finalized<S: Storage>(
storage: &S,
block: H256,
tx_index: u64,
proof: &Vec<RawTransaction>,
proof: &[RawTransaction],
) -> bool {
if tx_index >= proof.len() as _ {
return false;
@@ -906,9 +903,7 @@ pub fn verify_transaction_finalized<S: Storage>(
let is_finalized = match header.number < finalized.number {
true => ancestry(storage, finalized.hash)
.skip_while(|(_, ancestor)| ancestor.number > header.number)
.filter(|&(ancestor_hash, _)| ancestor_hash == block)
.next()
.is_some(),
.any(|(ancestor_hash, _)| ancestor_hash == block),
false => block == finalized.hash,
};
if !is_finalized {
@@ -985,7 +980,7 @@ pub(crate) mod tests {
hash,
StoredHeader {
submitter: None,
header: header,
header,
total_difficulty: 0.into(),
next_validators_set_id: 0,
last_signal_block: None,
@@ -1266,7 +1261,7 @@ pub(crate) mod tests {
run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| {
let storage = BridgeStorage::<TestRuntime>::new();
assert_eq!(
verify_transaction_finalized(&storage, example_header().compute_hash(), 0, &vec![example_tx()],),
verify_transaction_finalized(&storage, example_header().compute_hash(), 0, &[example_tx()],),
true,
);
});
@@ -1280,7 +1275,7 @@ pub(crate) mod tests {
insert_header(&mut storage, example_header());
storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0);
assert_eq!(
verify_transaction_finalized(&storage, example_header_parent().compute_hash(), 0, &vec![example_tx()],),
verify_transaction_finalized(&storage, example_header_parent().compute_hash(), 0, &[example_tx()],),
true,
);
});
@@ -1291,7 +1286,7 @@ pub(crate) mod tests {
run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| {
let storage = BridgeStorage::<TestRuntime>::new();
assert_eq!(
verify_transaction_finalized(&storage, example_header().compute_hash(), 1, &vec![],),
verify_transaction_finalized(&storage, example_header().compute_hash(), 1, &[],),
false,
);
});
@@ -1302,7 +1297,7 @@ pub(crate) mod tests {
run_test(TOTAL_VALIDATORS, |_| {
let storage = BridgeStorage::<TestRuntime>::new();
assert_eq!(
verify_transaction_finalized(&storage, example_header().compute_hash(), 1, &vec![],),
verify_transaction_finalized(&storage, example_header().compute_hash(), 1, &[],),
false,
);
});
@@ -1315,7 +1310,7 @@ pub(crate) mod tests {
insert_header(&mut storage, example_header_parent());
insert_header(&mut storage, example_header());
assert_eq!(
verify_transaction_finalized(&storage, example_header().compute_hash(), 0, &vec![example_tx()],),
verify_transaction_finalized(&storage, example_header().compute_hash(), 0, &[example_tx()],),
false,
);
});
@@ -1334,7 +1329,7 @@ pub(crate) mod tests {
insert_header(&mut storage, finalized_header_sibling);
storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0);
assert_eq!(
verify_transaction_finalized(&storage, finalized_header_sibling_hash, 0, &vec![example_tx()],),
verify_transaction_finalized(&storage, finalized_header_sibling_hash, 0, &[example_tx()],),
false,
);
});
@@ -1353,7 +1348,7 @@ pub(crate) mod tests {
insert_header(&mut storage, example_header());
storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0);
assert_eq!(
verify_transaction_finalized(&storage, finalized_header_uncle_hash, 0, &vec![example_tx()],),
verify_transaction_finalized(&storage, finalized_header_uncle_hash, 0, &[example_tx()],),
false,
);
});
@@ -1368,7 +1363,7 @@ pub(crate) mod tests {
&storage,
example_header().compute_hash(),
0,
&vec![example_tx(), example_tx(),],
&[example_tx(), example_tx()],
),
false,
);
+1 -1
View File
@@ -160,6 +160,6 @@ impl Default for KeepSomeHeadersBehindBest {
impl PruningStrategy for KeepSomeHeadersBehindBest {
fn pruning_upper_bound(&mut self, best_number: u64, _: u64) -> u64 {
best_number.checked_sub(self.0).unwrap_or(0)
best_number.saturating_sub(self.0)
}
}
+2 -2
View File
@@ -86,7 +86,7 @@ impl HeaderBuilder {
use crate::HeadersByNumber;
use frame_support::StorageMap;
let parent_hash = HeadersByNumber::get(parent_number).unwrap()[0].clone();
let parent_hash = HeadersByNumber::get(parent_number).unwrap()[0];
Self::with_parent_hash_on_runtime::<T>(parent_hash)
}
@@ -130,7 +130,7 @@ impl HeaderBuilder {
/// Adds empty steps to this header.
pub fn empty_steps(mut self, empty_steps: &[(&SecretKey, u64)]) -> Self {
let sealed_empty_steps = empty_steps
.into_iter()
.iter()
.map(|(author, step)| {
let mut empty_step = SealedEmptyStep {
step: *step,
+7 -8
View File
@@ -20,7 +20,7 @@ use primitives::{Address, Header, HeaderId, LogEntry, Receipt, U256};
use sp_std::prelude::*;
/// The hash of InitiateChange event of the validators set contract.
pub(crate) const CHANGE_EVENT_HASH: &'static [u8; 32] = &[
pub(crate) const CHANGE_EVENT_HASH: &[u8; 32] = &[
0x55, 0x25, 0x2f, 0xa6, 0xee, 0xe4, 0x74, 0x1b, 0x4e, 0x24, 0xa7, 0x4a, 0x70, 0xe9, 0xc1, 0x1f, 0xd2, 0xc2, 0x28,
0x1d, 0xf8, 0xd6, 0xea, 0x13, 0x12, 0x6f, 0xf8, 0x45, 0xf7, 0x82, 0x5c, 0x89,
];
@@ -49,6 +49,9 @@ pub enum ValidatorsSource {
Contract(Address, Vec<Address>),
}
/// A short hand for optional validators change.
pub type ValidatorsChange = Option<Vec<Address>>;
/// Validators manager.
pub struct Validators<'a> {
config: &'a ValidatorsConfiguration,
@@ -94,7 +97,7 @@ impl<'a> Validators<'a> {
&self,
header: &Header,
receipts: Option<Vec<Receipt>>,
) -> Result<(Option<Vec<Address>>, Option<Vec<Address>>), Error> {
) -> Result<(ValidatorsChange, ValidatorsChange), Error> {
// let's first check if new source is starting from this header
let (source_index, _, source) = self.source_at(header.number);
let (next_starts_at, next_source) = self.source_at_next_header(source_index, header.number);
@@ -223,7 +226,7 @@ impl<'a> Validators<'a> {
}
/// Returns source of validators that should author the header.
fn source_at<'b>(&'b self, header_number: u64) -> (usize, u64, &'b ValidatorsSource) {
fn source_at(&self, header_number: u64) -> (usize, u64, &ValidatorsSource) {
match self.config {
ValidatorsConfiguration::Single(ref source) => (0, 0, source),
ValidatorsConfiguration::Multi(ref sources) => sources
@@ -240,11 +243,7 @@ impl<'a> Validators<'a> {
}
/// Returns source of validators that should author the next header.
fn source_at_next_header<'b>(
&'b self,
header_source_index: usize,
header_number: u64,
) -> (u64, &'b ValidatorsSource) {
fn source_at_next_header(&self, header_source_index: usize, header_number: u64) -> (u64, &ValidatorsSource) {
match self.config {
ValidatorsConfiguration::Single(ref source) => (0, source),
ValidatorsConfiguration::Multi(ref sources) => {
+13 -9
View File
@@ -22,6 +22,7 @@ use primitives::{
public_to_address, step_validator, Address, Header, HeaderId, Receipt, SealedEmptyStep, H256, H520, U128, U256,
};
use sp_io::crypto::secp256k1_ecdsa_recover;
use sp_runtime::transaction_validity::TransactionTag;
use sp_std::{vec, vec::Vec};
/// Pre-check to see if should try and import this header.
@@ -43,6 +44,8 @@ pub fn is_importable_header<S: Storage>(storage: &S, header: &Header) -> Result<
}
/// Try accept unsigned aura header into transaction pool.
///
/// Returns required and provided tags.
pub fn accept_aura_header_into_pool<S: Storage>(
storage: &S,
config: &AuraConfiguration,
@@ -50,7 +53,7 @@ pub fn accept_aura_header_into_pool<S: Storage>(
pool_config: &PoolConfiguration,
header: &Header,
receipts: Option<&Vec<Receipt>>,
) -> Result<(Vec<Vec<u8>>, Vec<Vec<u8>>), Error> {
) -> Result<(Vec<TransactionTag>, Vec<TransactionTag>), Error> {
// check if we can verify further
let (header_id, _) = is_importable_header(storage, header)?;
@@ -365,6 +368,7 @@ mod tests {
use frame_support::{StorageMap, StorageValue};
use primitives::{compute_merkle_root, rlp_encode, TransactionOutcome, H520};
use secp256k1::SecretKey;
use sp_runtime::transaction_validity::TransactionTag;
const GENESIS_STEP: u64 = 42;
const TOTAL_VALIDATORS: usize = 3;
@@ -386,7 +390,7 @@ mod tests {
fn default_accept_into_pool(
mut make_header: impl FnMut(&[SecretKey]) -> (Header, Option<Vec<Receipt>>),
) -> Result<(Vec<Vec<u8>>, Vec<Vec<u8>>), Error> {
) -> Result<(Vec<TransactionTag>, Vec<TransactionTag>), Error> {
run_test_with_genesis(genesis(), TOTAL_VALIDATORS, |_| {
let validators = vec![validator(0), validator(1), validator(2)];
let mut storage = BridgeStorage::<TestRuntime>::new();
@@ -429,7 +433,7 @@ mod tests {
},
);
let header_hash = HeadersByNumber::get(&number).unwrap()[0].clone();
let header_hash = HeadersByNumber::get(&number).unwrap()[0];
let mut header = Headers::<TestRuntime>::get(&header_hash).unwrap();
header.next_validators_set_id = set_id;
if let Some(signalled_set) = signalled_set {
@@ -456,15 +460,15 @@ mod tests {
assert_eq!(default_verify(&header), Err(Error::InvalidSealArity));
// when there's single seal (we expect 2 or 3 seals)
header.seal = vec![vec![].into()];
header.seal = vec![vec![]];
assert_eq!(default_verify(&header), Err(Error::InvalidSealArity));
// when there's 3 seals (we expect 2 by default)
header.seal = vec![vec![].into(), vec![].into(), vec![].into()];
header.seal = vec![vec![], vec![], vec![]];
assert_eq!(default_verify(&header), Err(Error::InvalidSealArity));
// when there's 2 seals
header.seal = vec![vec![].into(), vec![].into()];
header.seal = vec![vec![], vec![]];
assert_ne!(default_verify(&header), Err(Error::InvalidSealArity));
}
@@ -564,7 +568,7 @@ mod tests {
fn verifies_step() {
// when step is missing from seals
let mut header = Header {
seal: vec![vec![].into(), vec![].into()],
seal: vec![vec![], vec![]],
gas_limit: test_aura_config().min_gas_limit,
parent_hash: genesis().compute_hash(),
..Default::default()
@@ -585,7 +589,7 @@ mod tests {
// when step is lesser that for the parent block
header.seal[0] = rlp_encode(&40u64);
header.seal = vec![vec![40].into(), vec![].into()];
header.seal = vec![vec![40], vec![]];
assert_eq!(verify_with_config(&config, &header), Err(Error::DoubleVote));
// when step is OK
@@ -691,7 +695,7 @@ mod tests {
default_accept_into_pool(|_| (
Header {
number: 20_000_000,
seal: vec![vec![].into(), vec![].into()],
seal: vec![vec![], vec![]],
gas_limit: test_aura_config().min_gas_limit,
log_bloom: (&[0xff; 256]).into(),
..Default::default()
+2 -2
View File
@@ -157,9 +157,9 @@ impl<T: Trait> Module<T> {
fn check_validator_set_proof(
state_root: &T::Hash,
proof: StorageProof,
validator_set: &Vec<(AuthorityId, AuthorityWeight)>,
validator_set: &[(AuthorityId, AuthorityWeight)],
) -> DispatchResult {
let checker = <StorageProofChecker<T::Hashing>>::new(*state_root, proof.clone());
let checker = <StorageProofChecker<T::Hashing>>::new(*state_root, proof);
let checker = checker.map_err(Self::map_storage_err)?;
+7 -3
View File
@@ -15,6 +15,10 @@
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
#![cfg_attr(not(feature = "std"), no_std)]
// RuntimeApi generated functions
#![allow(clippy::too_many_arguments)]
// Generated by `DecodeLimit::decode_with_depth_limit`
#![allow(clippy::unnecessary_mut_passed)]
pub use parity_bytes::Bytes;
pub use primitive_types::{H160, H256, H512, U128, U256};
@@ -206,7 +210,7 @@ impl Header {
/// Check if passed transactions are matching transactions root in this header.
pub fn verify_transactions_root(&self, transactions: &[RawTransaction]) -> bool {
verify_merkle_proof(self.transactions_root, transactions.into_iter())
verify_merkle_proof(self.transactions_root, transactions.iter())
}
/// Gets the seal hash of this header.
@@ -581,7 +585,7 @@ mod tests {
gas: 86016.into(),
to: Some(hex!("dac17f958d2ee523a2206206994597c13d831ec7").into()),
value: 0.into(),
payload: hex!("a9059cbb000000000000000000000000e08f35f66867a454835b25118f1e490e7f9e9a7400000000000000000000000000000000000000000000000000000000004c4b40").to_vec().into(),
payload: hex!("a9059cbb000000000000000000000000e08f35f66867a454835b25118f1e490e7f9e9a7400000000000000000000000000000000000000000000000000000000004c4b40").to_vec(),
},
}),
);
@@ -600,7 +604,7 @@ mod tests {
gas: 160000.into(),
to: Some(hex!("84dd11eb2a29615303d18149c0dbfa24167f8966").into()),
value: 0.into(),
payload: hex!("a9059cbb00000000000000000000000001503dfc5ad81bf630d83697e98601871bb211b60000000000000000000000000000000000000000000000000000000000002710").to_vec().into(),
payload: hex!("a9059cbb00000000000000000000000001503dfc5ad81bf630d83697e98601871bb211b60000000000000000000000000000000000000000000000000000000000002710").to_vec(),
},
}),
);
@@ -100,7 +100,7 @@ impl SourceClient<EthereumToSubstrateExchange> for EthereumTransactionsSource {
eth_header_id: &EthereumHeaderId,
eth_tx: EthereumTransaction,
) -> Result<EthereumTransactionInclusionProof, Self::Error> {
const TRANSACTION_HAS_RAW_FIELD_PROOF: &'static str = "RPC level checks that transactions from Ethereum\
const TRANSACTION_HAS_RAW_FIELD_PROOF: &str = "RPC level checks that transactions from Ethereum\
node are having `raw` field; qed";
let eth_header = self.client.header_by_hash_with_transactions(eth_header_id.1).await?;
@@ -163,7 +163,7 @@ impl TargetClient<EthereumHeadersSyncPipeline> for SubstrateHeadersTarget {
&self,
headers: Vec<QueuedEthereumHeader>,
) -> SubmittedHeaders<EthereumHeaderId, Self::Error> {
let (sign_params, sign_transactions) = (self.sign_params.clone(), self.sign_transactions.clone());
let (sign_params, sign_transactions) = (self.sign_params.clone(), self.sign_transactions);
self.client
.submit_ethereum_headers(sign_params, headers, sign_transactions)
.await
@@ -173,6 +173,7 @@ impl TargetClient<EthereumHeadersSyncPipeline> for SubstrateHeadersTarget {
Ok(HashSet::new())
}
#[allow(clippy::unit_arg)]
async fn complete_header(&self, id: EthereumHeaderId, _completion: ()) -> Result<EthereumHeaderId, Self::Error> {
Ok(id)
}
@@ -22,11 +22,11 @@ pub use web3::types::{Address, Bytes, CallRequest, H256, U128, U256, U64};
/// When header is just received from the Ethereum node, we check that it has
/// both number and hash fields filled.
pub const HEADER_ID_PROOF: &'static str = "checked on retrieval; qed";
pub const HEADER_ID_PROOF: &str = "checked on retrieval; qed";
/// When receipt is just received from the Ethereum node, we check that it has
/// gas_used field filled.
pub const RECEIPT_GAS_USED_PROOF: &'static str = "checked on retrieval; qed";
pub const RECEIPT_GAS_USED_PROOF: &str = "checked on retrieval; qed";
/// Ethereum transaction hash type.
pub type TransactionHash = H256;
+4 -4
View File
@@ -323,7 +323,7 @@ mod tests {
&self,
_: &TestTransactionHash,
) -> Result<Option<(TestHeaderId, TestTransaction)>, TestError> {
self.data.lock().transaction.clone()
self.data.lock().transaction
}
async fn transaction_proof(
@@ -331,7 +331,7 @@ mod tests {
_: &TestHeaderId,
_: TestTransaction,
) -> Result<TestTransactionProof, TestError> {
self.data.lock().transaction_proof.clone()
self.data.lock().transaction_proof
}
}
@@ -368,11 +368,11 @@ mod tests {
}
async fn is_header_known(&self, _: &TestHeaderId) -> Result<bool, TestError> {
self.data.lock().is_header_known.clone()
self.data.lock().is_header_known
}
async fn is_header_finalized(&self, _: &TestHeaderId) -> Result<bool, TestError> {
self.data.lock().is_header_finalized.clone()
self.data.lock().is_header_finalized
}
async fn submit_transaction_proof(&self, proof: TestTransactionProof) -> Result<(), TestError> {
+28 -38
View File
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
use crate::sync_types::{HeaderId, HeaderStatus, HeadersSyncPipeline, QueuedHeader, SourceHeader};
use crate::sync_types::{HeaderId, HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader, SourceHeader};
use linked_hash_map::LinkedHashMap;
use num_traits::{One, Zero};
use std::{
@@ -59,9 +59,9 @@ pub struct QueuedHeaders<P: HeadersSyncPipeline> {
known_headers: KnownHeaders<P>,
/// Headers that are waiting for completion data from source node. Mapped (and auto-sorted
/// by) to the last fetch time.
incomplete_headers: LinkedHashMap<HeaderId<P::Hash, P::Number>, Option<Instant>>,
incomplete_headers: LinkedHashMap<HeaderIdOf<P>, Option<Instant>>,
/// Headers that are waiting to be completed at target node. Auto-sorted by insertion time.
completion_data: LinkedHashMap<HeaderId<P::Hash, P::Number>, P::Completion>,
completion_data: LinkedHashMap<HeaderIdOf<P>, P::Completion>,
/// Best synced block number.
best_synced_number: P::Number,
/// Pruned blocks border. We do not store or accept any blocks with number less than
@@ -106,7 +106,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
/// Returns number of headers that are currently in given queue.
pub fn headers_in_status(&self, status: HeaderStatus) -> usize {
match status {
HeaderStatus::Unknown | HeaderStatus::Synced => return 0,
HeaderStatus::Unknown | HeaderStatus::Synced => 0,
HeaderStatus::MaybeOrphan => self
.maybe_orphan
.values()
@@ -168,7 +168,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
}
/// Returns synchronization status of the header.
pub fn status(&self, id: &HeaderId<P::Hash, P::Number>) -> HeaderStatus {
pub fn status(&self, id: &HeaderIdOf<P>) -> HeaderStatus {
self.known_headers
.get(&id.0)
.and_then(|x| x.get(&id.1))
@@ -179,7 +179,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
/// Get oldest header from given queue.
pub fn header(&self, status: HeaderStatus) -> Option<&QueuedHeader<P>> {
match status {
HeaderStatus::Unknown | HeaderStatus::Synced => return None,
HeaderStatus::Unknown | HeaderStatus::Synced => None,
HeaderStatus::MaybeOrphan => oldest_header(&self.maybe_orphan),
HeaderStatus::Orphan => oldest_header(&self.orphan),
HeaderStatus::MaybeExtra => oldest_header(&self.maybe_extra),
@@ -197,7 +197,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
f: impl FnMut(&QueuedHeader<P>) -> bool,
) -> Option<Vec<&QueuedHeader<P>>> {
match status {
HeaderStatus::Unknown | HeaderStatus::Synced => return None,
HeaderStatus::Unknown | HeaderStatus::Synced => None,
HeaderStatus::MaybeOrphan => oldest_headers(&self.maybe_orphan, f),
HeaderStatus::Orphan => oldest_headers(&self.orphan, f),
HeaderStatus::MaybeExtra => oldest_headers(&self.maybe_extra, f),
@@ -268,12 +268,12 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
}
/// Receive best header from the target node.
pub fn target_best_header_response(&mut self, id: &HeaderId<P::Hash, P::Number>) {
pub fn target_best_header_response(&mut self, id: &HeaderIdOf<P>) {
self.header_synced(id)
}
/// Receive target node response for MaybeOrphan request.
pub fn maybe_orphan_response(&mut self, id: &HeaderId<P::Hash, P::Number>, response: bool) {
pub fn maybe_orphan_response(&mut self, id: &HeaderIdOf<P>, response: bool) {
if !response {
move_header_descendants::<P>(
&mut [&mut self.maybe_orphan],
@@ -295,7 +295,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
}
/// Receive target node response for MaybeExtra request.
pub fn maybe_extra_response(&mut self, id: &HeaderId<P::Hash, P::Number>, response: bool) {
pub fn maybe_extra_response(&mut self, id: &HeaderIdOf<P>, response: bool) {
let (destination_status, destination_queue) = if response {
(HeaderStatus::Extra, &mut self.extra)
} else if self.is_parent_incomplete(id) {
@@ -315,7 +315,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
}
/// Receive extra from source node.
pub fn extra_response(&mut self, id: &HeaderId<P::Hash, P::Number>, extra: P::Extra) {
pub fn extra_response(&mut self, id: &HeaderIdOf<P>, extra: P::Extra) {
let (destination_status, destination_queue) = if self.is_parent_incomplete(id) {
(HeaderStatus::Incomplete, &mut self.incomplete)
} else {
@@ -334,7 +334,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
}
/// Receive completion response from source node.
pub fn completion_response(&mut self, id: &HeaderId<P::Hash, P::Number>, completion: Option<P::Completion>) {
pub fn completion_response(&mut self, id: &HeaderIdOf<P>, completion: Option<P::Completion>) {
let completion = match completion {
Some(completion) => completion,
None => {
@@ -361,12 +361,12 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
id,
);
self.completion_data.insert(id.clone(), completion);
self.completion_data.insert(*id, completion);
}
}
/// When header is submitted to target node.
pub fn headers_submitted(&mut self, ids: Vec<HeaderId<P::Hash, P::Number>>) {
pub fn headers_submitted(&mut self, ids: Vec<HeaderIdOf<P>>) {
for id in ids {
move_header(
&mut self.ready,
@@ -380,7 +380,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
}
/// When header completion data is sent to target node.
pub fn header_completed(&mut self, id: &HeaderId<P::Hash, P::Number>) {
pub fn header_completed(&mut self, id: &HeaderIdOf<P>) {
if self.completion_data.remove(id).is_some() {
log::debug!(
target: "bridge",
@@ -404,7 +404,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
}
/// Marks given headers incomplete.
pub fn add_incomplete_headers(&mut self, new_incomplete_headers: Vec<HeaderId<P::Hash, P::Number>>) {
pub fn add_incomplete_headers(&mut self, new_incomplete_headers: Vec<HeaderIdOf<P>>) {
for new_incomplete_header in new_incomplete_headers {
self.header_synced(&new_incomplete_header);
move_header_descendants::<P>(
@@ -426,7 +426,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
}
/// When incomplete header ids are received from target node.
pub fn incomplete_headers_response(&mut self, ids: HashSet<HeaderId<P::Hash, P::Number>>) {
pub fn incomplete_headers_response(&mut self, ids: HashSet<HeaderIdOf<P>>) {
// all new incomplete headers are marked Synced and all their descendants
// are moved from Ready/Submitted to Incomplete queue
let new_incomplete_headers = ids
@@ -466,7 +466,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
}
/// Returns id of the header for which we want to fetch completion data.
pub fn incomplete_header(&mut self) -> Option<HeaderId<P::Hash, P::Number>> {
pub fn incomplete_header(&mut self) -> Option<HeaderIdOf<P>> {
queued_incomplete_header(&mut self.incomplete_headers, |last_fetch_time| {
let retry = match *last_fetch_time {
Some(last_fetch_time) => last_fetch_time.elapsed() > RETRY_FETCH_COMPLETION_INTERVAL,
@@ -483,7 +483,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
}
/// Returns header completion data to upload to target node.
pub fn header_to_complete(&mut self) -> Option<(HeaderId<P::Hash, P::Number>, &P::Completion)> {
pub fn header_to_complete(&mut self) -> Option<(HeaderIdOf<P>, &P::Completion)> {
queued_incomplete_header(&mut self.completion_data, |_| true)
}
@@ -520,7 +520,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
/// Returns true if parent of this header is either incomplete or waiting for
/// its own incomplete ancestor to be completed.
fn is_parent_incomplete(&self, id: &HeaderId<P::Hash, P::Number>) -> bool {
fn is_parent_incomplete(&self, id: &HeaderIdOf<P>) -> bool {
let status = self.status(id);
let header = match status {
HeaderStatus::MaybeOrphan => header(&self.maybe_orphan, id),
@@ -546,7 +546,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
}
/// When we receive new Synced header from target node.
fn header_synced(&mut self, id: &HeaderId<P::Hash, P::Number>) {
fn header_synced(&mut self, id: &HeaderIdOf<P>) {
// update best synced block number
self.best_synced_number = std::cmp::max(self.best_synced_number, id.0);
@@ -593,19 +593,12 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
}
/// Insert header to the queue.
fn insert_header<P: HeadersSyncPipeline>(
queue: &mut HeadersQueue<P>,
id: HeaderId<P::Hash, P::Number>,
header: QueuedHeader<P>,
) {
fn insert_header<P: HeadersSyncPipeline>(queue: &mut HeadersQueue<P>, id: HeaderIdOf<P>, header: QueuedHeader<P>) {
queue.entry(id.0).or_default().insert(id.1, header);
}
/// Remove header from the queue.
fn remove_header<P: HeadersSyncPipeline>(
queue: &mut HeadersQueue<P>,
id: &HeaderId<P::Hash, P::Number>,
) -> Option<QueuedHeader<P>> {
fn remove_header<P: HeadersSyncPipeline>(queue: &mut HeadersQueue<P>, id: &HeaderIdOf<P>) -> Option<QueuedHeader<P>> {
let mut headers_at = match queue.entry(id.0) {
BTreeMapEntry::Occupied(headers_at) => headers_at,
BTreeMapEntry::Vacant(_) => return None,
@@ -619,10 +612,7 @@ fn remove_header<P: HeadersSyncPipeline>(
}
/// Get header from the queue.
fn header<'a, P: HeadersSyncPipeline>(
queue: &'a HeadersQueue<P>,
id: &HeaderId<P::Hash, P::Number>,
) -> Option<&'a QueuedHeader<P>> {
fn header<'a, P: HeadersSyncPipeline>(queue: &'a HeadersQueue<P>, id: &HeaderIdOf<P>) -> Option<&'a QueuedHeader<P>> {
queue.get(&id.0).and_then(|by_hash| by_hash.get(&id.1))
}
@@ -634,9 +624,9 @@ fn move_header<P: HeadersSyncPipeline>(
destination_queue: &mut HeadersQueue<P>,
known_headers: &mut KnownHeaders<P>,
destination_status: HeaderStatus,
id: &HeaderId<P::Hash, P::Number>,
id: &HeaderIdOf<P>,
prepare: impl FnOnce(QueuedHeader<P>) -> QueuedHeader<P>,
) -> Option<HeaderId<P::Hash, P::Number>> {
) -> Option<HeaderIdOf<P>> {
let header = match remove_header(source_queue, id) {
Some(header) => prepare(header),
None => return None,
@@ -655,7 +645,7 @@ fn move_header_descendants<P: HeadersSyncPipeline>(
destination_queue: &mut HeadersQueue<P>,
known_headers: &mut KnownHeaders<P>,
destination_status: HeaderStatus,
id: &HeaderId<P::Hash, P::Number>,
id: &HeaderIdOf<P>,
) {
let mut current_number = id.0 + One::one();
let mut current_parents = HashSet::new();
@@ -743,7 +733,7 @@ fn prune_known_headers<P: HeadersSyncPipeline>(known_headers: &mut KnownHeaders<
/// Change header status.
fn set_header_status<P: HeadersSyncPipeline>(
known_headers: &mut KnownHeaders<P>,
id: &HeaderId<P::Hash, P::Number>,
id: &HeaderIdOf<P>,
status: HeaderStatus,
) {
log::debug!(
-1
View File
@@ -101,7 +101,6 @@ fn main() {
}
("", _) => {
log::error!(target: "bridge", "No subcommand specified");
return;
}
_ => unreachable!("all possible subcommands are checked above; qed"),
}
+1 -5
View File
@@ -18,7 +18,6 @@ use crate::ethereum_types::{EthereumHeaderId, TransactionHash as EthereumTransac
use crate::sync_types::MaybeConnectionError;
use jsonrpsee::client::RequestError;
use serde_json;
/// Contains common errors that can occur when
/// interacting with a Substrate or Ethereum node
@@ -79,10 +78,7 @@ impl From<ethabi::Error> for RpcError {
impl MaybeConnectionError for RpcError {
fn is_connection_error(&self) -> bool {
match *self {
RpcError::Request(RequestError::TransportError(_)) => true,
_ => false,
}
matches!(*self, RpcError::Request(RequestError::TransportError(_)))
}
}
@@ -400,5 +400,5 @@ fn create_signed_transaction(
let signer: sp_runtime::MultiSigner = signer.public().into();
let (function, extra, _) = raw_payload.deconstruct();
bridge_node_runtime::UncheckedExtrinsic::new_signed(function, signer.into_account().into(), signature.into(), extra)
bridge_node_runtime::UncheckedExtrinsic::new_signed(function, signer.into_account(), signature.into(), extra)
}
+7 -7
View File
@@ -15,7 +15,7 @@
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
use crate::headers::QueuedHeaders;
use crate::sync_types::{HeaderId, HeaderStatus, HeadersSyncPipeline, QueuedHeader};
use crate::sync_types::{HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader};
use num_traits::{One, Saturating, Zero};
/// Common sync params.
@@ -56,7 +56,7 @@ pub struct HeadersSync<P: HeadersSyncPipeline> {
/// Best header number known to source node.
source_best_number: Option<P::Number>,
/// Best header known to target node.
target_best_header: Option<HeaderId<P::Hash, P::Number>>,
target_best_header: Option<HeaderIdOf<P>>,
/// Headers queue.
headers: QueuedHeaders<P>,
}
@@ -78,7 +78,7 @@ impl<P: HeadersSyncPipeline> HeadersSync<P> {
}
/// Best header known to target node.
pub fn target_best_header(&self) -> Option<HeaderId<P::Hash, P::Number>> {
pub fn target_best_header(&self) -> Option<HeaderIdOf<P>> {
self.target_best_header
}
@@ -94,7 +94,7 @@ impl<P: HeadersSyncPipeline> HeadersSync<P> {
}
/// Returns synchronization status.
pub fn status(&self) -> (&Option<HeaderId<P::Hash, P::Number>>, &Option<P::Number>) {
pub fn status(&self) -> (&Option<HeaderIdOf<P>>, &Option<P::Number>) {
(&self.target_best_header, &self.source_best_number)
}
@@ -111,7 +111,7 @@ impl<P: HeadersSyncPipeline> HeadersSync<P> {
/// Select header that needs to be downloaded from the source node.
pub fn select_new_header_to_download(&self) -> Option<P::Number> {
// if we haven't received best header from source node yet, there's nothing we can download
let source_best_number = self.source_best_number.clone()?;
let source_best_number = self.source_best_number?;
// if we haven't received known best header from target node yet, there's nothing we can download
let target_best_header = self.target_best_header.as_ref()?;
@@ -205,7 +205,7 @@ impl<P: HeadersSyncPipeline> HeadersSync<P> {
/// Receive new best header from the target node.
/// Returns true if it is different from the previous block known to us.
pub fn target_best_header_response(&mut self, best_header: HeaderId<P::Hash, P::Number>) -> bool {
pub fn target_best_header_response(&mut self, best_header: HeaderIdOf<P>) -> bool {
log::debug!(
target: "bridge",
"Received best known header from {}: {:?}",
@@ -244,7 +244,7 @@ pub mod tests {
use super::*;
use crate::ethereum_types::{EthereumHeadersSyncPipeline, H256};
use crate::headers::tests::{header, id};
use crate::sync_types::HeaderStatus;
use crate::sync_types::{HeaderId, HeaderStatus};
fn side_hash(number: u64) -> H256 {
H256::from_low_u64_le(1000 + number)
+14 -28
View File
@@ -18,7 +18,7 @@ use crate::metrics::{start as metrics_start, GlobalMetrics, MetricsParams, Regis
use crate::sync::HeadersSyncParams;
use crate::sync_loop_metrics::SyncLoopMetrics;
use crate::sync_types::{
HeaderId, HeaderStatus, HeadersSyncPipeline, MaybeConnectionError, QueuedHeader, SubmittedHeaders,
HeaderIdOf, HeaderStatus, HeadersSyncPipeline, MaybeConnectionError, QueuedHeader, SubmittedHeaders,
};
use async_trait::async_trait;
@@ -68,17 +68,15 @@ pub trait SourceClient<P: HeadersSyncPipeline>: Sized {
async fn header_by_number(&self, number: P::Number) -> Result<P::Header, Self::Error>;
/// Get completion data by header hash.
async fn header_completion(
&self,
id: HeaderId<P::Hash, P::Number>,
) -> Result<(HeaderId<P::Hash, P::Number>, Option<P::Completion>), Self::Error>;
async fn header_completion(&self, id: HeaderIdOf<P>)
-> Result<(HeaderIdOf<P>, Option<P::Completion>), Self::Error>;
/// Get extra data by header hash.
async fn header_extra(
&self,
id: HeaderId<P::Hash, P::Number>,
id: HeaderIdOf<P>,
header: QueuedHeader<P>,
) -> Result<(HeaderId<P::Hash, P::Number>, P::Extra), Self::Error>;
) -> Result<(HeaderIdOf<P>, P::Extra), Self::Error>;
}
/// Target client trait.
@@ -88,35 +86,23 @@ pub trait TargetClient<P: HeadersSyncPipeline>: Sized {
type Error: std::fmt::Debug + MaybeConnectionError;
/// Returns ID of best header known to the target node.
async fn best_header_id(&self) -> Result<HeaderId<P::Hash, P::Number>, Self::Error>;
async fn best_header_id(&self) -> Result<HeaderIdOf<P>, Self::Error>;
/// Returns true if header is known to the target node.
async fn is_known_header(
&self,
id: HeaderId<P::Hash, P::Number>,
) -> Result<(HeaderId<P::Hash, P::Number>, bool), Self::Error>;
async fn is_known_header(&self, id: HeaderIdOf<P>) -> Result<(HeaderIdOf<P>, bool), Self::Error>;
/// Submit headers.
async fn submit_headers(
&self,
headers: Vec<QueuedHeader<P>>,
) -> SubmittedHeaders<HeaderId<P::Hash, P::Number>, Self::Error>;
async fn submit_headers(&self, headers: Vec<QueuedHeader<P>>) -> SubmittedHeaders<HeaderIdOf<P>, Self::Error>;
/// Returns ID of headers that require to be 'completed' before children can be submitted.
async fn incomplete_headers_ids(&self) -> Result<HashSet<HeaderId<P::Hash, P::Number>>, Self::Error>;
async fn incomplete_headers_ids(&self) -> Result<HashSet<HeaderIdOf<P>>, Self::Error>;
/// Submit completion data for header.
async fn complete_header(
&self,
id: HeaderId<P::Hash, P::Number>,
completion: P::Completion,
) -> Result<HeaderId<P::Hash, P::Number>, Self::Error>;
async fn complete_header(&self, id: HeaderIdOf<P>, completion: P::Completion)
-> Result<HeaderIdOf<P>, Self::Error>;
/// Returns true if header requires extra data to be submitted.
async fn requires_extra(
&self,
header: QueuedHeader<P>,
) -> Result<(HeaderId<P::Hash, P::Number>, bool), Self::Error>;
async fn requires_extra(&self, header: QueuedHeader<P>) -> Result<(HeaderIdOf<P>, bool), Self::Error>;
}
/// Run headers synchronization.
@@ -327,7 +313,7 @@ pub fn run<P: HeadersSyncPipeline, TC: TargetClient<P>>(
},
submitted_headers = target_submit_header_future => {
// following line helps Rust understand the type of `submitted_headers` :/
let submitted_headers: SubmittedHeaders<HeaderId<P::Hash, P::Number>, TC::Error> = submitted_headers;
let submitted_headers: SubmittedHeaders<HeaderIdOf<P>, TC::Error> = submitted_headers;
let maybe_fatal_error = submitted_headers.fatal_error.map(Err).unwrap_or(Ok(()));
target_client_is_online = process_future_result(
@@ -380,7 +366,7 @@ pub fn run<P: HeadersSyncPipeline, TC: TargetClient<P>>(
// update metrics
if metrics_enabled {
metrics_global.update();
metrics_sync.update(&mut sync);
metrics_sync.update(&sync);
}
// print progress
@@ -55,8 +55,8 @@ impl SyncLoopMetrics {
/// Update metrics.
pub fn update<P: HeadersSyncPipeline>(&mut self, sync: &HeadersSync<P>) {
let headers = sync.headers();
let source_best_number = sync.source_best_number().unwrap_or(Zero::zero());
let target_best_number = sync.target_best_header().map(|id| id.0).unwrap_or(Zero::zero());
let source_best_number = sync.source_best_number().unwrap_or_else(Zero::zero);
let target_best_number = sync.target_best_header().map(|id| id.0).unwrap_or_else(Zero::zero);
self.best_block_numbers
.with_label_values(&["source"])
@@ -408,7 +408,7 @@ fn run_backoff_test(result: Result<(), TestError>) -> (Duration, Duration) {
&mut backoff,
|_| {},
&mut go_offline_future,
|delay| async_std::task::sleep(delay),
async_std::task::sleep,
|| "Test error".into(),
);
@@ -95,6 +95,9 @@ pub trait HeadersSyncPipeline: Clone + Copy {
fn estimate_size(source: &QueuedHeader<Self>) -> usize;
}
/// A HeaderId for `HeaderSyncPipeline`.
pub type HeaderIdOf<P> = HeaderId<<P as HeadersSyncPipeline>::Hash, <P as HeadersSyncPipeline>::Number>;
/// Header that we're receiving from source node.
pub trait SourceHeader<Hash, Number> {
/// Returns ID of header.
+6 -8
View File
@@ -156,9 +156,10 @@ pub async fn run_async(params: Params, exit: Box<dyn Future<Output = ()> + Unpin
Ok(())
}
fn initial_next_events<'a>(
chains: &'a HashMap<ChainId, RefCell<Chain>>,
) -> Vec<Pin<Box<dyn Future<Output = Result<(ChainId, RawClientEvent), Error>> + 'a>>> {
type EventsResult = Result<(ChainId, RawClientEvent), Error>;
type EventsFuture<'a> = Pin<Box<dyn Future<Output = EventsResult> + 'a>>;
fn initial_next_events<'a>(chains: &'a HashMap<ChainId, RefCell<Chain>>) -> Vec<EventsFuture<'a>> {
chains
.values()
.map(|chain_cell| async move {
@@ -175,12 +176,9 @@ fn initial_next_events<'a>(
}
async fn next_event<'a>(
next_events: Vec<Pin<Box<dyn Future<Output = Result<(ChainId, RawClientEvent), Error>> + 'a>>>,
next_events: Vec<EventsFuture<'a>>,
chains: &'a HashMap<ChainId, RefCell<Chain>>,
) -> (
Result<(Hash, RawClientEvent), Error>,
Vec<Pin<Box<dyn Future<Output = Result<(ChainId, RawClientEvent), Error>> + 'a>>>,
) {
) -> (Result<(Hash, RawClientEvent), Error>, Vec<EventsFuture<'a>>) {
let (result, _, mut rest) = future::select_all(next_events).await;
match result {