From bdf6901ce2e67c17a698e9b4a3ea7d4b26ce1eab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 20 Jul 2020 19:17:32 +0200 Subject: [PATCH] Fix clippy suggestions. (#179) * Fix clippy errors. * Cargo fmt. * Enable clippy checks. * Create if does not exist. * Fix warnings and enable sccache for clippy. * chmod +x * Revert and ignore errors. * Update cancel-workflow-action. * Fixes. * Clippy fixes. * Fix compilation. * Fix new clippy warnings. * fmt --all * Fix the rest. * fmt --all * Conditional. * Bump smallvec. * Use separate cache dir for clippy to prevent races. * Remove unused imports in tests * Remove "useless conversion" * Move clippy to main workflow to avoid clashes. * Fix clippy error. * Fix remaining clippy errors. * cargo fmt --all Co-authored-by: Hernando Castano --- bridges/bin/node/node/src/chain_spec.rs | 1 - bridges/bin/node/node/src/service.rs | 4 +- bridges/bin/node/runtime/src/exchange.rs | 6 +- bridges/bin/node/runtime/src/kovan.rs | 9 +-- bridges/bin/node/runtime/src/lib.rs | 4 ++ bridges/bin/node/runtime/src/rialto.rs | 8 +-- bridges/modules/currency-exchange/src/lib.rs | 12 ++-- .../ethereum-contract/builtin/src/lib.rs | 10 +-- bridges/modules/ethereum/src/finality.rs | 22 ++----- bridges/modules/ethereum/src/import.rs | 5 +- bridges/modules/ethereum/src/lib.rs | 47 ++++++------- bridges/modules/ethereum/src/mock.rs | 2 +- bridges/modules/ethereum/src/test_utils.rs | 4 +- bridges/modules/ethereum/src/validators.rs | 15 ++--- bridges/modules/ethereum/src/verification.rs | 22 ++++--- bridges/modules/substrate/src/lib.rs | 4 +- bridges/primitives/ethereum-poa/src/lib.rs | 10 ++- .../relays/ethereum/src/ethereum_exchange.rs | 2 +- .../relays/ethereum/src/ethereum_sync_loop.rs | 3 +- bridges/relays/ethereum/src/ethereum_types.rs | 4 +- bridges/relays/ethereum/src/exchange.rs | 8 +-- bridges/relays/ethereum/src/headers.rs | 66 ++++++++----------- bridges/relays/ethereum/src/main.rs | 1 - 
bridges/relays/ethereum/src/rpc_errors.rs | 6 +- .../relays/ethereum/src/substrate_client.rs | 2 +- bridges/relays/ethereum/src/sync.rs | 14 ++-- bridges/relays/ethereum/src/sync_loop.rs | 42 ++++-------- .../relays/ethereum/src/sync_loop_metrics.rs | 4 +- .../relays/ethereum/src/sync_loop_tests.rs | 2 +- bridges/relays/ethereum/src/sync_types.rs | 3 + bridges/relays/substrate/src/bridge.rs | 14 ++-- 31 files changed, 159 insertions(+), 197 deletions(-) diff --git a/bridges/bin/node/node/src/chain_spec.rs b/bridges/bin/node/node/src/chain_spec.rs index 436e82eaed..425448b921 100644 --- a/bridges/bin/node/node/src/chain_spec.rs +++ b/bridges/bin/node/node/src/chain_spec.rs @@ -19,7 +19,6 @@ use bridge_node_runtime::{ SessionKeys, Signature, SudoConfig, SystemConfig, WASM_BINARY, }; use grandpa_primitives::AuthorityId as GrandpaId; -use sc_service; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::{sr25519, Pair, Public}; use sp_runtime::traits::{IdentifyAccount, Verify}; diff --git a/bridges/bin/node/node/src/service.rs b/bridges/bin/node/node/src/service.rs index b033ff5327..c32ecc3220 100644 --- a/bridges/bin/node/node/src/service.rs +++ b/bridges/bin/node/node/src/service.rs @@ -188,7 +188,7 @@ pub fn new_full(config: Configuration) -> Result Result crate::AccountId { hex!("1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c").into() diff --git a/bridges/bin/node/runtime/src/kovan.rs b/bridges/bin/node/runtime/src/kovan.rs index 44532e1d58..fe62fce163 100644 --- a/bridges/bin/node/runtime/src/kovan.rs +++ b/bridges/bin/node/runtime/src/kovan.rs @@ -108,12 +108,11 @@ pub fn genesis_header() -> Header { gas_limit: 6000000.into(), difficulty: 131072.into(), seal: vec![ - vec![128].into(), + vec![128], vec![ 184, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ] - .into(), + ], ], } } 
@@ -128,9 +127,7 @@ pub struct PruningStrategy; impl BridgePruningStrategy for PruningStrategy { fn pruning_upper_bound(&mut self, _best_number: u64, best_finalized_number: u64) -> u64 { - best_finalized_number - .checked_sub(FINALIZED_HEADERS_TO_KEEP) - .unwrap_or(0) + best_finalized_number.saturating_sub(FINALIZED_HEADERS_TO_KEEP) } } diff --git a/bridges/bin/node/runtime/src/lib.rs b/bridges/bin/node/runtime/src/lib.rs index e61d924ba1..d77b547644 100644 --- a/bridges/bin/node/runtime/src/lib.rs +++ b/bridges/bin/node/runtime/src/lib.rs @@ -19,6 +19,10 @@ #![cfg_attr(not(feature = "std"), no_std)] // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. #![recursion_limit = "256"] +// Runtime-generated enums +#![allow(clippy::large_enum_variant)] +// Runtime-generated DecodeLimit::decode_all_With_depth_limit +#![allow(clippy::unnecessary_mut_passed)] // Make the WASM binary available. #[cfg(feature = "std")] diff --git a/bridges/bin/node/runtime/src/rialto.rs b/bridges/bin/node/runtime/src/rialto.rs index fa272fe8af..cdc314fb48 100644 --- a/bridges/bin/node/runtime/src/rialto.rs +++ b/bridges/bin/node/runtime/src/rialto.rs @@ -82,10 +82,10 @@ pub fn genesis_header() -> Header { gas_used: Default::default(), gas_limit: 0x222222.into(), difficulty: 0x20000.into(), - seal: vec![vec![0x80].into(), { + seal: vec![vec![0x80], { let mut vec = vec![0xb8, 0x41]; vec.resize(67, 0); - vec.into() + vec }], } } @@ -100,9 +100,7 @@ pub struct PruningStrategy; impl TPruningStrategy for PruningStrategy { fn pruning_upper_bound(&mut self, _best_number: u64, best_finalized_number: u64) -> u64 { - best_finalized_number - .checked_sub(FINALIZED_HEADERS_TO_KEEP) - .unwrap_or(0) + best_finalized_number.saturating_sub(FINALIZED_HEADERS_TO_KEEP) } } diff --git a/bridges/modules/currency-exchange/src/lib.rs b/bridges/modules/currency-exchange/src/lib.rs index c6ca356e00..12d8657cab 100644 --- a/bridges/modules/currency-exchange/src/lib.rs +++ 
b/bridges/modules/currency-exchange/src/lib.rs @@ -132,7 +132,7 @@ decl_module! { match deposit_result { Ok(_) => (), Err(ExchangeError::DepositPartiallyFailed) => (), - Err(error) => Err(Error::::from(error))?, + Err(error) => return Err(Error::::from(error).into()), } Transfers::::insert(&transfer_id, ()) } @@ -273,12 +273,10 @@ mod tests { type Amount = u64; fn deposit_into(_recipient: Self::Recipient, amount: Self::Amount) -> sp_currency_exchange::Result<()> { - if amount < MAX_DEPOSIT_AMOUNT * 10 { - Ok(()) - } else if amount == MAX_DEPOSIT_AMOUNT * 10 { - Err(ExchangeError::DepositPartiallyFailed) - } else { - Err(ExchangeError::DepositFailed) + match amount { + amount if amount < MAX_DEPOSIT_AMOUNT * 10 => Ok(()), + amount if amount == MAX_DEPOSIT_AMOUNT * 10 => Err(ExchangeError::DepositPartiallyFailed), + _ => Err(ExchangeError::DepositFailed), } } } diff --git a/bridges/modules/ethereum-contract/builtin/src/lib.rs b/bridges/modules/ethereum-contract/builtin/src/lib.rs index 2f4200f344..5f284231a7 100644 --- a/bridges/modules/ethereum-contract/builtin/src/lib.rs +++ b/bridges/modules/ethereum-contract/builtin/src/lib.rs @@ -35,7 +35,7 @@ pub enum Error { /// Failed to decode finality proof. FinalityProofDecode(codec::Error), /// Failed to verify justification. - JustificationVerify(ClientError), + JustificationVerify(Box), } /// Substrate header. @@ -120,6 +120,7 @@ pub fn verify_substrate_finality_proof( best_set_id, &best_set, ) + .map_err(Box::new) .map_err(Error::JustificationVerify) .map(|_| ()) } @@ -246,17 +247,16 @@ mod tests { /// Number of the example block with justification. const EXAMPLE_JUSTIFIED_BLOCK_NUMBER: u32 = 8; /// Hash of the example block with justification. 
- const EXAMPLE_JUSTIFIED_BLOCK_HASH: &'static str = - "a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f343775"; + const EXAMPLE_JUSTIFIED_BLOCK_HASH: &str = "a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f343775"; /// Id of authorities set that have generated example justification. Could be computed by tracking /// every set change in canonized headers. const EXAMPLE_AUTHORITIES_SET_ID: u64 = 0; /// Encoded authorities set that has generated example justification. Could be fetched from `ScheduledChange` /// digest of the block that has scheduled this set OR by calling `GrandpaApi::grandpa_authorities()` at /// appropriate block. - const EXAMPLE_AUTHORITIES_SET: &'static str = "1488dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0ee0100000000000000d17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae690100000000000000439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234f01000000000000005e639b43e0052c47447dac87d6fd2b6ec50bdd4d0f614e4299c665249bbd09d901000000000000001dfe3e22cc0d45c70779c1095f7489a8ef3cf52d62fbd8c2fa38c9f1723502b50100000000000000"; + const EXAMPLE_AUTHORITIES_SET: &str = "1488dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0ee0100000000000000d17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae690100000000000000439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234f01000000000000005e639b43e0052c47447dac87d6fd2b6ec50bdd4d0f614e4299c665249bbd09d901000000000000001dfe3e22cc0d45c70779c1095f7489a8ef3cf52d62fbd8c2fa38c9f1723502b50100000000000000"; /// Example justification. Could be fetched by calling 'chain_getBlock' RPC. 
- const EXAMPLE_JUSTIFICATION: &'static str = "2600000000000000a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f3437750800000010a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000d66b4ceb57ef8bcbc955071b597c8c5d2adcfdbb009c73f8438d342670fdeca9ac60686cbd58105b10f51d0a64a8e73b2e5829b2eab3248a008c472852130b00439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234fa2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000f5730c14d3cd22b7661e2f5fcb3139dd5fef37f946314a441d01b40ce1200ef70d810525f23fd278b588cd67473c200bda83c338c407b479386aa83798e5970b5e639b43e0052c47447dac87d6fd2b6ec50bdd4d0f614e4299c665249bbd09d9a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000c78d6ec463f476461a695b4791d30e7626d16fdf72d7c252c2cad387495a97e8c2827ed4d5af853d6e05d31cb6fb7438c9481a7e9c6990d60a9bfaf6a6e1930988dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0eea2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f3437750800000052b4fc52d430286b3e2d650aa6e01b6ff4fae8b968893a62be789209eb97ee6e23780d3f5af7042d85bb48f1b202890b22724dfebce138826f66a5e00324320fd17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae6900"; + const EXAMPLE_JUSTIFICATION: &str = 
"2600000000000000a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f3437750800000010a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000d66b4ceb57ef8bcbc955071b597c8c5d2adcfdbb009c73f8438d342670fdeca9ac60686cbd58105b10f51d0a64a8e73b2e5829b2eab3248a008c472852130b00439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234fa2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000f5730c14d3cd22b7661e2f5fcb3139dd5fef37f946314a441d01b40ce1200ef70d810525f23fd278b588cd67473c200bda83c338c407b479386aa83798e5970b5e639b43e0052c47447dac87d6fd2b6ec50bdd4d0f614e4299c665249bbd09d9a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000c78d6ec463f476461a695b4791d30e7626d16fdf72d7c252c2cad387495a97e8c2827ed4d5af853d6e05d31cb6fb7438c9481a7e9c6990d60a9bfaf6a6e1930988dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0eea2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f3437750800000052b4fc52d430286b3e2d650aa6e01b6ff4fae8b968893a62be789209eb97ee6e23780d3f5af7042d85bb48f1b202890b22724dfebce138826f66a5e00324320fd17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae6900"; #[test] fn substrate_header_parse_fails() { diff --git a/bridges/modules/ethereum/src/finality.rs b/bridges/modules/ethereum/src/finality.rs index e113f66831..c847e77704 100644 --- a/bridges/modules/ethereum/src/finality.rs +++ b/bridges/modules/ethereum/src/finality.rs @@ -99,7 +99,7 @@ pub fn finalize_blocks( *hash == header_validators.0.hash || *hash == best_finalized.hash }) }) - .unwrap_or_else(|| CachedFinalityVotes::default()), + .unwrap_or_default(), best_finalized, &validators, id, @@ -247,7 +247,7 @@ fn empty_steps_signers(header: &Header) -> BTreeSet
{ header .empty_steps() .into_iter() - .flat_map(|steps| steps) + .flatten() .filter_map(|step| empty_step_signer(&step, &header.parent_hash)) .collect::>() } @@ -462,13 +462,9 @@ mod tests { // when we're inserting header#7 and last finalized header is 0: // check that votes at #7 are computed correctly without cache let expected_votes_at_7 = FinalityVotes { - votes: vec![ - (ctx.addresses[0].clone(), 3), - (ctx.addresses[1].clone(), 3), - (ctx.addresses[2].clone(), 1), - ] - .into_iter() - .collect(), + votes: vec![(ctx.addresses[0], 3), (ctx.addresses[1], 3), (ctx.addresses[2], 1)] + .into_iter() + .collect(), ancestry: ancestry[..7].iter().cloned().collect(), }; let id7 = headers[6].compute_id(); @@ -491,9 +487,7 @@ mod tests { // cached votes at #5 let expected_votes_at_5 = FinalityVotes { - votes: vec![(ctx.addresses[0].clone(), 3), (ctx.addresses[1].clone(), 2)] - .into_iter() - .collect(), + votes: vec![(ctx.addresses[0], 3), (ctx.addresses[1], 2)].into_iter().collect(), ancestry: ancestry[..5].iter().cloned().collect(), }; FinalityCache::::insert(hashes[4], expected_votes_at_5); @@ -520,9 +514,7 @@ mod tests { // when we're inserting header#7 and last finalized header is 3: // check that votes at #7 are computed correctly with cache let expected_votes_at_7 = FinalityVotes { - votes: vec![(ctx.addresses[1].clone(), 3), (ctx.addresses[2].clone(), 1)] - .into_iter() - .collect(), + votes: vec![(ctx.addresses[1], 3), (ctx.addresses[2], 1)].into_iter().collect(), ancestry: ancestry[3..7].iter().cloned().collect(), }; assert_eq!( diff --git a/bridges/modules/ethereum/src/import.rs b/bridges/modules/ethereum/src/import.rs index 588b4dfdbf..490c164e47 100644 --- a/bridges/modules/ethereum/src/import.rs +++ b/bridges/modules/ethereum/src/import.rs @@ -70,6 +70,9 @@ pub fn import_headers( Ok((useful, useless)) } +/// A vector of finalized headers and their submitters. 
+pub type FinalizedHeaders = Vec<(HeaderId, Option<::Submitter>)>; + /// Imports given header and updates blocks finality (if required). /// /// Transactions receipts must be provided if `header_import_requires_receipts()` @@ -84,7 +87,7 @@ pub fn import_header( submitter: Option, header: Header, receipts: Option>, -) -> Result<(HeaderId, Vec<(HeaderId, Option)>), Error> { +) -> Result<(HeaderId, FinalizedHeaders), Error> { // first check that we are able to import this header at all let (header_id, finalized_id) = is_importable_header(storage, &header)?; diff --git a/bridges/modules/ethereum/src/lib.rs b/bridges/modules/ethereum/src/lib.rs index b77f436301..bfae5d4b26 100644 --- a/bridges/modules/ethereum/src/lib.rs +++ b/bridges/modules/ethereum/src/lib.rs @@ -15,6 +15,8 @@ // along with Parity Bridges Common. If not, see . #![cfg_attr(not(feature = "std"), no_std)] +// Runtime-generated enums +#![allow(clippy::large_enum_variant)] use crate::finality::{CachedFinalityVotes, FinalityVotes}; use codec::{Decode, Encode}; @@ -235,6 +237,7 @@ impl ImportContext { } /// Converts import context into header we're going to import. + #[allow(clippy::too_many_arguments)] pub fn into_import_header( self, is_best: bool, @@ -503,7 +506,7 @@ impl Module { } /// Verify that transaction is included into given finalized block. 
- pub fn verify_transaction_finalized(block: H256, tx_index: u64, proof: &Vec) -> bool { + pub fn verify_transaction_finalized(block: H256, tx_index: u64, proof: &[RawTransaction]) -> bool { crate::verify_transaction_finalized(&BridgeStorage::::new(), block, tx_index, proof) } } @@ -616,13 +619,8 @@ impl BridgeStorage { blocks_at_number: &mut Vec, ) { // ensure that unfinalized headers we want to prune do not have scheduled changes - if number > finalized_number { - if blocks_at_number - .iter() - .any(|block| ScheduledChanges::contains_key(block)) - { - return; - } + if number > finalized_number && blocks_at_number.iter().any(ScheduledChanges::contains_key) { + return; } // physically remove headers and (probably) obsolete validators sets @@ -678,11 +676,9 @@ impl Storage for BridgeStorage { let mut current_id = *parent; loop { // if we have reached finalized block' sibling => stop with special signal - if current_id.number == best_finalized.number { - if current_id.hash != best_finalized.hash { - votes.stopped_at_finalized_sibling = true; - return votes; - } + if current_id.number == best_finalized.number && current_id.hash != best_finalized.hash { + votes.stopped_at_finalized_sibling = true; + return votes; } // if we have reached target header => stop @@ -834,6 +830,7 @@ impl Storage for BridgeStorage { } /// Initialize storage. 
+#[cfg(any(feature = "std", feature = "runtime-benchmarks"))] pub(crate) fn initialize_storage( initial_header: &Header, initial_difficulty: U256, @@ -885,7 +882,7 @@ pub fn verify_transaction_finalized( storage: &S, block: H256, tx_index: u64, - proof: &Vec, + proof: &[RawTransaction], ) -> bool { if tx_index >= proof.len() as _ { return false; @@ -906,9 +903,7 @@ pub fn verify_transaction_finalized( let is_finalized = match header.number < finalized.number { true => ancestry(storage, finalized.hash) .skip_while(|(_, ancestor)| ancestor.number > header.number) - .filter(|&(ancestor_hash, _)| ancestor_hash == block) - .next() - .is_some(), + .any(|(ancestor_hash, _)| ancestor_hash == block), false => block == finalized.hash, }; if !is_finalized { @@ -985,7 +980,7 @@ pub(crate) mod tests { hash, StoredHeader { submitter: None, - header: header, + header, total_difficulty: 0.into(), next_validators_set_id: 0, last_signal_block: None, @@ -1266,7 +1261,7 @@ pub(crate) mod tests { run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { let storage = BridgeStorage::::new(); assert_eq!( - verify_transaction_finalized(&storage, example_header().compute_hash(), 0, &vec![example_tx()],), + verify_transaction_finalized(&storage, example_header().compute_hash(), 0, &[example_tx()],), true, ); }); @@ -1280,7 +1275,7 @@ pub(crate) mod tests { insert_header(&mut storage, example_header()); storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0); assert_eq!( - verify_transaction_finalized(&storage, example_header_parent().compute_hash(), 0, &vec![example_tx()],), + verify_transaction_finalized(&storage, example_header_parent().compute_hash(), 0, &[example_tx()],), true, ); }); @@ -1291,7 +1286,7 @@ pub(crate) mod tests { run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { let storage = BridgeStorage::::new(); assert_eq!( - verify_transaction_finalized(&storage, example_header().compute_hash(), 1, &vec![],), + 
verify_transaction_finalized(&storage, example_header().compute_hash(), 1, &[],), false, ); }); @@ -1302,7 +1297,7 @@ pub(crate) mod tests { run_test(TOTAL_VALIDATORS, |_| { let storage = BridgeStorage::::new(); assert_eq!( - verify_transaction_finalized(&storage, example_header().compute_hash(), 1, &vec![],), + verify_transaction_finalized(&storage, example_header().compute_hash(), 1, &[],), false, ); }); @@ -1315,7 +1310,7 @@ pub(crate) mod tests { insert_header(&mut storage, example_header_parent()); insert_header(&mut storage, example_header()); assert_eq!( - verify_transaction_finalized(&storage, example_header().compute_hash(), 0, &vec![example_tx()],), + verify_transaction_finalized(&storage, example_header().compute_hash(), 0, &[example_tx()],), false, ); }); @@ -1334,7 +1329,7 @@ pub(crate) mod tests { insert_header(&mut storage, finalized_header_sibling); storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0); assert_eq!( - verify_transaction_finalized(&storage, finalized_header_sibling_hash, 0, &vec![example_tx()],), + verify_transaction_finalized(&storage, finalized_header_sibling_hash, 0, &[example_tx()],), false, ); }); @@ -1353,7 +1348,7 @@ pub(crate) mod tests { insert_header(&mut storage, example_header()); storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0); assert_eq!( - verify_transaction_finalized(&storage, finalized_header_uncle_hash, 0, &vec![example_tx()],), + verify_transaction_finalized(&storage, finalized_header_uncle_hash, 0, &[example_tx()],), false, ); }); @@ -1368,7 +1363,7 @@ pub(crate) mod tests { &storage, example_header().compute_hash(), 0, - &vec![example_tx(), example_tx(),], + &[example_tx(), example_tx()], ), false, ); diff --git a/bridges/modules/ethereum/src/mock.rs b/bridges/modules/ethereum/src/mock.rs index 6c68d92758..0dde85bd93 100644 --- a/bridges/modules/ethereum/src/mock.rs +++ b/bridges/modules/ethereum/src/mock.rs @@ -160,6 +160,6 @@ impl Default for 
KeepSomeHeadersBehindBest { impl PruningStrategy for KeepSomeHeadersBehindBest { fn pruning_upper_bound(&mut self, best_number: u64, _: u64) -> u64 { - best_number.checked_sub(self.0).unwrap_or(0) + best_number.saturating_sub(self.0) } } diff --git a/bridges/modules/ethereum/src/test_utils.rs b/bridges/modules/ethereum/src/test_utils.rs index 9084783d68..eacdb680e0 100644 --- a/bridges/modules/ethereum/src/test_utils.rs +++ b/bridges/modules/ethereum/src/test_utils.rs @@ -86,7 +86,7 @@ impl HeaderBuilder { use crate::HeadersByNumber; use frame_support::StorageMap; - let parent_hash = HeadersByNumber::get(parent_number).unwrap()[0].clone(); + let parent_hash = HeadersByNumber::get(parent_number).unwrap()[0]; Self::with_parent_hash_on_runtime::(parent_hash) } @@ -130,7 +130,7 @@ impl HeaderBuilder { /// Adds empty steps to this header. pub fn empty_steps(mut self, empty_steps: &[(&SecretKey, u64)]) -> Self { let sealed_empty_steps = empty_steps - .into_iter() + .iter() .map(|(author, step)| { let mut empty_step = SealedEmptyStep { step: *step, diff --git a/bridges/modules/ethereum/src/validators.rs b/bridges/modules/ethereum/src/validators.rs index 5620bcb86c..7aa2dd8cb1 100644 --- a/bridges/modules/ethereum/src/validators.rs +++ b/bridges/modules/ethereum/src/validators.rs @@ -20,7 +20,7 @@ use primitives::{Address, Header, HeaderId, LogEntry, Receipt, U256}; use sp_std::prelude::*; /// The hash of InitiateChange event of the validators set contract. -pub(crate) const CHANGE_EVENT_HASH: &'static [u8; 32] = &[ +pub(crate) const CHANGE_EVENT_HASH: &[u8; 32] = &[ 0x55, 0x25, 0x2f, 0xa6, 0xee, 0xe4, 0x74, 0x1b, 0x4e, 0x24, 0xa7, 0x4a, 0x70, 0xe9, 0xc1, 0x1f, 0xd2, 0xc2, 0x28, 0x1d, 0xf8, 0xd6, 0xea, 0x13, 0x12, 0x6f, 0xf8, 0x45, 0xf7, 0x82, 0x5c, 0x89, ]; @@ -49,6 +49,9 @@ pub enum ValidatorsSource { Contract(Address, Vec
), } +/// A short hand for optional validators change. +pub type ValidatorsChange = Option>; + /// Validators manager. pub struct Validators<'a> { config: &'a ValidatorsConfiguration, @@ -94,7 +97,7 @@ impl<'a> Validators<'a> { &self, header: &Header, receipts: Option>, - ) -> Result<(Option>, Option>), Error> { + ) -> Result<(ValidatorsChange, ValidatorsChange), Error> { // let's first check if new source is starting from this header let (source_index, _, source) = self.source_at(header.number); let (next_starts_at, next_source) = self.source_at_next_header(source_index, header.number); @@ -223,7 +226,7 @@ impl<'a> Validators<'a> { } /// Returns source of validators that should author the header. - fn source_at<'b>(&'b self, header_number: u64) -> (usize, u64, &'b ValidatorsSource) { + fn source_at(&self, header_number: u64) -> (usize, u64, &ValidatorsSource) { match self.config { ValidatorsConfiguration::Single(ref source) => (0, 0, source), ValidatorsConfiguration::Multi(ref sources) => sources @@ -240,11 +243,7 @@ impl<'a> Validators<'a> { } /// Returns source of validators that should author the next header. 
- fn source_at_next_header<'b>( - &'b self, - header_source_index: usize, - header_number: u64, - ) -> (u64, &'b ValidatorsSource) { + fn source_at_next_header(&self, header_source_index: usize, header_number: u64) -> (u64, &ValidatorsSource) { match self.config { ValidatorsConfiguration::Single(ref source) => (0, source), ValidatorsConfiguration::Multi(ref sources) => { diff --git a/bridges/modules/ethereum/src/verification.rs b/bridges/modules/ethereum/src/verification.rs index 3c49e81eac..3b1c186592 100644 --- a/bridges/modules/ethereum/src/verification.rs +++ b/bridges/modules/ethereum/src/verification.rs @@ -22,6 +22,7 @@ use primitives::{ public_to_address, step_validator, Address, Header, HeaderId, Receipt, SealedEmptyStep, H256, H520, U128, U256, }; use sp_io::crypto::secp256k1_ecdsa_recover; +use sp_runtime::transaction_validity::TransactionTag; use sp_std::{vec, vec::Vec}; /// Pre-check to see if should try and import this header. @@ -43,6 +44,8 @@ pub fn is_importable_header(storage: &S, header: &Header) -> Result< } /// Try accept unsigned aura header into transaction pool. +/// +/// Returns required and provided tags. 
pub fn accept_aura_header_into_pool( storage: &S, config: &AuraConfiguration, @@ -50,7 +53,7 @@ pub fn accept_aura_header_into_pool( pool_config: &PoolConfiguration, header: &Header, receipts: Option<&Vec>, -) -> Result<(Vec>, Vec>), Error> { +) -> Result<(Vec, Vec), Error> { // check if we can verify further let (header_id, _) = is_importable_header(storage, header)?; @@ -365,6 +368,7 @@ mod tests { use frame_support::{StorageMap, StorageValue}; use primitives::{compute_merkle_root, rlp_encode, TransactionOutcome, H520}; use secp256k1::SecretKey; + use sp_runtime::transaction_validity::TransactionTag; const GENESIS_STEP: u64 = 42; const TOTAL_VALIDATORS: usize = 3; @@ -386,7 +390,7 @@ mod tests { fn default_accept_into_pool( mut make_header: impl FnMut(&[SecretKey]) -> (Header, Option>), - ) -> Result<(Vec>, Vec>), Error> { + ) -> Result<(Vec, Vec), Error> { run_test_with_genesis(genesis(), TOTAL_VALIDATORS, |_| { let validators = vec![validator(0), validator(1), validator(2)]; let mut storage = BridgeStorage::::new(); @@ -429,7 +433,7 @@ mod tests { }, ); - let header_hash = HeadersByNumber::get(&number).unwrap()[0].clone(); + let header_hash = HeadersByNumber::get(&number).unwrap()[0]; let mut header = Headers::::get(&header_hash).unwrap(); header.next_validators_set_id = set_id; if let Some(signalled_set) = signalled_set { @@ -456,15 +460,15 @@ mod tests { assert_eq!(default_verify(&header), Err(Error::InvalidSealArity)); // when there's single seal (we expect 2 or 3 seals) - header.seal = vec![vec![].into()]; + header.seal = vec![vec![]]; assert_eq!(default_verify(&header), Err(Error::InvalidSealArity)); // when there's 3 seals (we expect 2 by default) - header.seal = vec![vec![].into(), vec![].into(), vec![].into()]; + header.seal = vec![vec![], vec![], vec![]]; assert_eq!(default_verify(&header), Err(Error::InvalidSealArity)); // when there's 2 seals - header.seal = vec![vec![].into(), vec![].into()]; + header.seal = vec![vec![], vec![]]; 
assert_ne!(default_verify(&header), Err(Error::InvalidSealArity)); } @@ -564,7 +568,7 @@ mod tests { fn verifies_step() { // when step is missing from seals let mut header = Header { - seal: vec![vec![].into(), vec![].into()], + seal: vec![vec![], vec![]], gas_limit: test_aura_config().min_gas_limit, parent_hash: genesis().compute_hash(), ..Default::default() @@ -585,7 +589,7 @@ mod tests { // when step is lesser that for the parent block header.seal[0] = rlp_encode(&40u64); - header.seal = vec![vec![40].into(), vec![].into()]; + header.seal = vec![vec![40], vec![]]; assert_eq!(verify_with_config(&config, &header), Err(Error::DoubleVote)); // when step is OK @@ -691,7 +695,7 @@ mod tests { default_accept_into_pool(|_| ( Header { number: 20_000_000, - seal: vec![vec![].into(), vec![].into()], + seal: vec![vec![], vec![]], gas_limit: test_aura_config().min_gas_limit, log_bloom: (&[0xff; 256]).into(), ..Default::default() diff --git a/bridges/modules/substrate/src/lib.rs b/bridges/modules/substrate/src/lib.rs index 61d50d6fdd..9afa0872a5 100644 --- a/bridges/modules/substrate/src/lib.rs +++ b/bridges/modules/substrate/src/lib.rs @@ -157,9 +157,9 @@ impl Module { fn check_validator_set_proof( state_root: &T::Hash, proof: StorageProof, - validator_set: &Vec<(AuthorityId, AuthorityWeight)>, + validator_set: &[(AuthorityId, AuthorityWeight)], ) -> DispatchResult { - let checker = >::new(*state_root, proof.clone()); + let checker = >::new(*state_root, proof); let checker = checker.map_err(Self::map_storage_err)?; diff --git a/bridges/primitives/ethereum-poa/src/lib.rs b/bridges/primitives/ethereum-poa/src/lib.rs index fa47912f39..6515f901fe 100644 --- a/bridges/primitives/ethereum-poa/src/lib.rs +++ b/bridges/primitives/ethereum-poa/src/lib.rs @@ -15,6 +15,10 @@ // along with Parity Bridges Common. If not, see . 
#![cfg_attr(not(feature = "std"), no_std)] +// RuntimeApi generated functions +#![allow(clippy::too_many_arguments)] +// Generated by `DecodeLimit::decode_with_depth_limit` +#![allow(clippy::unnecessary_mut_passed)] pub use parity_bytes::Bytes; pub use primitive_types::{H160, H256, H512, U128, U256}; @@ -206,7 +210,7 @@ impl Header { /// Check if passed transactions are matching transactions root in this header. pub fn verify_transactions_root(&self, transactions: &[RawTransaction]) -> bool { - verify_merkle_proof(self.transactions_root, transactions.into_iter()) + verify_merkle_proof(self.transactions_root, transactions.iter()) } /// Gets the seal hash of this header. @@ -581,7 +585,7 @@ mod tests { gas: 86016.into(), to: Some(hex!("dac17f958d2ee523a2206206994597c13d831ec7").into()), value: 0.into(), - payload: hex!("a9059cbb000000000000000000000000e08f35f66867a454835b25118f1e490e7f9e9a7400000000000000000000000000000000000000000000000000000000004c4b40").to_vec().into(), + payload: hex!("a9059cbb000000000000000000000000e08f35f66867a454835b25118f1e490e7f9e9a7400000000000000000000000000000000000000000000000000000000004c4b40").to_vec(), }, }), ); @@ -600,7 +604,7 @@ mod tests { gas: 160000.into(), to: Some(hex!("84dd11eb2a29615303d18149c0dbfa24167f8966").into()), value: 0.into(), - payload: hex!("a9059cbb00000000000000000000000001503dfc5ad81bf630d83697e98601871bb211b60000000000000000000000000000000000000000000000000000000000002710").to_vec().into(), + payload: hex!("a9059cbb00000000000000000000000001503dfc5ad81bf630d83697e98601871bb211b60000000000000000000000000000000000000000000000000000000000002710").to_vec(), }, }), ); diff --git a/bridges/relays/ethereum/src/ethereum_exchange.rs b/bridges/relays/ethereum/src/ethereum_exchange.rs index 385c68a3fe..95ded39f60 100644 --- a/bridges/relays/ethereum/src/ethereum_exchange.rs +++ b/bridges/relays/ethereum/src/ethereum_exchange.rs @@ -100,7 +100,7 @@ impl SourceClient for EthereumTransactionsSource { eth_header_id: 
&EthereumHeaderId, eth_tx: EthereumTransaction, ) -> Result { - const TRANSACTION_HAS_RAW_FIELD_PROOF: &'static str = "RPC level checks that transactions from Ethereum\ + const TRANSACTION_HAS_RAW_FIELD_PROOF: &str = "RPC level checks that transactions from Ethereum\ node are having `raw` field; qed"; let eth_header = self.client.header_by_hash_with_transactions(eth_header_id.1).await?; diff --git a/bridges/relays/ethereum/src/ethereum_sync_loop.rs b/bridges/relays/ethereum/src/ethereum_sync_loop.rs index 162f93c631..4a13fa6a1d 100644 --- a/bridges/relays/ethereum/src/ethereum_sync_loop.rs +++ b/bridges/relays/ethereum/src/ethereum_sync_loop.rs @@ -163,7 +163,7 @@ impl TargetClient for SubstrateHeadersTarget { &self, headers: Vec, ) -> SubmittedHeaders { - let (sign_params, sign_transactions) = (self.sign_params.clone(), self.sign_transactions.clone()); + let (sign_params, sign_transactions) = (self.sign_params.clone(), self.sign_transactions); self.client .submit_ethereum_headers(sign_params, headers, sign_transactions) .await @@ -173,6 +173,7 @@ impl TargetClient for SubstrateHeadersTarget { Ok(HashSet::new()) } + #[allow(clippy::unit_arg)] async fn complete_header(&self, id: EthereumHeaderId, _completion: ()) -> Result { Ok(id) } diff --git a/bridges/relays/ethereum/src/ethereum_types.rs b/bridges/relays/ethereum/src/ethereum_types.rs index 6e118e3ad9..58aa349701 100644 --- a/bridges/relays/ethereum/src/ethereum_types.rs +++ b/bridges/relays/ethereum/src/ethereum_types.rs @@ -22,11 +22,11 @@ pub use web3::types::{Address, Bytes, CallRequest, H256, U128, U256, U64}; /// When header is just received from the Ethereum node, we check that it has /// both number and hash fields filled. -pub const HEADER_ID_PROOF: &'static str = "checked on retrieval; qed"; +pub const HEADER_ID_PROOF: &str = "checked on retrieval; qed"; /// When receipt is just received from the Ethereum node, we check that it has /// gas_used field filled. 
-pub const RECEIPT_GAS_USED_PROOF: &'static str = "checked on retrieval; qed"; +pub const RECEIPT_GAS_USED_PROOF: &str = "checked on retrieval; qed"; /// Ethereum transaction hash type. pub type TransactionHash = H256; diff --git a/bridges/relays/ethereum/src/exchange.rs b/bridges/relays/ethereum/src/exchange.rs index d465e8a214..352e53fe6c 100644 --- a/bridges/relays/ethereum/src/exchange.rs +++ b/bridges/relays/ethereum/src/exchange.rs @@ -323,7 +323,7 @@ mod tests { &self, _: &TestTransactionHash, ) -> Result, TestError> { - self.data.lock().transaction.clone() + self.data.lock().transaction } async fn transaction_proof( @@ -331,7 +331,7 @@ mod tests { _: &TestHeaderId, _: TestTransaction, ) -> Result { - self.data.lock().transaction_proof.clone() + self.data.lock().transaction_proof } } @@ -368,11 +368,11 @@ mod tests { } async fn is_header_known(&self, _: &TestHeaderId) -> Result { - self.data.lock().is_header_known.clone() + self.data.lock().is_header_known } async fn is_header_finalized(&self, _: &TestHeaderId) -> Result { - self.data.lock().is_header_finalized.clone() + self.data.lock().is_header_finalized } async fn submit_transaction_proof(&self, proof: TestTransactionProof) -> Result<(), TestError> { diff --git a/bridges/relays/ethereum/src/headers.rs b/bridges/relays/ethereum/src/headers.rs index c1dcc86444..8980bcccf3 100644 --- a/bridges/relays/ethereum/src/headers.rs +++ b/bridges/relays/ethereum/src/headers.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -use crate::sync_types::{HeaderId, HeaderStatus, HeadersSyncPipeline, QueuedHeader, SourceHeader}; +use crate::sync_types::{HeaderId, HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader, SourceHeader}; use linked_hash_map::LinkedHashMap; use num_traits::{One, Zero}; use std::{ @@ -59,9 +59,9 @@ pub struct QueuedHeaders { known_headers: KnownHeaders

, /// Headers that are waiting for completion data from source node. Mapped (and auto-sorted /// by) to the last fetch time. - incomplete_headers: LinkedHashMap, Option>, + incomplete_headers: LinkedHashMap, Option>, /// Headers that are waiting to be completed at target node. Auto-sorted by insertion time. - completion_data: LinkedHashMap, P::Completion>, + completion_data: LinkedHashMap, P::Completion>, /// Best synced block number. best_synced_number: P::Number, /// Pruned blocks border. We do not store or accept any blocks with number less than @@ -106,7 +106,7 @@ impl QueuedHeaders

{ /// Returns number of headers that are currently in given queue. pub fn headers_in_status(&self, status: HeaderStatus) -> usize { match status { - HeaderStatus::Unknown | HeaderStatus::Synced => return 0, + HeaderStatus::Unknown | HeaderStatus::Synced => 0, HeaderStatus::MaybeOrphan => self .maybe_orphan .values() @@ -168,7 +168,7 @@ impl QueuedHeaders

{ } /// Returns synchronization status of the header. - pub fn status(&self, id: &HeaderId) -> HeaderStatus { + pub fn status(&self, id: &HeaderIdOf

) -> HeaderStatus { self.known_headers .get(&id.0) .and_then(|x| x.get(&id.1)) @@ -179,7 +179,7 @@ impl QueuedHeaders

{ /// Get oldest header from given queue. pub fn header(&self, status: HeaderStatus) -> Option<&QueuedHeader

> { match status { - HeaderStatus::Unknown | HeaderStatus::Synced => return None, + HeaderStatus::Unknown | HeaderStatus::Synced => None, HeaderStatus::MaybeOrphan => oldest_header(&self.maybe_orphan), HeaderStatus::Orphan => oldest_header(&self.orphan), HeaderStatus::MaybeExtra => oldest_header(&self.maybe_extra), @@ -197,7 +197,7 @@ impl QueuedHeaders

{ f: impl FnMut(&QueuedHeader

) -> bool, ) -> Option>> { match status { - HeaderStatus::Unknown | HeaderStatus::Synced => return None, + HeaderStatus::Unknown | HeaderStatus::Synced => None, HeaderStatus::MaybeOrphan => oldest_headers(&self.maybe_orphan, f), HeaderStatus::Orphan => oldest_headers(&self.orphan, f), HeaderStatus::MaybeExtra => oldest_headers(&self.maybe_extra, f), @@ -268,12 +268,12 @@ impl QueuedHeaders

{ } /// Receive best header from the target node. - pub fn target_best_header_response(&mut self, id: &HeaderId) { + pub fn target_best_header_response(&mut self, id: &HeaderIdOf

) { self.header_synced(id) } /// Receive target node response for MaybeOrphan request. - pub fn maybe_orphan_response(&mut self, id: &HeaderId, response: bool) { + pub fn maybe_orphan_response(&mut self, id: &HeaderIdOf

, response: bool) { if !response { move_header_descendants::

( &mut [&mut self.maybe_orphan], @@ -295,7 +295,7 @@ impl QueuedHeaders

{ } /// Receive target node response for MaybeExtra request. - pub fn maybe_extra_response(&mut self, id: &HeaderId, response: bool) { + pub fn maybe_extra_response(&mut self, id: &HeaderIdOf

, response: bool) { let (destination_status, destination_queue) = if response { (HeaderStatus::Extra, &mut self.extra) } else if self.is_parent_incomplete(id) { @@ -315,7 +315,7 @@ impl QueuedHeaders

{ } /// Receive extra from source node. - pub fn extra_response(&mut self, id: &HeaderId, extra: P::Extra) { + pub fn extra_response(&mut self, id: &HeaderIdOf

, extra: P::Extra) { let (destination_status, destination_queue) = if self.is_parent_incomplete(id) { (HeaderStatus::Incomplete, &mut self.incomplete) } else { @@ -334,7 +334,7 @@ impl QueuedHeaders

{ } /// Receive completion response from source node. - pub fn completion_response(&mut self, id: &HeaderId, completion: Option) { + pub fn completion_response(&mut self, id: &HeaderIdOf

, completion: Option) { let completion = match completion { Some(completion) => completion, None => { @@ -361,12 +361,12 @@ impl QueuedHeaders

{ id, ); - self.completion_data.insert(id.clone(), completion); + self.completion_data.insert(*id, completion); } } /// When header is submitted to target node. - pub fn headers_submitted(&mut self, ids: Vec>) { + pub fn headers_submitted(&mut self, ids: Vec>) { for id in ids { move_header( &mut self.ready, @@ -380,7 +380,7 @@ impl QueuedHeaders

{ } /// When header completion data is sent to target node. - pub fn header_completed(&mut self, id: &HeaderId) { + pub fn header_completed(&mut self, id: &HeaderIdOf

) { if self.completion_data.remove(id).is_some() { log::debug!( target: "bridge", @@ -404,7 +404,7 @@ impl QueuedHeaders

{ } /// Marks given headers incomplete. - pub fn add_incomplete_headers(&mut self, new_incomplete_headers: Vec>) { + pub fn add_incomplete_headers(&mut self, new_incomplete_headers: Vec>) { for new_incomplete_header in new_incomplete_headers { self.header_synced(&new_incomplete_header); move_header_descendants::

( @@ -426,7 +426,7 @@ impl QueuedHeaders

{ } /// When incomplete headers ids are receved from target node. - pub fn incomplete_headers_response(&mut self, ids: HashSet>) { + pub fn incomplete_headers_response(&mut self, ids: HashSet>) { // all new incomplete headers are marked Synced and all their descendants // are moved from Ready/Submitted to Incomplete queue let new_incomplete_headers = ids @@ -466,7 +466,7 @@ impl QueuedHeaders

{ } /// Returns id of the header for which we want to fetch completion data. - pub fn incomplete_header(&mut self) -> Option> { + pub fn incomplete_header(&mut self) -> Option> { queued_incomplete_header(&mut self.incomplete_headers, |last_fetch_time| { let retry = match *last_fetch_time { Some(last_fetch_time) => last_fetch_time.elapsed() > RETRY_FETCH_COMPLETION_INTERVAL, @@ -483,7 +483,7 @@ impl QueuedHeaders

{ } /// Returns header completion data to upload to target node. - pub fn header_to_complete(&mut self) -> Option<(HeaderId, &P::Completion)> { + pub fn header_to_complete(&mut self) -> Option<(HeaderIdOf

, &P::Completion)> { queued_incomplete_header(&mut self.completion_data, |_| true) } @@ -520,7 +520,7 @@ impl QueuedHeaders

{ /// Returns true if parent of this header is either incomplete or waiting for /// its own incomplete ancestor to be completed. - fn is_parent_incomplete(&self, id: &HeaderId) -> bool { + fn is_parent_incomplete(&self, id: &HeaderIdOf

) -> bool { let status = self.status(id); let header = match status { HeaderStatus::MaybeOrphan => header(&self.maybe_orphan, id), @@ -546,7 +546,7 @@ impl QueuedHeaders

{ } /// When we receive new Synced header from target node. - fn header_synced(&mut self, id: &HeaderId) { + fn header_synced(&mut self, id: &HeaderIdOf

) { // update best synced block number self.best_synced_number = std::cmp::max(self.best_synced_number, id.0); @@ -593,19 +593,12 @@ impl QueuedHeaders

{ } /// Insert header to the queue. -fn insert_header( - queue: &mut HeadersQueue

, - id: HeaderId, - header: QueuedHeader

, -) { +fn insert_header(queue: &mut HeadersQueue

, id: HeaderIdOf

, header: QueuedHeader

) { queue.entry(id.0).or_default().insert(id.1, header); } /// Remove header from the queue. -fn remove_header( - queue: &mut HeadersQueue

, - id: &HeaderId, -) -> Option> { +fn remove_header(queue: &mut HeadersQueue

, id: &HeaderIdOf

) -> Option> { let mut headers_at = match queue.entry(id.0) { BTreeMapEntry::Occupied(headers_at) => headers_at, BTreeMapEntry::Vacant(_) => return None, @@ -619,10 +612,7 @@ fn remove_header( } /// Get header from the queue. -fn header<'a, P: HeadersSyncPipeline>( - queue: &'a HeadersQueue

, - id: &HeaderId, -) -> Option<&'a QueuedHeader

> { +fn header<'a, P: HeadersSyncPipeline>(queue: &'a HeadersQueue

, id: &HeaderIdOf

) -> Option<&'a QueuedHeader

> { queue.get(&id.0).and_then(|by_hash| by_hash.get(&id.1)) } @@ -634,9 +624,9 @@ fn move_header( destination_queue: &mut HeadersQueue

, known_headers: &mut KnownHeaders

, destination_status: HeaderStatus, - id: &HeaderId, + id: &HeaderIdOf

, prepare: impl FnOnce(QueuedHeader

) -> QueuedHeader

, -) -> Option> { +) -> Option> { let header = match remove_header(source_queue, id) { Some(header) => prepare(header), None => return None, @@ -655,7 +645,7 @@ fn move_header_descendants( destination_queue: &mut HeadersQueue

, known_headers: &mut KnownHeaders

, destination_status: HeaderStatus, - id: &HeaderId, + id: &HeaderIdOf

, ) { let mut current_number = id.0 + One::one(); let mut current_parents = HashSet::new(); @@ -743,7 +733,7 @@ fn prune_known_headers(known_headers: &mut KnownHeaders< /// Change header status. fn set_header_status( known_headers: &mut KnownHeaders

, - id: &HeaderId, + id: &HeaderIdOf

, status: HeaderStatus, ) { log::debug!( diff --git a/bridges/relays/ethereum/src/main.rs b/bridges/relays/ethereum/src/main.rs index a574e64775..4894c7efb2 100644 --- a/bridges/relays/ethereum/src/main.rs +++ b/bridges/relays/ethereum/src/main.rs @@ -101,7 +101,6 @@ fn main() { } ("", _) => { log::error!(target: "bridge", "No subcommand specified"); - return; } _ => unreachable!("all possible subcommands are checked above; qed"), } diff --git a/bridges/relays/ethereum/src/rpc_errors.rs b/bridges/relays/ethereum/src/rpc_errors.rs index 65f757a0e0..4645d4e4dc 100644 --- a/bridges/relays/ethereum/src/rpc_errors.rs +++ b/bridges/relays/ethereum/src/rpc_errors.rs @@ -18,7 +18,6 @@ use crate::ethereum_types::{EthereumHeaderId, TransactionHash as EthereumTransac use crate::sync_types::MaybeConnectionError; use jsonrpsee::client::RequestError; -use serde_json; /// Contains common errors that can occur when /// interacting with a Substrate or Ethereum node @@ -79,10 +78,7 @@ impl From for RpcError { impl MaybeConnectionError for RpcError { fn is_connection_error(&self) -> bool { - match *self { - RpcError::Request(RequestError::TransportError(_)) => true, - _ => false, - } + matches!(*self, RpcError::Request(RequestError::TransportError(_))) } } diff --git a/bridges/relays/ethereum/src/substrate_client.rs b/bridges/relays/ethereum/src/substrate_client.rs index e349213dfc..e0c59457b0 100644 --- a/bridges/relays/ethereum/src/substrate_client.rs +++ b/bridges/relays/ethereum/src/substrate_client.rs @@ -400,5 +400,5 @@ fn create_signed_transaction( let signer: sp_runtime::MultiSigner = signer.public().into(); let (function, extra, _) = raw_payload.deconstruct(); - bridge_node_runtime::UncheckedExtrinsic::new_signed(function, signer.into_account().into(), signature.into(), extra) + bridge_node_runtime::UncheckedExtrinsic::new_signed(function, signer.into_account(), signature.into(), extra) } diff --git a/bridges/relays/ethereum/src/sync.rs b/bridges/relays/ethereum/src/sync.rs 
index 481311f20f..b1799bf2d2 100644 --- a/bridges/relays/ethereum/src/sync.rs +++ b/bridges/relays/ethereum/src/sync.rs @@ -15,7 +15,7 @@ // along with Parity Bridges Common. If not, see . use crate::headers::QueuedHeaders; -use crate::sync_types::{HeaderId, HeaderStatus, HeadersSyncPipeline, QueuedHeader}; +use crate::sync_types::{HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader}; use num_traits::{One, Saturating, Zero}; /// Common sync params. @@ -56,7 +56,7 @@ pub struct HeadersSync { /// Best header number known to source node. source_best_number: Option, /// Best header known to target node. - target_best_header: Option>, + target_best_header: Option>, /// Headers queue. headers: QueuedHeaders

, } @@ -78,7 +78,7 @@ impl HeadersSync

{ } /// Best header known to target node. - pub fn target_best_header(&self) -> Option> { + pub fn target_best_header(&self) -> Option> { self.target_best_header } @@ -94,7 +94,7 @@ impl HeadersSync

{ } /// Returns synchronization status. - pub fn status(&self) -> (&Option>, &Option) { + pub fn status(&self) -> (&Option>, &Option) { (&self.target_best_header, &self.source_best_number) } @@ -111,7 +111,7 @@ impl HeadersSync

{ /// Select header that needs to be downloaded from the source node. pub fn select_new_header_to_download(&self) -> Option { // if we haven't received best header from source node yet, there's nothing we can download - let source_best_number = self.source_best_number.clone()?; + let source_best_number = self.source_best_number?; // if we haven't received known best header from target node yet, there's nothing we can download let target_best_header = self.target_best_header.as_ref()?; @@ -205,7 +205,7 @@ impl HeadersSync

{ /// Receive new best header from the target node. /// Returns true if it is different from the previous block known to us. - pub fn target_best_header_response(&mut self, best_header: HeaderId) -> bool { + pub fn target_best_header_response(&mut self, best_header: HeaderIdOf

) -> bool { log::debug!( target: "bridge", "Received best known header from {}: {:?}", @@ -244,7 +244,7 @@ pub mod tests { use super::*; use crate::ethereum_types::{EthereumHeadersSyncPipeline, H256}; use crate::headers::tests::{header, id}; - use crate::sync_types::HeaderStatus; + use crate::sync_types::{HeaderId, HeaderStatus}; fn side_hash(number: u64) -> H256 { H256::from_low_u64_le(1000 + number) diff --git a/bridges/relays/ethereum/src/sync_loop.rs b/bridges/relays/ethereum/src/sync_loop.rs index 9128374e9f..be64e9fa50 100644 --- a/bridges/relays/ethereum/src/sync_loop.rs +++ b/bridges/relays/ethereum/src/sync_loop.rs @@ -18,7 +18,7 @@ use crate::metrics::{start as metrics_start, GlobalMetrics, MetricsParams, Regis use crate::sync::HeadersSyncParams; use crate::sync_loop_metrics::SyncLoopMetrics; use crate::sync_types::{ - HeaderId, HeaderStatus, HeadersSyncPipeline, MaybeConnectionError, QueuedHeader, SubmittedHeaders, + HeaderIdOf, HeaderStatus, HeadersSyncPipeline, MaybeConnectionError, QueuedHeader, SubmittedHeaders, }; use async_trait::async_trait; @@ -68,17 +68,15 @@ pub trait SourceClient: Sized { async fn header_by_number(&self, number: P::Number) -> Result; /// Get completion data by header hash. - async fn header_completion( - &self, - id: HeaderId, - ) -> Result<(HeaderId, Option), Self::Error>; + async fn header_completion(&self, id: HeaderIdOf

) + -> Result<(HeaderIdOf

, Option), Self::Error>; /// Get extra data by header hash. async fn header_extra( &self, - id: HeaderId, + id: HeaderIdOf

, header: QueuedHeader

, - ) -> Result<(HeaderId, P::Extra), Self::Error>; + ) -> Result<(HeaderIdOf

, P::Extra), Self::Error>; } /// Target client trait. @@ -88,35 +86,23 @@ pub trait TargetClient: Sized { type Error: std::fmt::Debug + MaybeConnectionError; /// Returns ID of best header known to the target node. - async fn best_header_id(&self) -> Result, Self::Error>; + async fn best_header_id(&self) -> Result, Self::Error>; /// Returns true if header is known to the target node. - async fn is_known_header( - &self, - id: HeaderId, - ) -> Result<(HeaderId, bool), Self::Error>; + async fn is_known_header(&self, id: HeaderIdOf

) -> Result<(HeaderIdOf

, bool), Self::Error>; /// Submit headers. - async fn submit_headers( - &self, - headers: Vec>, - ) -> SubmittedHeaders, Self::Error>; + async fn submit_headers(&self, headers: Vec>) -> SubmittedHeaders, Self::Error>; /// Returns ID of headers that require to be 'completed' before children can be submitted. - async fn incomplete_headers_ids(&self) -> Result>, Self::Error>; + async fn incomplete_headers_ids(&self) -> Result>, Self::Error>; /// Submit completion data for header. - async fn complete_header( - &self, - id: HeaderId, - completion: P::Completion, - ) -> Result, Self::Error>; + async fn complete_header(&self, id: HeaderIdOf

, completion: P::Completion) + -> Result, Self::Error>; /// Returns true if header requires extra data to be submitted. - async fn requires_extra( - &self, - header: QueuedHeader

, - ) -> Result<(HeaderId, bool), Self::Error>; + async fn requires_extra(&self, header: QueuedHeader

) -> Result<(HeaderIdOf

, bool), Self::Error>; } /// Run headers synchronization. @@ -327,7 +313,7 @@ pub fn run>( }, submitted_headers = target_submit_header_future => { // following line helps Rust understand the type of `submitted_headers` :/ - let submitted_headers: SubmittedHeaders, TC::Error> = submitted_headers; + let submitted_headers: SubmittedHeaders, TC::Error> = submitted_headers; let maybe_fatal_error = submitted_headers.fatal_error.map(Err).unwrap_or(Ok(())); target_client_is_online = process_future_result( @@ -380,7 +366,7 @@ pub fn run>( // update metrics if metrics_enabled { metrics_global.update(); - metrics_sync.update(&mut sync); + metrics_sync.update(&sync); } // print progress diff --git a/bridges/relays/ethereum/src/sync_loop_metrics.rs b/bridges/relays/ethereum/src/sync_loop_metrics.rs index 657630ad40..fa34c8b511 100644 --- a/bridges/relays/ethereum/src/sync_loop_metrics.rs +++ b/bridges/relays/ethereum/src/sync_loop_metrics.rs @@ -55,8 +55,8 @@ impl SyncLoopMetrics { /// Update metrics. pub fn update(&mut self, sync: &HeadersSync

) { let headers = sync.headers(); - let source_best_number = sync.source_best_number().unwrap_or(Zero::zero()); - let target_best_number = sync.target_best_header().map(|id| id.0).unwrap_or(Zero::zero()); + let source_best_number = sync.source_best_number().unwrap_or_else(Zero::zero); + let target_best_number = sync.target_best_header().map(|id| id.0).unwrap_or_else(Zero::zero); self.best_block_numbers .with_label_values(&["source"]) diff --git a/bridges/relays/ethereum/src/sync_loop_tests.rs b/bridges/relays/ethereum/src/sync_loop_tests.rs index 83fadce619..b49d4ce5e5 100644 --- a/bridges/relays/ethereum/src/sync_loop_tests.rs +++ b/bridges/relays/ethereum/src/sync_loop_tests.rs @@ -408,7 +408,7 @@ fn run_backoff_test(result: Result<(), TestError>) -> (Duration, Duration) { &mut backoff, |_| {}, &mut go_offline_future, - |delay| async_std::task::sleep(delay), + async_std::task::sleep, || "Test error".into(), ); diff --git a/bridges/relays/ethereum/src/sync_types.rs b/bridges/relays/ethereum/src/sync_types.rs index e1ea9023ea..1e2aca16f7 100644 --- a/bridges/relays/ethereum/src/sync_types.rs +++ b/bridges/relays/ethereum/src/sync_types.rs @@ -95,6 +95,9 @@ pub trait HeadersSyncPipeline: Clone + Copy { fn estimate_size(source: &QueuedHeader) -> usize; } +/// A HeaderId for `HeaderSyncPipeline`. +pub type HeaderIdOf

= HeaderId<

::Hash,

::Number>; + /// Header that we're receiving from source node. pub trait SourceHeader { /// Returns ID of header. diff --git a/bridges/relays/substrate/src/bridge.rs b/bridges/relays/substrate/src/bridge.rs index c203e85a2e..b797005210 100644 --- a/bridges/relays/substrate/src/bridge.rs +++ b/bridges/relays/substrate/src/bridge.rs @@ -156,9 +156,10 @@ pub async fn run_async(params: Params, exit: Box + Unpin Ok(()) } -fn initial_next_events<'a>( - chains: &'a HashMap>, -) -> Vec> + 'a>>> { +type EventsResult = Result<(ChainId, RawClientEvent), Error>; +type EventsFuture<'a> = Pin + 'a>>; + +fn initial_next_events<'a>(chains: &'a HashMap>) -> Vec> { chains .values() .map(|chain_cell| async move { @@ -175,12 +176,9 @@ fn initial_next_events<'a>( } async fn next_event<'a>( - next_events: Vec> + 'a>>>, + next_events: Vec>, chains: &'a HashMap>, -) -> ( - Result<(Hash, RawClientEvent), Error>, - Vec> + 'a>>>, -) { +) -> (Result<(Hash, RawClientEvent), Error>, Vec>) { let (result, _, mut rest) = future::select_all(next_events).await; match result {