style: Migrate to stable-only rustfmt configuration

- Remove nightly-only features from .rustfmt.toml and vendor/ss58-registry/rustfmt.toml
- Removed features: imports_granularity, wrap_comments, comment_width,
  reorder_impl_items, spaces_around_ranges, binop_separator,
  match_arm_blocks, trailing_semicolon, trailing_comma
- Format all 898 affected files with stable rustfmt
- Ensure long-term reliability without a nightly toolchain dependency
This commit is contained in:
2025-12-22 17:12:58 +03:00
parent 3208f208c0
commit abc4c3989b
898 changed files with 8671 additions and 6432 deletions
+17 -10
View File
@@ -1,24 +1,31 @@
# Pezkuwi SDK - Stable Rustfmt Configuration
# Only stable features are used for long-term reliability
# Basic
edition = "2021"
hard_tabs = true
max_width = 100
use_small_heuristics = "Max"
# Imports
imports_granularity = "Crate"
reorder_imports = true
# Consistency
newline_style = "Unix"
# Misc
chain_width = 80
spaces_around_ranges = false
binop_separator = "Back"
reorder_impl_items = false
match_arm_leading_pipes = "Preserve"
match_arm_blocks = false
match_block_trailing_comma = true
trailing_comma = "Vertical"
trailing_semicolon = false
use_field_init_shorthand = true
# Format comments
comment_width = 100
wrap_comments = true
# NOTE: The following nightly-only features were removed for stable compatibility:
# - imports_granularity = "Crate"
# - wrap_comments = true
# - comment_width = 100
# - reorder_impl_items = false
# - spaces_around_ranges = false
# - binop_separator = "Back"
# - match_arm_blocks = false
# - trailing_semicolon = false
# - trailing_comma = "Vertical"
+3 -2
View File
@@ -45,8 +45,9 @@ impl KeyValueDB for ParityDbWrapper {
fn write(&self, transaction: DBTransaction) -> io::Result<()> {
self.0
.commit(transaction.ops.iter().map(|op| match op {
kvdb::DBOp::Insert { col, key, value } =>
(*col as u8, &key[key.len() - 32..], Some(value.to_vec())),
kvdb::DBOp::Insert { col, key, value } => {
(*col as u8, &key[key.len() - 32..], Some(value.to_vec()))
},
kvdb::DBOp::Delete { col, key } => (*col as u8, &key[key.len() - 32..], None),
kvdb::DBOp::DeletePrefix { col: _, prefix: _ } => unimplemented!(),
}))
+6 -4
View File
@@ -64,17 +64,19 @@ impl BizinikiwiCli for Cli {
id: &str,
) -> std::result::Result<Box<dyn pezsc_service::ChainSpec>, String> {
let spec = match id {
"" =>
"" => {
return Err(
"Please specify which chain you want to run, e.g. --dev or --chain=local"
.into(),
),
)
},
"dev" => Box::new(chain_spec::development_config()),
"local" => Box::new(chain_spec::local_testnet_config()),
"fir" | "flaming-fir" => Box::new(chain_spec::flaming_fir_config()?),
"staging" => Box::new(chain_spec::pezstaging_testnet_config()),
path =>
Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?),
path => {
Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?)
},
};
Ok(spec)
}
@@ -16,7 +16,6 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
#![cfg(feature = "runtime-benchmarks")]
use assert_cmd::cargo::cargo_bin;
@@ -16,7 +16,6 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
#![cfg(feature = "runtime-benchmarks")]
use assert_cmd::cargo::cargo_bin;
@@ -16,7 +16,6 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
#![cfg(feature = "runtime-benchmarks")]
use assert_cmd::cargo::cargo_bin;
@@ -16,7 +16,6 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
#![cfg(unix)]
use assert_cmd::cargo::cargo_bin;
@@ -16,7 +16,6 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
#![cfg(unix)]
use assert_cmd::cargo::cargo_bin;
@@ -16,7 +16,6 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
#![cfg(unix)]
use assert_cmd::cargo::cargo_bin;
@@ -16,7 +16,6 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
#![cfg(unix)]
use assert_cmd::cargo::cargo_bin;
use nix::sys::signal::Signal::{self, SIGINT, SIGTERM};
@@ -16,7 +16,6 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
#![cfg(unix)]
use assert_cmd::cargo::cargo_bin;
+2 -2
View File
@@ -308,8 +308,8 @@ mod multiplier_tests {
// `cargo test congested_chain_simulation -- --nocapture` to get some insight.
// almost full. The entire quota of normal transactions is taken.
let block_weight = BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap() -
Weight::from_parts(100, 0);
let block_weight = BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap()
- Weight::from_parts(100, 0);
// Default bizinikiwi weight.
let tx_weight = pezframe_support::weights::constants::ExtrinsicBaseWeight::get();
+14 -14
View File
@@ -442,21 +442,21 @@ impl InstanceFilter<RuntimeCall> for ProxyType {
ProxyType::Any => true,
ProxyType::NonTransfer => !matches!(
c,
RuntimeCall::Balances(..) |
RuntimeCall::Assets(..) |
RuntimeCall::Uniques(..) |
RuntimeCall::Nfts(..) |
RuntimeCall::Vesting(pezpallet_vesting::Call::vested_transfer { .. }) |
RuntimeCall::Indices(pezpallet_indices::Call::transfer { .. })
RuntimeCall::Balances(..)
| RuntimeCall::Assets(..)
| RuntimeCall::Uniques(..)
| RuntimeCall::Nfts(..)
| RuntimeCall::Vesting(pezpallet_vesting::Call::vested_transfer { .. })
| RuntimeCall::Indices(pezpallet_indices::Call::transfer { .. })
),
ProxyType::Governance => matches!(
c,
RuntimeCall::Democracy(..) |
RuntimeCall::Council(..) |
RuntimeCall::Society(..) |
RuntimeCall::TechnicalCommittee(..) |
RuntimeCall::Elections(..) |
RuntimeCall::Treasury(..)
RuntimeCall::Democracy(..)
| RuntimeCall::Council(..)
| RuntimeCall::Society(..)
| RuntimeCall::TechnicalCommittee(..)
| RuntimeCall::Elections(..)
| RuntimeCall::Treasury(..)
),
ProxyType::Staking => {
matches!(c, RuntimeCall::Staking(..) | RuntimeCall::FastUnstake(..))
@@ -856,8 +856,8 @@ impl Get<Option<BalancingConfig>> for OffchainRandomBalancing {
max => {
let seed = pezsp_io::offchain::random_seed();
let random = <u32>::decode(&mut TrailingZeroInput::new(&seed))
.expect("input is padded with zeroes; qed") %
max.saturating_add(1);
.expect("input is padded with zeroes; qed")
% max.saturating_add(1);
random as usize
},
};
+11 -8
View File
@@ -306,22 +306,24 @@ impl<'a> Iterator for BlockContentIterator<'a> {
tx_ext(0, pez_kitchensink_runtime::ExistentialDeposit::get() + 1),
),
function: match self.content.block_type {
BlockType::RandomTransfersKeepAlive =>
BlockType::RandomTransfersKeepAlive => {
RuntimeCall::Balances(BalancesCall::transfer_keep_alive {
dest: pezsp_runtime::MultiAddress::Id(receiver),
value: pez_kitchensink_runtime::ExistentialDeposit::get() + 1,
}),
})
},
BlockType::RandomTransfersReaping => {
RuntimeCall::Balances(BalancesCall::transfer_allow_death {
dest: pezsp_runtime::MultiAddress::Id(receiver),
// Transfer so that ending balance would be 1 less than existential
// deposit so that we kill the sender account.
value: 100 * DOLLARS -
(pez_kitchensink_runtime::ExistentialDeposit::get() - 1),
value: 100 * DOLLARS
- (pez_kitchensink_runtime::ExistentialDeposit::get() - 1),
})
},
BlockType::Noop =>
RuntimeCall::System(SystemCall::remark { remark: Vec::new() }),
BlockType::Noop => {
RuntimeCall::System(SystemCall::remark { remark: Vec::new() })
},
},
},
self.runtime_version.spec_version,
@@ -602,12 +604,13 @@ impl BenchKeyring {
.into()
},
ExtrinsicFormat::Bare => generic::UncheckedExtrinsic::new_bare(xt.function).into(),
ExtrinsicFormat::General(ext_version, tx_ext) =>
ExtrinsicFormat::General(ext_version, tx_ext) => {
generic::UncheckedExtrinsic::from_parts(
xt.function,
Preamble::General(ext_version, tx_ext),
)
.into(),
.into()
},
}
}
@@ -343,8 +343,9 @@ fn process_action<T: Serialize + Clone + Sync + 'static>(
builder: pezsc_chain_spec::ChainSpecBuilder<T>,
) -> Result<String, String> {
let builder = match cmd.action {
GenesisBuildAction::NamedPreset(NamedPresetCmd { ref preset_name }) =>
builder.with_genesis_config_preset_name(&preset_name),
GenesisBuildAction::NamedPreset(NamedPresetCmd { ref preset_name }) => {
builder.with_genesis_config_preset_name(&preset_name)
},
GenesisBuildAction::Patch(PatchCmd { ref patch_path }) => {
let patch = fs::read(patch_path.as_path())
.map_err(|e| format!("patch file {patch_path:?} shall be readable: {e}"))?;
@@ -421,8 +421,8 @@ impl FreeingBumpHeapAllocator {
let header_ptr: u32 = match self.free_lists[order] {
Link::Ptr(header_ptr) => {
if (u64::from(header_ptr) + u64::from(order.size()) + u64::from(HEADER_SIZE)) >
mem.size()
if (u64::from(header_ptr) + u64::from(order.size()) + u64::from(HEADER_SIZE))
> mem.size()
{
return Err(error("Invalid header pointer detected"));
}
+4 -4
View File
@@ -205,10 +205,10 @@ impl<Block: BlockT> Blockchain<Block> {
}
let this = self.storage.read();
let other = other.storage.read();
this.hashes == other.hashes &&
this.best_hash == other.best_hash &&
this.best_number == other.best_number &&
this.genesis_hash == other.genesis_hash
this.hashes == other.hashes
&& this.best_hash == other.best_hash
&& this.best_number == other.best_number
&& this.genesis_hash == other.genesis_hash
}
/// Insert header CHT root.
@@ -299,7 +299,7 @@ impl Registry {
None => {
wildcards.remove(&subscriber);
},
Some(filters) =>
Some(filters) => {
for key in filters.iter() {
let remove_key = match listeners.get_mut(key) {
Some(ref mut set) => {
@@ -312,7 +312,8 @@ impl Registry {
if remove_key {
listeners.remove(key);
}
},
}
},
}
}
@@ -503,9 +503,9 @@ where
"Publishing authority DHT record peer_id='{local_peer_id}' with addresses='{addresses:?}'",
);
if !self.warn_public_addresses &&
self.public_addresses.is_empty() &&
!has_global_listen_addresses
if !self.warn_public_addresses
&& self.public_addresses.is_empty()
&& !has_global_listen_addresses
{
self.warn_public_addresses = true;
@@ -754,8 +754,9 @@ where
// Make sure we don't ever work with an outdated set of authorities
// and that we do not update known_authorithies too often.
let best_hash = self.client.best_hash().await?;
if !self.known_authorities.contains_key(&record_key) &&
self.authorities_queried_at
if !self.known_authorities.contains_key(&record_key)
&& self
.authorities_queried_at
.map(|authorities_queried_at| authorities_queried_at != best_hash)
.unwrap_or(true)
{
@@ -991,8 +992,8 @@ where
"Found same record for {:?} record creation time {:?}",
authority_id, new_record.creation_time
);
if current_record_info.peers_with_record.len() + new_record.peers_with_record.len() <=
DEFAULT_KADEMLIA_REPLICATION_FACTOR
if current_record_info.peers_with_record.len() + new_record.peers_with_record.len()
<= DEFAULT_KADEMLIA_REPLICATION_FACTOR
{
current_record_info.peers_with_record.extend(new_record.peers_with_record);
}
@@ -395,8 +395,9 @@ where
let mode = block_builder.extrinsic_inclusion_mode();
let end_reason = match mode {
ExtrinsicInclusionMode::AllExtrinsics =>
self.apply_extrinsics(&mut block_builder, deadline, block_size_limit).await?,
ExtrinsicInclusionMode::AllExtrinsics => {
self.apply_extrinsics(&mut block_builder, deadline, block_size_limit).await?
},
ExtrinsicInclusionMode::OnlyInherents => EndProposingReason::TransactionForbidden,
};
let (block, storage_changes, proof) = block_builder.build()?.into_inner();
@@ -1002,13 +1003,13 @@ mod tests {
.chain((1..extrinsics_num as u64).map(extrinsic))
.collect::<Vec<_>>();
let block_limit = genesis_header.encoded_size() +
extrinsics
let block_limit = genesis_header.encoded_size()
+ extrinsics
.iter()
.take(extrinsics_num - 1)
.map(Encode::encoded_size)
.sum::<usize>() +
Vec::<Extrinsic>::new().encoded_size();
.sum::<usize>()
+ Vec::<Extrinsic>::new().encoded_size();
block_on(txpool.submit_at(genesis_hash, SOURCE, extrinsics.clone())).unwrap();
+21 -13
View File
@@ -53,9 +53,9 @@ enum GenesisBuildAction<EHF> {
impl<EHF> GenesisBuildAction<EHF> {
pub fn merge_patch(&mut self, patch: json::Value) {
match self {
GenesisBuildAction::Patch(value) |
GenesisBuildAction::Full(value) |
GenesisBuildAction::NamedPreset(_, value, _) => json_merge(value, patch),
GenesisBuildAction::Patch(value)
| GenesisBuildAction::Full(value)
| GenesisBuildAction::NamedPreset(_, value, _) => json_merge(value, patch),
}
}
}
@@ -65,8 +65,9 @@ impl<EHF> Clone for GenesisBuildAction<EHF> {
match self {
Self::Patch(ref p) => Self::Patch(p.clone()),
Self::Full(ref f) => Self::Full(f.clone()),
Self::NamedPreset(ref p, patch, _) =>
Self::NamedPreset(p.clone(), patch.clone(), Default::default()),
Self::NamedPreset(ref p, patch, _) => {
Self::NamedPreset(p.clone(), patch.clone(), Default::default())
},
}
}
}
@@ -124,16 +125,18 @@ impl<EHF: HostFunctions> GenesisSource<EHF> {
Ok(genesis.genesis)
},
Self::Storage(storage) => Ok(Genesis::Raw(RawGenesis::from(storage.clone()))),
Self::GenesisBuilderApi(GenesisBuildAction::Full(config), code) =>
Self::GenesisBuilderApi(GenesisBuildAction::Full(config), code) => {
Ok(Genesis::RuntimeGenesis(RuntimeGenesisInner {
json_blob: RuntimeGenesisConfigJson::Config(config.clone()),
code: code.clone(),
})),
Self::GenesisBuilderApi(GenesisBuildAction::Patch(patch), code) =>
}))
},
Self::GenesisBuilderApi(GenesisBuildAction::Patch(patch), code) => {
Ok(Genesis::RuntimeGenesis(RuntimeGenesisInner {
json_blob: RuntimeGenesisConfigJson::Patch(patch.clone()),
code: code.clone(),
})),
}))
},
Self::GenesisBuilderApi(GenesisBuildAction::NamedPreset(name, patch, _), code) => {
let mut preset =
RuntimeCaller::<EHF>::new(&code[..]).get_named_preset(Some(name))?;
@@ -168,8 +171,9 @@ where
// The `StateRootHash` variant exists as a way to keep note that other clients support
// it, but Bizinikiwi itself isn't capable of loading chain specs with just a hash at
// the moment.
Genesis::StateRootHash(_) =>
return Err("Genesis storage in hash format not supported".into()),
Genesis::StateRootHash(_) => {
return Err("Genesis storage in hash format not supported".into())
},
Genesis::RuntimeGenesis(RuntimeGenesisInner {
json_blob: RuntimeGenesisConfigJson::Config(config),
code,
@@ -619,8 +623,12 @@ where
RawGenesis::from(storage)
},
(true, Genesis::Raw(raw)) => raw,
(_, genesis) =>
return Ok(ChainSpecJsonContainer { client_spec: self.client_spec.clone(), genesis }),
(_, genesis) => {
return Ok(ChainSpecJsonContainer {
client_spec: self.client_spec.clone(),
genesis,
})
},
};
Ok(ChainSpecJsonContainer {
@@ -33,10 +33,11 @@ use serde_json::Value;
/// * `b` - The JSON object to merge with `a`.
pub fn merge(a: &mut Value, b: Value) {
match (a, b) {
(Value::Object(a), Value::Object(b)) =>
(Value::Object(a), Value::Object(b)) => {
for (k, v) in b {
merge(a.entry(k).or_insert(Value::Null), v);
},
}
},
(a, b) => *a = b,
};
}
+12 -8
View File
@@ -79,14 +79,18 @@ pub fn execution_method_from_cli(
pezsc_service::config::WasmExecutionMethod::Compiled {
instantiation_strategy: match instantiation_strategy {
WasmtimeInstantiationStrategy::PoolingCopyOnWrite =>
pezsc_service::config::WasmtimeInstantiationStrategy::PoolingCopyOnWrite,
WasmtimeInstantiationStrategy::RecreateInstanceCopyOnWrite =>
pezsc_service::config::WasmtimeInstantiationStrategy::RecreateInstanceCopyOnWrite,
WasmtimeInstantiationStrategy::Pooling =>
pezsc_service::config::WasmtimeInstantiationStrategy::Pooling,
WasmtimeInstantiationStrategy::RecreateInstance =>
pezsc_service::config::WasmtimeInstantiationStrategy::RecreateInstance,
WasmtimeInstantiationStrategy::PoolingCopyOnWrite => {
pezsc_service::config::WasmtimeInstantiationStrategy::PoolingCopyOnWrite
},
WasmtimeInstantiationStrategy::RecreateInstanceCopyOnWrite => {
pezsc_service::config::WasmtimeInstantiationStrategy::RecreateInstanceCopyOnWrite
},
WasmtimeInstantiationStrategy::Pooling => {
pezsc_service::config::WasmtimeInstantiationStrategy::Pooling
},
WasmtimeInstantiationStrategy::RecreateInstance => {
pezsc_service::config::WasmtimeInstantiationStrategy::RecreateInstance
},
},
}
}
@@ -106,10 +106,12 @@ pub enum TrieCacheWarmUpStrategy {
impl From<TrieCacheWarmUpStrategy> for pezsc_service::config::TrieCacheWarmUpStrategy {
fn from(strategy: TrieCacheWarmUpStrategy) -> Self {
match strategy {
TrieCacheWarmUpStrategy::NonBlocking =>
pezsc_service::config::TrieCacheWarmUpStrategy::NonBlocking,
TrieCacheWarmUpStrategy::Blocking =>
pezsc_service::config::TrieCacheWarmUpStrategy::Blocking,
TrieCacheWarmUpStrategy::NonBlocking => {
pezsc_service::config::TrieCacheWarmUpStrategy::NonBlocking
},
TrieCacheWarmUpStrategy::Blocking => {
pezsc_service::config::TrieCacheWarmUpStrategy::Blocking
},
}
}
}
@@ -240,15 +240,16 @@ impl NetworkParams {
// Activate if the user explicitly requested local discovery, `--dev` is given or the
// chain type is `Local`/`Development`
let allow_non_globals_in_dht =
self.discover_local ||
is_dev || matches!(chain_type, ChainType::Local | ChainType::Development);
self.discover_local
|| is_dev || matches!(chain_type, ChainType::Local | ChainType::Development);
let allow_private_ip = match (self.allow_private_ip, self.no_private_ip) {
(true, true) => unreachable!("`*_private_ip` flags are mutually exclusive; qed"),
(true, false) => true,
(false, true) => false,
(false, false) =>
is_dev || matches!(chain_type, ChainType::Local | ChainType::Development),
(false, false) => {
is_dev || matches!(chain_type, ChainType::Local | ChainType::Development)
},
};
NetworkConfiguration {
@@ -115,9 +115,9 @@ impl NodeKeyParams {
.node_key_file
.clone()
.unwrap_or_else(|| net_config_dir.join(NODE_KEY_ED25519_FILE));
if !self.unsafe_force_node_key_generation &&
role.is_authority() &&
!is_dev && !key_path.exists()
if !self.unsafe_force_node_key_generation
&& role.is_authority()
&& !is_dev && !key_path.exists()
{
return Err(Error::NetworkKeyNotFound(key_path));
}
@@ -169,7 +169,9 @@ mod tests {
params.node_key(net_config_dir, Role::Authority, false).and_then(|c| match c {
NodeKeyConfig::Ed25519(pezsc_network::config::Secret::Input(ref ski))
if node_key_type == NodeKeyType::Ed25519 && &sk[..] == ski.as_ref() =>
Ok(()),
{
Ok(())
},
_ => Err(error::Error::Input("Unexpected node key config".into())),
})
})
@@ -239,9 +241,11 @@ mod tests {
let typ = params.node_key_type;
params.node_key(net_config_dir, role, is_dev).and_then(move |c| match c {
NodeKeyConfig::Ed25519(pezsc_network::config::Secret::File(ref f))
if typ == NodeKeyType::Ed25519 &&
f == &dir.join(NODE_KEY_ED25519_FILE) =>
Ok(()),
if typ == NodeKeyType::Ed25519
&& f == &dir.join(NODE_KEY_ED25519_FILE) =>
{
Ok(())
},
_ => Err(error::Error::Input("Unexpected node key config".into())),
})
},
@@ -224,8 +224,8 @@ impl RpcParams {
for endpoint in &self.experimental_rpc_endpoint {
// Technically, `0.0.0.0` isn't a public IP address, but it's a way to listen on
// all interfaces. Thus, we consider it as a public endpoint and warn about it.
if endpoint.rpc_methods == RpcMethods::Unsafe && endpoint.is_global() ||
endpoint.listen_addr.ip().is_unspecified()
if endpoint.rpc_methods == RpcMethods::Unsafe && endpoint.is_global()
|| endpoint.listen_addr.ip().is_unspecified()
{
eprintln!(
"It isn't safe to expose RPC publicly without a proxy server that filters \
@@ -32,10 +32,12 @@ pub enum TransactionPoolType {
impl Into<pezsc_transaction_pool::TransactionPoolType> for TransactionPoolType {
fn into(self) -> pezsc_transaction_pool::TransactionPoolType {
match self {
TransactionPoolType::SingleState =>
pezsc_transaction_pool::TransactionPoolType::SingleState,
TransactionPoolType::ForkAware =>
pezsc_transaction_pool::TransactionPoolType::ForkAware,
TransactionPoolType::SingleState => {
pezsc_transaction_pool::TransactionPoolType::SingleState
},
TransactionPoolType::ForkAware => {
pezsc_transaction_pool::TransactionPoolType::ForkAware
},
}
}
}
@@ -88,8 +88,9 @@ where
Ok(CheckedHeader::Checked(header, (slot, seal)))
},
Err(SealVerificationError::Deferred(header, slot)) =>
Ok(CheckedHeader::Deferred(header, slot)),
Err(SealVerificationError::Deferred(header, slot)) => {
Ok(CheckedHeader::Deferred(header, slot))
},
Err(SealVerificationError::Unsealed) => Err(Error::HeaderUnsealed(hash)),
Err(SealVerificationError::BadSeal) => Err(Error::HeaderBadSeal(hash)),
Err(SealVerificationError::BadSignature) => Err(Error::BadSignature(hash)),
+3 -2
View File
@@ -520,7 +520,7 @@ where
match compatibility_mode {
CompatibilityMode::None => {},
// Use `initialize_block` until we hit the block that should disable the mode.
CompatibilityMode::UseInitializeBlock { until } =>
CompatibilityMode::UseInitializeBlock { until } => {
if *until > context_block_number {
runtime_api
.initialize_block(
@@ -534,7 +534,8 @@ where
),
)
.map_err(|_| ConsensusError::InvalidAuthoritiesSet)?;
},
}
},
}
runtime_api
@@ -208,7 +208,7 @@ where
match compatibility_mode {
CompatibilityMode::None => {},
// Use `initialize_block` until we hit the block that should disable the mode.
CompatibilityMode::UseInitializeBlock { until } =>
CompatibilityMode::UseInitializeBlock { until } => {
if *until > context_block_number {
runtime_api
.initialize_block(
@@ -222,7 +222,8 @@ where
),
)
.map_err(|_| ConsensusError::InvalidAuthoritiesSet)?;
},
}
},
}
runtime_api
@@ -53,8 +53,8 @@ pub(super) fn calculate_primary_threshold(
let c = c.0 as f64 / c.1 as f64;
let theta = authorities[authority_index].1 as f64 /
authorities.iter().map(|(_, weight)| weight).sum::<u64>() as f64;
let theta = authorities[authority_index].1 as f64
/ authorities.iter().map(|(_, weight)| weight).sum::<u64>() as f64;
assert!(theta > 0.0, "authority with weight 0.");
@@ -204,8 +204,8 @@ pub fn claim_slot_using_keys(
keys: &[(AuthorityId, usize)],
) -> Option<(PreDigest, AuthorityId)> {
claim_primary_slot(slot, epoch, epoch.config.c, keystore, keys).or_else(|| {
if epoch.config.allowed_slots.is_secondary_plain_slots_allowed() ||
epoch.config.allowed_slots.is_secondary_vrf_slots_allowed()
if epoch.config.allowed_slots.is_secondary_plain_slots_allowed()
|| epoch.config.allowed_slots.is_secondary_vrf_slots_allowed()
{
claim_secondary_slot(
slot,
@@ -62,21 +62,25 @@ pub fn load_epoch_changes<Block: BlockT, B: AuxStore>(
let version = load_decode::<_, u32>(backend, BABE_EPOCH_CHANGES_VERSION)?;
let maybe_epoch_changes = match version {
None =>
None => {
load_decode::<_, EpochChangesV0For<Block, EpochV0>>(backend, BABE_EPOCH_CHANGES_KEY)?
.map(|v0| v0.migrate().map(|_, _, epoch| epoch.migrate(config))),
Some(1) =>
.map(|v0| v0.migrate().map(|_, _, epoch| epoch.migrate(config)))
},
Some(1) => {
load_decode::<_, EpochChangesV1For<Block, EpochV0>>(backend, BABE_EPOCH_CHANGES_KEY)?
.map(|v1| v1.migrate().map(|_, _, epoch| epoch.migrate(config))),
.map(|v1| v1.migrate().map(|_, _, epoch| epoch.migrate(config)))
},
Some(2) => {
// v2 still uses `EpochChanges` v1 format but with a different `Epoch` type.
load_decode::<_, EpochChangesV1For<Block, Epoch>>(backend, BABE_EPOCH_CHANGES_KEY)?
.map(|v2| v2.migrate())
},
Some(BABE_EPOCH_CHANGES_CURRENT_VERSION) =>
load_decode::<_, EpochChangesFor<Block, Epoch>>(backend, BABE_EPOCH_CHANGES_KEY)?,
Some(other) =>
return Err(ClientError::Backend(format!("Unsupported BABE DB version: {:?}", other))),
Some(BABE_EPOCH_CHANGES_CURRENT_VERSION) => {
load_decode::<_, EpochChangesFor<Block, Epoch>>(backend, BABE_EPOCH_CHANGES_KEY)?
},
Some(other) => {
return Err(ClientError::Backend(format!("Unsupported BABE DB version: {:?}", other)))
},
};
let epoch_changes =
@@ -199,8 +203,8 @@ mod test {
.tree()
.iter()
.map(|(_, _, epoch)| epoch.clone())
.collect::<Vec<_>>() ==
vec![PersistedEpochHeader::Regular(EpochHeader {
.collect::<Vec<_>>()
== vec![PersistedEpochHeader::Regular(EpochHeader {
start_slot: 0.into(),
end_slot: 100.into(),
})],
+39 -28
View File
@@ -401,10 +401,11 @@ where
}
},
Some(2) => runtime_api.configuration(at_hash)?,
_ =>
_ => {
return Err(pezsp_blockchain::Error::VersionInvalid(
"Unsupported or invalid BabeApi version".to_string(),
)),
))
},
};
Ok(config)
}
@@ -791,13 +792,14 @@ where
let sinks = &mut self.slot_notification_sinks.lock();
sinks.retain_mut(|sink| match sink.try_send((slot, epoch_descriptor.clone())) {
Ok(()) => true,
Err(e) =>
Err(e) => {
if e.is_full() {
warn!(target: LOG_TARGET, "Trying to notify a slot but the channel is full");
true
} else {
false
},
}
},
});
}
@@ -927,8 +929,9 @@ pub fn find_next_epoch_digest<B: BlockT>(
trace!(target: LOG_TARGET, "Checking log {:?}, looking for epoch change digest.", log);
let log = log.try_to::<ConsensusLog>(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID));
match (log, epoch_digest.is_some()) {
(Some(ConsensusLog::NextEpochData(_)), true) =>
return Err(babe_err(Error::MultipleEpochChangeDigests)),
(Some(ConsensusLog::NextEpochData(_)), true) => {
return Err(babe_err(Error::MultipleEpochChangeDigests))
},
(Some(ConsensusLog::NextEpochData(epoch)), false) => epoch_digest = Some(epoch),
_ => trace!(target: LOG_TARGET, "Ignoring digest not meant for us"),
}
@@ -946,8 +949,9 @@ fn find_next_config_digest<B: BlockT>(
trace!(target: LOG_TARGET, "Checking log {:?}, looking for epoch change digest.", log);
let log = log.try_to::<ConsensusLog>(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID));
match (log, config_digest.is_some()) {
(Some(ConsensusLog::NextConfigData(_)), true) =>
return Err(babe_err(Error::MultipleConfigChangeDigests)),
(Some(ConsensusLog::NextConfigData(_)), true) => {
return Err(babe_err(Error::MultipleConfigChangeDigests))
},
(Some(ConsensusLog::NextConfigData(config)), false) => config_digest = Some(config),
_ => trace!(target: LOG_TARGET, "Ignoring digest not meant for us"),
}
@@ -1094,8 +1098,8 @@ fn is_state_sync_or_gap_sync_import<B: BlockT>(
) -> bool {
let number = *block.header.number();
let info = client.info();
info.block_gap.map_or(false, |gap| gap.start <= number && number <= gap.end) ||
block.with_state()
info.block_gap.map_or(false, |gap| gap.start <= number && number <= gap.end)
|| block.with_state()
}
/// A block-import handler for BABE.
@@ -1200,11 +1204,12 @@ where
let import_result = self.inner.import_block(block).await;
let aux = match import_result {
Ok(ImportResult::Imported(aux)) => aux,
Ok(r) =>
Ok(r) => {
return Err(ConsensusError::ClientImport(format!(
"Unexpected import result: {:?}",
r
))),
)))
},
Err(r) => return Err(r.into()),
};
@@ -1271,8 +1276,9 @@ where
.get(babe_pre_digest.authority_index() as usize)
{
Some(author) => author.0.clone(),
None =>
return Err(ConsensusError::Other(Error::<Block>::SlotAuthorNotFound.into())),
None => {
return Err(ConsensusError::Other(Error::<Block>::SlotAuthorNotFound.into()))
},
}
};
if let Err(err) = self
@@ -1321,12 +1327,14 @@ where
.await
.map_err(|e| {
ConsensusError::Other(Box::new(match e {
CheckInherentsError::CreateInherentData(e) =>
Error::<Block>::CreateInherents(e),
CheckInherentsError::CreateInherentData(e) => {
Error::<Block>::CreateInherents(e)
},
CheckInherentsError::Client(e) => Error::RuntimeApi(e),
CheckInherentsError::CheckInherents(e) => Error::CheckInherents(e),
CheckInherentsError::CheckInherentsUnknownError(id) =>
Error::CheckInherentsUnhandled(id),
CheckInherentsError::CheckInherentsUnknownError(id) => {
Error::CheckInherentsUnhandled(id)
},
}))
})?;
let (_, inner_body) = new_block.deconstruct();
@@ -1463,8 +1471,8 @@ where
// Skip babe logic if block already in chain or importing blocks during initial sync,
// otherwise the check for epoch changes will error because trying to re-import an
// epoch change or because of missing epoch data in the tree, respectively.
if info.block_gap.map_or(false, |gap| gap.start <= number && number <= gap.end) ||
block_status == BlockStatus::InChain
if info.block_gap.map_or(false, |gap| gap.start <= number && number <= gap.end)
|| block_status == BlockStatus::InChain
{
// When re-importing existing block strip away intermediates.
// In case of initial sync intermediates should not be present...
@@ -1552,18 +1560,21 @@ where
match (first_in_epoch, next_epoch_digest.is_some(), next_config_digest.is_some()) {
(true, true, _) => {},
(false, false, false) => {},
(false, false, true) =>
(false, false, true) => {
return Err(ConsensusError::ClientImport(
babe_err(Error::<Block>::UnexpectedConfigChange).into(),
)),
(true, false, _) =>
))
},
(true, false, _) => {
return Err(ConsensusError::ClientImport(
babe_err(Error::<Block>::ExpectedEpochChange(hash, slot)).into(),
)),
(false, true, _) =>
))
},
(false, true, _) => {
return Err(ConsensusError::ClientImport(
babe_err(Error::<Block>::UnexpectedEpochChange).into(),
)),
))
},
}
if let Some(next_epoch_descriptor) = next_epoch_digest {
@@ -1953,8 +1964,8 @@ where
let mut hash = leaf;
loop {
let meta = client.header_metadata(hash)?;
if meta.number <= revert_up_to_number ||
!weight_keys.insert(aux_schema::block_weight_key(hash))
if meta.number <= revert_up_to_number
|| !weight_keys.insert(aux_schema::block_weight_key(hash))
{
// We've reached the revert point or an already processed branch, stop here.
break;
@@ -73,10 +73,12 @@ where
{
warn!(target: LOG_TARGET, "🥩 backend contains a BEEFY state of an obsolete version {v}. ignoring...")
},
Some(4) =>
return load_decode::<_, PersistedState<B, AuthorityId>>(backend, WORKER_STATE_KEY),
other =>
return Err(ClientError::Backend(format!("Unsupported BEEFY DB version: {:?}", other))),
Some(4) => {
return load_decode::<_, PersistedState<B, AuthorityId>>(backend, WORKER_STATE_KEY)
},
other => {
return Err(ClientError::Backend(format!("Unsupported BEEFY DB version: {:?}", other)))
},
}
// No persistent state found in DB.
@@ -150,12 +150,13 @@ impl<B: Block, AuthorityId: AuthorityIdBound> Filter<B, AuthorityId> {
f.start = cfg.start;
f.end = cfg.end;
},
_ =>
_ => {
self.inner = Some(FilterInner {
start: cfg.start,
end: cfg.end,
validator_set: cfg.validator_set.clone(),
}),
})
},
}
}
@@ -33,8 +33,9 @@ pub(crate) fn proof_block_num_and_set_id<Block: BlockT, AuthorityId: AuthorityId
proof: &BeefyVersionedFinalityProof<Block, AuthorityId>,
) -> (NumberFor<Block>, ValidatorSetId) {
match proof {
VersionedFinalityProof::V1(sc) =>
(sc.commitment.block_number, sc.commitment.validator_set_id),
VersionedFinalityProof::V1(sc) => {
(sc.commitment.block_number, sc.commitment.validator_set_id)
},
}
}
+3 -2
View File
@@ -787,10 +787,11 @@ where
Some(active) => return Ok(active),
// Move up the chain. Ultimately we'll get it from chain genesis state, or error out
// there.
None =>
None => {
header = wait_for_parent_header(blockchain, header, HEADER_SYNC_DELAY)
.await
.map_err(|e| Error::Backend(e.to_string()))?,
.map_err(|e| Error::Backend(e.to_string()))?
},
}
}
}
@@ -180,8 +180,8 @@ where
// add valid vote
let round = self.rounds.entry(vote.commitment.clone()).or_default();
if round.add_vote((vote.id, vote.signature)) &&
round.is_done(threshold(self.validator_set.len()))
if round.add_vote((vote.id, vote.signature))
&& round.is_done(threshold(self.validator_set.len()))
{
if let Some(round) = self.rounds.remove_entry(&vote.commitment) {
return VoteImportResult::RoundConcluded(self.signed_commitment(round));
@@ -1576,8 +1576,9 @@ async fn gossipped_finality_proofs() {
.ok()
.and_then(|message| match message {
GossipMessage::<Block, ecdsa_crypto::AuthorityId>::Vote(_) => unreachable!(),
GossipMessage::<Block, ecdsa_crypto::AuthorityId>::FinalityProof(proof) =>
Some(proof),
GossipMessage::<Block, ecdsa_crypto::AuthorityId>::FinalityProof(proof) => {
Some(proof)
},
})
})
.fuse(),
@@ -526,7 +526,7 @@ where
{
let block_num = vote.commitment.block_number;
match self.voting_oracle().triage_round(block_num)? {
RoundAction::Process =>
RoundAction::Process => {
if let Some(finality_proof) = self.handle_vote(vote)? {
let gossip_proof =
GossipMessage::<B, AuthorityId>::FinalityProof(finality_proof);
@@ -536,7 +536,8 @@ where
encoded_proof,
true,
);
},
}
},
RoundAction::Drop => metric_inc!(self.metrics, beefy_stale_votes),
RoundAction::Enqueue => error!(target: LOG_TARGET, "🥩 unexpected vote: {:?}.", vote),
};
@@ -157,9 +157,9 @@ impl<Block: BlockT> StateAction<Block> {
/// Check if execution checks that require runtime calls should be skipped.
pub fn skip_execution_checks(&self) -> bool {
match self {
StateAction::ApplyChanges(_) |
StateAction::Execute |
StateAction::ExecuteIfPossible => false,
StateAction::ApplyChanges(_)
| StateAction::Execute
| StateAction::ExecuteIfPossible => false,
StateAction::Skip => true,
}
}
@@ -193,8 +193,8 @@ impl<BlockNumber: fmt::Debug + PartialEq> BlockImportStatus<BlockNumber> {
/// Returns the imported block number.
pub fn number(&self) -> &BlockNumber {
match self {
BlockImportStatus::ImportedKnown(n, _) |
BlockImportStatus::ImportedUnknown(n, _, _) => n,
BlockImportStatus::ImportedKnown(n, _)
| BlockImportStatus::ImportedUnknown(n, _, _) => n,
}
}
}
@@ -242,8 +242,9 @@ pub async fn import_single_block<B: BlockT, V: Verifier<B>>(
) -> BlockImportResult<B> {
match verify_single_block_metered(import_handle, block_origin, block, verifier, None).await? {
SingleBlockVerificationOutcome::Imported(import_status) => Ok(import_status),
SingleBlockVerificationOutcome::Verified(import_parameters) =>
import_single_block_metered(import_handle, import_parameters, None).await,
SingleBlockVerificationOutcome::Verified(import_parameters) => {
import_single_block_metered(import_handle, import_parameters, None).await
},
}
}
@@ -262,8 +263,9 @@ where
trace!(target: LOG_TARGET, "Block already in chain {}: {:?}", number, hash);
Ok(BlockImportStatus::ImportedKnown(number, block_origin))
},
Ok(ImportResult::Imported(aux)) =>
Ok(BlockImportStatus::ImportedUnknown(number, aux, block_origin)),
Ok(ImportResult::Imported(aux)) => {
Ok(BlockImportStatus::ImportedUnknown(number, aux, block_origin))
},
Ok(ImportResult::MissingState) => {
debug!(
target: LOG_TARGET,
@@ -310,8 +310,9 @@ impl<B: BlockT> BlockImportWorker<B> {
// Make sure to first process all justifications
while let Poll::Ready(justification) = futures::poll!(justification_port.next()) {
match justification {
Some(ImportJustification(who, hash, number, justification)) =>
worker.import_justification(who, hash, number, justification).await,
Some(ImportJustification(who, hash, number, justification)) => {
worker.import_justification(who, hash, number, justification).await
},
None => {
log::debug!(
target: LOG_TARGET,
@@ -361,8 +362,9 @@ impl<B: BlockT> BlockImportWorker<B> {
});
match result {
Ok(()) => JustificationImportResult::Success,
Err(pezsp_consensus::Error::OutdatedJustification) =>
JustificationImportResult::OutdatedJustification,
Err(pezsp_consensus::Error::OutdatedJustification) => {
JustificationImportResult::OutdatedJustification
},
Err(_) => JustificationImportResult::Failure,
}
},
@@ -127,12 +127,15 @@ impl<B: BlockT> BufferedLinkReceiver<B> {
/// Send action for the synchronization to perform.
pub fn send_actions(&mut self, msg: BlockImportWorkerMsg<B>, link: &dyn Link<B>) {
match msg {
BlockImportWorkerMsg::BlocksProcessed(imported, count, results) =>
link.blocks_processed(imported, count, results),
BlockImportWorkerMsg::JustificationImported(who, hash, number, import_result) =>
link.justification_imported(who, &hash, number, import_result),
BlockImportWorkerMsg::RequestJustification(hash, number) =>
link.request_justification(&hash, number),
BlockImportWorkerMsg::BlocksProcessed(imported, count, results) => {
link.blocks_processed(imported, count, results)
},
BlockImportWorkerMsg::JustificationImported(who, hash, number, import_result) => {
link.justification_imported(who, &hash, number, import_result)
},
BlockImportWorkerMsg::RequestJustification(hash, number) => {
link.request_justification(&hash, number)
},
}
}
+56 -36
View File
@@ -199,8 +199,9 @@ where
pub fn increment(&self, next_descriptor: E::NextEpochDescriptor) -> IncrementedEpoch<E> {
let next = self.as_ref().increment(next_descriptor);
let to_persist = match *self {
ViableEpoch::UnimportedGenesis(ref epoch_0) =>
PersistedEpoch::Genesis(epoch_0.clone(), next),
ViableEpoch::UnimportedGenesis(ref epoch_0) => {
PersistedEpoch::Genesis(epoch_0.clone(), next)
},
ViableEpoch::Signaled(_) => PersistedEpoch::Regular(next),
};
@@ -246,8 +247,9 @@ impl<E> PersistedEpoch<E> {
impl<'a, E: Epoch> From<&'a PersistedEpoch<E>> for PersistedEpochHeader<E> {
fn from(epoch: &'a PersistedEpoch<E>) -> Self {
match epoch {
PersistedEpoch::Genesis(ref epoch_0, ref epoch_1) =>
PersistedEpochHeader::Genesis(epoch_0.into(), epoch_1.into()),
PersistedEpoch::Genesis(ref epoch_0, ref epoch_1) => {
PersistedEpochHeader::Genesis(epoch_0.into(), epoch_1.into())
},
PersistedEpoch::Regular(ref epoch_n) => PersistedEpochHeader::Regular(epoch_n.into()),
}
}
@@ -261,8 +263,9 @@ impl<E: Epoch> PersistedEpoch<E> {
F: FnMut(&Hash, &Number, E) -> B,
{
match self {
PersistedEpoch::Genesis(epoch_0, epoch_1) =>
PersistedEpoch::Genesis(f(h, n, epoch_0), f(h, n, epoch_1)),
PersistedEpoch::Genesis(epoch_0, epoch_1) => {
PersistedEpoch::Genesis(f(h, n, epoch_0), f(h, n, epoch_1))
},
PersistedEpoch::Regular(epoch_n) => PersistedEpoch::Regular(f(h, n, epoch_n)),
}
}
@@ -426,13 +429,19 @@ where
self.epochs.get(&(id.hash, id.number)).and_then(|v| match v {
PersistedEpoch::Genesis(ref epoch_0, _)
if id.position == EpochIdentifierPosition::Genesis0 =>
Some(epoch_0),
{
Some(epoch_0)
},
PersistedEpoch::Genesis(_, ref epoch_1)
if id.position == EpochIdentifierPosition::Genesis1 =>
Some(epoch_1),
{
Some(epoch_1)
},
PersistedEpoch::Regular(ref epoch_n)
if id.position == EpochIdentifierPosition::Regular =>
Some(epoch_n),
{
Some(epoch_n)
},
_ => None,
})
}
@@ -447,10 +456,12 @@ where
G: FnOnce(E::Slot) -> E,
{
match descriptor {
ViableEpochDescriptor::UnimportedGenesis(slot) =>
Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot))),
ViableEpochDescriptor::Signaled(identifier, _) =>
self.epoch(identifier).map(ViableEpoch::Signaled),
ViableEpochDescriptor::UnimportedGenesis(slot) => {
Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot)))
},
ViableEpochDescriptor::Signaled(identifier, _) => {
self.epoch(identifier).map(ViableEpoch::Signaled)
},
}
}
@@ -459,13 +470,19 @@ where
self.epochs.get_mut(&(id.hash, id.number)).and_then(|v| match v {
PersistedEpoch::Genesis(ref mut epoch_0, _)
if id.position == EpochIdentifierPosition::Genesis0 =>
Some(epoch_0),
{
Some(epoch_0)
},
PersistedEpoch::Genesis(_, ref mut epoch_1)
if id.position == EpochIdentifierPosition::Genesis1 =>
Some(epoch_1),
{
Some(epoch_1)
},
PersistedEpoch::Regular(ref mut epoch_n)
if id.position == EpochIdentifierPosition::Regular =>
Some(epoch_n),
{
Some(epoch_n)
},
_ => None,
})
}
@@ -480,10 +497,12 @@ where
G: FnOnce(E::Slot) -> E,
{
match descriptor {
ViableEpochDescriptor::UnimportedGenesis(slot) =>
Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot))),
ViableEpochDescriptor::Signaled(identifier, _) =>
self.epoch_mut(identifier).map(ViableEpoch::Signaled),
ViableEpochDescriptor::UnimportedGenesis(slot) => {
Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot)))
},
ViableEpochDescriptor::Signaled(identifier, _) => {
self.epoch_mut(identifier).map(ViableEpoch::Signaled)
},
}
}
@@ -589,8 +608,9 @@ where
(EpochIdentifierPosition::Genesis0, epoch_0.clone())
}
},
PersistedEpochHeader::Regular(ref epoch_n) =>
(EpochIdentifierPosition::Regular, epoch_n.clone()),
PersistedEpochHeader::Regular(ref epoch_n) => {
(EpochIdentifierPosition::Regular, epoch_n.clone())
},
},
node,
)
@@ -665,8 +685,8 @@ where
let is_descendent_of = descendent_of_builder.build_is_descendent_of(None);
let filter = |node_hash: &Hash, node_num: &Number, _: &PersistedEpochHeader<E>| {
if number >= *node_num &&
(is_descendent_of(node_hash, &hash).unwrap_or_default() || *node_hash == hash)
if number >= *node_num
&& (is_descendent_of(node_hash, &hash).unwrap_or_default() || *node_hash == hash)
{
// Continue the search in this subtree.
FilterAction::KeepNode
@@ -1063,18 +1083,18 @@ mod tests {
let is_descendent_of = |base: &Hash, block: &Hash| -> Result<bool, TestError> {
match (block, base) {
| (b"A", b"0") |
(b"B", b"0" | b"A") |
(b"C", b"0" | b"A" | b"B") |
(b"D", b"0" | b"A" | b"B" | b"C") |
(b"E", b"0" | b"A" | b"B" | b"C" | b"D") |
(b"F", b"0" | b"A" | b"B" | b"C" | b"D" | b"E") |
(b"G", b"0" | b"A" | b"B" | b"C" | b"D" | b"E") |
(b"H", b"0" | b"A" | b"B" | b"C" | b"D" | b"E" | b"G") |
(b"I", b"0" | b"A" | b"B" | b"C" | b"D" | b"E" | b"G" | b"H") |
(b"J", b"0" | b"A" | b"B" | b"C" | b"D" | b"E" | b"G" | b"H" | b"I") |
(b"K", b"0" | b"A" | b"B" | b"C" | b"D" | b"E" | b"G" | b"H" | b"I" | b"J") |
(
| (b"A", b"0")
| (b"B", b"0" | b"A")
| (b"C", b"0" | b"A" | b"B")
| (b"D", b"0" | b"A" | b"B" | b"C")
| (b"E", b"0" | b"A" | b"B" | b"C" | b"D")
| (b"F", b"0" | b"A" | b"B" | b"C" | b"D" | b"E")
| (b"G", b"0" | b"A" | b"B" | b"C" | b"D" | b"E")
| (b"H", b"0" | b"A" | b"B" | b"C" | b"D" | b"E" | b"G")
| (b"I", b"0" | b"A" | b"B" | b"C" | b"D" | b"E" | b"G" | b"H")
| (b"J", b"0" | b"A" | b"B" | b"C" | b"D" | b"E" | b"G" | b"H" | b"I")
| (b"K", b"0" | b"A" | b"B" | b"C" | b"D" | b"E" | b"G" | b"H" | b"I" | b"J")
| (
b"L",
b"0" | b"A" | b"B" | b"C" | b"D" | b"E" | b"G" | b"H" | b"I" | b"J" | b"K",
) => Ok(true),
@@ -230,8 +230,8 @@ where
F: Fn(&H, &H) -> Result<bool, E>,
{
let filter = |node_hash: &H, node_num: &N, _: &PendingChange<H, N>| {
if number >= *node_num &&
(is_descendent_of(node_hash, &hash).unwrap_or_default() || *node_hash == hash)
if number >= *node_num
&& (is_descendent_of(node_hash, &hash).unwrap_or_default() || *node_hash == hash)
{
// Continue the search in this subtree.
FilterAction::KeepNode
@@ -291,8 +291,9 @@ where
}
let earliest = match (forced, standard) {
(Some(forced), Some(standard)) =>
Some(if forced.1 < standard.1 { forced } else { standard }),
(Some(forced), Some(standard)) => {
Some(if forced.1 < standard.1 { forced } else { standard })
},
(Some(forced), None) => Some(forced),
(None, Some(standard)) => Some(standard),
(None, None) => None,
@@ -476,8 +477,8 @@ where
// check if there's any pending standard change that we depend on
for (_, _, standard_change) in self.pending_standard_changes.roots() {
if standard_change.effective_number() <= median_last_finalized &&
is_descendent_of(&standard_change.canon_hash, &change.canon_hash)?
if standard_change.effective_number() <= median_last_finalized
&& is_descendent_of(&standard_change.canon_hash, &change.canon_hash)?
{
log::info!(target: LOG_TARGET,
"Not applying authority set change forced at block #{:?}, due to pending standard change at block #{:?}",
@@ -566,8 +567,8 @@ where
// we will keep all forced changes for any later blocks and that are a
// descendent of the finalized block (i.e. they are part of this branch).
for change in pending_forced_changes {
if change.effective_number() > finalized_number &&
is_descendent_of(&finalized_hash, &change.canon_hash)?
if change.effective_number() > finalized_number
&& is_descendent_of(&finalized_hash, &change.canon_hash)?
{
self.pending_forced_changes.push(change)
}
@@ -382,8 +382,12 @@ where
});
}
},
Some(other) =>
return Err(ClientError::Backend(format!("Unsupported GRANDPA DB version: {:?}", other))),
Some(other) => {
return Err(ClientError::Backend(format!(
"Unsupported GRANDPA DB version: {:?}",
other
)))
},
}
// genesis.
@@ -199,12 +199,13 @@ impl<N: Ord> View<N> {
// the one we're aware of.
match self.last_commit {
None => Consider::Accept,
Some(ref num) =>
Some(ref num) => {
if num < &number {
Consider::Accept
} else {
Consider::RejectPast
},
}
},
}
}
}
@@ -549,17 +550,17 @@ impl<N: Ord> Peers<N> {
) -> Result<Option<&View<N>>, Misbehavior> {
let Some(peer) = self.inner.get_mut(who) else { return Ok(None) };
let invalid_change = peer.view.set_id > update.set_id ||
peer.view.round > update.round && peer.view.set_id == update.set_id ||
peer.view.last_commit.as_ref() > Some(&update.commit_finalized_height);
let invalid_change = peer.view.set_id > update.set_id
|| peer.view.round > update.round && peer.view.set_id == update.set_id
|| peer.view.last_commit.as_ref() > Some(&update.commit_finalized_height);
if invalid_change {
return Err(Misbehavior::InvalidViewChange);
}
let now = Instant::now();
let duplicate_packet = (update.set_id, update.round, Some(&update.commit_finalized_height)) ==
(peer.view.set_id, peer.view.round, peer.view.last_commit.as_ref());
let duplicate_packet = (update.set_id, update.round, Some(&update.commit_finalized_height))
== (peer.view.set_id, peer.view.round, peer.view.last_commit.as_ref());
if duplicate_packet {
if let Some(last_update) = peer.view.last_update {
@@ -820,8 +821,8 @@ impl<Block: BlockT> Inner<Block> {
ref mut x @ None => x.get_or_insert(LocalView::new(set_id, Round(1))),
Some(ref mut v) => {
if v.set_id == set_id {
let diff_authorities = self.authorities.iter().collect::<HashSet<_>>() !=
authorities.iter().collect::<HashSet<_>>();
let diff_authorities = self.authorities.iter().collect::<HashSet<_>>()
!= authorities.iter().collect::<HashSet<_>>();
if diff_authorities {
debug!(
@@ -902,10 +903,12 @@ impl<Block: BlockT> Inner<Block> {
) -> Action<Block::Hash> {
match self.consider_vote(full.round, full.set_id) {
Consider::RejectFuture => return Action::Discard(Misbehavior::FutureMessage.cost()),
Consider::RejectOutOfScope =>
return Action::Discard(Misbehavior::OutOfScopeMessage.cost()),
Consider::RejectPast =>
return Action::Discard(self.cost_past_rejection(who, full.round, full.set_id)),
Consider::RejectOutOfScope => {
return Action::Discard(Misbehavior::OutOfScopeMessage.cost())
},
Consider::RejectPast => {
return Action::Discard(self.cost_past_rejection(who, full.round, full.set_id))
},
Consider::Accept => {},
}
@@ -955,15 +958,17 @@ impl<Block: BlockT> Inner<Block> {
match self.consider_global(full.set_id, full.message.target_number) {
Consider::RejectFuture => return Action::Discard(Misbehavior::FutureMessage.cost()),
Consider::RejectPast =>
return Action::Discard(self.cost_past_rejection(who, full.round, full.set_id)),
Consider::RejectOutOfScope =>
return Action::Discard(Misbehavior::OutOfScopeMessage.cost()),
Consider::RejectPast => {
return Action::Discard(self.cost_past_rejection(who, full.round, full.set_id))
},
Consider::RejectOutOfScope => {
return Action::Discard(Misbehavior::OutOfScopeMessage.cost())
},
Consider::Accept => {},
}
if full.message.precommits.len() != full.message.auth_data.len() ||
full.message.precommits.is_empty()
if full.message.precommits.len() != full.message.auth_data.len()
|| full.message.precommits.is_empty()
{
debug!(target: LOG_TARGET, "Malformed compact commit");
telemetry!(
@@ -1046,8 +1051,8 @@ impl<Block: BlockT> Inner<Block> {
// race where the peer sent us the request before it observed that
// we had transitioned to a new set. In this case we charge a lower
// cost.
if request.set_id.0.saturating_add(1) == local_view.set_id.0 &&
local_view.round.0.saturating_sub(CATCH_UP_THRESHOLD) == 0
if request.set_id.0.saturating_add(1) == local_view.set_id.0
&& local_view.round.0.saturating_sub(CATCH_UP_THRESHOLD) == 0
{
return (None, Action::Discard(cost::HONEST_OUT_OF_SCOPE_CATCH_UP));
}
@@ -1057,8 +1062,9 @@ impl<Block: BlockT> Inner<Block> {
match self.peers.peer(who) {
None => return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())),
Some(peer) if peer.view.round >= request.round =>
return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())),
Some(peer) if peer.view.round >= request.round => {
return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost()))
},
_ => {},
}
@@ -1131,9 +1137,9 @@ impl<Block: BlockT> Inner<Block> {
// won't be able to reply since they don't follow the full GRANDPA
// protocol and therefore might not have the vote data available.
if let (Some(peer), Some(local_view)) = (self.peers.peer(who), &self.local_view) {
if self.catch_up_config.request_allowed(peer) &&
peer.view.set_id == local_view.set_id &&
peer.view.round.0.saturating_sub(CATCH_UP_THRESHOLD) > local_view.round.0
if self.catch_up_config.request_allowed(peer)
&& peer.view.set_id == local_view.set_id
&& peer.view.round.0.saturating_sub(CATCH_UP_THRESHOLD) > local_view.round.0
{
// send catch up request if allowed
let round = peer.view.round.0 - 1; // peer.view.round is > 0
@@ -1166,8 +1172,9 @@ impl<Block: BlockT> Inner<Block> {
let update_res = self.peers.update_peer_state(who, update);
let (cost_benefit, topics) = match update_res {
Ok(view) =>
(benefit::NEIGHBOR_MESSAGE, view.map(|view| neighbor_topics::<Block>(view))),
Ok(view) => {
(benefit::NEIGHBOR_MESSAGE, view.map(|view| neighbor_topics::<Block>(view)))
},
Err(misbehavior) => (misbehavior.cost(), None),
};
@@ -1249,8 +1256,8 @@ impl<Block: BlockT> Inner<Block> {
if round_elapsed < round_duration.mul_f32(PROPAGATION_SOME) {
self.peers.first_stage_peers.contains(who)
} else if round_elapsed < round_duration.mul_f32(PROPAGATION_ALL) {
self.peers.first_stage_peers.contains(who) ||
self.peers.second_stage_peers.contains(who)
self.peers.first_stage_peers.contains(who)
|| self.peers.second_stage_peers.contains(who)
} else {
self.peers.peer(who).map(|info| !info.roles.is_light()).unwrap_or(false)
}
@@ -1278,9 +1285,9 @@ impl<Block: BlockT> Inner<Block> {
};
if round_elapsed < round_duration.mul_f32(PROPAGATION_ALL) {
self.peers.first_stage_peers.contains(who) ||
self.peers.second_stage_peers.contains(who) ||
self.peers.lucky_light_peers.contains(who)
self.peers.first_stage_peers.contains(who)
|| self.peers.second_stage_peers.contains(who)
|| self.peers.lucky_light_peers.contains(who)
} else {
true
}
@@ -1611,9 +1618,9 @@ impl<Block: BlockT> pezsc_network_gossip::Validator<Block> for GossipValidator<B
// set the peer is in and if the commit is better than the
// last received by peer, additionally we make sure to only
// broadcast our best commit.
peer.view.consider_global(set_id, full.message.target_number) ==
Consider::Accept && Some(&full.message.target_number) ==
local_view.last_commit_height()
peer.view.consider_global(set_id, full.message.target_number)
== Consider::Accept
&& Some(&full.message.target_number) == local_view.last_commit_height()
},
Ok(GossipMessage::Neighbor(_)) => false,
Ok(GossipMessage::CatchUpRequest(_)) => false,
@@ -1646,8 +1653,11 @@ impl<Block: BlockT> pezsc_network_gossip::Validator<Block> for GossipValidator<B
Some((number, round, set_id)) =>
// we expire any commit message that doesn't target the same block
// as our best commit or isn't from the same round and set id
!(full.message.target_number == number &&
full.round == round && full.set_id == set_id),
{
!(full.message.target_number == number
&& full.round == round
&& full.set_id == set_id)
},
None => true,
},
Ok(_) => true,
@@ -2323,8 +2333,8 @@ mod tests {
let test = |rounds_elapsed, peers| {
// rewind n round durations
val.inner.write().local_view.as_mut().unwrap().round_start = Instant::now() -
Duration::from_millis(
val.inner.write().local_view.as_mut().unwrap().round_start = Instant::now()
- Duration::from_millis(
(round_duration.as_millis() as f32 * rounds_elapsed) as u64,
);
@@ -505,10 +505,11 @@ impl<B: BlockT, N: Network<B>, S: Syncing<B>> Future for NetworkBridge<B, N, S>
Poll::Ready(Some((to, packet))) => {
self.gossip_engine.lock().send_message(to, packet.encode());
},
Poll::Ready(None) =>
Poll::Ready(None) => {
return Poll::Ready(Err(Error::Network(
"Neighbor packet worker stream closed.".into(),
))),
)))
},
Poll::Pending => break,
}
}
@@ -518,17 +519,19 @@ impl<B: BlockT, N: Network<B>, S: Syncing<B>> Future for NetworkBridge<B, N, S>
Poll::Ready(Some(PeerReport { who, cost_benefit })) => {
self.gossip_engine.lock().report(who, cost_benefit);
},
Poll::Ready(None) =>
Poll::Ready(None) => {
return Poll::Ready(Err(Error::Network(
"Gossip validator report stream closed.".into(),
))),
)))
},
Poll::Pending => break,
}
}
match self.gossip_engine.lock().poll_unpin(cx) {
Poll::Ready(()) =>
return Poll::Ready(Err(Error::Network("Gossip engine future finished.".into()))),
Poll::Ready(()) => {
return Poll::Ready(Err(Error::Network("Gossip engine future finished.".into())))
},
Poll::Pending => {},
}
@@ -666,10 +669,12 @@ fn incoming_global<B: BlockT>(
})
.filter_map(move |(notification, msg)| {
future::ready(match msg {
GossipMessage::Commit(msg) =>
process_commit(msg, notification, &gossip_engine, &gossip_validator, &voters),
GossipMessage::CatchUp(msg) =>
process_catch_up(msg, notification, &gossip_engine, &gossip_validator, &voters),
GossipMessage::Commit(msg) => {
process_commit(msg, notification, &gossip_engine, &gossip_validator, &voters)
},
GossipMessage::CatchUp(msg) => {
process_catch_up(msg, notification, &gossip_engine, &gossip_validator, &voters)
},
_ => {
debug!(target: LOG_TARGET, "Skipping unknown message type");
None
@@ -270,10 +270,11 @@ impl Tester {
futures::future::poll_fn(move |cx| loop {
match Stream::poll_next(Pin::new(&mut s.as_mut().unwrap().events), cx) {
Poll::Ready(None) => panic!("concluded early"),
Poll::Ready(Some(item)) =>
Poll::Ready(Some(item)) => {
if pred(item) {
return Poll::Ready(s.take().unwrap());
},
}
},
Poll::Pending => return Poll::Pending,
}
})
@@ -622,8 +623,9 @@ fn bad_commit_leads_to_report() {
let fut = future::join(send_message, handle_commit)
.then(move |(tester, ())| {
tester.filter_network_events(move |event| match event {
Event::Report(who, cost_benefit) =>
who == id && cost_benefit == super::cost::INVALID_COMMIT,
Event::Report(who, cost_benefit) => {
who == id && cost_benefit == super::cost::INVALID_COMMIT
},
_ => false,
})
})
@@ -277,8 +277,8 @@ impl<Header: HeaderT> HasVoted<Header> {
pub fn propose(&self) -> Option<&PrimaryPropose<Header>> {
match self {
HasVoted::Yes(_, Vote::Propose(propose)) => Some(propose),
HasVoted::Yes(_, Vote::Prevote(propose, _)) |
HasVoted::Yes(_, Vote::Precommit(propose, _, _)) => propose.as_ref(),
HasVoted::Yes(_, Vote::Prevote(propose, _))
| HasVoted::Yes(_, Vote::Precommit(propose, _, _)) => propose.as_ref(),
_ => None,
}
}
@@ -286,8 +286,8 @@ impl<Header: HeaderT> HasVoted<Header> {
/// Returns the prevote we should vote with (if any.)
pub fn prevote(&self) -> Option<&Prevote<Header>> {
match self {
HasVoted::Yes(_, Vote::Prevote(_, prevote)) |
HasVoted::Yes(_, Vote::Precommit(_, prevote, _)) => Some(prevote),
HasVoted::Yes(_, Vote::Prevote(_, prevote))
| HasVoted::Yes(_, Vote::Precommit(_, prevote, _)) => Some(prevote),
_ => None,
}
}
@@ -528,8 +528,9 @@ where
// find the hash of the latest block in the current set
let current_set_latest_hash = match next_change {
Some((_, n)) if n.is_zero() =>
return Err(Error::Safety("Authority set change signalled at genesis.".to_string())),
Some((_, n)) if n.is_zero() => {
return Err(Error::Safety("Authority set change signalled at genesis.".to_string()))
},
// the next set starts at `n` so the current one lasts until `n - 1`. if
// `n` is later than the best block, then the current set is still live
// at best block.
@@ -733,12 +734,13 @@ where
let local_id = local_authority_id(&self.voters, self.config.keystore.as_ref());
let has_voted = match self.voter_set_state.has_voted(round) {
HasVoted::Yes(id, vote) =>
HasVoted::Yes(id, vote) => {
if local_id.as_ref().map(|k| k == &id).unwrap_or(false) {
HasVoted::Yes(id, vote)
} else {
HasVoted::No
},
}
},
HasVoted::No => HasVoted::No,
};
@@ -1253,10 +1255,10 @@ where
let is_descendent_of = is_descendent_of(&*client, None);
if target_header.number() > best_header.number() ||
target_header.number() == best_header.number() &&
target_header.hash() != best_header.hash() ||
!is_descendent_of(&target_header.hash(), &best_header.hash())?
if target_header.number() > best_header.number()
|| target_header.number() == best_header.number()
&& target_header.hash() != best_header.hash()
|| !is_descendent_of(&target_header.hash(), &best_header.hash())?
{
debug!(
target: LOG_TARGET,
@@ -111,9 +111,9 @@ where
self.authority_set.inner().pending_changes().cloned().collect();
for pending_change in pending_changes {
if pending_change.delay_kind == DelayKind::Finalized &&
pending_change.effective_number() > chain_info.finalized_number &&
pending_change.effective_number() <= chain_info.best_number
if pending_change.delay_kind == DelayKind::Finalized
&& pending_change.effective_number() > chain_info.finalized_number
&& pending_change.effective_number() <= chain_info.best_number
{
let effective_block_hash = if !pending_change.delay.is_zero() {
self.select_chain
@@ -685,7 +685,7 @@ where
);
}
},
None =>
None => {
if needs_justification {
debug!(
target: LOG_TARGET,
@@ -694,7 +694,8 @@ where
);
imported_aux.needs_justification = true;
},
}
},
}
Ok(ImportResult::Imported(imported_aux))
@@ -799,8 +800,9 @@ where
let justification = match justification {
Err(e) => {
return match e {
pezsp_blockchain::Error::OutdatedJustification =>
Err(ConsensusError::OutdatedJustification),
pezsp_blockchain::Error::OutdatedJustification => {
Err(ConsensusError::OutdatedJustification)
},
_ => Err(ConsensusError::ClientImport(e.to_string())),
};
},
@@ -832,7 +834,7 @@ where
// send the command to the voter
let _ = self.send_voter_commands.unbounded_send(command);
},
Err(CommandOrError::Error(e)) =>
Err(CommandOrError::Error(e)) => {
return Err(match e {
Error::Grandpa(error) => ConsensusError::ClientImport(error.to_string()),
Error::Network(error) => ConsensusError::ClientImport(error),
@@ -842,7 +844,8 @@ where
Error::Signing(error) => ConsensusError::ClientImport(error),
Error::Timer(error) => ConsensusError::ClientImport(error.to_string()),
Error::RuntimeApi(error) => ConsensusError::ClientImport(error.to_string()),
}),
})
},
Ok(_) => {
assert!(
!enacts_change,
@@ -214,12 +214,14 @@ impl<Block: BlockT> GrandpaJustification<Block> {
&mut buf,
);
match signature_result {
pezsp_consensus_grandpa::SignatureResult::Invalid =>
pezsp_consensus_grandpa::SignatureResult::Invalid => {
return Err(ClientError::BadJustification(
"invalid signature for precommit in grandpa justification".to_string(),
)),
pezsp_consensus_grandpa::SignatureResult::OutdatedSet =>
return Err(ClientError::OutdatedJustification),
))
},
pezsp_consensus_grandpa::SignatureResult::OutdatedSet => {
return Err(ClientError::OutdatedJustification)
},
pezsp_consensus_grandpa::SignatureResult::Valid => {},
}
@@ -236,10 +238,11 @@ impl<Block: BlockT> GrandpaJustification<Block> {
visited_hashes.insert(hash);
}
},
_ =>
_ => {
return Err(ClientError::BadJustification(
"invalid precommit ancestry proof in grandpa justification".to_string(),
)),
))
},
}
}
@@ -1231,8 +1231,8 @@ async fn voter_persists_its_votes() {
Pin::new(&mut *round_tx.lock())
.start_send(finality_grandpa::Message::Prevote(prevote))
.unwrap();
} else if state.compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst).unwrap() ==
1
} else if state.compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst).unwrap()
== 1
{
// the next message we receive should be our own prevote
let prevote = match signed.message {
@@ -1246,8 +1246,8 @@ async fn voter_persists_its_votes() {
// after alice restarts it should send its previous prevote
// therefore we won't ever receive it again since it will be a
// known message on the gossip layer
} else if state.compare_exchange(2, 3, Ordering::SeqCst, Ordering::SeqCst).unwrap() ==
2
} else if state.compare_exchange(2, 3, Ordering::SeqCst, Ordering::SeqCst).unwrap()
== 2
{
// we then receive a precommit from alice for block 15
// even though we casted a prevote for block 30
@@ -940,8 +940,8 @@ mod tests {
let block_sync_requests = block_sync_requester.requests.lock();
// we request blocks targeted by the precommits that aren't imported
if block_sync_requests.contains(&(h2.hash(), *h2.number())) &&
block_sync_requests.contains(&(h3.hash(), *h3.number()))
if block_sync_requests.contains(&(h2.hash(), *h2.number()))
&& block_sync_requests.contains(&(h3.hash(), *h3.number()))
{
return Poll::Ready(());
}
@@ -236,8 +236,8 @@ where
.await
.filter(|(_, restricted_number)| {
// NOTE: we can only restrict votes within the interval [base, target)
restricted_number >= base.number() &&
restricted_number < restricted_target.number()
restricted_number >= base.number()
&& restricted_number < restricted_target.number()
})
.and_then(|(hash, _)| backend.header(hash).ok())
.and_then(std::convert::identity)
@@ -297,14 +297,15 @@ where
// manually hard code epoch descriptor
epoch_descriptor = match epoch_descriptor {
ViableEpochDescriptor::Signaled(identifier, _header) =>
ViableEpochDescriptor::Signaled(identifier, _header) => {
ViableEpochDescriptor::Signaled(
identifier,
EpochHeader {
start_slot: slot,
end_slot: (*slot * self.config.epoch_length).into(),
},
),
)
},
_ => unreachable!(
"we're not in the authorities, so this isn't the genesis epoch; qed"
),
@@ -96,8 +96,9 @@ pub async fn seal_block<B, BI, SC, C, E, TP, CIDP, P>(
// use the parent_hash supplied via `EngineCommand`
// or fetch the best_block.
let parent = match parent_hash {
Some(hash) =>
client.header(hash)?.ok_or_else(|| Error::BlockNotFound(format!("{}", hash)))?,
Some(hash) => {
client.header(hash)?.ok_or_else(|| Error::BlockNotFound(format!("{}", hash)))?
},
None => select_chain.best_chain().await?,
};
+12 -8
View File
@@ -285,8 +285,9 @@ where
CheckInherentsError::CreateInherentData(e) => Error::CreateInherents(e),
CheckInherentsError::Client(e) => Error::Client(e.into()),
CheckInherentsError::CheckInherents(e) => Error::CheckInherents(e),
CheckInherentsError::CheckInherentsUnknownError(id) =>
Error::CheckInherentsUnknownError(id),
CheckInherentsError::CheckInherentsUnknownError(id) => {
Error::CheckInherentsUnknownError(id)
},
})?;
Ok(())
@@ -409,12 +410,13 @@ impl<B: BlockT, Algorithm> PowVerifier<B, Algorithm> {
let hash = header.hash();
let (seal, inner_seal) = match header.digest_mut().pop() {
Some(DigestItem::Seal(id, seal)) =>
Some(DigestItem::Seal(id, seal)) => {
if id == POW_ENGINE_ID {
(DigestItem::Seal(id, seal.clone()), seal)
} else {
return Err(Error::WrongEngine(id));
},
}
},
_ => return Err(Error::HeaderUnsealed(hash)),
};
@@ -646,8 +648,9 @@ fn find_pre_digest<B: BlockT>(header: &B::Header) -> Result<Option<Vec<u8>>, Err
for log in header.digest().logs() {
trace!(target: LOG_TARGET, "Checking log {:?}, looking for pre runtime digest", log);
match (log, pre_digest.is_some()) {
(DigestItem::PreRuntime(POW_ENGINE_ID, _), true) =>
return Err(Error::MultiplePreRuntimeDigests),
(DigestItem::PreRuntime(POW_ENGINE_ID, _), true) => {
return Err(Error::MultiplePreRuntimeDigests)
},
(DigestItem::PreRuntime(POW_ENGINE_ID, v), false) => {
pre_digest = Some(v.clone());
},
@@ -661,12 +664,13 @@ fn find_pre_digest<B: BlockT>(header: &B::Header) -> Result<Option<Vec<u8>>, Err
/// Fetch PoW seal.
fn fetch_seal<B: BlockT>(digest: Option<&DigestItem>, hash: B::Hash) -> Result<Vec<u8>, Error<B>> {
match digest {
Some(DigestItem::Seal(id, seal)) =>
Some(DigestItem::Seal(id, seal)) => {
if id == &POW_ENGINE_ID {
Ok(seal.clone())
} else {
Err(Error::<B>::WrongEngine(*id))
},
}
},
_ => Err(Error::<B>::HeaderUnsealed(hash)),
}
}
+3 -3
View File
@@ -335,9 +335,9 @@ pub trait SimpleSlotWorker<B: BlockT> {
let authorities_len = self.authorities_len(&aux_data);
if !self.force_authoring() &&
self.sync_oracle().is_offline() &&
authorities_len.map(|a| a > 1).unwrap_or(false)
if !self.force_authoring()
&& self.sync_oracle().is_offline()
&& authorities_len.map(|a| a > 1).unwrap_or(false)
{
debug!(target: logging_target, "Skipping proposal slot. Waiting for the network.");
telemetry!(
+50 -40
View File
@@ -598,10 +598,11 @@ impl<Block: BlockT> BlockchainDb<Block> {
)? {
Some(justifications) => match Decode::decode(&mut &justifications[..]) {
Ok(justifications) => Ok(Some(justifications)),
Err(err) =>
Err(err) => {
return Err(pezsp_blockchain::Error::Backend(format!(
"Error decoding justifications: {err}"
))),
)))
},
},
None => Ok(None),
}
@@ -614,10 +615,11 @@ impl<Block: BlockT> BlockchainDb<Block> {
// Plain body
match Decode::decode(&mut &body[..]) {
Ok(body) => return Ok(Some(body)),
Err(err) =>
Err(err) => {
return Err(pezsp_blockchain::Error::Backend(format!(
"Error decoding body: {err}"
))),
)))
},
}
}
@@ -646,10 +648,11 @@ impl<Block: BlockT> BlockchainDb<Block> {
)?;
body.push(ex);
},
None =>
None => {
return Err(pezsp_blockchain::Error::Backend(format!(
"Missing indexed transaction {hash:?}"
))),
)))
},
};
},
DbExtrinsic::Full(ex) => {
@@ -659,10 +662,11 @@ impl<Block: BlockT> BlockchainDb<Block> {
}
return Ok(Some(body));
},
Err(err) =>
Err(err) => {
return Err(pezsp_blockchain::Error::Backend(format!(
"Error decoding body list: {err}",
))),
)))
},
}
}
Ok(None)
@@ -777,17 +781,19 @@ impl<Block: BlockT> pezsc_client_api::blockchain::Backend<Block> for BlockchainD
if let DbExtrinsic::Indexed { hash, .. } = ex {
match self.db.get(columns::TRANSACTION, hash.as_ref()) {
Some(t) => transactions.push(t),
None =>
None => {
return Err(pezsp_blockchain::Error::Backend(format!(
"Missing indexed transaction {hash:?}",
))),
)))
},
}
}
}
Ok(Some(transactions))
},
Err(err) =>
Err(pezsp_blockchain::Error::Backend(format!("Error decoding body list: {err}"))),
Err(err) => {
Err(pezsp_blockchain::Error::Backend(format!("Error decoding body list: {err}")))
},
}
}
}
@@ -851,8 +857,9 @@ impl<Block: BlockT> BlockImportOperation<Block> {
count += 1;
let key = crate::offchain::concatenate_prefix_and_key(&prefix, &key);
match value_operation {
OffchainOverlayedChange::SetValue(val) =>
transaction.set_from_vec(columns::OFFCHAIN, &key, val),
OffchainOverlayedChange::SetValue(val) => {
transaction.set_from_vec(columns::OFFCHAIN, &key, val)
},
OffchainOverlayedChange::Remove => transaction.remove(columns::OFFCHAIN, &key),
}
}
@@ -1275,9 +1282,9 @@ impl<Block: BlockT> Backend<Block> {
// Older DB versions have no last state key. Check if the state is available and set it.
let info = backend.blockchain.info();
if info.finalized_state.is_none() &&
info.finalized_hash != Default::default() &&
pezsc_client_api::Backend::have_state_at(
if info.finalized_state.is_none()
&& info.finalized_hash != Default::default()
&& pezsc_client_api::Backend::have_state_at(
&backend,
info.finalized_hash,
info.finalized_number,
@@ -1316,8 +1323,8 @@ impl<Block: BlockT> Backend<Block> {
let meta = self.blockchain.meta.read();
if meta.best_number.saturating_sub(best_number).saturated_into::<u64>() >
self.canonicalization_delay
if meta.best_number.saturating_sub(best_number).saturated_into::<u64>()
> self.canonicalization_delay
{
return Err(pezsp_blockchain::Error::SetHeadTooOld);
}
@@ -1377,8 +1384,8 @@ impl<Block: BlockT> Backend<Block> {
) -> ClientResult<()> {
let last_finalized =
last_finalized.unwrap_or_else(|| self.blockchain.meta.read().finalized_hash);
if last_finalized != self.blockchain.meta.read().genesis_hash &&
*header.parent_hash() != last_finalized
if last_finalized != self.blockchain.meta.read().genesis_hash
&& *header.parent_hash() != last_finalized
{
return Err(pezsp_blockchain::Error::NonSequentialFinalization(format!(
"Last finalized {last_finalized:?} not parent of {:?}",
@@ -1649,8 +1656,8 @@ impl<Block: BlockT> Backend<Block> {
let finalized = number_u64 == 0 || pending_block.leaf_state.is_final();
finalized
} else {
(number.is_zero() && last_finalized_num.is_zero()) ||
pending_block.leaf_state.is_final()
(number.is_zero() && last_finalized_num.is_zero())
|| pending_block.leaf_state.is_final()
};
let header = &pending_block.header;
@@ -1732,7 +1739,7 @@ impl<Block: BlockT> Backend<Block> {
if let Some(mut gap) = block_gap {
match gap.gap_type {
BlockGapType::MissingHeaderAndBody =>
BlockGapType::MissingHeaderAndBody => {
if number == gap.start {
gap.start += One::one();
utils::insert_number_to_key_mapping(
@@ -1751,7 +1758,8 @@ impl<Block: BlockT> Backend<Block> {
debug!(target: "db", "Update block gap. {block_gap:?}");
}
block_gap_updated = true;
},
}
},
BlockGapType::MissingBody => {
// Gap increased when syncing the header chain during fast sync.
if number == gap.end + One::one() && !existing_body {
@@ -1782,8 +1790,8 @@ impl<Block: BlockT> Backend<Block> {
},
}
} else if operation.create_gap {
if number > best_num + One::one() &&
self.blockchain.header(parent_hash)?.is_none()
if number > best_num + One::one()
&& self.blockchain.header(parent_hash)?.is_none()
{
let gap = BlockGap {
start: best_num + One::one(),
@@ -1793,9 +1801,9 @@ impl<Block: BlockT> Backend<Block> {
insert_new_gap(&mut transaction, gap, &mut block_gap);
block_gap_updated = true;
debug!(target: "db", "Detected block gap (warp sync) {block_gap:?}");
} else if number == best_num + One::one() &&
self.blockchain.header(parent_hash)?.is_some() &&
!existing_body
} else if number == best_num + One::one()
&& self.blockchain.header(parent_hash)?.is_some()
&& !existing_body
{
let gap = BlockGap {
start: number,
@@ -1893,8 +1901,8 @@ impl<Block: BlockT> Backend<Block> {
LastCanonicalized::NotCanonicalizing => false,
};
if requires_canonicalization &&
pezsc_client_api::Backend::have_state_at(self, f_hash, f_num)
if requires_canonicalization
&& pezsc_client_api::Backend::have_state_at(self, f_hash, f_num)
{
let commit = self.storage.state_db.canonicalize_block(&f_hash).map_err(
pezsp_blockchain::Error::from_state_db::<
@@ -2000,16 +2008,18 @@ impl<Block: BlockT> Backend<Block> {
id,
)?;
match Vec::<DbExtrinsic<Block>>::decode(&mut &index[..]) {
Ok(index) =>
Ok(index) => {
for ex in index {
if let DbExtrinsic::Indexed { hash, .. } = ex {
transaction.release(columns::TRANSACTION, hash);
}
},
Err(err) =>
}
},
Err(err) => {
return Err(pezsp_blockchain::Error::Backend(format!(
"Error decoding body list: {err}",
))),
)))
},
}
}
Ok(())
@@ -2234,8 +2244,8 @@ impl<Block: BlockT> pezsc_client_api::backend::Backend<Block> for Backend<Block>
let last_finalized = self.blockchain.last_finalized()?;
// We can do a quick check first, before doing a proper but more expensive check
if number > self.blockchain.info().finalized_number ||
(hash != last_finalized && !is_descendent_of(&hash, &last_finalized)?)
if number > self.blockchain.info().finalized_number
|| (hash != last_finalized && !is_descendent_of(&hash, &last_finalized)?)
{
return Err(ClientError::NotInFinalizedChain);
}
@@ -2370,8 +2380,8 @@ impl<Block: BlockT> pezsc_client_api::backend::Backend<Block> for Backend<Block>
reverted_finalized.insert(removed_hash);
if let Some((hash, _)) = self.blockchain.info().finalized_state {
if hash == hash_to_revert {
if !number_to_revert.is_zero() &&
self.have_state_at(prev_hash, prev_number)
if !number_to_revert.is_zero()
&& self.have_state_at(prev_hash, prev_number)
{
let lookup_key = utils::number_and_hash_to_lookup_key(
prev_number,
+6 -4
View File
@@ -97,7 +97,7 @@ impl<H: Clone + AsRef<[u8]>> Database<H> for DbAdapter {
Some(match change {
Change::Set(col, key, value) => (col as u8, key, Some(value)),
Change::Remove(col, key) => (col as u8, key, None),
Change::Store(col, key, value) =>
Change::Store(col, key, value) => {
if ref_counted_column(col) {
(col as u8, key.as_ref().to_vec(), Some(value))
} else {
@@ -105,7 +105,8 @@ impl<H: Clone + AsRef<[u8]>> Database<H> for DbAdapter {
not_ref_counted_column.push(col);
}
return None;
},
}
},
Change::Reference(col, key) => {
if ref_counted_column(col) {
// FIXME accessing value is not strictly needed, optimize this in parity-db.
@@ -118,7 +119,7 @@ impl<H: Clone + AsRef<[u8]>> Database<H> for DbAdapter {
return None;
}
},
Change::Release(col, key) =>
Change::Release(col, key) => {
if ref_counted_column(col) {
(col as u8, key.as_ref().to_vec(), None)
} else {
@@ -126,7 +127,8 @@ impl<H: Clone + AsRef<[u8]>> Database<H> for DbAdapter {
not_ref_counted_column.push(col);
}
return None;
},
}
},
})
}));
+3 -2
View File
@@ -163,8 +163,9 @@ fn migrate_3_to_4<Block: BlockT>(db_path: &Path, _db_type: DatabaseType) -> Upgr
/// If the file does not exist returns 0.
fn current_version(path: &Path) -> UpgradeResult<u32> {
match fs::File::open(version_file_path(path)) {
Err(ref err) if err.kind() == ErrorKind::NotFound =>
Err(UpgradeError::MissingDatabaseVersionFile),
Err(ref err) if err.kind() == ErrorKind::NotFound => {
Err(UpgradeError::MissingDatabaseVersionFile)
},
Err(_) => Err(UpgradeError::UnknownDatabaseVersion),
Ok(mut file) => {
let mut s = String::new();
+17 -12
View File
@@ -202,8 +202,9 @@ fn open_database_at<Block: BlockT>(
let db: Arc<dyn Database<DbHash>> = match &db_source {
DatabaseSource::ParityDb { path } => open_parity_db::<Block>(path, db_type, create)?,
#[cfg(feature = "rocksdb")]
DatabaseSource::RocksDb { path, cache_size } =>
open_kvdb_rocksdb::<Block>(path, db_type, create, *cache_size)?,
DatabaseSource::RocksDb { path, cache_size } => {
open_kvdb_rocksdb::<Block>(path, db_type, create, *cache_size)?
},
DatabaseSource::Custom { db, require_create_flag } => {
if *require_create_flag && !create {
return Err(OpenDbError::DoesNotExist);
@@ -214,8 +215,9 @@ fn open_database_at<Block: BlockT>(
// check if rocksdb exists first, if not, open paritydb
match open_kvdb_rocksdb::<Block>(rocksdb_path, db_type, false, *cache_size) {
Ok(db) => db,
Err(OpenDbError::NotEnabled(_)) | Err(OpenDbError::DoesNotExist) =>
open_parity_db::<Block>(paritydb_path, db_type, create)?,
Err(OpenDbError::NotEnabled(_)) | Err(OpenDbError::DoesNotExist) => {
open_parity_db::<Block>(paritydb_path, db_type, create)?
},
Err(as_is) => return Err(as_is),
}
},
@@ -368,13 +370,14 @@ pub fn check_database_type(
db_type: DatabaseType,
) -> Result<(), OpenDbError> {
match db.get(COLUMN_META, meta_keys::TYPE) {
Some(stored_type) =>
Some(stored_type) => {
if db_type.as_str().as_bytes() != &*stored_type {
return Err(OpenDbError::UnexpectedDbType {
expected: db_type,
found: stored_type.to_owned(),
});
},
}
},
None => {
let mut transaction = Transaction::new();
transaction.set(COLUMN_META, meta_keys::TYPE, db_type.as_str().as_bytes());
@@ -396,8 +399,8 @@ fn maybe_migrate_to_type_subdir<Block: BlockT>(
// Do we have to migrate to a database-type-based subdirectory layout:
// See if there's a file identifying a rocksdb or paritydb folder in the parent dir and
// the target path ends in a role specific directory
if (basedir.join("db_version").exists() || basedir.join("metadata").exists()) &&
(p.ends_with(DatabaseType::Full.as_str()))
if (basedir.join("db_version").exists() || basedir.join("metadata").exists())
&& (p.ends_with(DatabaseType::Full.as_str()))
{
// Try to open the database to check if the current `DatabaseType` matches the type of
// database stored in the target directory and close the database on success.
@@ -487,7 +490,7 @@ where
{
let genesis_hash: Block::Hash = match read_genesis_hash(db)? {
Some(genesis_hash) => genesis_hash,
None =>
None => {
return Ok(Meta {
best_hash: Default::default(),
best_number: Zero::zero(),
@@ -496,7 +499,8 @@ where
genesis_hash: Default::default(),
finalized_state: None,
block_gap: None,
}),
})
},
};
let load_meta_block = |desc, key| -> Result<_, pezsp_blockchain::Error> {
@@ -546,10 +550,11 @@ where
BLOCK_GAP_CURRENT_VERSION => db
.get(COLUMN_META, meta_keys::BLOCK_GAP)
.and_then(|d| Decode::decode(&mut d.as_slice()).ok()),
v =>
v => {
return Err(pezsp_blockchain::Error::Backend(format!(
"Unsupported block gap DB version: {v}"
))),
)))
},
},
};
debug!(target: "db", "block_gap={:?}", block_gap);
@@ -191,8 +191,9 @@ impl RuntimeBlob {
/// Consumes this runtime blob and serializes it.
pub fn serialize(self) -> Vec<u8> {
match self.0 {
BlobKind::WebAssembly(raw_module) =>
serialize(raw_module).expect("serializing into a vec should succeed; qed"),
BlobKind::WebAssembly(raw_module) => {
serialize(raw_module).expect("serializing into a vec should succeed; qed")
},
BlobKind::PolkaVM(ref blob) => blob.1.to_vec(),
}
}
+24 -16
View File
@@ -45,11 +45,12 @@ impl WasmInstance for Instance {
) -> (Result<Vec<u8>, Error>, Option<AllocationStats>) {
let pc = match self.0.module().exports().find(|e| e.symbol() == name) {
Some(export) => export.program_counter(),
None =>
None => {
return (
Err(format!("cannot call into the runtime: export not found: '{name}'").into()),
None,
),
)
},
};
let Ok(raw_data_length) = u32::try_from(raw_data.len()) else {
@@ -93,21 +94,24 @@ impl WasmInstance for Instance {
match self.0.call_typed(&mut (), pc, (data_pointer, raw_data_length)) {
Ok(()) => {},
Err(CallError::Trap) =>
Err(CallError::Trap) => {
return (
Err(format!("call into the runtime method '{name}' failed: trap").into()),
None,
),
Err(CallError::Error(err)) =>
)
},
Err(CallError::Error(err)) => {
return (
Err(format!("call into the runtime method '{name}' failed: {err}").into()),
None,
),
Err(CallError::User(err)) =>
)
},
Err(CallError::User(err)) => {
return (
Err(format!("call into the runtime method '{name}' failed: {err}").into()),
None,
),
)
},
Err(CallError::NotEnoughGas) => unreachable!("gas metering is never enabled"),
Err(CallError::Step) => unreachable!("stepping is never enabled"),
};
@@ -190,7 +194,7 @@ fn call_host_function(caller: &mut Caller<()>, function: &dyn Function) -> Resul
args[nth_arg] = Value::F32(caller.instance.reg(Reg::ARG_REGS[nth_reg]) as u32);
nth_reg += 1;
},
ValueType::I64 =>
ValueType::I64 => {
if caller.instance.is_64_bit() {
args[nth_arg] = Value::I64(caller.instance.reg(Reg::ARG_REGS[nth_reg]) as i64);
nth_reg += 1;
@@ -203,8 +207,9 @@ fn call_host_function(caller: &mut Caller<()>, function: &dyn Function) -> Resul
args[nth_arg] =
Value::I64((u64::from(value_lo) | (u64::from(value_hi) << 32)) as i64);
},
ValueType::F64 =>
}
},
ValueType::F64 => {
if caller.instance.is_64_bit() {
args[nth_arg] = Value::F64(caller.instance.reg(Reg::ARG_REGS[nth_reg]));
nth_reg += 1;
@@ -216,7 +221,8 @@ fn call_host_function(caller: &mut Caller<()>, function: &dyn Function) -> Resul
nth_reg += 1;
args[nth_arg] = Value::F64(u64::from(value_lo) | (u64::from(value_hi) << 32));
},
}
},
}
}
@@ -244,20 +250,22 @@ fn call_host_function(caller: &mut Caller<()>, function: &dyn Function) -> Resul
Value::F32(value) => {
caller.instance.set_reg(Reg::A0, value as u64);
},
Value::I64(value) =>
Value::I64(value) => {
if caller.instance.is_64_bit() {
caller.instance.set_reg(Reg::A0, value as u64);
} else {
caller.instance.set_reg(Reg::A0, value as u64);
caller.instance.set_reg(Reg::A1, (value >> 32) as u64);
},
Value::F64(value) =>
}
},
Value::F64(value) => {
if caller.instance.is_64_bit() {
caller.instance.set_reg(Reg::A0, value as u64);
} else {
caller.instance.set_reg(Reg::A0, value as u64);
caller.instance.set_reg(Reg::A1, (value >> 32) as u64);
},
}
},
}
}
@@ -119,8 +119,9 @@ fn call_not_existing_function(wasm_method: WasmExecutionMethod) {
match call_in_wasm("test_calling_missing_external", &[], wasm_method, &mut ext).unwrap_err() {
Error::AbortedDueToTrap(error) => {
let expected = match wasm_method {
WasmExecutionMethod::Compiled { .. } =>
"call to a missing function env:missing_external",
WasmExecutionMethod::Compiled { .. } => {
"call to a missing function env:missing_external"
},
};
assert_eq!(error.message, expected);
},
@@ -138,8 +139,9 @@ fn call_yet_another_not_existing_function(wasm_method: WasmExecutionMethod) {
{
Error::AbortedDueToTrap(error) => {
let expected = match wasm_method {
WasmExecutionMethod::Compiled { .. } =>
"call to a missing function env:yet_another_missing_external",
WasmExecutionMethod::Compiled { .. } => {
"call to a missing function env:yet_another_missing_external"
},
};
assert_eq!(error.message, expected);
},
@@ -728,8 +730,9 @@ fn unreachable_intrinsic(wasm_method: WasmExecutionMethod) {
match call_in_wasm("test_unreachable_intrinsic", &[], wasm_method, &mut ext).unwrap_err() {
Error::AbortedDueToTrap(error) => {
let expected = match wasm_method {
WasmExecutionMethod::Compiled { .. } =>
"wasm trap: wasm `unreachable` instruction executed",
WasmExecutionMethod::Compiled { .. } => {
"wasm trap: wasm `unreachable` instruction executed"
},
};
assert_eq!(error.message, expected);
},
@@ -303,7 +303,7 @@ where
}
match wasm_method {
WasmExecutionMethod::Compiled { instantiation_strategy } =>
WasmExecutionMethod::Compiled { instantiation_strategy } => {
pezsc_executor_wasmtime::create_runtime::<H>(
blob,
pezsc_executor_wasmtime::Config {
@@ -322,7 +322,8 @@ where
},
},
)
.map(|runtime| -> Box<dyn WasmModule> { Box::new(runtime) }),
.map(|runtime| -> Box<dyn WasmModule> { Box::new(runtime) })
},
}
}
@@ -48,12 +48,13 @@ where
ExternType::Func(func_ty) => {
pending_func_imports.insert(name.to_owned(), (import_ty, func_ty));
},
_ =>
_ => {
return Err(WasmError::Other(format!(
"host doesn't provide any non function imports: {}:{}",
import_ty.module(),
name,
))),
)))
},
};
}
@@ -274,8 +274,9 @@ fn common_config(semantics: &Semantics) -> std::result::Result<wasmtime::Config,
config.memory_init_cow(use_cow);
config.memory_guaranteed_dense_image_size(match semantics.heap_alloc_strategy {
HeapAllocStrategy::Dynamic { maximum_pages } =>
maximum_pages.map(|p| p as u64 * WASM_PAGE_SIZE).unwrap_or(u64::MAX),
HeapAllocStrategy::Dynamic { maximum_pages } => {
maximum_pages.map(|p| p as u64 * WASM_PAGE_SIZE).unwrap_or(u64::MAX)
},
HeapAllocStrategy::Static { .. } => u64::MAX,
});
@@ -283,8 +284,9 @@ fn common_config(semantics: &Semantics) -> std::result::Result<wasmtime::Config,
const MAX_WASM_PAGES: u64 = 0x10000;
let memory_pages = match semantics.heap_alloc_strategy {
HeapAllocStrategy::Dynamic { maximum_pages } =>
maximum_pages.map(|p| p as u64).unwrap_or(MAX_WASM_PAGES),
HeapAllocStrategy::Dynamic { maximum_pages } => {
maximum_pages.map(|p| p as u64).unwrap_or(MAX_WASM_PAGES)
},
HeapAllocStrategy::Static { .. } => MAX_WASM_PAGES,
};
@@ -579,11 +581,12 @@ where
.map_err(|e| WasmError::Other(format!("cannot create module: {:#}", e)))?;
match config.semantics.instantiation_strategy {
InstantiationStrategy::Pooling |
InstantiationStrategy::PoolingCopyOnWrite |
InstantiationStrategy::RecreateInstance |
InstantiationStrategy::RecreateInstanceCopyOnWrite =>
(module, InternalInstantiationStrategy::Builtin),
InstantiationStrategy::Pooling
| InstantiationStrategy::PoolingCopyOnWrite
| InstantiationStrategy::RecreateInstance
| InstantiationStrategy::RecreateInstanceCopyOnWrite => {
(module, InternalInstantiationStrategy::Builtin)
},
}
},
CodeSupplyMode::Precompiled(compiled_artifact_path) => {
@@ -116,8 +116,9 @@ pub(crate) fn replace_strategy_if_broken(strategy: &mut InstantiationStrategy) {
// These strategies require a working `madvise` to be sound.
InstantiationStrategy::PoolingCopyOnWrite => InstantiationStrategy::Pooling,
InstantiationStrategy::RecreateInstanceCopyOnWrite =>
InstantiationStrategy::RecreateInstance,
InstantiationStrategy::RecreateInstanceCopyOnWrite => {
InstantiationStrategy::RecreateInstance
},
};
use std::sync::OnceLock;
+11 -7
View File
@@ -106,6 +106,7 @@ impl<B: BlockT> InformantDisplay<B> {
// Handle all phases besides the two phases we already handle above.
(_, _, Some(warp))
if !matches!(warp.phase, WarpSyncPhase::DownloadingBlocks(_)) =>
{
(
"",
"Warping".into(),
@@ -114,7 +115,8 @@ impl<B: BlockT> InformantDisplay<B> {
warp.phase,
(warp.total_bytes as f32) / (1024f32 * 1024f32)
),
),
)
},
(_, Some(state), _) => (
"⚙️ ",
"State sync".into(),
@@ -126,10 +128,12 @@ impl<B: BlockT> InformantDisplay<B> {
),
),
(SyncState::Idle, _, _) => ("💤", "Idle".into(), "".into()),
(SyncState::Downloading { target }, _, _) =>
("⚙️ ", format!("Syncing{}", speed), format!(", target=#{target}")),
(SyncState::Importing { target }, _, _) =>
("⚙️ ", format!("Preparing{}", speed), format!(", target=#{target}")),
(SyncState::Downloading { target }, _, _) => {
("⚙️ ", format!("Syncing{}", speed), format!(", target=#{target}"))
},
(SyncState::Importing { target }, _, _) => {
("⚙️ ", format!("Preparing{}", speed), format!(", target=#{target}"))
},
};
info!(
@@ -176,8 +180,8 @@ fn speed<B: BlockT>(
let speed = diff
.saturating_mul(10_000)
.checked_div(u128::from(elapsed_ms))
.map_or(0.0, |s| s as f64) /
10.0;
.map_or(0.0, |s| s as f64)
/ 10.0;
format!(" {:4.1} bps", speed)
} else {
// If the number of blocks can't be converted to a regular integer, then we need a more
+3 -2
View File
@@ -64,8 +64,9 @@ impl From<Error> for TraitError {
fn from(error: Error) -> Self {
match error {
Error::KeyNotSupported(id) => TraitError::KeyNotSupported(id),
Error::InvalidSeed | Error::InvalidPhrase | Error::PublicKeyMismatch =>
TraitError::ValidationError(error.to_string()),
Error::InvalidSeed | Error::InvalidPhrase | Error::PublicKeyMismatch => {
TraitError::ValidationError(error.to_string())
},
Error::Unavailable => TraitError::Unavailable,
Error::Io(e) => TraitError::Other(e.to_string()),
Error::Json(e) => TraitError::Other(e.to_string()),
@@ -270,8 +270,8 @@ where
/// Converts an mmr-specific error into a [`CallError`].
fn mmr_error_into_rpc_error(err: MmrError) -> ErrorObjectOwned {
let error_code = MMR_ERROR +
match err {
let error_code = MMR_ERROR
+ match err {
MmrError::LeafNotFound => 1,
MmrError::GenerateProof => 2,
MmrError::Verify => 3,
@@ -65,8 +65,12 @@ where
match version {
None => (),
Some(1) => return load_decode::<_, PersistedState<B>>(backend, GADGET_STATE),
other =>
return Err(ClientError::Backend(format!("Unsupported MMR aux DB version: {:?}", other))),
other => {
return Err(ClientError::Backend(format!(
"Unsupported MMR aux DB version: {:?}",
other
)))
},
}
// No persistent state found in DB.
@@ -94,8 +94,9 @@ impl MmrBlock {
node,
self.parent_hash(),
),
OffchainKeyType::Canon =>
NodesUtils::node_canon_offchain_key(MockRuntimeApi::INDEXING_PREFIX, node),
OffchainKeyType::Canon => {
NodesUtils::node_canon_offchain_key(MockRuntimeApi::INDEXING_PREFIX, node)
},
}
}
}
@@ -78,14 +78,15 @@ impl MaybeInfDelay {
},
Inner::Finite(delay) => delay.reset(duration),
},
None =>
None => {
self.0 = match std::mem::replace(
&mut self.0,
Inner::Infinite { waker: None, delay: None },
) {
Inner::Finite(delay) => Inner::Infinite { waker: None, delay: Some(delay) },
infinite => infinite,
},
}
},
}
}
}
+3 -2
View File
@@ -92,8 +92,9 @@ impl mixnet::request_manager::Request for Request {
fn with_data<T>(&self, f: impl FnOnce(Scattered<u8>) -> T, _context: &Self::Context) -> T {
match self {
Request::SubmitExtrinsic { extrinsic, .. } =>
f([&[SUBMIT_EXTRINSIC][..], extrinsic.0.as_slice()].as_slice().into()),
Request::SubmitExtrinsic { extrinsic, .. } => {
f([&[SUBMIT_EXTRINSIC][..], extrinsic.0.as_slice()].as_slice().into())
},
}
}
@@ -220,7 +220,7 @@ impl<B: BlockT> Future for GossipEngine<B> {
},
NotificationEvent::NotificationStreamOpened {
peer, handshake, ..
} =>
} => {
if let Some(role) = this.network.peer_role(peer, handshake) {
this.state_machine.new_peer(
&mut this.notification_service,
@@ -229,7 +229,8 @@ impl<B: BlockT> Future for GossipEngine<B> {
);
} else {
log::debug!(target: "gossip", "role for {peer} couldn't be determined");
},
}
},
NotificationEvent::NotificationStreamClosed { peer } => {
this.state_machine
.peer_disconnected(&mut this.notification_service, peer);
@@ -254,10 +255,12 @@ impl<B: BlockT> Future for GossipEngine<B> {
match sync_event_stream {
Poll::Ready(Some(event)) => match event {
SyncEvent::PeerConnected(remote) =>
this.network.add_set_reserved(remote, this.protocol.clone()),
SyncEvent::PeerDisconnected(remote) =>
this.network.remove_set_reserved(remote, this.protocol.clone()),
SyncEvent::PeerConnected(remote) => {
this.network.add_set_reserved(remote, this.protocol.clone())
},
SyncEvent::PeerDisconnected(remote) => {
this.network.remove_set_reserved(remote, this.protocol.clone())
},
},
// The sync event stream closed. Do the same for [`GossipValidator`].
Poll::Ready(None) => {
@@ -116,12 +116,13 @@ where
for (id, ref mut peer) in peers.iter_mut() {
for (message_hash, topic, message) in messages.clone() {
let intent = match intent {
MessageIntent::Broadcast { .. } =>
MessageIntent::Broadcast { .. } => {
if peer.known_messages.contains(message_hash) {
continue;
} else {
MessageIntent::Broadcast
},
}
},
MessageIntent::PeriodicRebroadcast => {
if peer.known_messages.contains(message_hash) {
MessageIntent::PeriodicRebroadcast
@@ -149,14 +149,18 @@ where
let request = schema::v1::light::Request::decode(&payload[..])?;
let response = match &request.request {
Some(schema::v1::light::request::Request::RemoteCallRequest(r)) =>
self.on_remote_call_request(&peer, r)?,
Some(schema::v1::light::request::Request::RemoteReadRequest(r)) =>
self.on_remote_read_request(&peer, r)?,
Some(schema::v1::light::request::Request::RemoteReadChildRequest(r)) =>
self.on_remote_read_child_request(&peer, r)?,
None =>
return Err(HandleRequestError::BadRequest("Remote request without request data.")),
Some(schema::v1::light::request::Request::RemoteCallRequest(r)) => {
self.on_remote_call_request(&peer, r)?
},
Some(schema::v1::light::request::Request::RemoteReadRequest(r)) => {
self.on_remote_read_request(&peer, r)?
},
Some(schema::v1::light::request::Request::RemoteReadChildRequest(r)) => {
self.on_remote_read_child_request(&peer, r)?
},
None => {
return Err(HandleRequestError::BadRequest("Remote request without request data."))
},
};
let mut data = Vec::new();
+45 -30
View File
@@ -360,10 +360,12 @@ impl From<CustomMessageOutcome> for BehaviourOut {
set_id,
notifications_sink,
} => BehaviourOut::NotificationStreamReplaced { remote, set_id, notifications_sink },
CustomMessageOutcome::NotificationStreamClosed { remote, set_id } =>
BehaviourOut::NotificationStreamClosed { remote, set_id },
CustomMessageOutcome::NotificationsReceived { remote, set_id, notification } =>
BehaviourOut::NotificationsReceived { remote, set_id, notification },
CustomMessageOutcome::NotificationStreamClosed { remote, set_id } => {
BehaviourOut::NotificationStreamClosed { remote, set_id }
},
CustomMessageOutcome::NotificationsReceived { remote, set_id, notification } => {
BehaviourOut::NotificationsReceived { remote, set_id, notification }
},
}
}
}
@@ -371,12 +373,15 @@ impl From<CustomMessageOutcome> for BehaviourOut {
impl From<request_responses::Event> for BehaviourOut {
fn from(event: request_responses::Event) -> Self {
match event {
request_responses::Event::InboundRequest { protocol, result, .. } =>
BehaviourOut::InboundRequest { protocol, result },
request_responses::Event::RequestFinished { protocol, duration, result, .. } =>
BehaviourOut::RequestFinished { protocol, duration, result },
request_responses::Event::ReputationChanges { peer, changes } =>
BehaviourOut::ReputationChanges { peer, changes },
request_responses::Event::InboundRequest { protocol, result, .. } => {
BehaviourOut::InboundRequest { protocol, result }
},
request_responses::Event::RequestFinished { protocol, duration, result, .. } => {
BehaviourOut::RequestFinished { protocol, duration, result }
},
request_responses::Event::ReputationChanges { peer, changes } => {
BehaviourOut::ReputationChanges { peer, changes }
},
}
}
}
@@ -409,25 +414,33 @@ impl From<DiscoveryOut> for BehaviourOut {
),
Some(duration),
),
DiscoveryOut::ClosestPeersNotFound(target, duration) =>
BehaviourOut::Dht(DhtEvent::ClosestPeersNotFound(target.into()), Some(duration)),
DiscoveryOut::ValueFound(results, duration) =>
BehaviourOut::Dht(DhtEvent::ValueFound(results.into()), Some(duration)),
DiscoveryOut::ValueNotFound(key, duration) =>
BehaviourOut::Dht(DhtEvent::ValueNotFound(key.into()), Some(duration)),
DiscoveryOut::ValuePut(key, duration) =>
BehaviourOut::Dht(DhtEvent::ValuePut(key.into()), Some(duration)),
DiscoveryOut::PutRecordRequest(record_key, record_value, publisher, expires) =>
DiscoveryOut::ClosestPeersNotFound(target, duration) => {
BehaviourOut::Dht(DhtEvent::ClosestPeersNotFound(target.into()), Some(duration))
},
DiscoveryOut::ValueFound(results, duration) => {
BehaviourOut::Dht(DhtEvent::ValueFound(results.into()), Some(duration))
},
DiscoveryOut::ValueNotFound(key, duration) => {
BehaviourOut::Dht(DhtEvent::ValueNotFound(key.into()), Some(duration))
},
DiscoveryOut::ValuePut(key, duration) => {
BehaviourOut::Dht(DhtEvent::ValuePut(key.into()), Some(duration))
},
DiscoveryOut::PutRecordRequest(record_key, record_value, publisher, expires) => {
BehaviourOut::Dht(
DhtEvent::PutRecordRequest(record_key.into(), record_value, publisher, expires),
None,
),
DiscoveryOut::ValuePutFailed(key, duration) =>
BehaviourOut::Dht(DhtEvent::ValuePutFailed(key.into()), Some(duration)),
DiscoveryOut::StartedProviding(key, duration) =>
BehaviourOut::Dht(DhtEvent::StartedProviding(key.into()), Some(duration)),
DiscoveryOut::StartProvidingFailed(key, duration) =>
BehaviourOut::Dht(DhtEvent::StartProvidingFailed(key.into()), Some(duration)),
)
},
DiscoveryOut::ValuePutFailed(key, duration) => {
BehaviourOut::Dht(DhtEvent::ValuePutFailed(key.into()), Some(duration))
},
DiscoveryOut::StartedProviding(key, duration) => {
BehaviourOut::Dht(DhtEvent::StartedProviding(key.into()), Some(duration))
},
DiscoveryOut::StartProvidingFailed(key, duration) => {
BehaviourOut::Dht(DhtEvent::StartProvidingFailed(key.into()), Some(duration))
},
DiscoveryOut::ProvidersFound(key, providers, duration) => BehaviourOut::Dht(
DhtEvent::ProvidersFound(
key.into(),
@@ -435,10 +448,12 @@ impl From<DiscoveryOut> for BehaviourOut {
),
Some(duration),
),
DiscoveryOut::NoMoreProviders(key, duration) =>
BehaviourOut::Dht(DhtEvent::NoMoreProviders(key.into()), Some(duration)),
DiscoveryOut::ProvidersNotFound(key, duration) =>
BehaviourOut::Dht(DhtEvent::ProvidersNotFound(key.into()), Some(duration)),
DiscoveryOut::NoMoreProviders(key, duration) => {
BehaviourOut::Dht(DhtEvent::NoMoreProviders(key.into()), Some(duration))
},
DiscoveryOut::ProvidersNotFound(key, duration) => {
BehaviourOut::Dht(DhtEvent::ProvidersNotFound(key.into()), Some(duration))
},
DiscoveryOut::RandomKademliaStarted => BehaviourOut::RandomKademliaStarted,
}
}
+3 -3
View File
@@ -197,9 +197,9 @@ impl<B: BlockT> BitswapRequestHandler<B> {
},
};
if cid.version() != cid::Version::V1 ||
cid.hash().code() != u64::from(cid::multihash::Code::Blake2b256) ||
cid.hash().size() != 32
if cid.version() != cid::Version::V1
|| cid.hash().code() != u64::from(cid::multihash::Code::Blake2b256)
|| cid.hash().size() != 32
{
debug!(target: LOG_TARGET, "Ignoring unsupported CID {}: {}", peer, cid);
continue;
+8 -6
View File
@@ -584,8 +584,9 @@ impl DiscoveryBehaviour {
let ip = match addr.iter().next() {
Some(Protocol::Ip4(ip)) => IpNetwork::from(ip),
Some(Protocol::Ip6(ip)) => IpNetwork::from(ip),
Some(Protocol::Dns(_)) | Some(Protocol::Dns4(_)) | Some(Protocol::Dns6(_)) =>
return true,
Some(Protocol::Dns(_)) | Some(Protocol::Dns4(_)) | Some(Protocol::Dns6(_)) => {
return true
},
_ => return false,
};
ip.is_global()
@@ -939,7 +940,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
// We are not interested in this event at the moment.
},
KademliaEvent::InboundRequest { request } => match request {
libp2p::kad::InboundRequest::PutRecord { record: Some(record), .. } =>
libp2p::kad::InboundRequest::PutRecord { record: Some(record), .. } => {
return Poll::Ready(ToSwarm::GenerateEvent(
DiscoveryOut::PutRecordRequest(
record.key,
@@ -947,7 +948,8 @@ impl NetworkBehaviour for DiscoveryBehaviour {
record.publisher.map(Into::into),
record.expires,
),
)),
))
},
_ => {},
},
KademliaEvent::OutboundQueryProgressed {
@@ -1417,8 +1419,8 @@ mod tests {
match e {
SwarmEvent::Behaviour(behavior) => {
match behavior {
DiscoveryOut::UnroutablePeer(other) |
DiscoveryOut::Discovered(other) => {
DiscoveryOut::UnroutablePeer(other)
| DiscoveryOut::Discovered(other) => {
// Call `add_self_reported_address` to simulate identify
// happening.
let addr = swarms
@@ -507,8 +507,9 @@ impl Discovery {
let ip = match address.iter().next() {
Some(Protocol::Ip4(ip)) => IpNetwork::from(ip),
Some(Protocol::Ip6(ip)) => IpNetwork::from(ip),
Some(Protocol::Dns(_)) | Some(Protocol::Dns4(_)) | Some(Protocol::Dns6(_)) =>
return true,
Some(Protocol::Dns(_)) | Some(Protocol::Dns4(_)) | Some(Protocol::Dns6(_)) => {
return true
},
_ => return false,
};
@@ -554,8 +555,8 @@ impl Discovery {
}
},
None => {
let oldest = (self.address_confirmations.len() >=
self.address_confirmations.limiter().max_length() as usize)
let oldest = (self.address_confirmations.len()
>= self.address_confirmations.limiter().max_length() as usize)
.then(|| {
self.address_confirmations.pop_oldest().map(|(address, peers)| {
if peers.len() >= MIN_ADDRESS_CONFIRMATIONS {
@@ -668,8 +669,9 @@ impl Stream for Discovery {
record,
}));
},
Poll::Ready(Some(KademliaEvent::PutRecordSuccess { query_id, key: _ })) =>
return Poll::Ready(Some(DiscoveryEvent::PutRecordSuccess { query_id })),
Poll::Ready(Some(KademliaEvent::PutRecordSuccess { query_id, key: _ })) => {
return Poll::Ready(Some(DiscoveryEvent::PutRecordSuccess { query_id }))
},
Poll::Ready(Some(KademliaEvent::QueryFailed { query_id })) => {
match this.random_walk_query_id == Some(query_id) {
true => {
@@ -781,16 +783,18 @@ impl Stream for Discovery {
match Pin::new(&mut this.ping_event_stream).poll_next(cx) {
Poll::Pending => {},
Poll::Ready(None) => return Poll::Ready(None),
Poll::Ready(Some(PingEvent::Ping { peer, ping })) =>
return Poll::Ready(Some(DiscoveryEvent::Ping { peer, rtt: ping })),
Poll::Ready(Some(PingEvent::Ping { peer, ping })) => {
return Poll::Ready(Some(DiscoveryEvent::Ping { peer, rtt: ping }))
},
}
if let Some(ref mut mdns_event_stream) = &mut this.mdns_event_stream {
match Pin::new(mdns_event_stream).poll_next(cx) {
Poll::Pending => {},
Poll::Ready(None) => return Poll::Ready(None),
Poll::Ready(Some(MdnsEvent::Discovered(addresses))) =>
return Poll::Ready(Some(DiscoveryEvent::Discovered { addresses })),
Poll::Ready(Some(MdnsEvent::Discovered(addresses))) => {
return Poll::Ready(Some(DiscoveryEvent::Discovered { addresses }))
},
}
}
+14 -11
View File
@@ -206,19 +206,20 @@ impl Litep2pNetworkBackend {
.into_iter()
.filter_map(|address| match address.iter().next() {
Some(
Protocol::Dns(_) |
Protocol::Dns4(_) |
Protocol::Dns6(_) |
Protocol::Ip6(_) |
Protocol::Ip4(_),
Protocol::Dns(_)
| Protocol::Dns4(_)
| Protocol::Dns6(_)
| Protocol::Ip6(_)
| Protocol::Ip4(_),
) => match address.iter().find(|protocol| std::matches!(protocol, Protocol::P2p(_)))
{
Some(Protocol::P2p(multihash)) => PeerId::from_multihash(multihash.into())
.map_or(None, |peer| Some((peer, Some(address)))),
_ => None,
},
Some(Protocol::P2p(multihash)) =>
PeerId::from_multihash(multihash.into()).map_or(None, |peer| Some((peer, None))),
Some(Protocol::P2p(multihash)) => {
PeerId::from_multihash(multihash.into()).map_or(None, |peer| Some((peer, None)))
},
_ => None,
})
.fold(HashMap::new(), |mut acc, (peer, maybe_address)| {
@@ -300,8 +301,9 @@ impl Litep2pNetworkBackend {
match iter.next() {
Some(Protocol::Tcp(_)) => match iter.next() {
Some(Protocol::Ws(_) | Protocol::Wss(_)) =>
Some((None, Some(address.clone()))),
Some(Protocol::Ws(_) | Protocol::Wss(_)) => {
Some((None, Some(address.clone())))
},
Some(Protocol::P2p(_)) | None => Some((Some(address.clone()), None)),
protocol => {
log::error!(
@@ -484,8 +486,9 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkBackend<B, H> for Litep2pNetworkBac
use pezsc_network_types::multiaddr::Protocol;
let address = match address.iter().last() {
Some(Protocol::Ws(_) | Protocol::Wss(_) | Protocol::Tcp(_)) =>
address.with(Protocol::P2p(peer.into())),
Some(Protocol::Ws(_) | Protocol::Wss(_) | Protocol::Tcp(_)) => {
address.with(Protocol::P2p(peer.into()))
},
Some(Protocol::P2p(_)) => address,
_ => return acc,
};
@@ -76,8 +76,9 @@ impl<Block: BlockT> BitswapServer<Block> {
log::trace!(target: LOG_TARGET, "found cid {cid:?}, hash {hash:?}");
match want_type {
WantType::Block =>
ResponseType::Block { cid, block: transaction },
WantType::Block => {
ResponseType::Block { cid, block: transaction }
},
_ => ResponseType::Presence {
cid,
presence: BlockPresenceType::Have,
@@ -123,8 +123,9 @@ pub enum Direction {
impl Direction {
fn set_reserved(&mut self, new_reserved: Reserved) {
match self {
Direction::Inbound(ref mut reserved) | Direction::Outbound(ref mut reserved) =>
*reserved = new_reserved,
Direction::Inbound(ref mut reserved) | Direction::Outbound(ref mut reserved) => {
*reserved = new_reserved
},
}
}
}
@@ -541,8 +542,8 @@ impl Peerset {
match &state {
// close was initiated either by remote ([`PeerState::Connected`]) or local node
// ([`PeerState::Closing`]) and it was a non-reserved peer
PeerState::Connected { direction: Direction::Inbound(Reserved::No) } |
PeerState::Closing { direction: Direction::Inbound(Reserved::No) } => {
PeerState::Connected { direction: Direction::Inbound(Reserved::No) }
| PeerState::Closing { direction: Direction::Inbound(Reserved::No) } => {
log::trace!(
target: LOG_TARGET,
"{}: inbound substream closed to non-reserved peer {peer:?}: {state:?}",
@@ -558,8 +559,8 @@ impl Peerset {
},
// close was initiated either by remote ([`PeerState::Connected`]) or local node
// ([`PeerState::Closing`]) and it was a non-reserved peer
PeerState::Connected { direction: Direction::Outbound(Reserved::No) } |
PeerState::Closing { direction: Direction::Outbound(Reserved::No) } => {
PeerState::Connected { direction: Direction::Outbound(Reserved::No) }
| PeerState::Closing { direction: Direction::Outbound(Reserved::No) } => {
log::trace!(
target: LOG_TARGET,
"{}: outbound substream closed to non-reserved peer {peer:?} {state:?}",
@@ -791,8 +792,8 @@ impl Peerset {
_ => {},
},
// reserved peers do not require change in the slot counts
Some(PeerState::Opening { direction: Direction::Inbound(Reserved::Yes) }) |
Some(PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) }) => {
Some(PeerState::Opening { direction: Direction::Inbound(Reserved::Yes) })
| Some(PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) }) => {
log::debug!(
target: LOG_TARGET,
"{}: substream open failure for reserved peer {peer:?}",
@@ -884,10 +885,10 @@ impl Peerset {
match self.peers.get_mut(peer) {
Some(PeerState::Disconnected | PeerState::Backoff) => {},
Some(
PeerState::Opening { ref mut direction } |
PeerState::Connected { ref mut direction } |
PeerState::Canceled { ref mut direction } |
PeerState::Closing { ref mut direction },
PeerState::Opening { ref mut direction }
| PeerState::Connected { ref mut direction }
| PeerState::Canceled { ref mut direction }
| PeerState::Closing { ref mut direction },
) => {
*direction = match direction {
Direction::Inbound(Reserved::No) => {
@@ -1440,8 +1441,8 @@ impl Stream for Peerset {
.peers
.iter()
.filter_map(|(peer, state)| {
(!self.reserved_peers.contains(peer) &&
std::matches!(state, PeerState::Connected { .. }))
(!self.reserved_peers.contains(peer)
&& std::matches!(state, PeerState::Connected { .. }))
.then_some(*peer)
})
.collect::<Vec<_>>();
@@ -162,7 +162,7 @@ async fn test_once() {
// substream to `Peerset` and move peer state to `open`.
//
// if the substream was canceled while it was opening, move peer to `closing`
2 =>
2 => {
if let Some(peer) = opening.keys().choose(&mut rng).copied() {
let direction = opening.remove(&peer).unwrap();
match peerset.report_substream_opened(peer, direction) {
@@ -173,37 +173,43 @@ async fn test_once() {
assert!(closing.insert(peer));
},
}
},
}
},
// substream failed to open
3 =>
3 => {
if let Some(peer) = opening.keys().choose(&mut rng).copied() {
let _ = opening.remove(&peer).unwrap();
peerset.report_substream_open_failure(peer, NotificationError::Rejected);
},
}
},
// substream was closed by remote peer
4 =>
4 => {
if let Some(peer) = open.keys().choose(&mut rng).copied() {
let _ = open.remove(&peer).unwrap();
peerset.report_substream_closed(peer);
assert!(closed.insert(peer));
},
}
},
// substream was closed by local node
5 =>
5 => {
if let Some(peer) = closing.iter().choose(&mut rng).copied() {
assert!(closing.remove(&peer));
assert!(closed.insert(peer));
peerset.report_substream_closed(peer);
},
}
},
// random connected peer was disconnected by the protocol
6 =>
6 => {
if let Some(peer) = open.keys().choose(&mut rng).copied() {
to_peerset.unbounded_send(PeersetCommand::DisconnectPeer { peer }).unwrap();
},
}
},
// ban random peer
7 =>
7 => {
if let Some(peer) = known_peers.iter().choose(&mut rng).copied() {
peer_store_handle.report_peer(peer, ReputationChange::new_fatal(""));
},
}
},
// inbound substream is received for a peer that was considered
// outbound
8 => {
@@ -364,7 +370,7 @@ async fn test_once() {
}
},
// inbound substream received for a peer in `closed`
15 =>
15 => {
if let Some(peer) = closed.iter().choose(&mut rng).copied() {
match peerset.report_inbound_substream(peer) {
ValidationResult::Accept => {
@@ -373,7 +379,8 @@ async fn test_once() {
},
ValidationResult::Reject => {},
}
},
}
},
_ => unreachable!(),
}
}
@@ -382,36 +382,45 @@ impl RequestResponseProtocol {
};
let status = match error {
RequestResponseError::NotConnected =>
Some((RequestFailure::NotConnected, "not-connected")),
RequestResponseError::NotConnected => {
Some((RequestFailure::NotConnected, "not-connected"))
},
RequestResponseError::Rejected(reason) => {
let reason = match reason {
RejectReason::ConnectionClosed => "connection-closed",
RejectReason::SubstreamClosed => "substream-closed",
RejectReason::SubstreamOpenError(substream_error) => match substream_error {
SubstreamError::NegotiationError(NegotiationError::Timeout) =>
"substream-timeout",
SubstreamError::NegotiationError(NegotiationError::Timeout) => {
"substream-timeout"
},
_ => "substream-open-error",
},
RejectReason::DialFailed(None) => "dial-failed",
RejectReason::DialFailed(Some(ImmediateDialError::AlreadyConnected)) =>
"dial-already-connected",
RejectReason::DialFailed(Some(ImmediateDialError::PeerIdMissing)) =>
"dial-peerid-missing",
RejectReason::DialFailed(Some(ImmediateDialError::TriedToDialSelf)) =>
"dial-tried-to-dial-self",
RejectReason::DialFailed(Some(ImmediateDialError::NoAddressAvailable)) =>
"dial-no-address-available",
RejectReason::DialFailed(Some(ImmediateDialError::TaskClosed)) =>
"dial-task-closed",
RejectReason::DialFailed(Some(ImmediateDialError::ChannelClogged)) =>
"dial-channel-clogged",
RejectReason::DialFailed(Some(ImmediateDialError::AlreadyConnected)) => {
"dial-already-connected"
},
RejectReason::DialFailed(Some(ImmediateDialError::PeerIdMissing)) => {
"dial-peerid-missing"
},
RejectReason::DialFailed(Some(ImmediateDialError::TriedToDialSelf)) => {
"dial-tried-to-dial-self"
},
RejectReason::DialFailed(Some(ImmediateDialError::NoAddressAvailable)) => {
"dial-no-address-available"
},
RejectReason::DialFailed(Some(ImmediateDialError::TaskClosed)) => {
"dial-task-closed"
},
RejectReason::DialFailed(Some(ImmediateDialError::ChannelClogged)) => {
"dial-channel-clogged"
},
};
Some((RequestFailure::Refused, reason))
},
RequestResponseError::Timeout =>
Some((RequestFailure::Network(OutboundFailure::Timeout), "timeout")),
RequestResponseError::Timeout => {
Some((RequestFailure::Network(OutboundFailure::Timeout), "timeout"))
},
RequestResponseError::Canceled => {
log::debug!(
target: LOG_TARGET,
@@ -514,8 +523,9 @@ impl RequestResponseProtocol {
match sent_feedback {
None => self.handle.send_response(request_id, response),
Some(feedback) =>
self.handle.send_response_with_feedback(request_id, response, feedback),
Some(feedback) => {
self.handle.send_response_with_feedback(request_id, response, feedback)
},
}
self.metrics.register_inbound_request_success(started.elapsed());
@@ -106,10 +106,12 @@ pub enum Endpoint {
impl From<ConnectedPoint> for PeerEndpoint {
fn from(endpoint: ConnectedPoint) -> Self {
match endpoint {
ConnectedPoint::Dialer { address, role_override, port_use: _ } =>
Self::Dialing(address, role_override.into()),
ConnectedPoint::Listener { local_addr, send_back_addr } =>
Self::Listening { local_addr, send_back_addr },
ConnectedPoint::Dialer { address, role_override, port_use: _ } => {
Self::Dialing(address, role_override.into())
},
ConnectedPoint::Listener { local_addr, send_back_addr } => {
Self::Listening { local_addr, send_back_addr }
},
}
}
}
+8 -6
View File
@@ -288,8 +288,8 @@ impl PeerInfoBehaviour {
}
},
None => {
let oldest = (self.address_confirmations.len() >=
self.address_confirmations.limiter().max_length() as usize)
let oldest = (self.address_confirmations.len()
>= self.address_confirmations.limiter().max_length() as usize)
.then(|| {
self.address_confirmations.pop_oldest().map(|(address, peers)| {
if peers.len() >= MIN_ADDRESS_CONFIRMATIONS {
@@ -599,10 +599,12 @@ impl NetworkBehaviour for PeerInfoBehaviour {
event: THandlerOutEvent<Self>,
) {
match event {
Either::Left(event) =>
self.ping.on_connection_handler_event(peer_id, connection_id, event),
Either::Right(event) =>
self.identify.on_connection_handler_event(peer_id, connection_id, event),
Either::Left(event) => {
self.ping.on_connection_handler_event(peer_id, connection_id, event)
},
Either::Right(event) => {
self.identify.on_connection_handler_event(peer_id, connection_id, event)
},
}
}
+6 -4
View File
@@ -309,7 +309,7 @@ impl<B: BlockT> NetworkBehaviour for Protocol<B> {
notifications_sink,
negotiated_fallback,
..
} =>
} => {
if set_id == HARDCODED_PEERSETS_SYNC {
let _ = self.sync_handle.report_substream_opened(
peer_id,
@@ -334,8 +334,9 @@ impl<B: BlockT> NetworkBehaviour for Protocol<B> {
None
},
}
},
NotificationsOut::CustomProtocolReplaced { peer_id, notifications_sink, set_id } =>
}
},
NotificationsOut::CustomProtocolReplaced { peer_id, notifications_sink, set_id } => {
if set_id == HARDCODED_PEERSETS_SYNC {
let _ = self
.sync_handle
@@ -349,7 +350,8 @@ impl<B: BlockT> NetworkBehaviour for Protocol<B> {
notifications_sink,
},
)
},
}
},
NotificationsOut::CustomProtocolClosed { peer_id, set_id } => {
if set_id == HARDCODED_PEERSETS_SYNC {
let _ = self.sync_handle.report_substream_closed(peer_id);
@@ -109,12 +109,13 @@ pub mod generic {
let compact = CompactStatus::decode(value)?;
let chain_status = match <Vec<u8>>::decode(value) {
Ok(v) => v,
Err(e) =>
Err(e) => {
if compact.version <= LAST_CHAIN_STATUS_VERSION {
return Err(e);
} else {
Vec::new()
},
}
},
};
let CompactStatus {
@@ -1263,8 +1263,8 @@ impl NetworkBehaviour for Notifications {
for set_id in (0..self.notif_protocols.len()).map(SetId::from) {
match self.peers.entry((peer_id, set_id)).or_insert(PeerState::Poisoned) {
// Requested | PendingRequest => Enabled
st @ &mut PeerState::Requested |
st @ &mut PeerState::PendingRequest { .. } => {
st @ &mut PeerState::Requested
| st @ &mut PeerState::PendingRequest { .. } => {
trace!(target: LOG_TARGET,
"Libp2p => Connected({}, {:?}, {:?}): Connection was requested by PSM.",
peer_id, set_id, endpoint
@@ -1304,10 +1304,10 @@ impl NetworkBehaviour for Notifications {
// In all other states, add this new connection to the list of closed
// inactive connections.
PeerState::Incoming { connections, .. } |
PeerState::Disabled { connections, .. } |
PeerState::DisabledPendingEnable { connections, .. } |
PeerState::Enabled { connections, .. } => {
PeerState::Incoming { connections, .. }
| PeerState::Disabled { connections, .. }
| PeerState::DisabledPendingEnable { connections, .. }
| PeerState::Enabled { connections, .. } => {
trace!(target: LOG_TARGET,
"Libp2p => Connected({}, {:?}, {:?}, {:?}): Secondary connection. Leaving closed.",
peer_id, set_id, endpoint, connection_id);
@@ -1593,9 +1593,9 @@ impl NetworkBehaviour for Notifications {
}
},
PeerState::Requested |
PeerState::PendingRequest { .. } |
PeerState::Backoff { .. } => {
PeerState::Requested
| PeerState::PendingRequest { .. }
| PeerState::Backoff { .. } => {
// This is a serious bug either in this state machine or in libp2p.
error!(target: LOG_TARGET,
"`inject_connection_closed` called for unknown peer {}",
@@ -1629,8 +1629,8 @@ impl NetworkBehaviour for Notifications {
// "Basic" situation: we failed to reach a peer that the peerset
// requested.
st @ PeerState::Requested |
st @ PeerState::PendingRequest { .. } => {
st @ PeerState::Requested
| st @ PeerState::PendingRequest { .. } => {
trace!(target: LOG_TARGET, "PSM <= Dropped({}, {:?})", peer_id, set_id);
self.protocol_controller_handles[usize::from(set_id)]
.dropped(peer_id);
@@ -1639,7 +1639,9 @@ impl NetworkBehaviour for Notifications {
let ban_duration = match st {
PeerState::PendingRequest { timer_deadline, .. }
if timer_deadline > now =>
cmp::max(timer_deadline - now, Duration::from_secs(5)),
{
cmp::max(timer_deadline - now, Duration::from_secs(5))
},
_ => Duration::from_secs(5),
};
@@ -1662,10 +1664,10 @@ impl NetworkBehaviour for Notifications {
// We can still get dial failures even if we are already connected
// to the peer, as an extra diagnostic for an earlier attempt.
st @ PeerState::Disabled { .. } |
st @ PeerState::Enabled { .. } |
st @ PeerState::DisabledPendingEnable { .. } |
st @ PeerState::Incoming { .. } => {
st @ PeerState::Disabled { .. }
| st @ PeerState::Enabled { .. }
| st @ PeerState::DisabledPendingEnable { .. }
| st @ PeerState::Incoming { .. } => {
*entry.into_mut() = st;
},
@@ -1793,8 +1795,8 @@ impl NetworkBehaviour for Notifications {
// more to do.
debug_assert!(matches!(
connec_state,
ConnectionState::OpenDesiredByRemote |
ConnectionState::Closing | ConnectionState::Opening
ConnectionState::OpenDesiredByRemote
| ConnectionState::Closing | ConnectionState::Opening
));
}
} else {
@@ -2005,8 +2007,8 @@ impl NetworkBehaviour for Notifications {
// All connections in `Disabled` and `DisabledPendingEnable` have been sent a
// `Close` message already, and as such ignore any `CloseDesired` message.
state @ PeerState::Disabled { .. } |
state @ PeerState::DisabledPendingEnable { .. } => {
state @ PeerState::Disabled { .. }
| state @ PeerState::DisabledPendingEnable { .. } => {
*entry.into_mut() = state;
},
state => {
@@ -2026,10 +2028,10 @@ impl NetworkBehaviour for Notifications {
match self.peers.get_mut(&(peer_id, set_id)) {
// Move the connection from `Closing` to `Closed`.
Some(PeerState::Incoming { connections, .. }) |
Some(PeerState::DisabledPendingEnable { connections, .. }) |
Some(PeerState::Disabled { connections, .. }) |
Some(PeerState::Enabled { connections, .. }) => {
Some(PeerState::Incoming { connections, .. })
| Some(PeerState::DisabledPendingEnable { connections, .. })
| Some(PeerState::Disabled { connections, .. })
| Some(PeerState::Enabled { connections, .. }) => {
if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| {
*c == connection_id && matches!(s, ConnectionState::Closing)
}) {
@@ -2094,8 +2096,8 @@ impl NetworkBehaviour for Notifications {
*connec_state = ConnectionState::Open(notifications_sink);
} else if let Some((_, connec_state)) =
connections.iter_mut().find(|(c, s)| {
*c == connection_id &&
matches!(s, ConnectionState::OpeningThenClosing)
*c == connection_id
&& matches!(s, ConnectionState::OpeningThenClosing)
}) {
*connec_state = ConnectionState::Closing;
} else {
@@ -2105,9 +2107,9 @@ impl NetworkBehaviour for Notifications {
}
},
Some(PeerState::Incoming { connections, .. }) |
Some(PeerState::DisabledPendingEnable { connections, .. }) |
Some(PeerState::Disabled { connections, .. }) => {
Some(PeerState::Incoming { connections, .. })
| Some(PeerState::DisabledPendingEnable { connections, .. })
| Some(PeerState::Disabled { connections, .. }) => {
if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| {
*c == connection_id && matches!(s, ConnectionState::OpeningThenClosing)
}) {
@@ -2156,8 +2158,8 @@ impl NetworkBehaviour for Notifications {
*connec_state = ConnectionState::Closed;
} else if let Some((_, connec_state)) =
connections.iter_mut().find(|(c, s)| {
*c == connection_id &&
matches!(s, ConnectionState::OpeningThenClosing)
*c == connection_id
&& matches!(s, ConnectionState::OpeningThenClosing)
}) {
*connec_state = ConnectionState::Closing;
} else {
@@ -2181,17 +2183,17 @@ impl NetworkBehaviour for Notifications {
*entry.into_mut() = PeerState::Enabled { connections };
}
},
mut state @ PeerState::Incoming { .. } |
mut state @ PeerState::DisabledPendingEnable { .. } |
mut state @ PeerState::Disabled { .. } => {
mut state @ PeerState::Incoming { .. }
| mut state @ PeerState::DisabledPendingEnable { .. }
| mut state @ PeerState::Disabled { .. } => {
match &mut state {
PeerState::Incoming { connections, .. } |
PeerState::Disabled { connections, .. } |
PeerState::DisabledPendingEnable { connections, .. } => {
PeerState::Incoming { connections, .. }
| PeerState::Disabled { connections, .. }
| PeerState::DisabledPendingEnable { connections, .. } => {
if let Some((_, connec_state)) =
connections.iter_mut().find(|(c, s)| {
*c == connection_id &&
matches!(s, ConnectionState::OpeningThenClosing)
*c == connection_id
&& matches!(s, ConnectionState::OpeningThenClosing)
}) {
*connec_state = ConnectionState::Closing;
} else {
@@ -2301,8 +2303,8 @@ impl NetworkBehaviour for Notifications {
NotificationCommand::SetHandshake(handshake) => {
self.set_notif_protocol_handshake(set_id.into(), handshake);
},
NotificationCommand::OpenSubstream(_peer) |
NotificationCommand::CloseSubstream(_peer) => {
NotificationCommand::OpenSubstream(_peer)
| NotificationCommand::CloseSubstream(_peer) => {
todo!("substream control not implemented");
},
},
@@ -2416,8 +2418,9 @@ mod tests {
(ConnectionState::Closing, ConnectionState::Closing) => true,
(ConnectionState::Opening, ConnectionState::Opening) => true,
(ConnectionState::OpeningThenClosing, ConnectionState::OpeningThenClosing) => true,
(ConnectionState::OpenDesiredByRemote, ConnectionState::OpenDesiredByRemote) =>
true,
(ConnectionState::OpenDesiredByRemote, ConnectionState::OpenDesiredByRemote) => {
true
},
(ConnectionState::Open(_), ConnectionState::Open(_)) => true,
_ => false,
}
@@ -560,8 +560,8 @@ impl ConnectionHandler for NotifsHandler {
// to do.
return;
},
State::Opening { ref mut in_substream, .. } |
State::Open { ref mut in_substream, .. } => {
State::Opening { ref mut in_substream, .. }
| State::Open { ref mut in_substream, .. } => {
if in_substream.is_some() {
// Same remark as above.
return;
@@ -579,8 +579,8 @@ impl ConnectionHandler for NotifsHandler {
let (new_open, protocol_index) = (outbound.protocol, outbound.info);
match self.protocols[protocol_index].state {
State::Closed { ref mut pending_opening } |
State::OpenDesiredByRemote { ref mut pending_opening, .. } => {
State::Closed { ref mut pending_opening }
| State::OpenDesiredByRemote { ref mut pending_opening, .. } => {
debug_assert!(*pending_opening);
*pending_opening = false;
},
@@ -626,8 +626,8 @@ impl ConnectionHandler for NotifsHandler {
[dial_upgrade_error.info]
.state
{
State::Closed { ref mut pending_opening } |
State::OpenDesiredByRemote { ref mut pending_opening, .. } => {
State::Closed { ref mut pending_opening }
| State::OpenDesiredByRemote { ref mut pending_opening, .. } => {
debug_assert!(*pending_opening);
*pending_opening = false;
},
@@ -786,10 +786,11 @@ impl ConnectionHandler for NotifsHandler {
// available in `notifications_sink_rx`. This avoids waking up the task when
// a substream is ready to send if there isn't actually something to send.
match Pin::new(&mut *notifications_sink_rx).as_mut().poll_peek(cx) {
Poll::Ready(Some(&NotificationsSinkMessage::ForceClose)) =>
Poll::Ready(Some(&NotificationsSinkMessage::ForceClose)) => {
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(
NotifsHandlerOut::Close { protocol_index },
)),
))
},
Poll::Ready(Some(&NotificationsSinkMessage::Notification { .. })) => {},
Poll::Ready(None) | Poll::Pending => break,
}
@@ -803,11 +804,12 @@ impl ConnectionHandler for NotifsHandler {
// Now that the substream is ready for a message, grab what to send.
let message = match notifications_sink_rx.poll_next_unpin(cx) {
Poll::Ready(Some(NotificationsSinkMessage::Notification { message })) =>
message,
Poll::Ready(Some(NotificationsSinkMessage::ForceClose)) |
Poll::Ready(None) |
Poll::Pending => {
Poll::Ready(Some(NotificationsSinkMessage::Notification { message })) => {
message
},
Poll::Ready(Some(NotificationsSinkMessage::ForceClose))
| Poll::Ready(None)
| Poll::Pending => {
// Should never be reached, as per `poll_peek` above.
debug_assert!(false);
break;
@@ -839,10 +841,12 @@ impl ConnectionHandler for NotifsHandler {
*out_substream = None;
let reason = match error {
NotificationsOutError::Io(_) | NotificationsOutError::Closed =>
CloseReason::RemoteRequest,
NotificationsOutError::UnexpectedData =>
CloseReason::ProtocolMisbehavior,
NotificationsOutError::Io(_) | NotificationsOutError::Closed => {
CloseReason::RemoteRequest
},
NotificationsOutError::UnexpectedData => {
CloseReason::ProtocolMisbehavior
},
};
let event = NotifsHandlerOut::CloseDesired { protocol_index, reason };
@@ -851,10 +855,10 @@ impl ConnectionHandler for NotifsHandler {
};
},
State::Closed { .. } |
State::Opening { .. } |
State::Open { out_substream: None, .. } |
State::OpenDesiredByRemote { .. } => {},
State::Closed { .. }
| State::Opening { .. }
| State::Open { out_substream: None, .. }
| State::OpenDesiredByRemote { .. } => {},
}
}
@@ -863,11 +867,11 @@ impl ConnectionHandler for NotifsHandler {
// Inbound substreams being closed is always tolerated, except for the
// `OpenDesiredByRemote` state which might need to be switched back to `Closed`.
match &mut self.protocols[protocol_index].state {
State::Closed { .. } |
State::Open { in_substream: None, .. } |
State::Opening { in_substream: None, .. } => {},
State::Closed { .. }
| State::Open { in_substream: None, .. }
| State::Opening { in_substream: None, .. } => {},
State::Open { in_substream: in_substream @ Some(_), .. } =>
State::Open { in_substream: in_substream @ Some(_), .. } => {
match futures::prelude::stream::Stream::poll_next(
Pin::new(in_substream.as_mut().unwrap()),
cx,
@@ -878,9 +882,10 @@ impl ConnectionHandler for NotifsHandler {
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event));
},
Poll::Ready(None) | Poll::Ready(Some(Err(_))) => *in_substream = None,
},
}
},
State::OpenDesiredByRemote { in_substream, pending_opening } =>
State::OpenDesiredByRemote { in_substream, pending_opening } => {
match NotificationsInSubstream::poll_process(Pin::new(in_substream), cx) {
Poll::Pending => {},
Poll::Ready(Ok(())) => {},
@@ -894,9 +899,10 @@ impl ConnectionHandler for NotifsHandler {
},
));
},
},
}
},
State::Opening { in_substream: in_substream @ Some(_), .. } =>
State::Opening { in_substream: in_substream @ Some(_), .. } => {
match NotificationsInSubstream::poll_process(
Pin::new(in_substream.as_mut().unwrap()),
cx,
@@ -904,7 +910,8 @@ impl ConnectionHandler for NotifsHandler {
Poll::Pending => {},
Poll::Ready(Ok(())) => {},
Poll::Ready(Err(_)) => *in_substream = None,
},
}
},
}
}
@@ -1003,8 +1010,9 @@ pub mod tests {
};
futures::future::poll_fn(|cx| match substream.notifications.poll_next_unpin(cx) {
Poll::Ready(Some(NotificationsSinkMessage::Notification { message })) =>
Poll::Ready(Some(message)),
Poll::Ready(Some(NotificationsSinkMessage::Notification { message })) => {
Poll::Ready(Some(message))
},
Poll::Pending => Poll::Ready(None),
Poll::Ready(Some(NotificationsSinkMessage::ForceClose)) | Poll::Ready(None) => {
panic!("sink closed")
@@ -1108,8 +1116,9 @@ pub mod tests {
) -> Poll<Result<usize, Error>> {
match self.rx.poll_recv(cx) {
Poll::Ready(Some(data)) => self.rx_buffer.extend_from_slice(&data),
Poll::Ready(None) =>
return Poll::Ready(Err(std::io::ErrorKind::UnexpectedEof.into())),
Poll::Ready(None) => {
return Poll::Ready(Err(std::io::ErrorKind::UnexpectedEof.into()))
},
_ => {},
}
@@ -295,12 +295,13 @@ impl NotificationService for NotificationHandle {
async fn next_event(&mut self) -> Option<NotificationEvent> {
loop {
match self.rx.next().await? {
InnerNotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx } =>
InnerNotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx } => {
return Some(NotificationEvent::ValidateInboundSubstream {
peer: peer.into(),
handshake,
result_tx,
}),
})
},
InnerNotificationEvent::NotificationStreamOpened {
peer,
handshake,
@@ -326,11 +327,12 @@ impl NotificationService for NotificationHandle {
self.peers.remove(&peer);
return Some(NotificationEvent::NotificationStreamClosed { peer: peer.into() });
},
InnerNotificationEvent::NotificationReceived { peer, notification } =>
InnerNotificationEvent::NotificationReceived { peer, notification } => {
return Some(NotificationEvent::NotificationReceived {
peer: peer.into(),
notification,
}),
})
},
InnerNotificationEvent::NotificationSinkReplaced { peer, sink } => {
match self.peers.get_mut(&peer) {
None => log::error!(
@@ -515,8 +517,9 @@ impl ProtocolHandle {
tokio::spawn(async move {
while let Some(event) = results.next().await {
match event {
Err(_) | Ok(ValidationResult::Reject) =>
return tx.send(ValidationResult::Reject),
Err(_) | Ok(ValidationResult::Reject) => {
return tx.send(ValidationResult::Reject)
},
Ok(ValidationResult::Accept) => {},
}
}
@@ -209,12 +209,12 @@ async fn libp2p_to_litep2p_substream() {
let mut libp2p_1111_seen = false;
let mut libp2p_2222_seen = false;
while !libp2p_ready ||
!litep2p_ready ||
!litep2p_3333_seen ||
!litep2p_4444_seen ||
!libp2p_1111_seen ||
!libp2p_2222_seen
while !libp2p_ready
|| !litep2p_ready
|| !litep2p_3333_seen
|| !litep2p_4444_seen
|| !libp2p_1111_seen
|| !libp2p_2222_seen
{
tokio::select! {
event = libp2p.select_next_some() => match event {
@@ -318,9 +318,9 @@ async fn reconnect_after_disconnect() {
NotificationsOut::CustomProtocolClosed { .. },
)) => match service1_state {
ServiceState::FirstConnec => service1_state = ServiceState::Disconnected,
ServiceState::ConnectedAgain |
ServiceState::NotConnected |
ServiceState::Disconnected => panic!(),
ServiceState::ConnectedAgain
| ServiceState::NotConnected
| ServiceState::Disconnected => panic!(),
},
future::Either::Right(SwarmEvent::Behaviour(
NotificationsOut::CustomProtocolOpen { .. },
@@ -340,9 +340,9 @@ async fn reconnect_after_disconnect() {
NotificationsOut::CustomProtocolClosed { .. },
)) => match service2_state {
ServiceState::FirstConnec => service2_state = ServiceState::Disconnected,
ServiceState::ConnectedAgain |
ServiceState::NotConnected |
ServiceState::Disconnected => panic!(),
ServiceState::ConnectedAgain
| ServiceState::NotConnected
| ServiceState::Disconnected => panic!(),
},
_ => {},
}
@@ -355,12 +355,12 @@ async fn reconnect_after_disconnect() {
// In this case the disconnected node does not transit via `ServiceState::NotConnected`
// and stays in `ServiceState::FirstConnec`.
// TODO: update this once the fix is finally merged.
if service1_state == ServiceState::ConnectedAgain &&
service2_state == ServiceState::ConnectedAgain ||
service1_state == ServiceState::ConnectedAgain &&
service2_state == ServiceState::FirstConnec ||
service1_state == ServiceState::FirstConnec &&
service2_state == ServiceState::ConnectedAgain
if service1_state == ServiceState::ConnectedAgain
&& service2_state == ServiceState::ConnectedAgain
|| service1_state == ServiceState::ConnectedAgain
&& service2_state == ServiceState::FirstConnec
|| service1_state == ServiceState::FirstConnec
&& service2_state == ServiceState::ConnectedAgain
{
break;
}
@@ -384,8 +384,8 @@ async fn reconnect_after_disconnect() {
};
match event {
SwarmEvent::Behaviour(NotificationsOut::CustomProtocolOpen { .. }) |
SwarmEvent::Behaviour(NotificationsOut::CustomProtocolClosed { .. }) => panic!(),
SwarmEvent::Behaviour(NotificationsOut::CustomProtocolOpen { .. })
| SwarmEvent::Behaviour(NotificationsOut::CustomProtocolClosed { .. }) => panic!(),
_ => {},
}
}

Some files were not shown because too many files have changed in this diff Show More