Fix spelling mistakes across the whole repository (#3808)

**Update:** Pushed additional changes based on the review comments.

**This pull request fixes various spelling mistakes in this
repository.**

Most of the changes are contained in the first **3** commits:

- `Fix spelling mistakes in comments and docs`

- `Fix spelling mistakes in test names`

- `Fix spelling mistakes in error messages, panic messages, logs and
tracing`

Other source code spelling mistakes are separated into individual
commits for easier reviewing:

- `Fix the spelling of 'authority'`

- `Fix the spelling of 'REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY'`

- `Fix the spelling of 'prev_enqueud_messages'`

- `Fix the spelling of 'endpoint'`

- `Fix the spelling of 'children'`

- `Fix the spelling of 'PenpalSiblingSovereignAccount'`

- `Fix the spelling of 'PenpalSudoAccount'`

- `Fix the spelling of 'insufficient'`

- `Fix the spelling of 'PalletXcmExtrinsicsBenchmark'`

- `Fix the spelling of 'subtracted'`

- `Fix the spelling of 'CandidatePendingAvailability'`

- `Fix the spelling of 'exclusive'`

- `Fix the spelling of 'until'`

- `Fix the spelling of 'discriminator'`

- `Fix the spelling of 'nonexistent'`

- `Fix the spelling of 'subsystem'`

- `Fix the spelling of 'indices'`

- `Fix the spelling of 'committed'`

- `Fix the spelling of 'topology'`

- `Fix the spelling of 'response'`

- `Fix the spelling of 'beneficiary'`

- `Fix the spelling of 'formatted'`

- `Fix the spelling of 'UNKNOWN_PROOF_REQUEST'`

- `Fix the spelling of 'succeeded'`

- `Fix the spelling of 'reopened'`

- `Fix the spelling of 'proposer'`

- `Fix the spelling of 'InstantiationNonce'`

- `Fix the spelling of 'depositor'`

- `Fix the spelling of 'expiration'`

- `Fix the spelling of 'phantom'`

- `Fix the spelling of 'AggregatedKeyValue'`

- `Fix the spelling of 'randomness'`

- `Fix the spelling of 'defendant'`

- `Fix the spelling of 'AquaticMammal'`

- `Fix the spelling of 'transactions'`

- `Fix the spelling of 'PassingTracingSubscriber'`

- `Fix the spelling of 'TxSignaturePayload'`

- `Fix the spelling of 'versioning'`

- `Fix the spelling of 'descendant'`

- `Fix the spelling of 'overridden'`

- `Fix the spelling of 'network'`

Let me know if this structure is adequate.

**Note:** The usage of the words `Merkle`, `Merkelize`, `Merklization`,
`Merkelization`, and `Merkleization` is somewhat inconsistent, but I left
it as it is.

~~**Note:** In some places the term `Receival` is used to refer to
message reception; IMO `Reception` is the correct word here, but I left
it as it is.~~

~~**Note:** In some places the term `Overlayed` is used instead of the
more acceptable version `Overlaid` but I also left it as it is.~~

~~**Note:** In some places the term `Applyable` is used instead of the
correct version `Applicable` but I also left it as it is.~~

**Note:** Both British and American English spellings, e.g. `judgement`
vs `judgment`, `initialise` vs `initialize`, `optimise` vs `optimize`,
etc., are present in different places, but I suppose that's
understandable given the number of contributors.

~~**Note:** There is a spelling mistake in `.github/CODEOWNERS` but it
triggers errors in CI when I make changes to it, so I left it as it
is.~~
This commit is contained in:
Dcompoze
2024-03-26 13:57:57 +00:00
committed by GitHub
parent b839c995c0
commit 002d9260f9
463 changed files with 1119 additions and 1017 deletions
+3 -3
View File
@@ -110,7 +110,7 @@ const INACTIVITY_EVICT_THRESHOLD: Duration = Duration::from_secs(30);
/// Parachain collator may incorrectly get evicted because it's waiting to receive a number of
/// relaychain blocks before it can start creating parachain blocks. During this wait,
/// `SyncingEngine` still counts it as active and as the peer is not sending blocks, it may get
/// evicted if a block is not received within the first 30 secons since the peer connected.
/// evicted if a block is not received within the first 30 seconds since the peer connected.
///
/// To prevent this from happening, define a threshold for how long `SyncingEngine` should wait
/// before it starts evicting peers.
@@ -424,7 +424,7 @@ where
.expect("Genesis block exists; qed"),
);
// Split warp sync params into warp sync config and a channel to retreive target block
// Split warp sync params into warp sync config and a channel to retrieve target block
// header.
let (warp_sync_config, warp_sync_target_block_header_rx) =
warp_sync_params.map_or((None, None), |params| {
@@ -1057,7 +1057,7 @@ where
// still be under validation. If the peer has different genesis than the
// local node the validation fails but the peer cannot be reported in
// `validate_connection()` as that is also called by
// `ValiateInboundSubstream` which means that the peer is still being
// `ValidateInboundSubstream` which means that the peer is still being
// validated and banning the peer when handling that event would
// result in peer getting dropped twice.
//
@@ -185,7 +185,7 @@ where
+ Sync
+ 'static,
{
/// Initialize a new syncing startegy.
/// Initialize a new syncing strategy.
pub fn new(
config: SyncingConfig,
client: Arc<Client>,
@@ -418,7 +418,7 @@ where
self.state.is_some() ||
match self.chain_sync {
Some(ref s) => s.status().state.is_major_syncing(),
None => unreachable!("At least one syncing startegy is active; qed"),
None => unreachable!("At least one syncing strategy is active; qed"),
}
}
@@ -429,7 +429,7 @@ where
/// Returns the current sync status.
pub fn status(&self) -> SyncStatus<B> {
// This function presumes that startegies are executed serially and must be refactored
// This function presumes that strategies are executed serially and must be refactored
// once we have parallel strategies.
if let Some(ref warp) = self.warp {
warp.status()
@@ -438,7 +438,7 @@ where
} else if let Some(ref chain_sync) = self.chain_sync {
chain_sync.status()
} else {
unreachable!("At least one syncing startegy is always active; qed")
unreachable!("At least one syncing strategy is always active; qed")
}
}
@@ -518,7 +518,7 @@ where
/// Proceed with the next strategy if the active one finished.
pub fn proceed_to_next(&mut self) -> Result<(), ClientError> {
// The strategies are switched as `WarpSync` -> `StateStartegy` -> `ChainSync`.
// The strategies are switched as `WarpSync` -> `StateStrategy` -> `ChainSync`.
if let Some(ref mut warp) = self.warp {
match warp.take_result() {
Some(res) => {
@@ -569,7 +569,7 @@ where
},
}
} else if let Some(state) = &self.state {
if state.is_succeded() {
if state.is_succeeded() {
info!(target: LOG_TARGET, "State sync is complete, continuing with block sync.");
} else {
error!(target: LOG_TARGET, "State sync failed. Falling back to full sync.");
@@ -117,7 +117,7 @@ mod rep {
/// Reputation change for peers which send us a block with bad justifications.
pub const BAD_JUSTIFICATION: Rep = Rep::new(-(1 << 16), "Bad justification");
/// Reputation change when a peer sent us invlid ancestry result.
/// Reputation change when a peer sent us invalid ancestry result.
pub const UNKNOWN_ANCESTOR: Rep = Rep::new(-(1 << 16), "DB Error");
/// Peer response data does not have requested bits.
@@ -1334,7 +1334,7 @@ where
PeerSyncState::DownloadingJustification(_) => {
// Peers that were downloading justifications
// should be kept in that state.
// We make sure our commmon number is at least something we have.
// We make sure our common number is at least something we have.
trace!(
target: LOG_TARGET,
"Keeping peer {} after restart, updating common number from={} => to={} (our best).",
@@ -189,7 +189,7 @@ fn restart_doesnt_affect_peers_downloading_finality_data() {
assert_eq!(sync.peers.get(&peer_id3).unwrap().common_number, 50);
}
/// Send a block annoucnement for the given `header`.
/// Send a block announcement for the given `header`.
fn send_block_announce(header: Header, peer_id: PeerId, sync: &mut ChainSync<Block, TestClient>) {
let announce = BlockAnnounce {
header: header.clone(),
@@ -278,7 +278,7 @@ fn unwrap_from_block_number(from: FromBlock<Hash, u64>) -> u64 {
/// announcement from this node in its sync process. Meaning our common number didn't change. It
/// is now expected that we start an ancestor search to find the common number.
#[test]
fn do_ancestor_search_when_common_block_to_best_qeued_gap_is_to_big() {
fn do_ancestor_search_when_common_block_to_best_queued_gap_is_to_big() {
sp_tracing::try_init_simple();
let blocks = {
@@ -472,7 +472,7 @@ fn can_sync_huge_fork() {
let actions = sync.take_actions().collect::<Vec<_>>();
request = if actions.is_empty() {
// We found the ancenstor
// We found the ancestor
break
} else {
assert_eq!(actions.len(), 1);
@@ -607,7 +607,7 @@ fn syncs_fork_without_duplicate_requests() {
let actions = sync.take_actions().collect::<Vec<_>>();
request = if actions.is_empty() {
// We found the ancenstor
// We found the ancestor
break
} else {
assert_eq!(actions.len(), 1);
@@ -79,7 +79,7 @@ pub struct StateStrategy<B: BlockT> {
state_sync: Box<dyn StateSyncProvider<B>>,
peers: HashMap<PeerId, Peer<B>>,
actions: Vec<StateStrategyAction<B>>,
succeded: bool,
succeeded: bool,
}
impl<B: BlockT> StateStrategy<B> {
@@ -110,7 +110,7 @@ impl<B: BlockT> StateStrategy<B> {
)),
peers,
actions: Vec::new(),
succeded: false,
succeeded: false,
}
}
@@ -129,7 +129,7 @@ impl<B: BlockT> StateStrategy<B> {
})
.collect(),
actions: Vec::new(),
succeded: false,
succeeded: false,
}
}
@@ -260,7 +260,7 @@ impl<B: BlockT> StateStrategy<B> {
"Failed to import target block with state: {e:?}."
);
});
self.succeded |= results.into_iter().any(|result| result.is_ok());
self.succeeded |= results.into_iter().any(|result| result.is_ok());
self.actions.push(StateStrategyAction::Finished);
}
}
@@ -342,10 +342,10 @@ impl<B: BlockT> StateStrategy<B> {
std::mem::take(&mut self.actions).into_iter()
}
/// Check if state sync has succeded.
/// Check if state sync has succeeded.
#[must_use]
pub fn is_succeded(&self) -> bool {
self.succeded
pub fn is_succeeded(&self) -> bool {
self.succeeded
}
}
@@ -669,7 +669,7 @@ mod test {
}
#[test]
fn succesfully_importing_target_block_finishes_strategy() {
fn successfully_importing_target_block_finishes_strategy() {
let target_hash = Hash::random();
let mut state_sync_provider = MockStateSync::<Block>::new();
state_sync_provider.expect_target_hash().return_const(target_hash);
@@ -968,7 +968,7 @@ mod test {
warp_sync.on_warp_proof_response(&request_peer_id, EncodedProof(Vec::new()));
// We only interested in alredy generated actions, not new requests.
// We only interested in already generated actions, not new requests.
let actions = std::mem::take(&mut warp_sync.actions);
assert_eq!(actions.len(), 1);
assert!(matches!(
@@ -57,7 +57,7 @@ pub fn generate_request_response_config<Hash: AsRef<[u8]>>(
}
}
/// Generate the grandpa warp sync protocol name from the genesi hash and fork id.
/// Generate the grandpa warp sync protocol name from the genesis hash and fork id.
fn generate_protocol_name<Hash: AsRef<[u8]>>(genesis_hash: Hash, fork_id: Option<&str>) -> String {
let genesis_hash = genesis_hash.as_ref();
if let Some(fork_id) = fork_id {