Rewrap all comments to 100 line width (#9490)

* reformat everything again

* manual formatting

* last manual fix

* Fix build
This commit is contained in:
Kian Paimani
2021-08-11 16:56:55 +02:00
committed by GitHub
parent 8180c58700
commit abd08e29ce
258 changed files with 1776 additions and 1447 deletions
+2 -1
View File
@@ -138,7 +138,8 @@ impl core::Benchmark for ImportBenchmark {
// should be 5 per signed extrinsic + 1 per unsigned
// we have 1 unsigned and the rest are signed in the block
// those 5 events per signed are:
// - new account (RawEvent::NewAccount) as we always transfer fund to non-existant account
// - new account (RawEvent::NewAccount) as we always transfer fund to
// non-existant account
// - endowed (RawEvent::Endowed) for this new account
// - successful transfer (RawEvent::Transfer) for this transfer operation
// - deposit event for charging transaction fee
+3
View File
@@ -78,10 +78,13 @@ fn session_keys(
}
fn staging_testnet_config_genesis() -> GenesisConfig {
#[rustfmt::skip]
// stash, controller, session-key
// generated with secret:
// for i in 1 2 3 4 ; do for j in stash controller; do subkey inspect "$secret"/fir/$j/$i; done; done
//
// and
//
// for i in 1 2 3 4 ; do for j in session; do subkey --ed25519 inspect "$secret"//fir//$j//$i; done; done
let initial_authorities: Vec<(
+4 -2
View File
@@ -50,7 +50,8 @@ pub fn bloaty_code_unwrap() -> &'static [u8] {
)
}
/// Default transfer fee. This will use the same logic that is implemented in transaction-payment module.
/// Default transfer fee. This will use the same logic that is implemented in transaction-payment
/// module.
///
/// Note that reads the multiplier from storage directly, hence to get the fee of `extrinsic`
/// at block `n`, it must be called prior to executing block `n` to do the calculation with the
@@ -721,7 +722,8 @@ fn native_big_block_import_succeeds() {
fn native_big_block_import_fails_on_fallback() {
let mut t = new_test_ext(compact_code_unwrap(), false);
// We set the heap pages to 8 because we know that should give an OOM in WASM with the given block.
// We set the heap pages to 8 because we know that should give an OOM in WASM with the given
// block.
set_heap_pages(&mut t.ext(), 8);
assert!(executor_call::<NeverNativeValue, fn() -> _>(
+2 -2
View File
@@ -306,8 +306,8 @@ impl<'a> Iterator for BlockContentIterator<'a> {
BlockType::RandomTransfersReaping => {
Call::Balances(BalancesCall::transfer(
sp_runtime::MultiAddress::Id(receiver),
// Transfer so that ending balance would be 1 less than existential deposit
// so that we kill the sender account.
// Transfer so that ending balance would be 1 less than existential
// deposit so that we kill the sender account.
100 * DOLLARS - (node_runtime::ExistentialDeposit::get() - 1),
))
},
@@ -217,7 +217,7 @@ impl Link {
/// | 0 | next element link |
/// +--------------+-------------------+
/// ```
///
///
/// ## Occupied header
/// ```ignore
/// 64 32 0
+14 -7
View File
@@ -365,14 +365,16 @@ pub trait StorageProvider<Block: BlockT, B: Backend<Block>> {
key: &StorageKey,
) -> sp_blockchain::Result<Option<Block::Hash>>;
/// Given a `BlockId` and a key prefix, return the matching child storage keys and values in that block.
/// Given a `BlockId` and a key prefix, return the matching child storage keys and values in
/// that block.
fn storage_pairs(
&self,
id: &BlockId<Block>,
key_prefix: &StorageKey,
) -> sp_blockchain::Result<Vec<(StorageKey, StorageData)>>;
/// Given a `BlockId` and a key prefix, return a `KeyIterator` iterates matching storage keys in that block.
/// Given a `BlockId` and a key prefix, return a `KeyIterator` iterates matching storage keys in
/// that block.
fn storage_keys_iter<'a>(
&self,
id: &BlockId<Block>,
@@ -380,7 +382,8 @@ pub trait StorageProvider<Block: BlockT, B: Backend<Block>> {
start_key: Option<&StorageKey>,
) -> sp_blockchain::Result<KeyIterator<'a, B::State, Block>>;
/// Given a `BlockId`, a key and a child storage key, return the value under the key in that block.
/// Given a `BlockId`, a key and a child storage key, return the value under the key in that
/// block.
fn child_storage(
&self,
id: &BlockId<Block>,
@@ -388,7 +391,8 @@ pub trait StorageProvider<Block: BlockT, B: Backend<Block>> {
key: &StorageKey,
) -> sp_blockchain::Result<Option<StorageData>>;
/// Given a `BlockId`, a key prefix, and a child storage key, return the matching child storage keys.
/// Given a `BlockId`, a key prefix, and a child storage key, return the matching child storage
/// keys.
fn child_storage_keys(
&self,
id: &BlockId<Block>,
@@ -406,7 +410,8 @@ pub trait StorageProvider<Block: BlockT, B: Backend<Block>> {
start_key: Option<&StorageKey>,
) -> sp_blockchain::Result<KeyIterator<'a, B::State, Block>>;
/// Given a `BlockId`, a key and a child storage key, return the hash under the key in that block.
/// Given a `BlockId`, a key and a child storage key, return the hash under the key in that
/// block.
fn child_storage_hash(
&self,
id: &BlockId<Block>,
@@ -569,7 +574,8 @@ pub trait PrunableStateChangesTrieStorage<Block: BlockT>:
) -> sp_blockchain::Result<ChangesTrieConfigurationRange<NumberFor<Block>, Block::Hash>>;
/// Get end block (inclusive) of oldest pruned max-level (or skewed) digest trie blocks range.
/// It is guaranteed that we have no any changes tries before (and including) this block.
/// It is guaranteed that all existing changes tries after this block are not yet pruned (if created).
/// It is guaranteed that all existing changes tries after this block are not yet pruned (if
/// created).
fn oldest_pruned_digest_range_end(&self) -> NumberFor<Block>;
}
@@ -616,7 +622,8 @@ pub trait ProvideChtRoots<Block: BlockT> {
block: NumberFor<Block>,
) -> sp_blockchain::Result<Option<Block::Hash>>;
/// Get changes trie CHT root for given block. Returns None if the block is not a part of any CHT.
/// Get changes trie CHT root for given block. Returns None if the block is not a part of any
/// CHT.
fn changes_trie_cht_root(
&self,
cht_size: NumberFor<Block>,
+2 -1
View File
@@ -47,7 +47,8 @@ pub fn size<N: From<u32>>() -> N {
SIZE.into()
}
/// Returns Some(cht_number) if CHT is need to be built when the block with given number is canonized.
/// Returns Some(cht_number) if CHT is need to be built when the block with given number is
/// canonized.
pub fn is_build_required<N>(cht_size: N, block_num: N) -> Option<N>
where
N: Clone + AtLeast32Bit,
+2 -1
View File
@@ -125,7 +125,8 @@ where
displaced
}
/// Note a block height finalized, displacing all leaves with number less than the finalized block's.
/// Note a block height finalized, displacing all leaves with number less than the finalized
/// block's.
///
/// Although it would be more technically correct to also prune out leaves at the
/// same number as the finalized block, but with different hashes, the current behavior
+7 -6
View File
@@ -55,11 +55,11 @@ pub trait ProofProvider<Block: BlockT> {
id: &BlockId<Block>,
) -> sp_blockchain::Result<(Block::Header, StorageProof)>;
/// Get proof for computation of (block, extrinsic) pairs where key has been changed at given blocks range.
/// `min` is the hash of the first block, which changes trie root is known to the requester - when we're using
/// changes tries from ascendants of this block, we should provide proofs for changes tries roots
/// `max` is the hash of the last block known to the requester - we can't use changes tries from descendants
/// of this block.
/// Get proof for computation of (block, extrinsic) pairs where key has been changed at given
/// blocks range. `min` is the hash of the first block, which changes trie root is known to the
/// requester - when we're using changes tries from ascendants of this block, we should provide
/// proofs for changes tries roots `max` is the hash of the last block known to the requester -
/// we can't use changes tries from descendants of this block.
/// Works only for runtimes that are supporting changes tries.
fn key_changes_proof(
&self,
@@ -72,7 +72,8 @@ pub trait ProofProvider<Block: BlockT> {
) -> sp_blockchain::Result<ChangesProof<Block::Header>>;
/// Given a `BlockId` iterate over all storage values starting at `start_key` exclusively,
/// building proofs until size limit is reached. Returns combined proof and the number of collected keys.
/// building proofs until size limit is reached. Returns combined proof and the number of
/// collected keys.
fn read_proof_collection(
&self,
id: &BlockId<Block>,
@@ -87,8 +87,8 @@ impl Default for WorkerConfig {
max_publish_interval: Duration::from_secs(1 * 60 * 60),
keystore_refresh_interval: Duration::from_secs(60),
// External addresses of remote authorities can change at any given point in time. The
// interval on which to trigger new queries for the current and next authorities is a trade
// off between efficiency and performance.
// interval on which to trigger new queries for the current and next authorities is a
// trade off between efficiency and performance.
//
// Querying 700 [`AuthorityId`]s takes ~8m on the Kusama DHT (16th Nov 2020) when
// comparing `authority_discovery_authority_addresses_requested_total` and
@@ -91,7 +91,8 @@ pub enum Role {
///
/// 4. Put addresses and signature as a record with the authority id as a key on a Kademlia DHT.
///
/// When constructed with either [`Role::PublishAndDiscover`] or [`Role::Discover`] a [`Worker`] will
/// When constructed with either [`Role::PublishAndDiscover`] or [`Role::Discover`] a [`Worker`]
/// will
///
/// 1. Retrieve the current and next set of authorities.
///
@@ -68,8 +68,8 @@ pub struct ProposerFactory<A, B, C, PR> {
metrics: PrometheusMetrics,
/// The default block size limit.
///
/// If no `block_size_limit` is passed to [`sp_consensus::Proposer::propose`], this block size limit will be
/// used.
/// If no `block_size_limit` is passed to [`sp_consensus::Proposer::propose`], this block size
/// limit will be used.
default_block_size_limit: usize,
telemetry: Option<TelemetryHandle>,
/// When estimating the block size, should the proof be included?
@@ -81,7 +81,8 @@ pub struct ProposerFactory<A, B, C, PR> {
impl<A, B, C> ProposerFactory<A, B, C, DisableProofRecording> {
/// Create a new proposer factory.
///
/// Proof recording will be disabled when using proposers built by this instance to build blocks.
/// Proof recording will be disabled when using proposers built by this instance to build
/// blocks.
pub fn new(
spawn_handle: impl SpawnNamed + 'static,
client: Arc<C>,
@@ -140,7 +141,8 @@ impl<A, B, C, PR> ProposerFactory<A, B, C, PR> {
/// The default value for the block size limit is:
/// [`DEFAULT_BLOCK_SIZE_LIMIT`].
///
/// If there is no block size limit passed to [`sp_consensus::Proposer::propose`], this value will be used.
/// If there is no block size limit passed to [`sp_consensus::Proposer::propose`], this value
/// will be used.
pub fn set_default_block_size_limit(&mut self, limit: usize) {
self.default_block_size_limit = limit;
}
+2 -1
View File
@@ -55,7 +55,8 @@ pub fn read_uri(uri: Option<&String>) -> error::Result<String> {
///
/// 1. Try to construct the `Pair` while using `uri` as input for [`sp_core::Pair::from_phrase`].
///
/// 2. Try to construct the `Pair` while using `uri` as input for [`sp_core::Pair::from_string_with_seed`].
/// 2. Try to construct the `Pair` while using `uri` as input for
/// [`sp_core::Pair::from_string_with_seed`].
///
/// 3. Try to construct the `Pair::Public` while using `uri` as input for
/// [`sp_core::crypto::Ss58Codec::from_string_with_version`].
+9 -6
View File
@@ -92,8 +92,9 @@ pub trait SubstrateCli: Sized {
fn load_spec(&self, id: &str) -> std::result::Result<Box<dyn ChainSpec>, String>;
/// Helper function used to parse the command line arguments. This is the equivalent of
/// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the name of
/// the application, author, "about" and version. It will also set `AppSettings::GlobalVersion`.
/// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the
/// name of the application, author, "about" and version. It will also set
/// `AppSettings::GlobalVersion`.
///
/// To allow running the node without subcommand, tt also sets a few more settings:
/// `AppSettings::ArgsNegateSubcommands` and `AppSettings::SubcommandsNegateReqs`.
@@ -108,8 +109,9 @@ pub trait SubstrateCli: Sized {
}
/// Helper function used to parse the command line arguments. This is the equivalent of
/// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the name of
/// the application, author, "about" and version. It will also set `AppSettings::GlobalVersion`.
/// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the
/// name of the application, author, "about" and version. It will also set
/// `AppSettings::GlobalVersion`.
///
/// To allow running the node without subcommand, it also sets a few more settings:
/// `AppSettings::ArgsNegateSubcommands` and `AppSettings::SubcommandsNegateReqs`.
@@ -166,8 +168,9 @@ pub trait SubstrateCli: Sized {
}
/// Helper function used to parse the command line arguments. This is the equivalent of
/// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the name of
/// the application, author, "about" and version. It will also set `AppSettings::GlobalVersion`.
/// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the
/// name of the application, author, "about" and version. It will also set
/// `AppSettings::GlobalVersion`.
///
/// To allow running the node without subcommand, it also sets a few more settings:
/// `AppSettings::ArgsNegateSubcommands` and `AppSettings::SubcommandsNegateReqs`.
@@ -167,7 +167,8 @@ pub struct ExecutionStrategiesParams {
)]
pub execution_offchain_worker: Option<ExecutionStrategy>,
/// The means of execution used when calling into the runtime while not syncing, importing or constructing blocks.
/// The means of execution used when calling into the runtime while not syncing, importing or
/// constructing blocks.
#[structopt(
long = "execution-other",
value_name = "STRATEGY",
+2 -1
View File
@@ -40,7 +40,8 @@ pub use crate::params::{
transaction_pool_params::*,
};
/// Wrapper type of `String` that holds an unsigned integer of arbitrary size, formatted as a decimal.
/// Wrapper type of `String` that holds an unsigned integer of arbitrary size, formatted as a
/// decimal.
#[derive(Debug, Clone)]
pub struct GenericNumber(String);
@@ -110,13 +110,13 @@ pub struct NetworkParams {
/// Enable peer discovery on local networks.
///
/// By default this option is `true` for `--dev` or when the chain type is `Local`/`Development`
/// and false otherwise.
/// By default this option is `true` for `--dev` or when the chain type is
/// `Local`/`Development` and false otherwise.
#[structopt(long)]
pub discover_local: bool,
/// Require iterative Kademlia DHT queries to use disjoint paths for increased resiliency in the
/// presence of potentially adversarial nodes.
/// Require iterative Kademlia DHT queries to use disjoint paths for increased resiliency in
/// the presence of potentially adversarial nodes.
///
/// See the S/Kademlia paper for more information on the high level design as well as its
/// security improvements.
@@ -53,17 +53,16 @@ pub struct NodeKeyParams {
///
/// The secret key of the node is obtained as follows:
///
/// * If the `--node-key` option is given, the value is parsed as a secret key
/// according to the type. See the documentation for `--node-key`.
/// * If the `--node-key` option is given, the value is parsed as a secret key according to
/// the type. See the documentation for `--node-key`.
///
/// * If the `--node-key-file` option is given, the secret key is read from the
/// specified file. See the documentation for `--node-key-file`.
/// * If the `--node-key-file` option is given, the secret key is read from the specified
/// file. See the documentation for `--node-key-file`.
///
/// * Otherwise, the secret key is read from a file with a predetermined,
/// type-specific name from the chain-specific network config directory
/// inside the base directory specified by `--base-dir`. If this file does
/// not exist, it is created with a newly generated secret key of the
/// chosen type.
/// * Otherwise, the secret key is read from a file with a predetermined, type-specific name
/// from the chain-specific network config directory inside the base directory specified by
/// `--base-dir`. If this file does not exist, it is created with a newly generated secret
/// key of the chosen type.
///
/// The node's secret key determines the corresponding public key and hence the
/// node's peer ID in the context of libp2p.
@@ -26,8 +26,8 @@ use structopt::StructOpt;
pub struct SharedParams {
/// Specify the chain specification.
///
/// It can be one of the predefined ones (dev, local, or staging) or it can be a path to a file with
/// the chainspec (such as one exported by the `build-spec` subcommand).
/// It can be one of the predefined ones (dev, local, or staging) or it can be a path to a file
/// with the chainspec (such as one exported by the `build-spec` subcommand).
#[structopt(long, value_name = "CHAIN_SPEC")]
pub chain: Option<String>,
+4 -4
View File
@@ -143,8 +143,8 @@ pub struct StartAuraParams<C, SC, I, PF, SO, L, CIDP, BS, CAW> {
/// The proportion of the slot dedicated to proposing.
///
/// The block proposing will be limited to this proportion of the slot from the starting of the
/// slot. However, the proposing can still take longer when there is some lenience factor applied,
/// because there were no blocks produced for some slots.
/// slot. However, the proposing can still take longer when there is some lenience factor
/// applied, because there were no blocks produced for some slots.
pub block_proposal_slot_portion: SlotProportion,
/// The maximum proportion of the slot dedicated to proposing with any lenience factor applied
/// due to no blocks being produced.
@@ -237,8 +237,8 @@ pub struct BuildAuraWorkerParams<C, I, PF, SO, L, BS> {
/// The proportion of the slot dedicated to proposing.
///
/// The block proposing will be limited to this proportion of the slot from the starting of the
/// slot. However, the proposing can still take longer when there is some lenience factor applied,
/// because there were no blocks produced for some slots.
/// slot. However, the proposing can still take longer when there is some lenience factor
/// applied, because there were no blocks produced for some slots.
pub block_proposal_slot_portion: SlotProportion,
/// The maximum proportion of the slot dedicated to proposing with any lenience factor applied
/// due to no blocks being produced.
+2 -2
View File
@@ -430,8 +430,8 @@ pub struct BabeParams<B: BlockT, C, SC, E, I, SO, L, CIDP, BS, CAW> {
/// The proportion of the slot dedicated to proposing.
///
/// The block proposing will be limited to this proportion of the slot from the starting of the
/// slot. However, the proposing can still take longer when there is some lenience factor applied,
/// because there were no blocks produced for some slots.
/// slot. However, the proposing can still take longer when there is some lenience factor
/// applied, because there were no blocks produced for some slots.
pub block_proposal_slot_portion: SlotProportion,
/// The maximum proportion of the slot dedicated to proposing with any lenience factor applied
@@ -250,8 +250,8 @@ impl<Block: BlockT, Transaction> BlockImportParams<Block, Transaction> {
/// Auxiliary function for "converting" the transaction type.
///
/// Actually this just sets `StorageChanges::Changes` to `None` and makes rustc think that `Self` now
/// uses a different transaction type.
/// Actually this just sets `StorageChanges::Changes` to `None` and makes rustc think that
/// `Self` now uses a different transaction type.
pub fn clear_storage_changes_and_mutate<Transaction2>(
self,
) -> BlockImportParams<Block, Transaction2> {
@@ -54,10 +54,11 @@ impl<T> Drop for SharedDataLockedUpgradable<T> {
/// Created by [`SharedData::shared_data_locked`].
///
/// As long as this object isn't dropped, the shared data is held in a mutex guard and the shared
/// data is tagged as locked. Access to the shared data is provided through [`Deref`](std::ops::Deref) and
/// [`DerefMut`](std::ops::DerefMut). The trick is to use [`Self::release_mutex`] to release the mutex, but still keep
/// the shared data locked. This means every other thread trying to access the shared data in this
/// time will need to wait until this lock is freed.
/// data is tagged as locked. Access to the shared data is provided through
/// [`Deref`](std::ops::Deref) and [`DerefMut`](std::ops::DerefMut). The trick is to use
/// [`Self::release_mutex`] to release the mutex, but still keep the shared data locked. This means
/// every other thread trying to access the shared data in this time will need to wait until this
/// lock is freed.
///
/// If this object is dropped without calling [`Self::release_mutex`], the lock will be dropped
/// immediately.
@@ -210,8 +211,8 @@ impl<T> SharedData<T> {
///
/// This will give mutable access to the shared data. The returned [`SharedDataLocked`]
/// provides the function [`SharedDataLocked::release_mutex`] to release the mutex, but
/// keeping the data locked. This is useful in async contexts for example where the data needs to
/// be locked, but a mutex guard can not be held.
/// keeping the data locked. This is useful in async contexts for example where the data needs
/// to be locked, but a mutex guard can not be held.
///
/// For an example see [`SharedData`].
pub fn shared_data_locked(&self) -> SharedDataLocked<T> {
+2 -1
View File
@@ -615,7 +615,8 @@ where
&self.inner
}
/// Reset to a specified pair of epochs, as if they were announced at blocks `parent_hash` and `hash`.
/// Reset to a specified pair of epochs, as if they were announced at blocks `parent_hash` and
/// `hash`.
pub fn reset(&mut self, parent_hash: Hash, hash: Hash, number: Number, current: E, next: E) {
self.inner = ForkTree::new();
self.epochs.clear();
@@ -209,8 +209,9 @@ where
{
vec![<DigestItemFor<B> as CompatibleDigestItem>::babe_pre_digest(predigest)]
} else {
// well we couldn't claim a slot because this is an existing chain and we're not in the authorities.
// we need to tell BabeBlockImport that the epoch has changed, and we put ourselves in the authorities.
// well we couldn't claim a slot because this is an existing chain and we're not in the
// authorities. we need to tell BabeBlockImport that the epoch has changed, and we put
// ourselves in the authorities.
let predigest =
PreDigest::SecondaryPlain(SecondaryPlainPreDigest { slot, authority_index: 0_u32 });
@@ -94,8 +94,8 @@ pub struct ManualSealParams<B: BlockT, BI, E, C: ProvideRuntimeApi<B>, TP, SC, C
/// Shared reference to the transaction pool.
pub pool: Arc<TP>,
/// Stream<Item = EngineCommands>, Basically the receiving end of a channel for sending commands to
/// the authorship task.
/// Stream<Item = EngineCommands>, Basically the receiving end of a channel for sending
/// commands to the authorship task.
pub commands_stream: CS,
/// SelectChain strategy.
@@ -281,7 +281,8 @@ mod tests {
0,
));
let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None);
// this test checks that blocks are created as soon as transactions are imported into the pool.
// this test checks that blocks are created as soon as transactions are imported into the
// pool.
let (sender, receiver) = futures::channel::oneshot::channel();
let mut sender = Arc::new(Some(sender));
let commands_stream =
@@ -350,7 +351,8 @@ mod tests {
0,
));
let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None);
// this test checks that blocks are created as soon as an engine command is sent over the stream.
// this test checks that blocks are created as soon as an engine command is sent over the
// stream.
let (mut sink, commands_stream) = futures::channel::mpsc::channel(1024);
let future = run_manual_seal(ManualSealParams {
block_import: client.clone(),
@@ -427,7 +429,8 @@ mod tests {
0,
));
let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None);
// this test checks that blocks are created as soon as an engine command is sent over the stream.
// this test checks that blocks are created as soon as an engine command is sent over the
// stream.
let (mut sink, commands_stream) = futures::channel::mpsc::channel(1024);
let future = run_manual_seal(ManualSealParams {
block_import: client.clone(),
+2 -2
View File
@@ -591,8 +591,8 @@ where
return
}
// The worker is locked for the duration of the whole proposing period. Within this period,
// the mining target is outdated and useless anyway.
// The worker is locked for the duration of the whole proposing period. Within this
// period, the mining target is outdated and useless anyway.
let difficulty = match algorithm.difficulty(best_hash) {
Ok(x) => x,
+4 -2
View File
@@ -428,7 +428,8 @@ impl<B: BlockT, T: SimpleSlotWorker<B> + Send> SlotWorker<B, <T::Proposer as Pro
/// Slot specific extension that the inherent data provider needs to implement.
pub trait InherentDataProviderExt {
/// The current timestamp that will be found in the [`InherentData`](`sp_inherents::InherentData`).
/// The current timestamp that will be found in the
/// [`InherentData`](`sp_inherents::InherentData`).
fn timestamp(&self) -> Timestamp;
/// The current slot that will be found in the [`InherentData`](`sp_inherents::InherentData`).
@@ -1059,7 +1060,8 @@ mod test {
})
.collect();
// Should always be true after a short while, since the chain is advancing but finality is stalled
// Should always be true after a short while, since the chain is advancing but finality is
// stalled
let expected: Vec<bool> = (slot_now..300).map(|s| s > 8).collect();
assert_eq!(should_backoff, expected);
}
+22 -19
View File
@@ -178,8 +178,8 @@ impl<Block: BlockT, T: CacheItemT, S: Storage<Block, T>> ListCache<Block, T, S>
} else {
// there are unfinalized entries
// => find the fork containing given block and read from this fork
// IF there's no matching fork, ensure that this isn't a block from a fork that has forked
// behind the best finalized block and search at finalized fork
// IF there's no matching fork, ensure that this isn't a block from a fork that has
// forked behind the best finalized block and search at finalized fork
match self.find_unfinalized_fork(&at)? {
Some(fork) => Some(&fork.head),
@@ -316,7 +316,8 @@ impl<Block: BlockT, T: CacheItemT, S: Storage<Block, T>> ListCache<Block, T, S>
return Ok(None)
}
// if the block is not final, it is possibly appended to/forking from existing unfinalized fork
// if the block is not final, it is possibly appended to/forking from existing unfinalized
// fork
let is_final = entry_type == EntryType::Final || entry_type == EntryType::Genesis;
if !is_final {
let mut fork_and_action = None;
@@ -392,9 +393,10 @@ impl<Block: BlockT, T: CacheItemT, S: Storage<Block, T>> ListCache<Block, T, S>
}
// if we're here, then one of following is true:
// - either we're inserting final block => all ancestors are already finalized AND the only thing we can do
// is to try to update last finalized entry
// - either we're inserting non-final blocks that has no ancestors in any known unfinalized forks
// - either we're inserting final block => all ancestors are already finalized AND the only
// thing we can do is to try to update last finalized entry
// - either we're inserting non-final blocks that has no ancestors in any known unfinalized
// forks
let new_storage_entry = match self.best_finalized_entry.as_ref() {
Some(best_finalized_entry) => best_finalized_entry.try_update(value),
@@ -1015,8 +1017,8 @@ mod tests {
.value_at_block(&ComplexBlockId::new(H256::from_low_u64_be(2), 100))
.is_err());
// when block is later than last finalized block AND there are no forks AND finalized value is Some
// ---> [100] --- 200
// when block is later than last finalized block AND there are no forks AND finalized value
// is Some ---> [100] --- 200
assert_eq!(
ListCache::new(
DummyStorage::new()
@@ -1088,8 +1090,8 @@ mod tests {
None
);
// when block is later than last finalized block AND it appends to unfinalized fork from the end
// AND unfinalized value is Some
// when block is later than last finalized block AND it appends to unfinalized fork from the
// end AND unfinalized value is Some
// ---> [2] ---> [4] ---> 5
assert_eq!(
ListCache::new(
@@ -1170,8 +1172,8 @@ mod tests {
.unwrap()
.is_none());
// when trying to insert non-final block AND it appends to the best block of unfinalized fork
// AND new value is the same as in the fork' best block
// when trying to insert non-final block AND it appends to the best block of unfinalized
// fork AND new value is the same as in the fork' best block
let mut cache = ListCache::new(
DummyStorage::new()
.with_meta(None, vec![test_id(4)])
@@ -1198,8 +1200,8 @@ mod tests {
assert!(tx.inserted_entries().is_empty());
assert!(tx.removed_entries().is_empty());
assert!(tx.updated_meta().is_none());
// when trying to insert non-final block AND it appends to the best block of unfinalized fork
// AND new value is the same as in the fork' best block
// when trying to insert non-final block AND it appends to the best block of unfinalized
// fork AND new value is the same as in the fork' best block
let mut tx = DummyTransaction::new();
assert_eq!(
cache
@@ -1221,8 +1223,8 @@ mod tests {
Some(Metadata { finalized: None, unfinalized: vec![test_id(5)] })
);
// when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork
// AND new value is the same as in the fork' best block
// when trying to insert non-final block AND it is the first block that appends to the best
// block of unfinalized fork AND new value is the same as in the fork' best block
let cache = ListCache::new(
DummyStorage::new()
.with_meta(None, vec![correct_id(4)])
@@ -1249,8 +1251,8 @@ mod tests {
assert!(tx.inserted_entries().is_empty());
assert!(tx.removed_entries().is_empty());
assert!(tx.updated_meta().is_none());
// when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork
// AND new value is the same as in the fork' best block
// when trying to insert non-final block AND it is the first block that appends to the best
// block of unfinalized fork AND new value is the same as in the fork' best block
let mut tx = DummyTransaction::new();
assert_eq!(
cache
@@ -2204,7 +2206,8 @@ mod tests {
cache.prune_finalized_entries(&mut tx, &test_id(20));
assert!(tx.removed_entries().is_empty());
assert!(tx.inserted_entries().is_empty());
// when finalizing entry #30: entry 10 pruned + entry 20 is truncated (if pruning is enabled)
// when finalizing entry #30: entry 10 pruned + entry 20 is truncated (if pruning is
// enabled)
cache.prune_finalized_entries(&mut tx, &test_id(30));
match strategy {
PruningStrategy::NeverPrune => {
@@ -59,8 +59,8 @@ pub fn extract_new_configuration<Header: HeaderT>(
.and_then(ChangesTrieSignal::as_new_configuration)
}
/// Opaque configuration cache transaction. During its lifetime, no-one should modify cache. This is currently
/// guaranteed because import lock is held during block import/finalization.
/// Opaque configuration cache transaction. During its lifetime, no-one should modify cache. This is
/// currently guaranteed because import lock is held during block import/finalization.
pub struct DbChangesTrieStorageTransaction<Block: BlockT> {
/// Cache operations that must be performed after db transaction is committed.
cache_ops: DbCacheTransactionOps<Block>,
@@ -110,12 +110,13 @@ struct ChangesTriesMeta<Block: BlockT> {
/// The range is inclusive from both sides.
/// Is None only if:
/// 1) we haven't yet finalized any blocks (except genesis)
/// 2) if best_finalized_block - min_blocks_to_keep points to the range where changes tries are disabled
/// 3) changes tries pruning is disabled
/// 2) if best_finalized_block - min_blocks_to_keep points to the range where changes tries are
/// disabled 3) changes tries pruning is disabled
pub oldest_digest_range: Option<(NumberFor<Block>, NumberFor<Block>)>,
/// End block (inclusive) of oldest pruned max-level (or skewed) digest trie blocks range.
/// It is guaranteed that we have no any changes tries before (and including) this block.
/// It is guaranteed that all existing changes tries after this block are not yet pruned (if created).
/// It is guaranteed that all existing changes tries after this block are not yet pruned (if
/// created).
pub oldest_pruned_digest_range_end: NumberFor<Block>,
}
@@ -1131,8 +1132,8 @@ mod tests {
vec![3, 3],
);
// after truncating block2_1 && block2_2 - there are still two unfinalized forks (cache impl specifics),
// the 1st one points to the block #3 because it isn't truncated
// after truncating block2_1 && block2_2 - there are still two unfinalized forks (cache impl
// specifics), the 1st one points to the block #3 because it isn't truncated
backend.revert(1, false).unwrap();
assert_eq!(
backend
+7 -5
View File
@@ -1067,8 +1067,8 @@ impl<T: Clone> FrozenForDuration<T> {
/// Disk backend.
///
/// Disk backend keeps data in a key-value store. In archive mode, trie nodes are kept from all blocks.
/// Otherwise, trie nodes are kept only from some recent blocks.
/// Disk backend keeps data in a key-value store. In archive mode, trie nodes are kept from all
/// blocks. Otherwise, trie nodes are kept only from some recent blocks.
pub struct Backend<Block: BlockT> {
storage: Arc<StorageDb<Block>>,
offchain_storage: offchain::LocalStorage,
@@ -1459,8 +1459,9 @@ impl<Block: BlockT> Backend<Block> {
if operation.commit_state {
transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key);
} else {
// When we don't want to commit the genesis state, we still preserve it in memory
// to bootstrap consensus. It is queried for an initial list of authorities, etc.
// When we don't want to commit the genesis state, we still preserve it in
// memory to bootstrap consensus. It is queried for an initial list of
// authorities, etc.
*self.genesis_state.write() = Some(Arc::new(DbGenesisStorage::new(
pending_block.header.state_root().clone(),
operation.db_updates.clone(),
@@ -3403,7 +3404,8 @@ pub(crate) mod tests {
let block5 = insert_header(&backend, 5, block4, None, Default::default());
assert_eq!(backend.blockchain().info().best_hash, block5);
// Insert 1 as best again. This should fail because canonicalization_delay == 3 and best == 5
// Insert 1 as best again. This should fail because canonicalization_delay == 3 and best ==
// 5
let header = Header {
number: 1,
parent_hash: block0,
+2 -2
View File
@@ -829,8 +829,8 @@ pub(crate) mod tests {
assert_eq!(raw_db.count(columns::HEADER), 1 + ucht_size + ucht_size);
assert_eq!(raw_db.count(columns::CHT), 0);
// insert block #{2 * cht::size() + 1} && check that new CHT is created + headers of this CHT are pruned
// nothing is yet finalized, so nothing is pruned.
// insert block #{2 * cht::size() + 1} && check that new CHT is created + headers of
// this CHT are pruned nothing is yet finalized, so nothing is pruned.
prev_hash = insert_block(&db, HashMap::new(), || {
header_producer(&prev_hash, 1 + cht_size + cht_size)
});
+4 -4
View File
@@ -366,8 +366,8 @@ impl<B: BlockT> CacheChanges<B> {
}
cache.sync(&enacted, &retracted);
// Propagate cache only if committing on top of the latest canonical state
// blocks are ordered by number and only one block with a given number is marked as canonical
// (contributed to canonical state cache)
// blocks are ordered by number and only one block with a given number is marked as
// canonical (contributed to canonical state cache)
if let Some(_) = self.parent_hash {
let mut local_cache = self.local_cache.write();
if is_best {
@@ -463,8 +463,8 @@ impl<S: StateBackend<HashFor<B>>, B: BlockT> CachingState<S, B> {
}
}
/// Check if the key can be returned from cache by matching current block parent hash against canonical
/// state and filtering out entries modified in later blocks.
/// Check if the key can be returned from cache by matching current block parent hash against
/// canonical state and filtering out entries modified in later blocks.
fn is_allowed(
key: Option<&[u8]>,
child_key: Option<&ChildStorageKey>,
+2 -1
View File
@@ -306,7 +306,8 @@ fn open_kvdb_rocksdb<Block: BlockT>(
) -> OpenDbResult {
// first upgrade database to required version
match crate::upgrade::upgrade_db::<Block>(&path, db_type) {
// in case of missing version file, assume that database simply does not exist at given location
// in case of missing version file, assume that database simply does not exist at given
// location
Ok(_) | Err(crate::upgrade::UpgradeError::MissingDatabaseVersionFile) => (),
Err(err) => return Err(io::Error::new(io::ErrorKind::Other, err.to_string()).into()),
}
@@ -26,27 +26,28 @@
//!
//! To give you some examples:
//!
//! - wasmi allows reaching to non-exported mutable globals so that we could reset them.
//! Wasmtime doesnt support that.
//! - wasmi allows reaching to non-exported mutable globals so that we could reset them. Wasmtime
//! doesnt support that.
//!
//! We need to reset the globals because when we
//! execute the Substrate Runtime, we do not drop and create the instance anew, instead
//! we restore some selected parts of the state.
//!
//! - stack depth metering can be performed via instrumentation or deferred to the engine and say
//! be added directly in machine code. Implementing this in machine code is rather cumbersome so
//! - stack depth metering can be performed via instrumentation or deferred to the engine and say be
//! added directly in machine code. Implementing this in machine code is rather cumbersome so
//! instrumentation looks like a good solution.
//!
//! Stack depth metering is needed to make a wasm blob
//! execution deterministic, which in turn is needed by the Parachain Validation Function in Polkadot.
//! execution deterministic, which in turn is needed by the Parachain Validation Function in
//! Polkadot.
//!
//! ## Inspection
//!
//! Inspection of a wasm module may be needed to extract some useful information, such as to extract
//! data segment snapshot, which is helpful for quickly restoring the initial state of instances.
//! Inspection can be also useful to prove that a wasm module possesses some properties, such as,
//! is free of any floating point operations, which is a useful step towards making instances produced
//! from such a module deterministic.
//! is free of any floating point operations, which is a useful step towards making instances
//! produced from such a module deterministic.
mod data_segments_snapshot;
mod globals_snapshot;
@@ -72,11 +72,13 @@ impl RuntimeBlob {
export_mutable_globals(&mut self.raw_module, "exported_internal_global");
}
/// Run a pass that instrument this module so as to introduce a deterministic stack height limit.
/// Run a pass that instrument this module so as to introduce a deterministic stack height
/// limit.
///
/// It will introduce a global mutable counter. The instrumentation will increase the counter
/// according to the "cost" of the callee. If the cost exceeds the `stack_depth_limit` constant,
/// the instrumentation will trap. The counter will be decreased as soon as the the callee returns.
/// the instrumentation will trap. The counter will be decreased as soon as the the callee
/// returns.
///
/// The stack cost of a function is computed based on how much locals there are and the maximum
/// depth of the wasm operand stack.
@@ -89,7 +91,8 @@ impl RuntimeBlob {
Ok(Self { raw_module: injected_module })
}
/// Perform an instrumentation that makes sure that a specific function `entry_point` is exported
/// Perform an instrumentation that makes sure that a specific function `entry_point` is
/// exported
pub fn entry_point_exists(&self, entry_point: &str) -> bool {
self.raw_module
.export_section()
@@ -98,7 +98,8 @@ pub trait WasmInstance: Send {
///
/// This is meant to be the starting address of the memory mapped area for the linear memory.
///
/// This function is intended only for a specific test that measures physical memory consumption.
/// This function is intended only for a specific test that measures physical memory
/// consumption.
fn linear_memory_base_ptr(&self) -> Option<*const u8> {
None
}
@@ -197,8 +197,8 @@ impl WasmExecutor {
/// The runtime is passed as a [`RuntimeBlob`]. The runtime will be isntantiated with the
/// parameters this `WasmExecutor` was initialized with.
///
/// In case of problems with during creation of the runtime or instantation, a `Err` is returned.
/// that describes the message.
/// In case of problems with during creation of the runtime or instantation, a `Err` is
/// returned. that describes the message.
#[doc(hidden)] // We use this function for tests across multiple crates.
pub fn uncached_call(
&self,
@@ -456,8 +456,8 @@ impl RuntimeSpawn for RuntimeInstanceSpawn {
let _ = sender.send(output);
},
Err(error) => {
// If execution is panicked, the `join` in the original runtime code will panic as well,
// since the sender is dropped without sending anything.
// If execution is panicked, the `join` in the original runtime code will
// panic as well, since the sender is dropped without sending anything.
log::error!("Call error in spawned task: {:?}", error);
},
}
+13 -11
View File
@@ -304,8 +304,8 @@ pub fn create_wasm_runtime_with_code(
WasmExecutionMethod::Interpreted => {
// Wasmi doesn't have any need in a cache directory.
//
// We drop the cache_path here to silence warnings that cache_path is not used if compiling
// without the `wasmtime` flag.
// We drop the cache_path here to silence warnings that cache_path is not used if
// compiling without the `wasmtime` flag.
drop(cache_path);
sc_executor_wasmi::create_runtime(
@@ -361,8 +361,8 @@ fn decode_runtime_apis(apis: &[u8]) -> Result<Vec<([u8; 8], u32)>, WasmError> {
apis.chunks(RUNTIME_API_INFO_SIZE)
.map(|chunk| {
// `chunk` can be less than `RUNTIME_API_INFO_SIZE` if the total length of `apis` doesn't
// completely divide by `RUNTIME_API_INFO_SIZE`.
// `chunk` can be less than `RUNTIME_API_INFO_SIZE` if the total length of `apis`
// doesn't completely divide by `RUNTIME_API_INFO_SIZE`.
<[u8; RUNTIME_API_INFO_SIZE]>::try_from(chunk)
.map(sp_api::deserialize_runtime_api_info)
.map_err(|_| WasmError::Other("a clipped runtime api info declaration".to_owned()))
@@ -370,8 +370,8 @@ fn decode_runtime_apis(apis: &[u8]) -> Result<Vec<([u8; 8], u32)>, WasmError> {
.collect::<Result<Vec<_>, WasmError>>()
}
/// Take the runtime blob and scan it for the custom wasm sections containing the version information
/// and construct the `RuntimeVersion` from them.
/// Take the runtime blob and scan it for the custom wasm sections containing the version
/// information and construct the `RuntimeVersion` from them.
///
/// If there are no such sections, it returns `None`. If there is an error during decoding those
/// sections, `Err` will be returned.
@@ -380,8 +380,8 @@ pub fn read_embedded_version(blob: &RuntimeBlob) -> Result<Option<RuntimeVersion
// We do not use `decode_version` here because the runtime_version section is not supposed
// to ever contain a legacy version. Apart from that `decode_version` relies on presence
// of a special API in the `apis` field to treat the input as a non-legacy version. However
// the structure found in the `runtime_version` always contain an empty `apis` field. Therefore
// the version read will be mistakenly treated as an legacy one.
// the structure found in the `runtime_version` always contain an empty `apis` field.
// Therefore the version read will be mistakenly treated as an legacy one.
let mut decoded_version = sp_api::RuntimeVersion::decode(&mut version_section)
.map_err(|_| WasmError::Instantiation("failed to decode version section".into()))?;
@@ -411,8 +411,9 @@ fn create_versioned_wasm_runtime(
// the uncompressed code from now on.
let blob = sc_executor_common::runtime_blob::RuntimeBlob::uncompress_if_needed(&code)?;
// Use the runtime blob to scan if there is any metadata embedded into the wasm binary pertaining
// to runtime version. We do it before consuming the runtime blob for creating the runtime.
// Use the runtime blob to scan if there is any metadata embedded into the wasm binary
// pertaining to runtime version. We do it before consuming the runtime blob for creating the
// runtime.
let mut version: Option<_> = read_embedded_version(&blob)?;
let runtime = create_wasm_runtime_with_code(
@@ -429,7 +430,8 @@ fn create_versioned_wasm_runtime(
if version.is_none() {
// Call to determine runtime version.
let version_result = {
// `ext` is already implicitly handled as unwind safe, as we store it in a global variable.
// `ext` is already implicitly handled as unwind safe, as we store it in a global
// variable.
let mut ext = AssertUnwindSafe(ext);
// The following unwind safety assertion is OK because if the method call panics, the
+2 -1
View File
@@ -687,7 +687,8 @@ pub struct WasmiInstance {
missing_functions: Vec<String>,
}
// This is safe because `WasmiInstance` does not leak any references to `self.memory` and `self.instance`
// This is safe because `WasmiInstance` does not leak any references to `self.memory` and
// `self.instance`
unsafe impl Send for WasmiInstance {}
impl WasmInstance for WasmiInstance {
@@ -98,8 +98,8 @@ pub struct InstanceWrapper {
instance: Instance,
// The memory instance of the `instance`.
//
// It is important to make sure that we don't make any copies of this to make it easier to proof
// See `memory_as_slice` and `memory_as_slice_mut`.
// It is important to make sure that we don't make any copies of this to make it easier to
// proof See `memory_as_slice` and `memory_as_slice_mut`.
memory: Memory,
table: Option<Table>,
// Make this struct explicitly !Send & !Sync.
@@ -399,9 +399,9 @@ impl InstanceWrapper {
self.memory.data_ptr()
}
/// Removes physical backing from the allocated linear memory. This leads to returning the memory
/// back to the system. While the memory is zeroed this is considered as a side-effect and is not
/// relied upon. Thus this function acts as a hint.
/// Removes physical backing from the allocated linear memory. This leads to returning the
/// memory back to the system. While the memory is zeroed this is considered as a side-effect
/// and is not relied upon. Thus this function acts as a hint.
pub fn decommit(&self) {
if self.memory.data_size() == 0 {
return
@@ -100,8 +100,8 @@ impl WasmModule for WasmtimeRuntime {
// and results.
//
// NOTE: Attentive reader may notice that this could've been moved in `WasmModule` creation.
// However, I am not sure if that's a good idea since it would be pushing our luck further
// by assuming that `Store` not only `Send` but also `Sync`.
// However, I am not sure if that's a good idea since it would be pushing our luck
// further by assuming that `Store` not only `Send` but also `Sync`.
let imports = resolve_imports(
&store,
&self.module,
@@ -115,10 +115,10 @@ impl WasmModule for WasmtimeRuntime {
InstanceWrapper::new(&store, &self.module, &imports, self.config.heap_pages)?;
let heap_base = instance_wrapper.extract_heap_base()?;
// This function panics if the instance was created from a runtime blob different from which
// the mutable globals were collected. Here, it is easy to see that there is only a single
// runtime blob and thus it's the same that was used for both creating the instance and
// collecting the mutable globals.
// This function panics if the instance was created from a runtime blob different from
// which the mutable globals were collected. Here, it is easy to see that there is only
// a single runtime blob and thus it's the same that was used for both creating the
// instance and collecting the mutable globals.
let globals_snapshot =
GlobalsSnapshot::take(&snapshot_data.mutable_globals, &instance_wrapper);
@@ -291,10 +291,10 @@ fn common_config(semantics: &Semantics) -> std::result::Result<wasmtime::Config,
/// estimate of the actual stack limit in wasmtime. This is because wasmtime measures it's stack
/// usage in bytes.
///
/// The actual number of bytes consumed by a function is not trivial to compute without going through
/// full compilation. Therefore, it's expected that `native_stack_max` is grealy overestimated and
/// thus never reached in practice. The stack overflow check introduced by the instrumentation and
/// that relies on the logical item count should be reached first.
/// The actual number of bytes consumed by a function is not trivial to compute without going
/// through full compilation. Therefore, it's expected that `native_stack_max` is grealy
/// overestimated and thus never reached in practice. The stack overflow check introduced by the
/// instrumentation and that relies on the logical item count should be reached first.
///
/// See [here][stack_height] for more details of the instrumentation
///
@@ -307,12 +307,12 @@ pub struct DeterministicStackLimit {
pub logical_max: u32,
/// The maximum number of bytes for stack used by wasmtime JITed code.
///
/// It's not specified how much bytes will be consumed by a stack frame for a given wasm function
/// after translation into machine code. It is also not quite trivial.
/// It's not specified how much bytes will be consumed by a stack frame for a given wasm
/// function after translation into machine code. It is also not quite trivial.
///
/// Therefore, this number should be choosen conservatively. It must be so large so that it can
/// fit the [`logical_max`](Self::logical_max) logical values on the stack, according to the current
/// instrumentation algorithm.
/// fit the [`logical_max`](Self::logical_max) logical values on the stack, according to the
/// current instrumentation algorithm.
///
/// This value cannot be 0.
pub native_stack_max: u32,
@@ -335,9 +335,9 @@ pub struct Semantics {
// I.e. if [`CodeSupplyMode::Verbatim`] is used.
pub fast_instance_reuse: bool,
/// Specifiying `Some` will enable deterministic stack height. That is, all executor invocations
/// will reach stack overflow at the exactly same point across different wasmtime versions and
/// architectures.
/// Specifiying `Some` will enable deterministic stack height. That is, all executor
/// invocations will reach stack overflow at the exactly same point across different wasmtime
/// versions and architectures.
///
/// This is achieved by a combination of running an instrumentation pass on input code and
/// configuring wasmtime accordingly.
@@ -351,10 +351,10 @@ pub struct Semantics {
/// non-determinism.
///
/// By default, the wasm spec allows some local non-determinism wrt. certain floating point
/// operations. Specifically, those operations that are not defined to operate on bits (e.g. fneg)
/// can produce NaN values. The exact bit pattern for those is not specified and may depend
/// on the particular machine that executes wasmtime generated JITed machine code. That is
/// a source of non-deterministic values.
/// operations. Specifically, those operations that are not defined to operate on bits (e.g.
/// fneg) can produce NaN values. The exact bit pattern for those is not specified and may
/// depend on the particular machine that executes wasmtime generated JITed machine code. That
/// is a source of non-deterministic values.
///
/// The classical runtime environment for Substrate allowed it and punted this on the runtime
/// developers. For PVFs, we want to ensure that execution is deterministic though. Therefore,
@@ -368,11 +368,11 @@ pub struct Config {
/// The total number of wasm pages an instance can request.
///
/// If specified, the runtime will be able to allocate only that much of wasm memory pages. This
/// is the total number and therefore the [`heap_pages`] is accounted for.
/// If specified, the runtime will be able to allocate only that much of wasm memory pages.
/// This is the total number and therefore the [`heap_pages`] is accounted for.
///
/// That means that the initial number of pages of a linear memory plus the [`heap_pages`] should
/// be less or equal to `max_memory_pages`, otherwise the instance won't be created.
/// That means that the initial number of pages of a linear memory plus the [`heap_pages`]
/// should be less or equal to `max_memory_pages`, otherwise the instance won't be created.
///
/// Moreover, `memory.grow` will fail (return -1) if the sum of the number of currently mounted
/// pages and the number of additional pages exceeds `max_memory_pages`.
@@ -382,8 +382,8 @@ pub struct Config {
/// The WebAssembly standard requires all imports of an instantiated module to be resolved,
/// othewise, the instantiation fails. If this option is set to `true`, then this behavior is
/// overriden and imports that are requested by the module and not provided by the host functions
/// will be resolved using stubs. These stubs will trap upon a call.
/// overriden and imports that are requested by the module and not provided by the host
/// functions will be resolved using stubs. These stubs will trap upon a call.
pub allow_missing_func_imports: bool,
/// A directory in which wasmtime can store its compiled artifacts cache.
@@ -402,15 +402,16 @@ enum CodeSupplyMode<'a> {
// some instrumentations for both anticipated paths: substrate execution and PVF execution.
//
// Should there raise a need in performing no instrumentation and the client doesn't need
// to do any checks, then we can provide a `Cow` like semantics here: if we need the blob and
// the user got `RuntimeBlob` then extract it, or otherwise create it from the given
// to do any checks, then we can provide a `Cow` like semantics here: if we need the blob
// and the user got `RuntimeBlob` then extract it, or otherwise create it from the given
// bytecode.
blob: RuntimeBlob,
},
/// The code is supplied in a form of a compiled artifact.
///
/// This assumes that the code is already prepared for execution and the same `Config` was used.
/// This assumes that the code is already prepared for execution and the same `Config` was
/// used.
Artifact { compiled_artifact: &'a [u8] },
}
@@ -430,11 +431,12 @@ pub fn create_runtime(
///
/// # Safety
///
/// The caller must ensure that the compiled artifact passed here was produced by [`prepare_runtime_artifact`].
/// Otherwise, there is a risk of arbitrary code execution with all implications.
/// The caller must ensure that the compiled artifact passed here was produced by
/// [`prepare_runtime_artifact`]. Otherwise, there is a risk of arbitrary code execution with all
/// implications.
///
/// It is ok though if the `compiled_artifact` was created by code of another version or with different
/// configuration flags. In such case the caller will receive an `Err` deterministically.
/// It is ok though if the `compiled_artifact` was created by code of another version or with
/// different configuration flags. In such case the caller will receive an `Err` deterministically.
pub unsafe fn create_runtime_from_artifact(
compiled_artifact: &[u8],
config: Config,
@@ -445,8 +447,8 @@ pub unsafe fn create_runtime_from_artifact(
/// # Safety
///
/// This is only unsafe if called with [`CodeSupplyMode::Artifact`]. See [`create_runtime_from_artifact`]
/// to get more details.
/// This is only unsafe if called with [`CodeSupplyMode::Artifact`]. See
/// [`create_runtime_from_artifact`] to get more details.
unsafe fn do_create_runtime(
code_supply_mode: CodeSupplyMode<'_>,
config: Config,
@@ -44,7 +44,8 @@ pub fn from_wasmtime_val(val: wasmtime::Val) -> Value {
}
}
/// Converts a sp_wasm_interface's [`Value`] into the corresponding variant in wasmtime's [`wasmtime::Val`].
/// Converts a sp_wasm_interface's [`Value`] into the corresponding variant in wasmtime's
/// [`wasmtime::Val`].
pub fn into_wasmtime_val(value: Value) -> wasmtime::Val {
match value {
Value::I32(v) => wasmtime::Val::I32(v),
@@ -165,8 +165,8 @@ pub struct AuthoritySet<H, N> {
/// is lower than the last finalized block (as signaled in the forced
/// change) must be applied beforehand.
pending_forced_changes: Vec<PendingChange<H, N>>,
/// Track at which blocks the set id changed. This is useful when we need to prove finality for a
/// given block since we can figure out what set the block belongs to and when the set
/// Track at which blocks the set id changed. This is useful when we need to prove finality for
/// a given block since we can figure out what set the block belongs to and when the set
/// started/ended.
authority_set_changes: AuthoritySetChanges<N>,
}
@@ -657,16 +657,16 @@ impl<H, N: Add<Output = N> + Clone> PendingChange<H, N> {
pub struct AuthoritySetChanges<N>(Vec<(u64, N)>);
/// The response when querying for a set id for a specific block. Either we get a set id
/// together with a block number for the last block in the set, or that the requested block is in the
/// latest set, or that we don't know what set id the given block belongs to.
/// together with a block number for the last block in the set, or that the requested block is in
/// the latest set, or that we don't know what set id the given block belongs to.
#[derive(Debug, PartialEq)]
pub enum AuthoritySetChangeId<N> {
/// The requested block is in the latest set.
Latest,
/// Tuple containing the set id and the last block number of that set.
Set(SetId, N),
/// We don't know which set id the request block belongs to (this can only happen due to missing
/// data).
/// We don't know which set id the request block belongs to (this can only happen due to
/// missing data).
Unknown,
}
@@ -912,7 +912,8 @@ mod tests {
assert_eq!(authorities.pending_changes().collect::<Vec<_>>(), vec![&change_a, &change_b]);
// finalizing "hash_c" won't enact the change signaled at "hash_a" but it will prune out "hash_b"
// finalizing "hash_c" won't enact the change signaled at "hash_a" but it will prune out
// "hash_b"
let status = authorities
.apply_standard_changes(
"hash_c",
@@ -483,8 +483,8 @@ struct Peers<N> {
/// gossiping.
first_stage_peers: HashSet<PeerId>,
/// The randomly picked set of peers we'll gossip to in the second stage of gossiping if the
/// first stage didn't allow us to spread the voting data enough to conclude the round. This set
/// should have size `sqrt(connected_peers)`.
/// first stage didn't allow us to spread the voting data enough to conclude the round. This
/// set should have size `sqrt(connected_peers)`.
second_stage_peers: HashSet<PeerId>,
/// The randomly picked set of `LUCKY_PEERS` light clients we'll gossip commit messages to.
lucky_light_peers: HashSet<PeerId>,
@@ -583,9 +583,11 @@ impl<N: Ord> Peers<N> {
fn reshuffle(&mut self) {
// we want to randomly select peers into three sets according to the following logic:
// - first set: LUCKY_PEERS random peers where at least LUCKY_PEERS/2 are authorities (unless
// - first set: LUCKY_PEERS random peers where at least LUCKY_PEERS/2 are authorities
// (unless
// we're not connected to that many authorities)
// - second set: max(LUCKY_PEERS, sqrt(peers)) peers where at least LUCKY_PEERS are authorities.
// - second set: max(LUCKY_PEERS, sqrt(peers)) peers where at least LUCKY_PEERS are
// authorities.
// - third set: LUCKY_PEERS random light client peers
let shuffled_peers = {
@@ -1220,8 +1222,10 @@ impl<Block: BlockT> Inner<Block> {
/// The initial logic for filtering round messages follows the given state
/// transitions:
///
/// - State 1: allowed to LUCKY_PEERS random peers (where at least LUCKY_PEERS/2 are authorities)
/// - State 2: allowed to max(LUCKY_PEERS, sqrt(random peers)) (where at least LUCKY_PEERS are authorities)
/// - State 1: allowed to LUCKY_PEERS random peers (where at least LUCKY_PEERS/2 are
/// authorities)
/// - State 2: allowed to max(LUCKY_PEERS, sqrt(random peers)) (where at least LUCKY_PEERS are
/// authorities)
/// - State 3: allowed to all peers
///
/// Transitions will be triggered on repropagation attempts by the underlying gossip layer.
@@ -1249,7 +1253,8 @@ impl<Block: BlockT> Inner<Block> {
/// The initial logic for filtering global messages follows the given state
/// transitions:
///
/// - State 1: allowed to max(LUCKY_PEERS, sqrt(peers)) (where at least LUCKY_PEERS are authorities)
/// - State 1: allowed to max(LUCKY_PEERS, sqrt(peers)) (where at least LUCKY_PEERS are
/// authorities)
/// - State 2: allowed to all peers
///
/// We are more lenient with global messages since there should be a lot
@@ -1625,7 +1630,8 @@ impl<Block: BlockT> sc_network_gossip::Validator<Block> for GossipValidator<Bloc
// it is expired.
match inner.live_topics.topic_info(&topic) {
None => return true,
Some((Some(_), _)) => return false, /* round messages don't require further checking. */
// round messages don't require further checking.
Some((Some(_), _)) => return false,
Some((None, _)) => {},
};
@@ -191,16 +191,16 @@ pub(crate) struct NetworkBridge<B: BlockT, N: Network<B>> {
neighbor_sender: periodic::NeighborPacketSender<B>,
/// `NeighborPacketWorker` processing packets sent through the `NeighborPacketSender`.
// `NetworkBridge` is required to be cloneable, thus one needs to be able to clone its children,
// thus one has to wrap `neighbor_packet_worker` with an `Arc` `Mutex`.
// `NetworkBridge` is required to be cloneable, thus one needs to be able to clone its
// children, thus one has to wrap `neighbor_packet_worker` with an `Arc` `Mutex`.
neighbor_packet_worker: Arc<Mutex<periodic::NeighborPacketWorker<B>>>,
/// Receiver side of the peer report stream populated by the gossip validator, forwarded to the
/// gossip engine.
// `NetworkBridge` is required to be cloneable, thus one needs to be able to clone its children,
// thus one has to wrap gossip_validator_report_stream with an `Arc` `Mutex`. Given that it is
// just an `UnboundedReceiver`, one could also switch to a multi-producer-*multi*-consumer
// channel implementation.
// `NetworkBridge` is required to be cloneable, thus one needs to be able to clone its
// children, thus one has to wrap gossip_validator_report_stream with an `Arc` `Mutex`. Given
// that it is just an `UnboundedReceiver`, one could also switch to a
// multi-producer-*multi*-consumer channel implementation.
gossip_validator_report_stream: Arc<Mutex<TracingUnboundedReceiver<PeerReport>>>,
telemetry: Option<TelemetryHandle>,
@@ -291,8 +291,8 @@ impl<B: BlockT, N: Network<B>> NetworkBridge<B, N> {
.note_round(round, |to, neighbor| self.neighbor_sender.send(to, neighbor));
}
/// Get a stream of signature-checked round messages from the network as well as a sink for round messages to the
/// network all within the current set.
/// Get a stream of signature-checked round messages from the network as well as a sink for
/// round messages to the network all within the current set.
pub(crate) fn round_communication(
&self,
keystore: Option<LocalIdKeystore>,
@@ -100,7 +100,8 @@ where
let mut out = Vec::new();
let chain_info = self.inner.info();
// request justifications for all pending changes for which change blocks have already been imported
// request justifications for all pending changes for which change blocks have already been
// imported
let pending_changes: Vec<_> =
self.authority_set.inner().pending_changes().cloned().collect();
@@ -171,7 +171,8 @@ impl<Block: BlockT> GrandpaJustification<Block> {
match ancestry_chain.ancestry(self.commit.target_hash, signed.precommit.target_hash) {
Ok(route) => {
// ancestry starts from parent hash but the precommit target hash has been visited
// ancestry starts from parent hash but the precommit target hash has been
// visited
visited_hashes.insert(signed.precommit.target_hash);
for hash in route {
visited_hashes.insert(hash);
@@ -361,7 +361,8 @@ where
match Future::poll(Pin::new(&mut self.observer), cx) {
Poll::Pending => {},
Poll::Ready(Ok(())) => {
// observer commit stream doesn't conclude naturally; this could reasonably be an error.
// observer commit stream doesn't conclude naturally; this could reasonably be an
// error.
return Poll::Ready(Ok(()))
},
Poll::Ready(Err(CommandOrError::Error(e))) => {
@@ -188,8 +188,8 @@ impl<Block: BlockT> WarpSyncProof<Block> {
}
/// Verifies the warp sync proof starting at the given set id and with the given authorities.
/// Verification stops when either the proof is exhausted or finality for the target header can be proven.
/// If the proof is valid the new set id and authorities is returned.
/// Verification stops when either the proof is exhausted or finality for the target header can
/// be proven. If the proof is valid the new set id and authorities is returned.
fn verify(
&self,
set_id: SetId,
@@ -40,7 +40,6 @@ use wasm_timer::Instant;
///
/// Call `InformantDisplay::new` to initialize the state, then regularly call `display` with the
/// information to display.
///
pub struct InformantDisplay<B: BlockT> {
/// Head of chain block number from the last time `display` has been called.
/// `None` if `display` has never been called.
+4 -2
View File
@@ -52,11 +52,13 @@ impl Default for OutputFormat {
}
}
/// Marker trait for a type that implements `TransactionPool` and `MallocSizeOf` on `not(target_os = "unknown")`.
/// Marker trait for a type that implements `TransactionPool` and `MallocSizeOf` on `not(target_os =
/// "unknown")`.
#[cfg(target_os = "unknown")]
pub trait TransactionPoolAndMaybeMallogSizeOf: TransactionPool {}
/// Marker trait for a type that implements `TransactionPool` and `MallocSizeOf` on `not(target_os = "unknown")`.
/// Marker trait for a type that implements `TransactionPool` and `MallocSizeOf` on `not(target_os =
/// "unknown")`.
#[cfg(not(target_os = "unknown"))]
pub trait TransactionPoolAndMaybeMallogSizeOf: TransactionPool + MallocSizeOf {}
+2 -2
View File
@@ -521,8 +521,8 @@ impl KeystoreInner {
/// Get a key pair for the given public key.
///
/// Returns `Ok(None)` if the key doesn't exist, `Ok(Some(_))` if the key exists or `Err(_)` when
/// something failed.
/// Returns `Ok(None)` if the key doesn't exist, `Ok(Some(_))` if the key exists or `Err(_)`
/// when something failed.
pub fn key_pair<Pair: AppPair>(
&self,
public: &<Pair as AppKey>::Public,
@@ -64,8 +64,8 @@ enum ForwardingState<B: BlockT> {
/// more messages to forward.
Idle,
/// The gossip engine is in the process of forwarding messages and thus will not poll the
/// network for more messages until it has send all current messages into the subscribed message
/// sinks.
/// network for more messages until it has sent all current messages into the subscribed
/// message sinks.
Busy(VecDeque<(B::Hash, TopicNotification)>),
}
+2 -2
View File
@@ -32,8 +32,8 @@
//!
//! # Usage
//!
//! - Implement the `Network` trait, representing the low-level networking primitives. It is
//! already implemented on `sc_network::NetworkService`.
//! - Implement the `Network` trait, representing the low-level networking primitives. It is already
//! implemented on `sc_network::NetworkService`.
//! - Implement the `Validator` trait. See the section below.
//! - Decide on a protocol name. Each gossiping protocol should have a different one.
//! - Build a `GossipEngine` using these three elements.
+4 -2
View File
@@ -302,12 +302,14 @@ impl<B: BlockT> Behaviour<B> {
&mut self.substrate
}
/// Start querying a record from the DHT. Will later produce either a `ValueFound` or a `ValueNotFound` event.
/// Start querying a record from the DHT. Will later produce either a `ValueFound` or a
/// `ValueNotFound` event.
pub fn get_value(&mut self, key: &record::Key) {
self.discovery.get_value(key);
}
/// Starts putting a record into DHT. Will later produce either a `ValuePut` or a `ValuePutFailed` event.
/// Starts putting a record into DHT. Will later produce either a `ValuePut` or a
/// `ValuePutFailed` event.
pub fn put_value(&mut self, key: record::Key, value: Vec<u8>) {
self.discovery.put_value(key, value);
}
@@ -294,12 +294,12 @@ impl<B: BlockT> BlockRequestHandler<B> {
};
(justifications, Vec::new(), false)
} else {
// For now we keep compatibility by selecting precisely the GRANDPA one, and not just
// the first one. When sending we could have just taken the first one, since we don't
// expect there to be any other kind currently, but when receiving we need to add the
// engine ID tag.
// The ID tag is hardcoded here to avoid depending on the GRANDPA crate, and will be
// removed once we remove the backwards compatibility.
// For now we keep compatibility by selecting precisely the GRANDPA one, and not
// just the first one. When sending we could have just taken the first one,
// since we don't expect there to be any other kind currently, but when
// receiving we need to add the engine ID tag.
// The ID tag is hardcoded here to avoid depending on the GRANDPA crate, and
// will be removed once we remove the backwards compatibility.
// See: https://github.com/paritytech/substrate/issues/8172
let justification =
justifications.and_then(|just| just.into_justification(*b"FRNK"));
+13 -12
View File
@@ -112,8 +112,8 @@ pub struct Params<B: BlockT, H: ExHashT> {
/// Request response configuration for the block request protocol.
///
/// [`RequestResponseConfig::name`] is used to tag outgoing block requests with the correct
/// protocol name. In addition all of [`RequestResponseConfig`] is used to handle incoming block
/// requests, if enabled.
/// protocol name. In addition all of [`RequestResponseConfig`] is used to handle incoming
/// block requests, if enabled.
///
/// Can be constructed either via [`crate::block_request_handler::generate_protocol_config`]
/// allowing outgoing but not incoming requests, or constructed via
@@ -272,7 +272,6 @@ impl fmt::Debug for ProtocolId {
/// assert_eq!(peer_id, "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse::<PeerId>().unwrap());
/// assert_eq!(addr, "/ip4/198.51.100.19/tcp/30333".parse::<Multiaddr>().unwrap());
/// ```
///
pub fn parse_str_addr(addr_str: &str) -> Result<(PeerId, Multiaddr), ParseErr> {
let addr: Multiaddr = addr_str.parse()?;
parse_addr(addr)
@@ -506,7 +505,8 @@ impl NetworkConfiguration {
}
}
/// Create new default configuration for localhost-only connection with random port (useful for testing)
/// Create new default configuration for localhost-only connection with random port (useful for
/// testing)
pub fn new_local() -> NetworkConfiguration {
let mut config =
NetworkConfiguration::new("test-node", "test-client", Default::default(), None);
@@ -520,7 +520,8 @@ impl NetworkConfiguration {
config
}
/// Create new default configuration for localhost-only connection with random port (useful for testing)
/// Create new default configuration for localhost-only connection with random port (useful for
/// testing)
pub fn new_memory() -> NetworkConfiguration {
let mut config =
NetworkConfiguration::new("test-node", "test-client", Default::default(), None);
@@ -629,8 +630,8 @@ pub enum TransportConfig {
allow_private_ipv4: bool,
/// Optional external implementation of a libp2p transport. Used in WASM contexts where we
/// need some binding between the networking provided by the operating system or environment
/// and libp2p.
/// need some binding between the networking provided by the operating system or
/// environment and libp2p.
///
/// This parameter exists whatever the target platform is, but it is expected to be set to
/// `Some` only when compiling for WASM.
@@ -710,12 +711,12 @@ impl NodeKeyConfig {
///
/// * If the secret is configured as input, the corresponding keypair is returned.
///
/// * If the secret is configured as a file, it is read from that file, if it exists.
/// Otherwise a new secret is generated and stored. In either case, the
/// keypair obtained from the secret is returned.
/// * If the secret is configured as a file, it is read from that file, if it exists. Otherwise
/// a new secret is generated and stored. In either case, the keypair obtained from the
/// secret is returned.
///
/// * If the secret is configured to be new, it is generated and the corresponding
/// keypair is returned.
/// * If the secret is configured to be new, it is generated and the corresponding keypair is
/// returned.
pub fn into_keypair(self) -> io::Result<Keypair> {
use NodeKeyConfig::*;
match self {
+2 -1
View File
@@ -1002,7 +1002,8 @@ mod tests {
match e {
DiscoveryOut::UnroutablePeer(other) |
DiscoveryOut::Discovered(other) => {
// Call `add_self_reported_address` to simulate identify happening.
// Call `add_self_reported_address` to simulate identify
// happening.
let addr = swarms
.iter()
.find_map(|(s, a)| {
+4 -5
View File
@@ -121,10 +121,10 @@
//!
//! - **`/substrate/<protocol-id>/<version>`** (where `<protocol-id>` must be replaced with the
//! protocol ID of the targeted chain, and `<version>` is a number between 2 and 6). For each
//! connection we optionally keep an additional substream for all Substrate-based communications alive.
//! This protocol is considered legacy, and is progressively being replaced with alternatives.
//! This is designated as "The legacy Substrate substream" in this documentation. See below for
//! more details.
//! connection we optionally keep an additional substream for all Substrate-based communications
//! alive. This protocol is considered legacy, and is progressively being replaced with
//! alternatives. This is designated as "The legacy Substrate substream" in this documentation. See
//! below for more details.
//! - **`/<protocol-id>/sync/2`** is a request-response protocol (see below) that lets one perform
//! requests for information about blocks. Each request is the encoding of a `BlockRequest` and
//! each response is the encoding of a `BlockResponse`, as defined in the `api.v1.proto` file in
@@ -243,7 +243,6 @@
//! - Calling `trigger_repropagate` when a transaction is added to the pool.
//!
//! More precise usage details are still being worked on and will likely change in the future.
//!
mod behaviour;
mod chain;
@@ -36,7 +36,8 @@ fn generate_protocol_name(protocol_id: &ProtocolId) -> String {
s
}
/// Generates a [`ProtocolConfig`] for the light client request protocol, refusing incoming requests.
/// Generates a [`ProtocolConfig`] for the light client request protocol, refusing incoming
/// requests.
pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig {
ProtocolConfig {
name: generate_protocol_name(protocol_id).into(),
@@ -26,8 +26,9 @@
//! 2. Forward the request to [`crate::request_responses::RequestResponsesBehaviour`] via
//! [`OutEvent::SendRequest`](sender::OutEvent::SendRequest).
//!
//! 3. Wait for the response and forward the response via the [`futures::channel::oneshot::Sender`] provided earlier
//! with [`LightClientRequestSender::request`](sender::LightClientRequestSender::request).
//! 3. Wait for the response and forward the response via the [`futures::channel::oneshot::Sender`]
//! provided earlier with
//! [`LightClientRequestSender::request`](sender::LightClientRequestSender::request).
use crate::{
config::ProtocolId,
@@ -16,7 +16,8 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Network packet message types. These get serialized and put into the lower level protocol payload.
//! Network packet message types. These get serialized and put into the lower level protocol
//! payload.
pub use self::generic::{
BlockAnnounce, FromBlock, RemoteCallRequest, RemoteChangesRequest, RemoteChangesResponse,
@@ -392,7 +393,8 @@ pub mod generic {
pub to: Option<Hash>,
/// Sequence direction.
pub direction: Direction,
/// Maximum number of blocks to return. An implementation defined maximum is used when unspecified.
/// Maximum number of blocks to return. An implementation defined maximum is used when
/// unspecified.
pub max: Option<u32>,
}
@@ -63,8 +63,8 @@ use wasm_timer::Instant;
/// - [`PeerState::Disabled`]: Has open TCP connection(s) unbeknownst to the peerset. No substream
/// is open.
/// - [`PeerState::Enabled`]: Has open TCP connection(s), acknowledged by the peerset.
/// - Notifications substreams are open on at least one connection, and external
/// API has been notified.
/// - Notifications substreams are open on at least one connection, and external API has been
/// notified.
/// - Notifications substreams aren't open.
/// - [`PeerState::Incoming`]: Has open TCP connection(s) and remote would like to open substreams.
/// Peerset has been asked to attribute an inbound slot.
@@ -1255,8 +1255,8 @@ impl NetworkBehaviour for Notifications {
.iter()
.any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote));
// If no connection is `OpenDesiredByRemote` anymore, clean up the peerset incoming
// request.
// If no connection is `OpenDesiredByRemote` anymore, clean up the peerset
// incoming request.
if no_desired_left {
// In the incoming state, we don't report "Dropped". Instead we will just
// ignore the corresponding Accept/Reject.
@@ -21,17 +21,17 @@ use asynchronous_codec::Framed;
///
/// The Substrate notifications protocol consists in the following:
///
/// - Node A opens a substream to node B and sends a message which contains some protocol-specific
/// higher-level logic. This message is prefixed with a variable-length integer message length.
/// This message can be empty, in which case `0` is sent.
/// - Node A opens a substream to node B and sends a message which contains some
/// protocol-specific higher-level logic. This message is prefixed with a variable-length
/// integer message length. This message can be empty, in which case `0` is sent.
/// - If node B accepts the substream, it sends back a message with the same properties.
/// - If instead B refuses the connection (which typically happens because no empty slot is
/// available), then it immediately closes the substream without sending back anything.
/// - Node A can then send notifications to B, prefixed with a variable-length integer indicating
/// the length of the message.
/// - Either node A or node B can signal that it doesn't want this notifications substream anymore
/// by closing its writing side. The other party should respond by also closing their own
/// writing side soon after.
/// - Node A can then send notifications to B, prefixed with a variable-length integer
/// indicating the length of the message.
/// - Either node A or node B can signal that it doesn't want this notifications substream
/// anymore by closing its writing side. The other party should respond by also closing their
/// own writing side soon after.
///
/// Notification substreams are unidirectional. If A opens a substream with B, then B is
/// encouraged but not required to open a substream to A as well.
+21 -20
View File
@@ -27,7 +27,6 @@
//! The `ChainSync` struct maintains the state of the block requests. Whenever something happens on
//! the network, or whenever a block has been successfully verified, call the appropriate method in
//! order to update it.
//!
use crate::{
protocol::message::{self, BlockAnnounce, BlockAttributes, BlockRequest, BlockResponse},
@@ -900,8 +899,8 @@ impl<B: BlockT> ChainSync<B> {
// If our best queued is more than `MAX_BLOCKS_TO_LOOK_BACKWARDS` blocks away from the
// common number, the peer best number is higher than our best queued and the common
// number is smaller than the last finalized block number, we should do an ancestor
// search to find a better common block. If the queue is full we wait till all blocks are
// imported though.
// search to find a better common block. If the queue is full we wait till all blocks
// are imported though.
if best_queued.saturating_sub(peer.common_number) > MAX_BLOCKS_TO_LOOK_BACKWARDS.into() &&
best_queued < peer.best_number &&
peer.common_number < last_finalized &&
@@ -1149,8 +1148,8 @@ impl<B: BlockT> ChainSync<B> {
ancestry_request::<B>(next_num),
))
} else {
// Ancestry search is complete. Check if peer is on a stale fork unknown to us and
// add it to sync targets if necessary.
// Ancestry search is complete. Check if peer is on a stale fork unknown
// to us and add it to sync targets if necessary.
trace!(
target: "sync",
"Ancestry search complete. Ours={} ({}), Theirs={} ({}), Common={:?} ({})",
@@ -1774,8 +1773,8 @@ impl<B: BlockT> ChainSync<B> {
///
/// This should be polled until it returns [`Poll::Pending`].
///
/// If [`PollBlockAnnounceValidation::ImportHeader`] is returned, then the caller MUST try to import passed
/// header (call `on_block_data`). The network request isn't sent in this case.
/// If [`PollBlockAnnounceValidation::ImportHeader`] is returned, then the caller MUST try to
/// import passed header (call `on_block_data`). The network request isn't sent in this case.
pub fn poll_block_announce_validation(
&mut self,
cx: &mut std::task::Context,
@@ -2002,7 +2001,8 @@ impl<B: BlockT> ChainSync<B> {
})
}
/// Find a block to start sync from. If we sync with state, that's the latest block we have state for.
/// Find a block to start sync from. If we sync with state, that's the latest block we have
/// state for.
fn reset_sync_start_point(&mut self) -> Result<(), ClientError> {
let info = self.client.info();
if matches!(self.mode, SyncMode::LightState { .. }) && info.finalized_state.is_some() {
@@ -2132,8 +2132,8 @@ fn ancestry_request<B: BlockT>(block: NumberFor<B>) -> BlockRequest<B> {
}
}
/// The ancestor search state expresses which algorithm, and its stateful parameters, we are using to
/// try to find an ancestor block
/// The ancestor search state expresses which algorithm, and its stateful parameters, we are using
/// to try to find an ancestor block
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub enum AncestorSearchState<B: BlockT> {
/// Use exponential backoff to find an ancestor, then switch to binary search.
@@ -2161,7 +2161,8 @@ fn handle_ancestor_search_state<B: BlockT>(
AncestorSearchState::ExponentialBackoff(next_distance_to_tip) => {
let next_distance_to_tip = *next_distance_to_tip;
if block_hash_match && next_distance_to_tip == One::one() {
// We found the ancestor in the first step so there is no need to execute binary search.
// We found the ancestor in the first step so there is no need to execute binary
// search.
return None
}
if block_hash_match {
@@ -2645,13 +2646,13 @@ mod test {
/// This test is a regression test as observed on a real network.
///
/// The node is connected to multiple peers. Both of these peers are having a best block (1) that
/// is below our best block (3). Now peer 2 announces a fork of block 3 that we will
/// The node is connected to multiple peers. Both of these peers have a best block (1)
/// that is below our best block (3). Now peer 2 announces a fork of block 3 that we will
/// request from peer 2. After importing the fork, peer 2 and then peer 1 will announce block 4.
/// But as peer 1 in our view is still at block 1, we will request block 2 (which we already have)
/// from it. In the meanwhile peer 2 sends us block 4 and 3 and we send another request for block
/// 2 to peer 2. Peer 1 answers with block 2 and then peer 2. This will need to succeed, as we
/// have requested block 2 from both peers.
/// But as peer 1 in our view is still at block 1, we will request block 2 (which we already
/// have) from it. In the meanwhile peer 2 sends us block 4 and 3 and we send another request
/// for block 2 to peer 2. Peer 1 answers with block 2 and then peer 2. This will need to
/// succeed, as we have requested block 2 from both peers.
#[test]
fn do_not_report_peer_on_block_response_for_block_request() {
sp_tracing::try_init_simple();
@@ -2756,9 +2757,9 @@ mod test {
///
/// The scenario is that the node is doing a full resync and is connected to some node that is
/// doing a major sync as well. This other node that is doing a major sync will finish before
/// our node and send a block announcement message, but we don't have seen any block announcement
/// from this node in its sync process. Meaning our common number didn't change. It is now expected
/// that we start an ancestor search to find the common number.
/// our node and send a block announcement message, but we haven't seen any block
/// announcement from this node in its sync process. Meaning our common number didn't change. It
/// is now expected that we start an ancestor search to find the common number.
#[test]
fn do_ancestor_search_when_common_block_to_best_qeued_gap_is_to_big() {
sp_tracing::try_init_simple();
@@ -98,7 +98,8 @@ impl<B: BlockT> BlockCollection<B> {
);
}
/// Returns a set of block hashes that require a header download. The returned set is marked as being downloaded.
/// Returns a set of block hashes that require a header download. The returned set is marked as
/// being downloaded.
pub fn needed_blocks(
&mut self,
who: PeerId,
@@ -171,7 +172,8 @@ impl<B: BlockT> BlockCollection<B> {
Some(range)
}
/// Get a valid chain of blocks ordered in descending order and ready for importing into blockchain.
/// Get a valid chain of blocks ordered in descending order and ready for importing into
/// blockchain.
pub fn drain(&mut self, from: NumberFor<B>) -> Vec<BlockData<B>> {
let mut drained = Vec::new();
let mut ranges = Vec::new();
@@ -176,8 +176,8 @@ impl<B: BlockT> ExtraRequests<B> {
}
if best_finalized_number > self.best_seen_finalized_number {
// normally we'll receive finality notifications for every block => finalize would be enough
// but if many blocks are finalized at once, some notifications may be omitted
// normally we'll receive finality notifications for every block => finalize would be
// enough but if many blocks are finalized at once, some notifications may be omitted
// => let's use finalize_with_ancestors here
match self.tree.finalize_with_ancestors(
best_finalized_hash,
@@ -315,7 +315,8 @@ impl<'a, B: BlockT> Matcher<'a, B> {
for (peer, sync) in
peers.iter().filter(|(_, sync)| sync.state == PeerSyncState::Available)
{
// only ask peers that have synced at least up to the block number that we're asking the extra for
// only ask peers that have synced at least up to the block number that we're asking
// the extra for
if sync.best_number < request.1 {
continue
}
@@ -310,7 +310,8 @@ impl RequestResponsesBehaviour {
/// Initiates sending a request.
///
/// If there is no established connection to the target peer, the behavior is determined by the choice of `connect`.
/// If there is no established connection to the target peer, the behavior is determined by the
/// choice of `connect`.
///
/// An error is returned if the protocol doesn't match one that has been registered.
pub fn send_request(
@@ -700,8 +701,8 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out))
},
// An inbound request failed, either while reading the request or due to failing
// to send a response.
// An inbound request failed, either while reading the request or due to
// failing to send a response.
RequestResponseEvent::InboundFailure {
request_id, peer, error, ..
} => {
+20 -12
View File
@@ -729,7 +729,8 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkService<B, H> {
/// > preventing the message from being delivered.
///
/// The protocol must have been registered with
/// [`NetworkConfiguration::notifications_protocols`](crate::config::NetworkConfiguration::notifications_protocols).
/// [`notifications_protocols`](crate::config::NetworkConfiguration::notifications_protocols).
pub fn write_notification(
&self,
target: PeerId,
@@ -774,7 +775,8 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkService<B, H> {
/// Obtains a [`NotificationSender`] for a connected peer, if it exists.
///
/// A `NotificationSender` is scoped to a particular connection to the peer that holds
/// a receiver. With a `NotificationSender` at hand, sending a notification is done in two steps:
/// a receiver. With a `NotificationSender` at hand, sending a notification is done in two
/// steps:
///
/// 1. [`NotificationSender::ready`] is used to wait for the sender to become ready
/// for another notification, yielding a [`NotificationSenderReady`] token.
@@ -794,7 +796,8 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkService<B, H> {
/// in which case enqueued notifications will be lost.
///
/// The protocol must have been registered with
/// [`NetworkConfiguration::notifications_protocols`](crate::config::NetworkConfiguration::notifications_protocols).
/// [`notifications_protocols`](crate::config::NetworkConfiguration::notifications_protocols).
///
/// # Usage
///
@@ -883,10 +886,10 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkService<B, H> {
/// notifications should remain the default ways of communicating information. For example, a
/// peer can announce something through a notification, after which the recipient can obtain
/// more information by performing a request.
/// As such, call this function with `IfDisconnected::ImmediateError` for `connect`. This way you
/// will get an error immediately for disconnected peers, instead of waiting for a potentially very
/// long connection attempt, which would suggest that something is wrong anyway, as you are
/// supposed to be connected because of the notification protocol.
/// As such, call this function with `IfDisconnected::ImmediateError` for `connect`. This way
/// you will get an error immediately for disconnected peers, instead of waiting for a
/// potentially very long connection attempt, which would suggest that something is wrong
/// anyway, as you are supposed to be connected because of the notification protocol.
///
/// No limit or throttling of concurrent outbound requests per peer and protocol are enforced.
/// Such restrictions, if desired, need to be enforced at the call site(s).
@@ -914,7 +917,8 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkService<B, H> {
}
}
/// Variation of `request` which starts a request whose response is delivered on a provided channel.
/// Variation of `request` which starts a request whose response is delivered on a provided
/// channel.
///
/// Instead of blocking and waiting for a reply, this function returns immediately, sending
/// responses via the passed in sender. This alternative API exists to make it easier to
@@ -1130,7 +1134,8 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkService<B, H> {
///
/// Returns an `Err` if one of the given addresses is invalid or contains an
/// invalid peer ID (which includes the local peer ID).
// NOTE: technically, this function only needs `Vec<PeerId>`, but we use `Multiaddr` here for convenience.
// NOTE: technically, this function only needs `Vec<PeerId>`, but we use `Multiaddr` here for
// convenience.
pub fn remove_peers_from_reserved_set(
&self,
protocol: Cow<'static, str>,
@@ -1198,7 +1203,8 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkService<B, H> {
///
/// Returns an `Err` if one of the given addresses is invalid or contains an
/// invalid peer ID (which includes the local peer ID).
// NOTE: technically, this function only needs `Vec<PeerId>`, but we use `Multiaddr` here for convenience.
// NOTE: technically, this function only needs `Vec<PeerId>`, but we use `Multiaddr` here for
// convenience.
pub fn remove_from_peers_set(
&self,
protocol: Cow<'static, str>,
@@ -1314,7 +1320,8 @@ pub struct NotificationSender {
}
impl NotificationSender {
/// Returns a future that resolves when the `NotificationSender` is ready to send a notification.
/// Returns a future that resolves when the `NotificationSender` is ready to send a
/// notification.
pub async fn ready<'a>(
&'a self,
) -> Result<NotificationSenderReady<'a>, NotificationSenderError> {
@@ -1371,7 +1378,8 @@ impl<'a> NotificationSenderReady<'a> {
/// Error returned by [`NetworkService::send_notification`].
#[derive(Debug, derive_more::Display, derive_more::Error)]
pub enum NotificationSenderError {
/// The notification receiver has been closed, usually because the underlying connection closed.
/// The notification receiver has been closed, usually because the underlying connection
/// closed.
///
/// Some of the notifications most recently sent may not have been received. However,
/// the peer may still be connected and a new `NotificationSender` for the same
@@ -49,7 +49,8 @@ pub enum VerificationResult<Block: BlockT> {
/// Warp sync backend. Handles retrieving and verifying warp sync proofs.
pub trait WarpSyncProvider<B: BlockT>: Send + Sync {
/// Generate proof starting at given block hash. The proof is accumulated until maximum proof size is reached.
/// Generate proof starting at given block hash. The proof is accumulated until maximum proof
/// size is reached.
fn generate(
&self,
start: B::Hash,
@@ -61,11 +62,13 @@ pub trait WarpSyncProvider<B: BlockT>: Send + Sync {
set_id: SetId,
authorities: AuthorityList,
) -> Result<VerificationResult<B>, Box<dyn std::error::Error + Send + Sync>>;
/// Get current list of authorities. This is supposed to be genesis authorities when starting sync.
/// Get current list of authorities. This is supposed to be genesis authorities when starting
/// sync.
fn current_authorities(&self) -> AuthorityList;
}
/// Generates a [`RequestResponseConfig`] for the grandpa warp sync request protocol, refusing incoming requests.
/// Generates a [`RequestResponseConfig`] for the grandpa warp sync request protocol, refusing
/// incoming requests.
pub fn generate_request_response_config(protocol_id: ProtocolId) -> RequestResponseConfig {
RequestResponseConfig {
name: generate_protocol_name(protocol_id).into(),
@@ -25,7 +25,8 @@ use std::{
task::{Context, Poll},
};
/// Wrapper struct (wrapping nothing in case of http_dummy) used for keeping the hyper_rustls client running.
/// Wrapper struct (wrapping nothing in case of http_dummy) used for keeping the hyper_rustls client
/// running.
#[derive(Clone)]
pub struct SharedClient;
+2 -2
View File
@@ -74,8 +74,8 @@ pub trait AuthorApi<Hash, BlockHash> {
/// Submit an extrinsic to watch.
///
/// See [`TransactionStatus`](sc_transaction_pool_api::TransactionStatus) for details on transaction
/// life cycle.
/// See [`TransactionStatus`](sc_transaction_pool_api::TransactionStatus) for details on
/// transaction life cycle.
#[pubsub(
subscription = "author_extrinsicUpdate",
subscribe,
+8 -4
View File
@@ -89,7 +89,8 @@ pub trait StateApi<Hash> {
#[rpc(name = "state_getRuntimeVersion", alias("chain_getRuntimeVersion"))]
fn runtime_version(&self, hash: Option<Hash>) -> FutureResult<RuntimeVersion>;
/// Query historical storage entries (by key) starting from a block given as the second parameter.
/// Query historical storage entries (by key) starting from a block given as the second
/// parameter.
///
/// NOTE This first returned result contains the initial state of storage for all keys.
/// Subsequent values in the vector represent changes to the previous state (diffs).
@@ -176,7 +177,8 @@ pub trait StateApi<Hash> {
/// ## Node requirements
///
/// - Fully synced archive node (i.e. a node that is not actively doing a "major" sync).
/// - [Tracing enabled WASM runtimes](#creating-tracing-enabled-wasm-runtimes) for all runtime versions
/// - [Tracing enabled WASM runtimes](#creating-tracing-enabled-wasm-runtimes) for all runtime
/// versions
/// for which tracing is desired.
///
/// ## Node recommendations
@@ -192,10 +194,12 @@ pub trait StateApi<Hash> {
/// - Add feature `with-tracing = ["frame-executive/with-tracing", "sp-io/with-tracing"]`
/// under `[features]` to the `runtime` packages' `Cargo.toml`.
/// - Compile the runtime with `cargo build --release --features with-tracing`
/// - Tracing-enabled WASM runtime should be found in `./target/release/wbuild/{{chain}}-runtime`
/// - Tracing-enabled WASM runtime should be found in
/// `./target/release/wbuild/{{chain}}-runtime`
/// and be called something like `{{your_chain}}_runtime.compact.wasm`. This can be
/// renamed/modified however you like, as long as it retains the `.wasm` extension.
/// - Run the node with the wasm blob overrides by placing them in a folder with all your runtimes,
/// - Run the node with the wasm blob overrides by placing them in a folder with all your
/// runtimes,
/// and passing the path of this folder to your chain, e.g.:
/// - `./target/release/polkadot --wasm-runtime-overrides /home/user/my-custom-wasm-runtimes`
///
+2 -1
View File
@@ -215,7 +215,8 @@ where
},
Err(err) => {
warn!("Failed to submit extrinsic: {}", err);
// reject the subscriber (ignore errors - we don't care if subscriber is no longer there).
// reject the subscriber (ignore errors - we don't care if subscriber is no
// longer there).
let _ = subscriber.reject(err.into());
},
});
+2 -1
View File
@@ -119,7 +119,8 @@ where
/// Get the runtime version.
fn runtime_version(&self, block: Option<Block::Hash>) -> FutureResult<RuntimeVersion>;
/// Query historical storage entries (by key) starting from a block given as the second parameter.
/// Query historical storage entries (by key) starting from a block given as the second
/// parameter.
///
/// NOTE This first returned result contains the initial state of storage for all keys.
/// Subsequent values in the vector represent changes to the previous state (diffs).
+2 -1
View File
@@ -208,7 +208,8 @@ where
Ok(())
}
/// Iterates through all blocks that are changing keys within range.filtered_range and collects these changes.
/// Iterates through all blocks that are changing keys within range.filtered_range and collects
/// these changes.
fn query_storage_filtered(
&self,
range: &QueryStorageRange<Block>,
+3 -2
View File
@@ -254,8 +254,9 @@ impl KeystoreContainer {
///
/// # Note
///
/// Using the [`LocalKeystore`] will result in loosing the ability to use any other keystore implementation, like
/// a remote keystore for example. Only use this if you a certain that you require it!
/// Using the [`LocalKeystore`] will result in loosing the ability to use any other keystore
/// implementation, like a remote keystore for example. Only use this if you a certain that you
/// require it!
pub fn local_keystore(&self) -> Option<Arc<LocalKeystore>> {
Some(self.local.clone())
}
@@ -272,8 +272,8 @@ where
{
/// We are reading from the BlockIter structure, adding those blocks to the queue if possible.
Reading { block_iter: BlockIter<R, B> },
/// The queue is full (contains at least MAX_PENDING_BLOCKS blocks) and we are waiting for it to
/// catch up.
/// The queue is full (contains at least MAX_PENDING_BLOCKS blocks) and we are waiting for it
/// to catch up.
WaitingForImportQueueToCatchUp {
block_iter: BlockIter<R, B>,
delay: Delay,
@@ -253,7 +253,6 @@ where
)
.set_parent_hash(at_hash);
// TODO: https://github.com/paritytech/substrate/issues/4455
// .with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c))
state_machine.execute_using_consensus_failure_handler(
execution_manager,
native_call.map(|n| || (n)().map_err(|e| Box::new(e) as Box<_>)),
@@ -827,8 +827,8 @@ where
let state_root = operation.op.reset_storage(storage)?;
if state_root != *import_headers.post().state_root() {
// State root mismatch when importing state. This should not happen in safe fast sync mode,
// but may happen in unsafe mode.
// State root mismatch when importing state. This should not happen in
// safe fast sync mode, but may happen in unsafe mode.
warn!("Error imporing state: State root mismatch.");
return Err(Error::InvalidStateRoot)
}
+2 -1
View File
@@ -120,7 +120,8 @@ pub struct Configuration {
pub disable_grandpa: bool,
/// Development key seed.
///
/// When running in development mode, the seed will be used to generate authority keys by the keystore.
/// When running in development mode, the seed will be used to generate authority keys by the
/// keystore.
///
/// Should only be set when `node` is running development mode.
pub dev_key_seed: Option<String>,
+6 -3
View File
@@ -342,7 +342,8 @@ mod waiting {
}
}
/// Starts RPC servers that run in their own thread, and returns an opaque object that keeps them alive.
/// Starts RPC servers that run in their own thread, and returns an opaque object that keeps them
/// alive.
#[cfg(not(target_os = "unknown"))]
fn start_rpc_servers<
H: FnMut(
@@ -428,7 +429,8 @@ fn start_rpc_servers<
)))
}
/// Starts RPC servers that run in their own thread, and returns an opaque object that keeps them alive.
/// Starts RPC servers that run in their own thread, and returns an opaque object that keeps them
/// alive.
#[cfg(target_os = "unknown")]
fn start_rpc_servers<
H: FnMut(
@@ -539,7 +541,8 @@ where
},
Err(e) => {
debug!("Error converting pool error: {:?}", e);
// it is not bad at least, just some internal node logic error, so peer is innocent.
// it is not bad at least, just some internal node logic error, so peer is
// innocent.
TransactionImport::KnownGood
},
},
@@ -1826,7 +1826,8 @@ fn imports_blocks_with_changes_tries_config_change() {
// blocks 24,25 are changing the key
// block 26 is empty
// block 27 changes the key
// block 28 is the L1 digest (NOT SKEWED!!!) that covers changes AND changes configuration to 3^1
// block 28 is the L1 digest (NOT SKEWED!!!) that covers changes AND changes configuration to
// `3^1`
// ===================================================================
// block 29 is empty
// block 30 changes the key
+11 -8
View File
@@ -25,15 +25,17 @@
//! There's a limit of 32 blocks that may have the same block number in the canonicalization window.
//!
//! Canonicalization function selects one root from the top of the tree and discards all other roots
//! and their subtrees. Upon canonicalization all trie nodes that were inserted in the block are added to
//! the backing DB and block tracking is moved to the pruning window, where no forks are allowed.
//! and their subtrees. Upon canonicalization all trie nodes that were inserted in the block are
//! added to the backing DB and block tracking is moved to the pruning window, where no forks are
//! allowed.
//!
//! # Canonicalization vs Finality
//! Database engine uses a notion of canonicality, rather then finality. A canonical block may not be yet finalized
//! from the perspective of the consensus engine, but it still can't be reverted in the database. Most of the time
//! during normal operation last canonical block is the same as last finalized. However if finality stall for a
//! long duration for some reason, there's only a certain number of blocks that can fit in the non-canonical overlay,
//! so canonicalization of an unfinalized block may be forced.
//! Database engine uses a notion of canonicality, rather then finality. A canonical block may not
//! be yet finalized from the perspective of the consensus engine, but it still can't be reverted in
//! the database. Most of the time during normal operation last canonical block is the same as last
//! finalized. However if finality stall for a long duration for some reason, there's only a certain
//! number of blocks that can fit in the non-canonical overlay, so canonicalization of an
//! unfinalized block may be forced.
//!
//! # Pruning.
//! See `RefWindow` for pruning algorithm details. `StateDb` prunes on each canonicalization until
@@ -177,7 +179,8 @@ pub struct CommitSet<H: Hash> {
/// Pruning constraints. If none are specified pruning is
#[derive(Default, Debug, Clone, Eq, PartialEq)]
pub struct Constraints {
/// Maximum blocks. Defaults to 0 when unspecified, effectively keeping only non-canonical states.
/// Maximum blocks. Defaults to 0 when unspecified, effectively keeping only non-canonical
/// states.
pub max_blocks: Option<u32>,
/// Maximum memory in the pruning overlay.
pub max_mem: Option<usize>,
+14 -5
View File
@@ -241,7 +241,8 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
})
}
/// Insert a new block into the overlay. If inserted on the second level or lover expects parent to be present in the window.
/// Insert a new block into the overlay. If inserted on the second level or lover expects parent
/// to be present in the window.
pub fn insert<E: fmt::Debug>(
&mut self,
hash: &BlockHash,
@@ -501,7 +502,8 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
!self.pending_canonicalizations.contains(hash)
}
/// Revert a single level. Returns commit set that deletes the journal or `None` if not possible.
/// Revert a single level. Returns commit set that deletes the journal or `None` if not
/// possible.
pub fn revert_one(&mut self) -> Option<CommitSet<Key>> {
self.levels.pop_back().map(|level| {
let mut commit = CommitSet::default();
@@ -514,7 +516,8 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
})
}
/// Revert a single block. Returns commit set that deletes the journal or `None` if not possible.
/// Revert a single block. Returns commit set that deletes the journal or `None` if not
/// possible.
pub fn remove(&mut self, hash: &BlockHash) -> Option<CommitSet<Key>> {
let mut commit = CommitSet::default();
let level_count = self.levels.len();
@@ -548,7 +551,8 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
self.pending_insertions.reverse();
for hash in self.pending_insertions.drain(..) {
self.parents.remove(&hash);
// find a level. When iterating insertions backwards the hash is always last in the level.
// find a level. When iterating insertions backwards the hash is always last in the
// level.
let level_index = self
.levels
.iter()
@@ -870,6 +874,7 @@ mod tests {
fn complex_tree() {
let mut db = make_db(&[]);
#[rustfmt::skip]
// - 1 - 1_1 - 1_1_1
// \ 1_2 - 1_2_1
// \ 1_2_2
@@ -1027,6 +1032,7 @@ mod tests {
fn keeps_pinned() {
let mut db = make_db(&[]);
#[rustfmt::skip]
// - 0 - 1_1
// \ 1_2
@@ -1053,6 +1059,7 @@ mod tests {
fn keeps_pinned_ref_count() {
let mut db = make_db(&[]);
#[rustfmt::skip]
// - 0 - 1_1
// \ 1_2
// \ 1_3
@@ -1084,6 +1091,7 @@ mod tests {
fn pin_keeps_parent() {
let mut db = make_db(&[]);
#[rustfmt::skip]
// - 0 - 1_1 - 2_1
// \ 1_2
@@ -1178,7 +1186,8 @@ mod tests {
db.commit(&commit);
overlay.apply_pending();
// add another block at top level. It should reuse journal index 0 of previously discarded block
// add another block at top level. It should reuse journal index 0 of previously discarded
// block
let h22 = H256::random();
db.commit(&overlay.insert::<io::Error>(&h22, 12, &h2, make_changeset(&[22], &[])).unwrap());
assert_eq!(overlay.levels[0].blocks[0].journal_index, 1);
+4 -3
View File
@@ -219,9 +219,10 @@ impl<BlockHash: Hash, Key: Hash> RefWindow<BlockHash, Key> {
/// Revert all pending changes
pub fn revert_pending(&mut self) {
// Revert pending deletions.
// Note that pending insertions might cause some existing deletions to be removed from `death_index`
// We don't bother to track and revert that for now. This means that a few nodes might end up no being
// deleted in case transaction fails and `revert_pending` is called.
// Note that pending insertions might cause some existing deletions to be removed from
// `death_index` We don't bother to track and revert that for now. This means that a few
// nodes might end up no being deleted in case transaction fails and `revert_pending` is
// called.
self.death_rows.truncate(self.death_rows.len() - self.pending_canonicalizations);
if self.count_insertions {
let new_max_block = self.death_rows.len() as u64 + self.pending_number;
+4 -4
View File
@@ -29,10 +29,10 @@
//! identify which substrate node is reporting the telemetry. Every task spawned using sc-service's
//! `TaskManager` automatically inherit this span.
//!
//! Substrate's nodes initialize/register with the [`TelemetryWorker`] using a [`TelemetryWorkerHandle`].
//! This handle can be cloned and passed around. It uses an asynchronous channel to communicate with
//! the running [`TelemetryWorker`] dedicated to registration. Registering can happen at any point
//! in time during the process execution.
//! Substrate's nodes initialize/register with the [`TelemetryWorker`] using a
//! [`TelemetryWorkerHandle`]. This handle can be cloned and passed around. It uses an asynchronous
//! channel to communicate with the running [`TelemetryWorker`] dedicated to registration.
//! Registering can happen at any point in time during the process execution.
#![warn(missing_docs)]
+8 -8
View File
@@ -39,14 +39,14 @@ pub(crate) fn connection_notifier_channel() -> (ConnectionNotifierSender, Connec
/// Handler for a single telemetry node.
///
/// This is a wrapper `Sink` around a network `Sink` with 3 particularities:
/// - It is infallible: if the connection stops, it will reconnect automatically when the server
/// becomes available again.
/// - It holds a list of "connection messages" which are sent automatically when the connection is
/// (re-)established. This is used for the "system.connected" message that needs to be send for
/// every substrate node that connects.
/// - It doesn't stay in pending while waiting for connection. Instead, it moves data into the
/// void if the connection could not be established. This is important for the `Dispatcher`
/// `Sink` which we don't want to block if one connection is broken.
/// - It is infallible: if the connection stops, it will reconnect automatically when the server
/// becomes available again.
/// - It holds a list of "connection messages" which are sent automatically when the connection is
/// (re-)established. This is used for the "system.connected" message that needs to be send for
/// every substrate node that connects.
/// - It doesn't stay in pending while waiting for connection. Instead, it moves data into the void
/// if the connection could not be established. This is important for the `Dispatcher` `Sink`
/// which we don't want to block if one connection is broken.
#[derive(Debug)]
pub(crate) struct Node<TTrans: Transport> {
/// Address of the node.
@@ -203,7 +203,8 @@ pub trait TransactionPool: Send + Sync {
xt: TransactionFor<Self>,
) -> PoolFuture<TxHash<Self>, Self::Error>;
/// Returns a future that import a single transaction and starts to watch their progress in the pool.
/// Returns a future that import a single transaction and starts to watch their progress in the
/// pool.
fn submit_and_watch(
&self,
at: &BlockId<Self::Block>,
@@ -322,7 +322,8 @@ impl<Hash: hash::Hash + Member + Serialize, Ex: std::fmt::Debug> BasePool<Hash,
if !first {
promoted.push(current_hash);
}
// The transactions were removed from the ready pool. We might attempt to re-import them.
// The transactions were removed from the ready pool. We might attempt to
// re-import them.
removed.append(&mut replaced);
},
// transaction failed to be imported.
@@ -382,9 +383,10 @@ impl<Hash: hash::Hash + Member + Serialize, Ex: std::fmt::Debug> BasePool<Hash,
/// Makes sure that the transactions in the queues stay within provided limits.
///
/// Removes and returns worst transactions from the queues and all transactions that depend on them.
/// Technically the worst transaction should be evaluated by computing the entire pending set.
/// We use a simplified approach to remove the transaction that occupies the pool for the longest time.
/// Removes and returns worst transactions from the queues and all transactions that depend on
/// them. Technically the worst transaction should be evaluated by computing the entire pending
/// set. We use a simplified approach to remove the transaction that occupies the pool for the
/// longest time.
pub fn enforce_limits(
&mut self,
ready: &Limit,
@@ -262,7 +262,8 @@ impl<B: ChainApi> Pool<B> {
extrinsics.iter().map(|extrinsic| self.hash_of(extrinsic)).collect::<Vec<_>>();
let in_pool_tags = self.validated_pool.extrinsics_tags(&in_pool_hashes);
// Zip the ones from the pool with the full list (we get pairs `(Extrinsic, Option<Vec<Tag>>)`)
// Zip the ones from the pool with the full list (we get pairs `(Extrinsic,
// Option<Vec<Tag>>)`)
let all = extrinsics.iter().zip(in_pool_tags.into_iter());
let mut future_tags = Vec::new();
@@ -1112,13 +1113,14 @@ mod tests {
block_on(pool.submit_one(&BlockId::Number(0), SOURCE, xt)).unwrap();
assert_eq!(pool.validated_pool().status().ready, 1);
// Now block import happens before the second transaction is able to finish verification.
// Now block import happens before the second transaction is able to finish
// verification.
block_on(pool.prune_tags(&BlockId::Number(1), vec![provides], vec![])).unwrap();
assert_eq!(pool.validated_pool().status().ready, 0);
// so when we release the verification of the previous one it will have
// something in `requires`, but should go to ready directly, since the previous transaction was imported
// correctly.
// something in `requires`, but should go to ready directly, since the previous
// transaction was imported correctly.
tx.send(()).unwrap();
// then
@@ -114,7 +114,8 @@ pub struct ReadyTransactions<Hash: hash::Hash + Eq, Ex> {
provided_tags: HashMap<Tag, Hash>,
/// Transactions that are ready (i.e. don't have any requirements external to the pool)
ready: TrackedMap<Hash, ReadyTx<Hash, Ex>>,
/// Best transactions that are ready to be included to the block without any other previous transaction.
/// Best transactions that are ready to be included to the block without any other previous
/// transaction.
best: BTreeSet<TransactionRef<Hash, Ex>>,
}
@@ -145,10 +146,12 @@ impl<Hash: hash::Hash + Member + Serialize, Ex> ReadyTransactions<Hash, Ex> {
///
/// Transactions are returned in order:
/// 1. First by the dependencies:
/// - never return transaction that requires a tag, which was not provided by one of the previously
/// - never return transaction that requires a tag, which was not provided by one of the
/// previously
/// returned transactions
/// 2. Then by priority:
/// - If there are two transactions with all requirements satisfied the one with higher priority goes first.
/// - If there are two transactions with all requirements satisfied the one with higher priority
/// goes first.
/// 3. Then by the ttl that's left
/// - transactions that are valid for a shorter time go first
/// 4. Lastly we sort by the time in the queue
@@ -252,8 +255,8 @@ impl<Hash: hash::Hash + Member + Serialize, Ex> ReadyTransactions<Hash, Ex> {
/// Removes a subtree of transactions from the ready pool.
///
/// NOTE removing a transaction will also cause a removal of all transactions that depend on that one
/// (i.e. the entire subgraph that this transaction is a start of will be removed).
/// NOTE removing a transaction will also cause a removal of all transactions that depend on
/// that one (i.e. the entire subgraph that this transaction is a start of will be removed).
/// All removed transactions are returned.
pub fn remove_subtree(&mut self, hashes: &[Hash]) -> Vec<Arc<Transaction<Hash, Ex>>> {
let to_remove = hashes.to_vec();
@@ -393,8 +393,9 @@ impl<B: ChainApi> ValidatedPool<B> {
},
Err(err) => {
// we do not want to fail if single transaction import has failed
// nor we do want to propagate this error, because it could tx unknown to caller
// => let's just notify listeners (and issue debug message)
// nor we do want to propagate this error, because it could tx
// unknown to caller => let's just notify listeners (and issue debug
// message)
log::warn!(
target: "txpool",
"[{:?}] Removing invalid transaction from update: {}",
@@ -490,7 +491,8 @@ impl<B: ChainApi> ValidatedPool<B> {
// Resubmit pruned transactions
let results = self.submit(pruned_xts);
// Collect the hashes of transactions that now became invalid (meaning that they are successfully pruned).
// Collect the hashes of transactions that now became invalid (meaning that they are
// successfully pruned).
let hashes = results.into_iter().enumerate().filter_map(|(idx, r)| {
match r.map_err(error::IntoPoolError::into_pool_error) {
Err(Ok(error::Error::InvalidTransaction(_))) => Some(pruned_hashes[idx]),
+2 -1
View File
@@ -439,7 +439,8 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
// Burn any dust if needed.
if let Some(burn) = maybe_burn {
// Debit dust from supply; this will not saturate since it's already checked in prep.
// Debit dust from supply; this will not saturate since it's already checked in
// prep.
debug_assert!(details.supply >= burn, "checked in prep; qed");
details.supply = details.supply.saturating_sub(burn);
}
+10 -8
View File
@@ -43,8 +43,8 @@
//! account that issues the asset. This is a privileged operation.
//! * **Asset transfer**: The reduction of the balance of an asset of one account with the
//! corresponding increase in the balance of another.
//! * **Asset destruction**: The process of reduce the balance of an asset of one account. This is
//! a privileged operation.
//! * **Asset destruction**: The process of reduce the balance of an asset of one account. This is a
//! privileged operation.
//! * **Fungible asset**: An asset whose units are interchangeable.
//! * **Issuer**: An account ID uniquely privileged to be able to mint a particular class of assets.
//! * **Freezer**: An account ID uniquely privileged to be able to freeze an account from
@@ -54,8 +54,8 @@
//! * **Non-fungible asset**: An asset for which each unit has unique characteristics.
//! * **Owner**: An account ID uniquely privileged to be able to destroy a particular asset class,
//! or to set the Issuer, Freezer or Admin of that asset class.
//! * **Approval**: The act of allowing an account the permission to transfer some
//! balance of asset from the approving account into some third-party destination account.
//! * **Approval**: The act of allowing an account the permission to transfer some balance of asset
//! from the approving account into some third-party destination account.
//! * **Sufficiency**: The idea of a minimum-balance of an asset being sufficient to allow the
//! account's existence on the system without requiring any other existential-deposit.
//!
@@ -104,7 +104,8 @@
//! * `set_team`: Changes an asset class's Admin, Freezer and Issuer; called by the asset class's
//! Owner.
//!
//! Please refer to the [`Call`] enum and its associated variants for documentation on each function.
//! Please refer to the [`Call`] enum and its associated variants for documentation on each
//! function.
//!
//! ### Public Functions
//! <!-- Original author of descriptions: @gavofyork -->
@@ -339,7 +340,8 @@ pub mod pallet {
BadWitness,
/// Minimum balance should be non-zero.
MinBalanceZero,
/// No provider reference exists to allow a non-zero balance of a non-self-sufficient asset.
/// No provider reference exists to allow a non-zero balance of a non-self-sufficient
/// asset.
NoProvider,
/// Invalid metadata given.
BadMetadata,
@@ -418,8 +420,8 @@ pub mod pallet {
/// - `id`: The identifier of the new asset. This must not be currently in use to identify
/// an existing asset.
/// - `owner`: The owner of this class of assets. The owner has full superuser permissions
/// over this asset, but may later change and configure the permissions using `transfer_ownership`
/// and `set_team`.
/// over this asset, but may later change and configure the permissions using
/// `transfer_ownership` and `set_team`.
/// - `min_balance`: The minimum balance of this new asset that any single account must
/// have. If an account's balance is reduced below this, then it collapses to zero.
///
+6 -4
View File
@@ -183,7 +183,8 @@ pub enum ConversionError {
MinBalanceZero,
/// The asset is not present in storage.
AssetMissing,
/// The asset is not sufficient and thus does not have a reliable `min_balance` so it cannot be converted.
/// The asset is not sufficient and thus does not have a reliable `min_balance` so it cannot be
/// converted.
AssetNotSufficient,
}
@@ -210,10 +211,11 @@ where
{
type Error = ConversionError;
/// Convert the given balance value into an asset balance based on the ratio between the fungible's
/// minimum balance and the minimum asset balance.
/// Convert the given balance value into an asset balance based on the ratio between the
/// fungible's minimum balance and the minimum asset balance.
///
/// Will return `Err` if the asset is not found, not sufficient or the fungible's minimum balance is zero.
/// Will return `Err` if the asset is not found, not sufficient or the fungible's minimum
/// balance is zero.
fn to_asset_balance(
balance: BalanceOf<F, T>,
asset_id: AssetIdOf<T, I>,
+5 -5
View File
@@ -165,14 +165,14 @@ pub mod pallet {
type SwapAction: SwapAction<Self::AccountId, Self> + Parameter;
/// Limit of proof size.
///
/// Atomic swap is only atomic if once the proof is revealed, both parties can submit the proofs
/// on-chain. If A is the one that generates the proof, then it requires that either:
/// Atomic swap is only atomic if once the proof is revealed, both parties can submit the
/// proofs on-chain. If A is the one that generates the proof, then it requires that either:
/// - A's blockchain has the same proof length limit as B's blockchain.
/// - Or A's blockchain has shorter proof length limit as B's blockchain.
///
/// If B sees A is on a blockchain with larger proof length limit, then it should kindly refuse
/// to accept the atomic swap request if A generates the proof, and asks that B generates the
/// proof instead.
/// If B sees A is on a blockchain with larger proof length limit, then it should kindly
/// refuse to accept the atomic swap request if A generates the proof, and asks that B
/// generates the proof instead.
#[pallet::constant]
type ProofLimit: Get<u32>;
}
+4 -2
View File
@@ -28,7 +28,8 @@
//!
//! ### Public Functions
//!
//! - `slot_duration` - Determine the Aura slot-duration based on the Timestamp module configuration.
//! - `slot_duration` - Determine the Aura slot-duration based on the Timestamp module
//! configuration.
//!
//! ## Related Modules
//!
@@ -99,7 +100,8 @@ pub mod pallet {
}
}
// TODO [#3398] Generate offence report for all authorities that skipped their slots.
// TODO [#3398] Generate offence report for all authorities that skipped their
// slots.
T::DbWeight::get().reads_writes(2, 1)
} else {
+2 -2
View File
@@ -140,8 +140,8 @@ pub mod pallet {
/// further constraints on what uncles can be included, other than their ancestry.
///
/// For PoW, as long as the seals are checked, there is no need to use anything
/// but the `VerifySeal` implementation as the filter. This is because the cost of making many equivocating
/// uncles is high.
/// but the `VerifySeal` implementation as the filter. This is because the cost of making
/// many equivocating uncles is high.
///
/// For PoS, there is no such limitation, so a further constraint must be imposed
/// beyond a seal check in order to prevent an arbitrary number of

Some files were not shown because too many files have changed in this diff Show More