Rewrap all comments to 100 line width (#9490)

* reformat everything again

* manual formatting

* last manual fix

* Fix build
This commit is contained in:
Kian Paimani
2021-08-11 16:56:55 +02:00
committed by GitHub
parent 8180c58700
commit abd08e29ce
258 changed files with 1776 additions and 1447 deletions
@@ -322,7 +322,8 @@ impl<Hash: hash::Hash + Member + Serialize, Ex: std::fmt::Debug> BasePool<Hash,
if !first {
promoted.push(current_hash);
}
// The transactions were removed from the ready pool. We might attempt to re-import them.
// The transactions were removed from the ready pool. We might attempt to
// re-import them.
removed.append(&mut replaced);
},
// transaction failed to be imported.
@@ -382,9 +383,10 @@ impl<Hash: hash::Hash + Member + Serialize, Ex: std::fmt::Debug> BasePool<Hash,
/// Makes sure that the transactions in the queues stay within provided limits.
///
/// Removes and returns worst transactions from the queues and all transactions that depend on them.
/// Technically the worst transaction should be evaluated by computing the entire pending set.
/// We use a simplified approach to remove the transaction that occupies the pool for the longest time.
/// Removes and returns worst transactions from the queues and all transactions that depend on
/// them. Technically the worst transaction should be evaluated by computing the entire pending
/// set. We use a simplified approach to remove the transaction that occupies the pool for the
/// longest time.
pub fn enforce_limits(
&mut self,
ready: &Limit,
@@ -262,7 +262,8 @@ impl<B: ChainApi> Pool<B> {
extrinsics.iter().map(|extrinsic| self.hash_of(extrinsic)).collect::<Vec<_>>();
let in_pool_tags = self.validated_pool.extrinsics_tags(&in_pool_hashes);
// Zip the ones from the pool with the full list (we get pairs `(Extrinsic, Option<Vec<Tag>>)`)
// Zip the ones from the pool with the full list (we get pairs `(Extrinsic,
// Option<Vec<Tag>>)`)
let all = extrinsics.iter().zip(in_pool_tags.into_iter());
let mut future_tags = Vec::new();
@@ -1112,13 +1113,14 @@ mod tests {
block_on(pool.submit_one(&BlockId::Number(0), SOURCE, xt)).unwrap();
assert_eq!(pool.validated_pool().status().ready, 1);
// Now block import happens before the second transaction is able to finish verification.
// Now block import happens before the second transaction is able to finish
// verification.
block_on(pool.prune_tags(&BlockId::Number(1), vec![provides], vec![])).unwrap();
assert_eq!(pool.validated_pool().status().ready, 0);
// so when we release the verification of the previous one it will have
// something in `requires`, but should go to ready directly, since the previous transaction was imported
// correctly.
// something in `requires`, but should go to ready directly, since the previous
// transaction was imported correctly.
tx.send(()).unwrap();
// then
@@ -114,7 +114,8 @@ pub struct ReadyTransactions<Hash: hash::Hash + Eq, Ex> {
provided_tags: HashMap<Tag, Hash>,
/// Transactions that are ready (i.e. don't have any requirements external to the pool)
ready: TrackedMap<Hash, ReadyTx<Hash, Ex>>,
/// Best transactions that are ready to be included to the block without any other previous transaction.
/// Best transactions that are ready to be included to the block without any other previous
/// transaction.
best: BTreeSet<TransactionRef<Hash, Ex>>,
}
@@ -145,10 +146,12 @@ impl<Hash: hash::Hash + Member + Serialize, Ex> ReadyTransactions<Hash, Ex> {
///
/// Transactions are returned in order:
/// 1. First by the dependencies:
/// - never return transaction that requires a tag, which was not provided by one of the previously
/// - never return transaction that requires a tag, which was not provided by one of the
/// previously
/// returned transactions
/// 2. Then by priority:
/// - If there are two transactions with all requirements satisfied the one with higher priority goes first.
/// - If there are two transactions with all requirements satisfied the one with higher priority
/// goes first.
/// 3. Then by the ttl that's left
/// - transactions that are valid for a shorter time go first
/// 4. Lastly we sort by the time in the queue
@@ -252,8 +255,8 @@ impl<Hash: hash::Hash + Member + Serialize, Ex> ReadyTransactions<Hash, Ex> {
/// Removes a subtree of transactions from the ready pool.
///
/// NOTE removing a transaction will also cause a removal of all transactions that depend on that one
/// (i.e. the entire subgraph that this transaction is a start of will be removed).
/// NOTE removing a transaction will also cause a removal of all transactions that depend on
/// that one (i.e. the entire subgraph that this transaction is a start of will be removed).
/// All removed transactions are returned.
pub fn remove_subtree(&mut self, hashes: &[Hash]) -> Vec<Arc<Transaction<Hash, Ex>>> {
let to_remove = hashes.to_vec();
@@ -393,8 +393,9 @@ impl<B: ChainApi> ValidatedPool<B> {
},
Err(err) => {
// we do not want to fail if single transaction import has failed
// nor we do want to propagate this error, because it could tx unknown to caller
// => let's just notify listeners (and issue debug message)
// nor do we want to propagate this error, because the tx could be
// unknown to the caller => let's just notify listeners (and issue a
// debug message)
log::warn!(
target: "txpool",
"[{:?}] Removing invalid transaction from update: {}",
@@ -490,7 +491,8 @@ impl<B: ChainApi> ValidatedPool<B> {
// Resubmit pruned transactions
let results = self.submit(pruned_xts);
// Collect the hashes of transactions that now became invalid (meaning that they are successfully pruned).
// Collect the hashes of transactions that now became invalid (meaning that they are
// successfully pruned).
let hashes = results.into_iter().enumerate().filter_map(|(idx, r)| {
match r.map_err(error::IntoPoolError::into_pool_error) {
Err(Ok(error::Error::InvalidTransaction(_))) => Some(pruned_hashes[idx]),