Rewrap all comments to 100 line width (#9490)

* reformat everything again

* manual formatting

* last manual fix

* Fix build
This commit is contained in:
Kian Paimani
2021-08-11 16:56:55 +02:00
committed by GitHub
parent 8180c58700
commit abd08e29ce
258 changed files with 1776 additions and 1447 deletions
+22 -19
View File
@@ -178,8 +178,8 @@ impl<Block: BlockT, T: CacheItemT, S: Storage<Block, T>> ListCache<Block, T, S>
} else {
// there are unfinalized entries
// => find the fork containing given block and read from this fork
// IF there's no matching fork, ensure that this isn't a block from a fork that has forked
// behind the best finalized block and search at finalized fork
// IF there's no matching fork, ensure that this isn't a block from a fork that has
// forked behind the best finalized block and search at finalized fork
match self.find_unfinalized_fork(&at)? {
Some(fork) => Some(&fork.head),
@@ -316,7 +316,8 @@ impl<Block: BlockT, T: CacheItemT, S: Storage<Block, T>> ListCache<Block, T, S>
return Ok(None)
}
// if the block is not final, it is possibly appended to/forking from existing unfinalized fork
// if the block is not final, it is possibly appended to/forking from existing unfinalized
// fork
let is_final = entry_type == EntryType::Final || entry_type == EntryType::Genesis;
if !is_final {
let mut fork_and_action = None;
@@ -392,9 +393,10 @@ impl<Block: BlockT, T: CacheItemT, S: Storage<Block, T>> ListCache<Block, T, S>
}
// if we're here, then one of following is true:
// - either we're inserting final block => all ancestors are already finalized AND the only thing we can do
// is to try to update last finalized entry
// - either we're inserting non-final blocks that have no ancestors in any known unfinalized forks
// - either we're inserting final block => all ancestors are already finalized AND the only
// thing we can do is to try to update last finalized entry
// - either we're inserting non-final blocks that have no ancestors in any known unfinalized
// forks
let new_storage_entry = match self.best_finalized_entry.as_ref() {
Some(best_finalized_entry) => best_finalized_entry.try_update(value),
@@ -1015,8 +1017,8 @@ mod tests {
.value_at_block(&ComplexBlockId::new(H256::from_low_u64_be(2), 100))
.is_err());
// when block is later than last finalized block AND there are no forks AND finalized value is Some
// ---> [100] --- 200
// when block is later than last finalized block AND there are no forks AND finalized value
// is Some
// ---> [100] --- 200
assert_eq!(
ListCache::new(
DummyStorage::new()
@@ -1088,8 +1090,8 @@ mod tests {
None
);
// when block is later than last finalized block AND it appends to unfinalized fork from the end
// AND unfinalized value is Some
// when block is later than last finalized block AND it appends to unfinalized fork from the
// end AND unfinalized value is Some
// ---> [2] ---> [4] ---> 5
assert_eq!(
ListCache::new(
@@ -1170,8 +1172,8 @@ mod tests {
.unwrap()
.is_none());
// when trying to insert non-final block AND it appends to the best block of unfinalized fork
// AND new value is the same as in the fork's best block
// when trying to insert non-final block AND it appends to the best block of unfinalized
// fork AND new value is the same as in the fork's best block
let mut cache = ListCache::new(
DummyStorage::new()
.with_meta(None, vec![test_id(4)])
@@ -1198,8 +1200,8 @@ mod tests {
assert!(tx.inserted_entries().is_empty());
assert!(tx.removed_entries().is_empty());
assert!(tx.updated_meta().is_none());
// when trying to insert non-final block AND it appends to the best block of unfinalized fork
// AND new value is the same as in the fork's best block
// when trying to insert non-final block AND it appends to the best block of unfinalized
// fork AND new value is the same as in the fork's best block
let mut tx = DummyTransaction::new();
assert_eq!(
cache
@@ -1221,8 +1223,8 @@ mod tests {
Some(Metadata { finalized: None, unfinalized: vec![test_id(5)] })
);
// when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork
// AND new value is the same as in the fork's best block
// when trying to insert non-final block AND it is the first block that appends to the best
// block of unfinalized fork AND new value is the same as in the fork's best block
let cache = ListCache::new(
DummyStorage::new()
.with_meta(None, vec![correct_id(4)])
@@ -1249,8 +1251,8 @@ mod tests {
assert!(tx.inserted_entries().is_empty());
assert!(tx.removed_entries().is_empty());
assert!(tx.updated_meta().is_none());
// when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork
// AND new value is the same as in the fork's best block
// when trying to insert non-final block AND it is the first block that appends to the best
// block of unfinalized fork AND new value is the same as in the fork's best block
let mut tx = DummyTransaction::new();
assert_eq!(
cache
@@ -2204,7 +2206,8 @@ mod tests {
cache.prune_finalized_entries(&mut tx, &test_id(20));
assert!(tx.removed_entries().is_empty());
assert!(tx.inserted_entries().is_empty());
// when finalizing entry #30: entry 10 pruned + entry 20 is truncated (if pruning is enabled)
// when finalizing entry #30: entry 10 pruned + entry 20 is truncated (if pruning is
// enabled)
cache.prune_finalized_entries(&mut tx, &test_id(30));
match strategy {
PruningStrategy::NeverPrune => {
@@ -59,8 +59,8 @@ pub fn extract_new_configuration<Header: HeaderT>(
.and_then(ChangesTrieSignal::as_new_configuration)
}
/// Opaque configuration cache transaction. During its lifetime, no-one should modify cache. This is currently
/// guaranteed because import lock is held during block import/finalization.
/// Opaque configuration cache transaction. During its lifetime, no-one should modify cache. This is
/// currently guaranteed because import lock is held during block import/finalization.
pub struct DbChangesTrieStorageTransaction<Block: BlockT> {
/// Cache operations that must be performed after db transaction is committed.
cache_ops: DbCacheTransactionOps<Block>,
@@ -110,12 +110,13 @@ struct ChangesTriesMeta<Block: BlockT> {
/// The range is inclusive from both sides.
/// Is None only if:
/// 1) we haven't yet finalized any blocks (except genesis)
/// 2) if best_finalized_block - min_blocks_to_keep points to the range where changes tries are disabled
/// 3) changes tries pruning is disabled
/// 2) if best_finalized_block - min_blocks_to_keep points to the range where changes tries are
///    disabled
/// 3) changes tries pruning is disabled
pub oldest_digest_range: Option<(NumberFor<Block>, NumberFor<Block>)>,
/// End block (inclusive) of oldest pruned max-level (or skewed) digest trie blocks range.
/// It is guaranteed that we have no changes tries before (and including) this block.
/// It is guaranteed that all existing changes tries after this block are not yet pruned (if created).
/// It is guaranteed that all existing changes tries after this block are not yet pruned (if
/// created).
pub oldest_pruned_digest_range_end: NumberFor<Block>,
}
@@ -1131,8 +1132,8 @@ mod tests {
vec![3, 3],
);
// after truncating block2_1 && block2_2 - there are still two unfinalized forks (cache impl specifics),
// the 1st one points to the block #3 because it isn't truncated
// after truncating block2_1 && block2_2 - there are still two unfinalized forks (cache impl
// specifics), the 1st one points to the block #3 because it isn't truncated
backend.revert(1, false).unwrap();
assert_eq!(
backend
+7 -5
View File
@@ -1067,8 +1067,8 @@ impl<T: Clone> FrozenForDuration<T> {
/// Disk backend.
///
/// Disk backend keeps data in a key-value store. In archive mode, trie nodes are kept from all blocks.
/// Otherwise, trie nodes are kept only from some recent blocks.
/// Disk backend keeps data in a key-value store. In archive mode, trie nodes are kept from all
/// blocks. Otherwise, trie nodes are kept only from some recent blocks.
pub struct Backend<Block: BlockT> {
storage: Arc<StorageDb<Block>>,
offchain_storage: offchain::LocalStorage,
@@ -1459,8 +1459,9 @@ impl<Block: BlockT> Backend<Block> {
if operation.commit_state {
transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key);
} else {
// When we don't want to commit the genesis state, we still preserve it in memory
// to bootstrap consensus. It is queried for an initial list of authorities, etc.
// When we don't want to commit the genesis state, we still preserve it in
// memory to bootstrap consensus. It is queried for an initial list of
// authorities, etc.
*self.genesis_state.write() = Some(Arc::new(DbGenesisStorage::new(
pending_block.header.state_root().clone(),
operation.db_updates.clone(),
@@ -3403,7 +3404,8 @@ pub(crate) mod tests {
let block5 = insert_header(&backend, 5, block4, None, Default::default());
assert_eq!(backend.blockchain().info().best_hash, block5);
// Insert 1 as best again. This should fail because canonicalization_delay == 3 and best == 5
// Insert 1 as best again. This should fail because canonicalization_delay == 3 and best ==
// 5
let header = Header {
number: 1,
parent_hash: block0,
+2 -2
View File
@@ -829,8 +829,8 @@ pub(crate) mod tests {
assert_eq!(raw_db.count(columns::HEADER), 1 + ucht_size + ucht_size);
assert_eq!(raw_db.count(columns::CHT), 0);
// insert block #{2 * cht::size() + 1} && check that new CHT is created + headers of this CHT are pruned
// nothing is yet finalized, so nothing is pruned.
// insert block #{2 * cht::size() + 1} && check that new CHT is created + headers of
// this CHT are pruned. Nothing is yet finalized, so nothing is pruned.
prev_hash = insert_block(&db, HashMap::new(), || {
header_producer(&prev_hash, 1 + cht_size + cht_size)
});
+4 -4
View File
@@ -366,8 +366,8 @@ impl<B: BlockT> CacheChanges<B> {
}
cache.sync(&enacted, &retracted);
// Propagate cache only if committing on top of the latest canonical state
// blocks are ordered by number and only one block with a given number is marked as canonical
// (contributed to canonical state cache)
// blocks are ordered by number and only one block with a given number is marked as
// canonical (contributed to canonical state cache)
if let Some(_) = self.parent_hash {
let mut local_cache = self.local_cache.write();
if is_best {
@@ -463,8 +463,8 @@ impl<S: StateBackend<HashFor<B>>, B: BlockT> CachingState<S, B> {
}
}
/// Check if the key can be returned from cache by matching current block parent hash against canonical
/// state and filtering out entries modified in later blocks.
/// Check if the key can be returned from cache by matching current block parent hash against
/// canonical state and filtering out entries modified in later blocks.
fn is_allowed(
key: Option<&[u8]>,
child_key: Option<&ChildStorageKey>,
+2 -1
View File
@@ -306,7 +306,8 @@ fn open_kvdb_rocksdb<Block: BlockT>(
) -> OpenDbResult {
// first upgrade database to required version
match crate::upgrade::upgrade_db::<Block>(&path, db_type) {
// in case of missing version file, assume that database simply does not exist at given location
// in case of missing version file, assume that database simply does not exist at given
// location
Ok(_) | Err(crate::upgrade::UpgradeError::MissingDatabaseVersionFile) => (),
Err(err) => return Err(io::Error::new(io::ErrorKind::Other, err.to_string()).into()),
}