Run cargo fmt on the whole code base (#9394)

* Run cargo fmt on the whole code base

* Second run

* Add CI check

* Fix compilation

* More unnecessary braces

* Handle weights

* Use --all

* Use correct attributes...

* Fix UI tests

* AHHHHHHHHH

* 🤦

* Docs

* Fix compilation

* 🤷

* Please stop

* 🤦 x 2

* More

* make rustfmt.toml consistent with polkadot

Co-authored-by: André Silva <andrerfosilva@gmail.com>
Author: Bastian Köcher
Date: 2021-07-21 16:32:32 +02:00
Committer: GitHub
parent d451c38c1c
commit 7b56ab15b4
1010 changed files with 53339 additions and 51208 deletions
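Two bullets in the commit message deserve a note for readers of this page. "Add CI check" and "Use --all" refer to enforcing the formatting in CI; the CI file is not among the diffs shown here, but the conventional check for such a setup is `cargo +nightly fmt --all -- --check` (nightly because several of the rustfmt options involved were nightly-only in 2021), which fails when any workspace crate deviates from rustfmt.toml. Likewise, "make rustfmt.toml consistent with polkadot" refers to a file not shown on this page, so the following is only a sketch of the kind of rustfmt.toml implied by the formatting visible in the hunks below. Every option name is a real rustfmt option, but the exact committed file may differ:

# Sketch only: rustfmt.toml settings inferred from the formatting in this
# commit, not the verbatim committed file.
hard_tabs = true                   # hunks below are tab-indented
max_width = 100                    # long one-liners are rewrapped at ~100 columns
use_small_heuristics = "Max"       # short struct literals collapse onto one line
imports_granularity = "Crate"      # e.g. `use crate::{cache::{...}, DbHash};`
reorder_imports = true             # import blocks are sorted
match_block_trailing_comma = true  # `},` after block-bodied match arms
trailing_comma = "Vertical"        # trailing commas in vertically split lists
trailing_semicolon = false         # trailing `;` dropped after `return ...`
use_field_init_shorthand = true    # `value` instead of `value: value`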
File diff suppressed because it is too large
+54 -34
@@ -18,12 +18,11 @@
 //! List-cache storage entries.

-use codec::{Encode, Decode};
+use codec::{Decode, Encode};
 use sp_blockchain::Result as ClientResult;
 use sp_runtime::traits::{Block as BlockT, NumberFor};

-use crate::cache::{CacheItemT, ComplexBlockId};
-use crate::cache::list_storage::{Storage};
+use crate::cache::{list_storage::Storage, CacheItemT, ComplexBlockId};

 /// Single list-based cache entry.
 #[derive(Debug)]
@@ -52,10 +51,8 @@ impl<Block: BlockT, T: CacheItemT> Entry<Block, T> {
 		match value {
 			Some(value) => match self.value == value {
 				true => None,
-				false => Some(StorageEntry {
-					prev_valid_from: Some(self.valid_from.clone()),
-					value,
-				}),
+				false =>
+					Some(StorageEntry { prev_valid_from: Some(self.valid_from.clone()), value }),
 			},
 			None => None,
 		}
@@ -67,7 +64,8 @@ impl<Block: BlockT, T: CacheItemT> Entry<Block, T> {
 		storage: &S,
 		block: NumberFor<Block>,
 	) -> ClientResult<Option<(ComplexBlockId<Block>, Option<ComplexBlockId<Block>>)>> {
-		Ok(self.search_best_before(storage, block)?
+		Ok(self
+			.search_best_before(storage, block)?
 			.map(|(entry, next)| (entry.valid_from, next)))
 	}
@@ -86,14 +84,14 @@ impl<Block: BlockT, T: CacheItemT> Entry<Block, T> {
 		let mut current = self.valid_from.clone();
 		if block >= self.valid_from.number {
 			let value = self.value.clone();
-			return Ok(Some((Entry { valid_from: current, value }, next)));
+			return Ok(Some((Entry { valid_from: current, value }, next)))
 		}

 		// else - travel back in time
 		loop {
 			let entry = storage.require_entry(&current)?;
 			if block >= current.number {
-				return Ok(Some((Entry { valid_from: current, value: entry.value }, next)));
+				return Ok(Some((Entry { valid_from: current, value: entry.value }, next)))
 			}

 			next = Some(current);
@@ -108,18 +106,15 @@ impl<Block: BlockT, T: CacheItemT> Entry<Block, T> {
 impl<Block: BlockT, T: CacheItemT> StorageEntry<Block, T> {
 	/// Converts storage entry into an entry, valid from given block.
 	pub fn into_entry(self, valid_from: ComplexBlockId<Block>) -> Entry<Block, T> {
-		Entry {
-			valid_from,
-			value: self.value,
-		}
+		Entry { valid_from, value: self.value }
 	}
 }

 #[cfg(test)]
 mod tests {
-	use crate::cache::list_storage::tests::{DummyStorage, FaultyStorage};
-	use substrate_test_runtime_client::runtime::{H256, Block};
 	use super::*;
+	use crate::cache::list_storage::tests::{DummyStorage, FaultyStorage};
+	use substrate_test_runtime_client::runtime::{Block, H256};

 	fn test_id(number: u64) -> ComplexBlockId<Block> {
 		ComplexBlockId::new(H256::from_low_u64_be(number), number)
@@ -132,36 +127,61 @@ mod tests {
 		// when trying to update with the same Some value
 		assert_eq!(Entry { valid_from: test_id(1), value: 1 }.try_update(Some(1)), None);
 		// when trying to update with different Some value
-		assert_eq!(Entry { valid_from: test_id(1), value: 1 }.try_update(Some(2)),
-			Some(StorageEntry { prev_valid_from: Some(test_id(1)), value: 2 }));
+		assert_eq!(
+			Entry { valid_from: test_id(1), value: 1 }.try_update(Some(2)),
+			Some(StorageEntry { prev_valid_from: Some(test_id(1)), value: 2 })
+		);
 	}

 	#[test]
 	fn entry_search_best_before_fails() {
 		// when storage returns error
 		assert!(Entry::<_, u64> { valid_from: test_id(100), value: 42 }
-			.search_best_before(&FaultyStorage, 50).is_err());
+			.search_best_before(&FaultyStorage, 50)
+			.is_err());
 	}

 	#[test]
 	fn entry_search_best_before_works() {
 		// when block is better than our best block
-		assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: 100 }
-			.search_best_before(&DummyStorage::new(), 150).unwrap(),
-			Some((Entry::<_, u64> { valid_from: test_id(100), value: 100 }, None)));
+		assert_eq!(
+			Entry::<_, u64> { valid_from: test_id(100), value: 100 }
+				.search_best_before(&DummyStorage::new(), 150)
+				.unwrap(),
+			Some((Entry::<_, u64> { valid_from: test_id(100), value: 100 }, None))
+		);
 		// when block is found between two entries
-		assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: 100 }
-			.search_best_before(&DummyStorage::new()
-				.with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 })
-				.with_entry(test_id(50), StorageEntry { prev_valid_from: Some(test_id(30)), value: 50 }),
-			75).unwrap(),
-			Some((Entry::<_, u64> { valid_from: test_id(50), value: 50 }, Some(test_id(100)))));
+		assert_eq!(
+			Entry::<_, u64> { valid_from: test_id(100), value: 100 }
+				.search_best_before(
+					&DummyStorage::new()
+						.with_entry(
+							test_id(100),
+							StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }
+						)
+						.with_entry(
+							test_id(50),
+							StorageEntry { prev_valid_from: Some(test_id(30)), value: 50 }
+						),
+					75
+				)
+				.unwrap(),
+			Some((Entry::<_, u64> { valid_from: test_id(50), value: 50 }, Some(test_id(100))))
+		);
 		// when block is not found
-		assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: 100 }
-			.search_best_before(&DummyStorage::new()
-				.with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 })
-				.with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }),
-			30).unwrap(),
-			None);
+		assert_eq!(
+			Entry::<_, u64> { valid_from: test_id(100), value: 100 }
+				.search_best_before(
+					&DummyStorage::new()
+						.with_entry(
+							test_id(100),
+							StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }
+						)
+						.with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }),
+					30
+				)
+				.unwrap(),
+			None
+		);
 	}
 }
+100 -45
@@ -20,17 +20,23 @@
 use std::sync::Arc;

-use sp_blockchain::{Error as ClientError, Result as ClientResult};
-use codec::{Encode, Decode};
-use sp_runtime::generic::BlockId;
-use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor};
-use sp_database::{Database, Transaction};
 use crate::utils::{self, meta_keys};
+use codec::{Decode, Encode};
+use sp_blockchain::{Error as ClientError, Result as ClientResult};
+use sp_database::{Database, Transaction};
+use sp_runtime::{
+	generic::BlockId,
+	traits::{Block as BlockT, Header as HeaderT, NumberFor},
+};

-use crate::cache::{CacheItemT, ComplexBlockId};
-use crate::cache::list_cache::{CommitOperation, Fork};
-use crate::cache::list_entry::{Entry, StorageEntry};
-use crate::DbHash;
+use crate::{
+	cache::{
+		list_cache::{CommitOperation, Fork},
+		list_entry::{Entry, StorageEntry},
+		CacheItemT, ComplexBlockId,
+	},
+	DbHash,
+};

 /// Single list-cache metadata.
 #[derive(Debug)]
@@ -54,14 +60,21 @@ pub trait Storage<Block: BlockT, T: CacheItemT> {
 	fn read_meta(&self) -> ClientResult<Metadata<Block>>;

 	/// Reads cache entry from the storage.
-	fn read_entry(&self, at: &ComplexBlockId<Block>) -> ClientResult<Option<StorageEntry<Block, T>>>;
+	fn read_entry(
+		&self,
+		at: &ComplexBlockId<Block>,
+	) -> ClientResult<Option<StorageEntry<Block, T>>>;

 	/// Reads referenced (and thus existing) cache entry from the storage.
 	fn require_entry(&self, at: &ComplexBlockId<Block>) -> ClientResult<StorageEntry<Block, T>> {
-		self.read_entry(at)
-			.and_then(|entry| entry
-				.ok_or_else(|| ClientError::from(
-					ClientError::Backend(format!("Referenced cache entry at {:?} is not found", at)))))
+		self.read_entry(at).and_then(|entry| {
+			entry.ok_or_else(|| {
+				ClientError::from(ClientError::Backend(format!(
+					"Referenced cache entry at {:?} is not found",
+					at
+				)))
+			})
+		})
 	}
 }
@@ -111,10 +124,14 @@ impl DbStorage {
 	}

 	/// Get reference to the database.
-	pub fn db(&self) -> &Arc<dyn Database<DbHash>> { &self.db }
+	pub fn db(&self) -> &Arc<dyn Database<DbHash>> {
+		&self.db
+	}

 	/// Get reference to the database columns.
-	pub fn columns(&self) -> &DbColumns { &self.columns }
+	pub fn columns(&self) -> &DbColumns {
+		&self.columns
+	}

 	/// Encode block id for storing as a key in cache column.
 	/// We append prefix to the actual encoding to allow several caches
@@ -128,25 +145,35 @@ impl DbStorage {
 impl<Block: BlockT, T: CacheItemT> Storage<Block, T> for DbStorage {
 	fn read_id(&self, at: NumberFor<Block>) -> ClientResult<Option<Block::Hash>> {
-		utils::read_header::<Block>(&*self.db, self.columns.key_lookup, self.columns.header, BlockId::Number(at))
-			.map(|maybe_header| maybe_header.map(|header| header.hash()))
+		utils::read_header::<Block>(
+			&*self.db,
+			self.columns.key_lookup,
+			self.columns.header,
+			BlockId::Number(at),
+		)
+		.map(|maybe_header| maybe_header.map(|header| header.hash()))
 	}

 	fn read_header(&self, at: &Block::Hash) -> ClientResult<Option<Block::Header>> {
-		utils::read_header::<Block>(&*self.db, self.columns.key_lookup, self.columns.header, BlockId::Hash(*at))
+		utils::read_header::<Block>(
+			&*self.db,
+			self.columns.key_lookup,
+			self.columns.header,
+			BlockId::Hash(*at),
+		)
 	}

 	fn read_meta(&self) -> ClientResult<Metadata<Block>> {
 		match self.db.get(self.columns.meta, &self.meta_key) {
 			Some(meta) => meta::decode(&*meta),
-			None => Ok(Metadata {
-				finalized: None,
-				unfinalized: Vec::new(),
-			})
+			None => Ok(Metadata { finalized: None, unfinalized: Vec::new() }),
 		}
 	}

-	fn read_entry(&self, at: &ComplexBlockId<Block>) -> ClientResult<Option<StorageEntry<Block, T>>> {
+	fn read_entry(
+		&self,
+		at: &ComplexBlockId<Block>,
+	) -> ClientResult<Option<StorageEntry<Block, T>>> {
 		match self.db.get(self.columns.cache, &self.encode_block_id(at)) {
 			Some(entry) => StorageEntry::<Block, T>::decode(&mut &entry[..])
 				.map_err(|_| ClientError::Backend("Failed to decode cache entry".into()))
@@ -171,7 +198,11 @@ impl<'a> DbStorageTransaction<'a> {
 impl<'a, Block: BlockT, T: CacheItemT> StorageTransaction<Block, T> for DbStorageTransaction<'a> {
 	fn insert_storage_entry(&mut self, at: &ComplexBlockId<Block>, entry: &StorageEntry<Block, T>) {
-		self.tx.set_from_vec(self.storage.columns.cache, &self.storage.encode_block_id(at), entry.encode());
+		self.tx.set_from_vec(
+			self.storage.columns.cache,
+			&self.storage.encode_block_id(at),
+			entry.encode(),
+		);
 	}

 	fn remove_storage_entry(&mut self, at: &ComplexBlockId<Block>) {
@@ -187,7 +218,8 @@ impl<'a, Block: BlockT, T: CacheItemT> StorageTransaction<Block, T> for DbStorag
 		self.tx.set_from_vec(
 			self.storage.columns.meta,
 			&self.storage.meta_key,
-			meta::encode(best_finalized_entry, unfinalized, operation));
+			meta::encode(best_finalized_entry, unfinalized, operation),
+		);
 	}
 }
@@ -206,10 +238,11 @@ mod meta {
 	pub fn encode<Block: BlockT, T: CacheItemT>(
 		best_finalized_entry: Option<&Entry<Block, T>>,
 		unfinalized: &[Fork<Block, T>],
-		op: &CommitOperation<Block, T>
+		op: &CommitOperation<Block, T>,
 	) -> Vec<u8> {
 		let mut finalized = best_finalized_entry.as_ref().map(|entry| &entry.valid_from);
-		let mut unfinalized = unfinalized.iter().map(|fork| &fork.head().valid_from).collect::<Vec<_>>();
+		let mut unfinalized =
+			unfinalized.iter().map(|fork| &fork.head().valid_from).collect::<Vec<_>>();

 		match op {
 			CommitOperation::AppendNewBlock(_, _) => (),
@@ -230,8 +263,11 @@ mod meta {
 			CommitOperation::BlockReverted(ref forks) => {
 				for (fork_index, updated_fork) in forks.iter().rev() {
 					match updated_fork {
-						Some(updated_fork) => unfinalized[*fork_index] = &updated_fork.head().valid_from,
-						None => { unfinalized.remove(*fork_index); },
+						Some(updated_fork) =>
+							unfinalized[*fork_index] = &updated_fork.head().valid_from,
+						None => {
+							unfinalized.remove(*fork_index);
+						},
 					}
 				}
 			},
@@ -243,10 +279,12 @@ mod meta {
 	/// Decode meta information.
 	pub fn decode<Block: BlockT>(encoded: &[u8]) -> ClientResult<Metadata<Block>> {
 		let input = &mut &*encoded;
-		let finalized: Option<ComplexBlockId<Block>> = Decode::decode(input)
-			.map_err(|_| ClientError::from(ClientError::Backend("Error decoding cache meta".into())))?;
-		let unfinalized: Vec<ComplexBlockId<Block>> = Decode::decode(input)
-			.map_err(|_| ClientError::from(ClientError::Backend("Error decoding cache meta".into())))?;
+		let finalized: Option<ComplexBlockId<Block>> = Decode::decode(input).map_err(|_| {
+			ClientError::from(ClientError::Backend("Error decoding cache meta".into()))
+		})?;
+		let unfinalized: Vec<ComplexBlockId<Block>> = Decode::decode(input).map_err(|_| {
+			ClientError::from(ClientError::Backend("Error decoding cache meta".into()))
+		})?;

 		Ok(Metadata { finalized, unfinalized })
 	}
@@ -254,8 +292,8 @@ mod meta {
 #[cfg(test)]
 pub mod tests {
-	use std::collections::{HashMap, HashSet};
 	use super::*;
+	use std::collections::{HashMap, HashSet};

 	pub struct FaultyStorage;
@@ -272,7 +310,10 @@ pub mod tests {
Err(ClientError::Backend("TestError".into()))
}
fn read_entry(&self, _at: &ComplexBlockId<Block>) -> ClientResult<Option<StorageEntry<Block, T>>> {
fn read_entry(
&self,
_at: &ComplexBlockId<Block>,
) -> ClientResult<Option<StorageEntry<Block, T>>> {
Err(ClientError::Backend("TestError".into()))
}
}
@@ -287,17 +328,18 @@ pub mod tests {
 	impl<Block: BlockT, T: CacheItemT> DummyStorage<Block, T> {
 		pub fn new() -> Self {
 			DummyStorage {
-				meta: Metadata {
-					finalized: None,
-					unfinalized: Vec::new(),
-				},
+				meta: Metadata { finalized: None, unfinalized: Vec::new() },
 				ids: HashMap::new(),
 				headers: HashMap::new(),
 				entries: HashMap::new(),
 			}
 		}

-		pub fn with_meta(mut self, finalized: Option<ComplexBlockId<Block>>, unfinalized: Vec<ComplexBlockId<Block>>) -> Self {
+		pub fn with_meta(
+			mut self,
+			finalized: Option<ComplexBlockId<Block>>,
+			unfinalized: Vec<ComplexBlockId<Block>>,
+		) -> Self {
 			self.meta.finalized = finalized;
 			self.meta.unfinalized = unfinalized;
 			self
@@ -313,7 +355,11 @@ pub mod tests {
 			self
 		}

-		pub fn with_entry(mut self, at: ComplexBlockId<Block>, entry: StorageEntry<Block, T>) -> Self {
+		pub fn with_entry(
+			mut self,
+			at: ComplexBlockId<Block>,
+			entry: StorageEntry<Block, T>,
+		) -> Self {
 			self.entries.insert(at.hash, entry);
 			self
 		}
@@ -332,7 +378,10 @@ pub mod tests {
 			Ok(self.meta.clone())
 		}

-		fn read_entry(&self, at: &ComplexBlockId<Block>) -> ClientResult<Option<StorageEntry<Block, T>>> {
+		fn read_entry(
+			&self,
+			at: &ComplexBlockId<Block>,
+		) -> ClientResult<Option<StorageEntry<Block, T>>> {
 			Ok(self.entries.get(&at.hash).cloned())
 		}
 	}
@@ -366,7 +415,11 @@ pub mod tests {
 	}

 	impl<Block: BlockT, T: CacheItemT> StorageTransaction<Block, T> for DummyTransaction<Block> {
-		fn insert_storage_entry(&mut self, at: &ComplexBlockId<Block>, _entry: &StorageEntry<Block, T>) {
+		fn insert_storage_entry(
+			&mut self,
+			at: &ComplexBlockId<Block>,
+			_entry: &StorageEntry<Block, T>,
+		) {
 			self.inserted_entries.insert(at.hash);
 		}
@@ -380,7 +433,9 @@ pub mod tests {
 			unfinalized: &[Fork<Block, T>],
 			operation: &CommitOperation<Block, T>,
 		) {
-			self.updated_meta = Some(meta::decode(&meta::encode(best_finalized_entry, unfinalized, operation)).unwrap());
+			self.updated_meta = Some(
+				meta::decode(&meta::encode(best_finalized_entry, unfinalized, operation)).unwrap(),
+			);
 		}
 	}
 }
+63 -59
@@ -18,17 +18,27 @@
 //! DB-backed cache of blockchain data.

-use std::{sync::Arc, collections::{HashMap, hash_map::Entry}};
-use parking_lot::RwLock;
-
-use sc_client_api::blockchain::{well_known_cache_keys::{self, Id as CacheKeyId}, Cache as BlockchainCache};
-use sp_blockchain::{Result as ClientResult, HeaderMetadataCache};
+use std::{
+	collections::{hash_map::Entry, HashMap},
+	sync::Arc,
+};
+
+use crate::{
+	utils::{self, COLUMN_META},
+	DbHash,
+};
+use codec::{Decode, Encode};
+use parking_lot::RwLock;
+use sc_client_api::blockchain::{
+	well_known_cache_keys::{self, Id as CacheKeyId},
+	Cache as BlockchainCache,
+};
+use sp_blockchain::{HeaderMetadataCache, Result as ClientResult};
 use sp_database::{Database, Transaction};
-use codec::{Encode, Decode};
-use sp_runtime::generic::BlockId;
-use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero};
-
-use crate::utils::{self, COLUMN_META};
-use crate::DbHash;
+use sp_runtime::{
+	generic::BlockId,
+	traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero},
+};

 use self::list_cache::{ListCache, PruningStrategy};
@@ -118,7 +128,10 @@ impl<Block: BlockT> DbCache<Block> {
 	}

 	/// Begin cache transaction.
-	pub fn transaction<'a>(&'a mut self, tx: &'a mut Transaction<DbHash>) -> DbCacheTransaction<'a, Block> {
+	pub fn transaction<'a>(
+		&'a mut self,
+		tx: &'a mut Transaction<DbHash>,
+	) -> DbCacheTransaction<'a, Block> {
 		DbCacheTransaction {
 			cache: self,
 			tx,
@@ -164,7 +177,7 @@ impl<Block: BlockT> DbCache<Block> {
 			self.key_lookup_column,
 			self.header_column,
 			self.cache_column,
-			&self.best_finalized_block
+			&self.best_finalized_block,
 		)
 	}
 }
@@ -184,19 +197,16 @@ fn get_cache_helper<'a, Block: BlockT>(
 		Entry::Occupied(entry) => Ok(entry.into_mut()),
 		Entry::Vacant(entry) => {
 			let cache = ListCache::new(
-				self::list_storage::DbStorage::new(name.to_vec(), db.clone(),
-					self::list_storage::DbColumns {
-						meta: COLUMN_META,
-						key_lookup,
-						header,
-						cache,
-					},
+				self::list_storage::DbStorage::new(
+					name.to_vec(),
+					db.clone(),
+					self::list_storage::DbColumns { meta: COLUMN_META, key_lookup, header, cache },
 				),
 				cache_pruning_strategy(name),
 				best_finalized_block.clone(),
 			)?;
 			Ok(entry.insert(cache))
-		}
+		},
 	}
 }
@@ -210,10 +220,7 @@ pub struct DbCacheTransactionOps<Block: BlockT> {
 impl<Block: BlockT> DbCacheTransactionOps<Block> {
 	/// Empty transaction ops.
 	pub fn empty() -> DbCacheTransactionOps<Block> {
-		DbCacheTransactionOps {
-			cache_at_ops: HashMap::new(),
-			best_finalized_block: None,
-		}
+		DbCacheTransactionOps { cache_at_ops: HashMap::new(), best_finalized_block: None }
 	}
 }
@@ -244,19 +251,21 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> {
 	) -> ClientResult<Self> {
 		// prepare list of caches that are not update
 		// (we might still need to do some cache maintenance in this case)
-		let missed_caches = self.cache.cache_at.keys()
+		let missed_caches = self
+			.cache
+			.cache_at
+			.keys()
 			.filter(|cache| !data_at.contains_key(*cache))
 			.cloned()
 			.collect::<Vec<_>>();

-		let mut insert_op = |name: CacheKeyId, value: Option<Vec<u8>>| -> Result<(), sp_blockchain::Error> {
+		let mut insert_op = |name: CacheKeyId,
+		                     value: Option<Vec<u8>>|
+		 -> Result<(), sp_blockchain::Error> {
 			let cache = self.cache.get_cache(name)?;
 			let cache_ops = self.cache_at_ops.entry(name).or_default();
 			cache.on_block_insert(
-				&mut self::list_storage::DbStorageTransaction::new(
-					cache.storage(),
-					&mut self.tx,
-				),
+				&mut self::list_storage::DbStorageTransaction::new(cache.storage(), &mut self.tx),
 				parent.clone(),
 				block.clone(),
 				value,
@@ -271,8 +280,7 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> {
 		missed_caches.into_iter().try_for_each(|name| insert_op(name, None))?;

 		match entry_type {
-			EntryType::Final | EntryType::Genesis =>
-				self.best_finalized_block = Some(block),
+			EntryType::Final | EntryType::Genesis => self.best_finalized_block = Some(block),
 			EntryType::NonFinal => (),
 		}
@@ -288,10 +296,7 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> {
 		for (name, cache) in self.cache.cache_at.iter() {
 			let cache_ops = self.cache_at_ops.entry(*name).or_default();
 			cache.on_block_finalize(
-				&mut self::list_storage::DbStorageTransaction::new(
-					cache.storage(),
-					&mut self.tx
-				),
+				&mut self::list_storage::DbStorageTransaction::new(cache.storage(), &mut self.tx),
 				parent.clone(),
 				block.clone(),
 				cache_ops,
@@ -304,17 +309,11 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> {
 	}

 	/// When block is reverted.
-	pub fn on_block_revert(
-		mut self,
-		reverted_block: &ComplexBlockId<Block>,
-	) -> ClientResult<Self> {
+	pub fn on_block_revert(mut self, reverted_block: &ComplexBlockId<Block>) -> ClientResult<Self> {
 		for (name, cache) in self.cache.cache_at.iter() {
 			let cache_ops = self.cache_at_ops.entry(*name).or_default();
 			cache.on_block_revert(
-				&mut self::list_storage::DbStorageTransaction::new(
-					cache.storage(),
-					&mut self.tx
-				),
+				&mut self::list_storage::DbStorageTransaction::new(cache.storage(), &mut self.tx),
 				reverted_block,
 				cache_ops,
 			)?;
@@ -352,7 +351,9 @@ impl<Block: BlockT> BlockchainCache<Block> for DbCacheSync<Block> {
 		&self,
 		key: &CacheKeyId,
 		at: &BlockId<Block>,
-	) -> ClientResult<Option<((NumberFor<Block>, Block::Hash), Option<(NumberFor<Block>, Block::Hash)>, Vec<u8>)>> {
+	) -> ClientResult<
+		Option<((NumberFor<Block>, Block::Hash), Option<(NumberFor<Block>, Block::Hash)>, Vec<u8>)>,
+	> {
 		let mut cache = self.0.write();
 		let header_metadata_cache = cache.header_metadata_cache.clone();
 		let cache = cache.get_cache(*key)?;
@@ -360,36 +361,39 @@ impl<Block: BlockT> BlockchainCache<Block> for DbCacheSync<Block> {
 		let db = storage.db();
 		let columns = storage.columns();
 		let at = match *at {
-			BlockId::Hash(hash) => {
-				match header_metadata_cache.header_metadata(hash) {
-					Some(metadata) => ComplexBlockId::new(hash, metadata.number),
-					None => {
-						let header = utils::require_header::<Block>(
-							&**db,
-							columns.key_lookup,
-							columns.header,
-							BlockId::Hash(hash.clone()))?;
-						ComplexBlockId::new(hash, *header.number())
-					}
-				}
-			}
+			BlockId::Hash(hash) => match header_metadata_cache.header_metadata(hash) {
+				Some(metadata) => ComplexBlockId::new(hash, metadata.number),
+				None => {
+					let header = utils::require_header::<Block>(
+						&**db,
+						columns.key_lookup,
+						columns.header,
+						BlockId::Hash(hash.clone()),
+					)?;
+					ComplexBlockId::new(hash, *header.number())
+				},
+			},
 			BlockId::Number(number) => {
 				let hash = utils::require_header::<Block>(
 					&**db,
 					columns.key_lookup,
 					columns.header,
-					BlockId::Number(number.clone()))?.hash();
+					BlockId::Number(number.clone()),
+				)?
+				.hash();
 				ComplexBlockId::new(hash, number)
 			},
 		};

-		cache.value_at_block(&at)
-			.map(|block_and_value| block_and_value.map(|(begin_block, end_block, value)|
+		cache.value_at_block(&at).map(|block_and_value| {
+			block_and_value.map(|(begin_block, end_block, value)| {
 				(
 					(begin_block.number, begin_block.hash),
 					end_block.map(|end_block| (end_block.number, end_block.hash)),
 					value,
-				)))
+				)
+			})
+		})
 	}
 }