diff --git a/substrate/core/client/db/src/cache.rs b/substrate/core/client/db/src/cache.rs
deleted file mode 100644
index 4538709f52..0000000000
--- a/substrate/core/client/db/src/cache.rs
+++ /dev/null
@@ -1,457 +0,0 @@
-// Copyright 2017 Parity Technologies (UK) Ltd.
-// This file is part of Substrate.
-
-// Substrate is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Substrate is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Substrate. If not, see .
-
-//! DB-backed cache of blockchain data.
-
-use std::sync::Arc;
-use parking_lot::RwLock;
-
-use kvdb::{KeyValueDB, DBTransaction};
-
-use client::blockchain::Cache as BlockchainCache;
-use client::error::Result as ClientResult;
-use codec::{Codec, Encode, Decode};
-use primitives::AuthorityId;
-use runtime_primitives::generic::BlockId;
-use runtime_primitives::traits::{Block as BlockT, As, NumberFor};
-use utils::{COLUMN_META, BlockLookupKey, db_err, meta_keys, lookup_key_to_number, number_to_lookup_key};
-
-/// Database-backed cache of blockchain data.
-pub struct DbCache {
- db: Arc,
- block_index_column: Option,
- header_column: Option,
- authorities_at: DbCacheList>,
-}
-
-impl DbCache
- where
- Block: BlockT,
- NumberFor: As,
-{
- /// Create new cache.
- pub fn new(
- db: Arc,
- block_index_column: Option,
- header_column: Option,
- authorities_column: Option
- ) -> ClientResult {
- Ok(DbCache {
- db: db.clone(),
- block_index_column,
- header_column,
- authorities_at: DbCacheList::new(db, meta_keys::BEST_AUTHORITIES, authorities_column)?,
- })
- }
-
- /// Get authorities_cache.
- #[allow(unused)]
- pub fn authorities_at_cache(&self) -> &DbCacheList> {
- &self.authorities_at
- }
-}
-
-impl BlockchainCache for DbCache
- where
- Block: BlockT,
- NumberFor: As,
-{
- fn authorities_at(&self, at: BlockId) -> Option> {
- use runtime_primitives::traits::Header as HeaderT;
-
- let number = match at {
- BlockId::Number(n) => Ok(number_to_lookup_key(n)),
- BlockId::Hash(h) => {
- let maybe_header = ::utils::read_header::(
- &*self.db,
- self.block_index_column,
- self.header_column,
- BlockId::Hash(h),
- );
-
- match maybe_header {
- Ok(Some(hdr)) => Ok(number_to_lookup_key(*hdr.number())),
- Ok(None) => return None, // no such block.
- Err(e) => Err(e),
- }
- }
- };
-
- let authorities_at = number.and_then(|at| self.authorities_at.value_at_key(at));
-
- match authorities_at {
- Ok(authorities) => authorities,
- Err(error) => {
- warn!("Trying to read authorities from db cache has failed with: {}", error);
- None
- },
- }
- }
-}
-
-/// Database-backed blockchain cache which holds its entries as a list.
-/// The meta column holds the pointer to the best known cache entry and
-/// every entry points to the previous entry.
-/// New entry appears when the set of authorities changes in block, so the
-/// best entry here means the entry that is valid for the best block (and
-/// probably for its ascendants).
-pub struct DbCacheList {
- db: Arc,
- meta_key: &'static [u8],
- column: Option,
- /// Best entry at the moment. None means that cache has no entries at all.
- best_entry: RwLock, T>>>,
-}
-
-/// Single cache entry.
-#[derive(Clone)]
-#[cfg_attr(test, derive(Debug, PartialEq))]
-pub struct Entry {
- /// first block, when this value became actual
- valid_from: N,
- /// None means that we do not know the value starting from `valid_from` block
- value: Option,
-}
-
-/// Internal representation of the single cache entry. The entry points to the
-/// previous entry in the cache, allowing us to traverse back in time in list-style.
-#[derive(Encode, Decode)]
-#[cfg_attr(test, derive(Debug, PartialEq))]
-struct StorageEntry {
- /// None if valid from the beginning
- prev_valid_from: Option,
- /// None means that we do not know the value starting from `valid_from` block
- value: Option,
-}
-
-impl DbCacheList
- where
- Block: BlockT,
- NumberFor: As,
- T: Clone + PartialEq + Codec,
-{
- /// Creates new cache list.
- fn new(db: Arc, meta_key: &'static [u8], column: Option) -> ClientResult {
- let best_entry = RwLock::new(db.get(COLUMN_META, meta_key)
- .map_err(db_err)
- .and_then(|block| match block {
- Some(block) => {
- let valid_from = lookup_key_to_number(&block)?;
- read_storage_entry::(&*db, column, valid_from)
- .map(|entry| Some(Entry {
- valid_from,
- value: entry
- .expect("meta entry references the entry at the block; storage entry at block exists when referenced; qed")
- .value,
- }))
- },
- None => Ok(None),
- })?);
-
- Ok(DbCacheList {
- db,
- column,
- meta_key,
- best_entry,
- })
- }
-
- /// Gets the best known entry.
- pub fn best_entry(&self) -> Option, T>> {
- self.best_entry.read().clone()
- }
-
- /// Commits the new best pending value to the database. Returns Some if best entry must
- /// be updated after transaction is committed.
- #[allow(unused)]
- pub fn commit_best_entry(
- &self,
- transaction: &mut DBTransaction,
- valid_from: NumberFor,
- pending_value: Option
- ) -> Option, T>> {
- let best_entry = self.best_entry();
- let update_best_entry = match (
- best_entry.as_ref().and_then(|a| a.value.as_ref()),
- pending_value.as_ref()
- ) {
- (Some(best_value), Some(pending_value)) => best_value != pending_value,
- (None, Some(_)) | (Some(_), None) => true,
- (None, None) => false,
- };
- if !update_best_entry {
- return None;
- }
-
- let valid_from_key = number_to_lookup_key(valid_from);
- transaction.put(COLUMN_META, self.meta_key, &valid_from_key);
- transaction.put(self.column, &valid_from_key, &StorageEntry {
- prev_valid_from: best_entry.map(|b| b.valid_from),
- value: pending_value.clone(),
- }.encode());
-
- Some(Entry {
- valid_from,
- value: pending_value,
- })
- }
-
- /// Updates the best in-memory cache entry. Must be called after transaction with changes
- /// from commit_best_entry has been committed.
- #[allow(unused)]
- pub fn update_best_entry(&self, best_entry: Option, T>>) {
- *self.best_entry.write() = best_entry;
- }
-
- /// Prune all entries from the beginning up to the block (including entry at the number). Returns
- /// the number of pruned entries. Pruning never deletes the latest entry in the cache.
- #[allow(unused)]
- pub fn prune_entries(
- &self,
- transaction: &mut DBTransaction,
- last_to_prune: NumberFor
- ) -> ClientResult {
- // find the last entry we want to keep
- let mut last_entry_to_keep = match self.best_entry() {
- Some(best_entry) => best_entry.valid_from,
- None => return Ok(0),
- };
- let mut first_entry_to_remove = last_entry_to_keep;
- while first_entry_to_remove > last_to_prune {
- last_entry_to_keep = first_entry_to_remove;
-
- let entry = read_storage_entry::(&*self.db, self.column, first_entry_to_remove)?
- .expect("entry referenced from the next entry; entry exists when referenced; qed");
- // if we have reached the first list entry
- // AND all list entries are for blocks that are later than last_to_prune
- // => nothing to prune
- first_entry_to_remove = match entry.prev_valid_from {
- Some(prev_valid_from) => prev_valid_from,
- None => return Ok(0),
- }
- }
-
- // remove all entries, starting from entry_to_remove
- let mut pruned = 0;
- let mut entry_to_remove = Some(first_entry_to_remove);
- while let Some(current_entry) = entry_to_remove {
- let entry = read_storage_entry::(&*self.db, self.column, current_entry)?
- .expect("referenced entry exists; entry_to_remove is a reference to the entry; qed");
-
- if current_entry != last_entry_to_keep {
- transaction.delete(self.column, &number_to_lookup_key(current_entry));
- pruned += 1;
- }
- entry_to_remove = entry.prev_valid_from;
- }
-
- let mut entry = read_storage_entry::(&*self.db, self.column, last_entry_to_keep)?
- .expect("last_entry_to_keep >= first_entry_to_remove; that means that we're leaving this entry in the db; qed");
- entry.prev_valid_from = None;
- transaction.put(self.column, &number_to_lookup_key(last_entry_to_keep), &entry.encode());
-
- Ok(pruned)
- }
-
- /// Reads the cached value, actual at given block. Returns None if the value was not cached
- /// or if it has been pruned.
- fn value_at_key(&self, key: BlockLookupKey) -> ClientResult> {
- let at = lookup_key_to_number::>(&key)?;
- let best_valid_from = match self.best_entry() {
- // there are entries in cache
- Some(best_entry) => {
- // we're looking for the best value
- if at >= best_entry.valid_from {
- return Ok(best_entry.value);
- }
-
- // we're looking for the value of older blocks
- best_entry.valid_from
- },
- // there are no entries in the cache
- None => return Ok(None),
- };
-
- let mut entry = read_storage_entry::(&*self.db, self.column, best_valid_from)?
- .expect("self.best_entry().is_some() if there's entry for best_valid_from; qed");
- loop {
- let prev_valid_from = match entry.prev_valid_from {
- Some(prev_valid_from) => prev_valid_from,
- None => return Ok(None),
- };
-
- let prev_entry = read_storage_entry::(&*self.db, self.column, prev_valid_from)?
- .expect("entry referenced from the next entry; entry exists when referenced; qed");
- if at >= prev_valid_from {
- return Ok(prev_entry.value);
- }
-
- entry = prev_entry;
- }
- }
-}
-
-/// Reads the entry at the block with given number.
-fn read_storage_entry(
- db: &KeyValueDB,
- column: Option,
- number: NumberFor
-) -> ClientResult, T>>>
- where
- Block: BlockT,
- NumberFor: As,
- T: Codec,
-{
- db.get(column, &number_to_lookup_key(number))
- .and_then(|entry| match entry {
- Some(entry) => Ok(StorageEntry::, T>::decode(&mut &entry[..])),
- None => Ok(None),
- })
- .map_err(db_err)
-}
-
-#[cfg(test)]
-mod tests {
- use runtime_primitives::testing::Block as RawBlock;
- use light::{AUTHORITIES_ENTRIES_TO_KEEP, columns, LightStorage};
- use light::tests::insert_block;
- use super::*;
-
- type Block = RawBlock;
-
- #[test]
- fn authorities_storage_entry_serialized() {
- let test_cases: Vec>> = vec![
- StorageEntry { prev_valid_from: Some(42), value: Some(vec![[1u8; 32].into()]) },
- StorageEntry { prev_valid_from: None, value: Some(vec![[1u8; 32].into(), [2u8; 32].into()]) },
- StorageEntry { prev_valid_from: None, value: None },
- ];
-
- for expected in test_cases {
- let serialized = expected.encode();
- let deserialized = StorageEntry::decode(&mut &serialized[..]).unwrap();
- assert_eq!(expected, deserialized);
- }
- }
-
- #[test]
- #[ignore] // TODO: unignore when cache reinstated.
- fn best_authorities_are_updated() {
- let db = LightStorage::new_test();
- let authorities_at: Vec<(usize, Option>>)> = vec![
- (0, None),
- (0, None),
- (1, Some(Entry { valid_from: 1, value: Some(vec![[2u8; 32].into()]) })),
- (1, Some(Entry { valid_from: 1, value: Some(vec![[2u8; 32].into()]) })),
- (2, Some(Entry { valid_from: 3, value: Some(vec![[4u8; 32].into()]) })),
- (2, Some(Entry { valid_from: 3, value: Some(vec![[4u8; 32].into()]) })),
- (3, Some(Entry { valid_from: 5, value: None })),
- (3, Some(Entry { valid_from: 5, value: None })),
- ];
-
- // before any block, there are no entries in cache
- assert!(db.cache().authorities_at_cache().best_entry().is_none());
- assert_eq!(db.db().iter(columns::AUTHORITIES).count(), 0);
-
- // insert blocks and check that best_authorities() returns correct result
- let mut prev_hash = Default::default();
- for number in 0..authorities_at.len() {
- let authorities_at_number = authorities_at[number].1.clone().and_then(|e| e.value);
- prev_hash = insert_block(&db, &prev_hash, number as u64, authorities_at_number);
- assert_eq!(db.cache().authorities_at_cache().best_entry(), authorities_at[number].1);
- assert_eq!(db.db().iter(columns::AUTHORITIES).count(), authorities_at[number].0);
- }
-
- // check that authorities_at() returns correct results for all retrospective blocks
- for number in 1..authorities_at.len() + 1 {
- assert_eq!(db.cache().authorities_at(BlockId::Number(number as u64)),
- authorities_at.get(number + 1)
- .or_else(|| authorities_at.last())
- .unwrap().1.clone().and_then(|e| e.value));
- }
-
- // now check that cache entries are pruned when new blocks are inserted
- let mut current_entries_count = authorities_at.last().unwrap().0;
- let pruning_starts_at = AUTHORITIES_ENTRIES_TO_KEEP as usize;
- for number in authorities_at.len()..authorities_at.len() + pruning_starts_at {
- prev_hash = insert_block(&db, &prev_hash, number as u64, None);
- if number > pruning_starts_at {
- let prev_entries_count = authorities_at[number - pruning_starts_at].0;
- let entries_count = authorities_at.get(number - pruning_starts_at + 1).map(|e| e.0)
- .unwrap_or_else(|| authorities_at.last().unwrap().0);
- current_entries_count -= entries_count - prev_entries_count;
- }
-
- // there's always at least 1 entry in the cache (after first insertion)
- assert_eq!(db.db().iter(columns::AUTHORITIES).count(), ::std::cmp::max(current_entries_count, 1));
- }
- }
-
- #[test]
- fn best_authorities_are_pruned() {
- let db = LightStorage::::new_test();
- let mut transaction = DBTransaction::new();
-
- // insert first entry at block#100
- db.cache().authorities_at_cache().update_best_entry(
- db.cache().authorities_at_cache().commit_best_entry(&mut transaction, 100, Some(vec![[1u8; 32].into()])));
- db.db().write(transaction).unwrap();
-
- // no entries are pruned, since there's only one entry in the cache
- let mut transaction = DBTransaction::new();
- assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 50).unwrap(), 0);
- assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 100).unwrap(), 0);
- assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 150).unwrap(), 0);
-
- // insert second entry at block#200
- let mut transaction = DBTransaction::new();
- db.cache().authorities_at_cache().update_best_entry(
- db.cache().authorities_at_cache().commit_best_entry(&mut transaction, 200, Some(vec![[2u8; 32].into()])));
- db.db().write(transaction).unwrap();
-
- let mut transaction = DBTransaction::new();
- assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 50).unwrap(), 0);
- assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 100).unwrap(), 1);
- assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 150).unwrap(), 1);
- // still only 1 entry is removed since pruning never deletes the last entry
- assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 200).unwrap(), 1);
- assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 250).unwrap(), 1);
-
- // physically remove entry for block#100 from db
- let mut transaction = DBTransaction::new();
- assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 150).unwrap(), 1);
- db.db().write(transaction).unwrap();
-
- assert_eq!(db.cache().authorities_at_cache().best_entry().unwrap().value, Some(vec![[2u8; 32].into()]));
- assert_eq!(db.cache().authorities_at(BlockId::Number(50)), None);
- assert_eq!(db.cache().authorities_at(BlockId::Number(100)), None);
- assert_eq!(db.cache().authorities_at(BlockId::Number(150)), None);
- assert_eq!(db.cache().authorities_at(BlockId::Number(200)), Some(vec![[2u8; 32].into()]));
- assert_eq!(db.cache().authorities_at(BlockId::Number(250)), Some(vec![[2u8; 32].into()]));
-
- // try to delete last entry => failure (no entries are removed)
- let mut transaction = DBTransaction::new();
- assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 300).unwrap(), 0);
- db.db().write(transaction).unwrap();
-
- assert_eq!(db.cache().authorities_at_cache().best_entry().unwrap().value, Some(vec![[2u8; 32].into()]));
- assert_eq!(db.cache().authorities_at(BlockId::Number(50)), None);
- assert_eq!(db.cache().authorities_at(BlockId::Number(100)), None);
- assert_eq!(db.cache().authorities_at(BlockId::Number(150)), None);
- assert_eq!(db.cache().authorities_at(BlockId::Number(200)), Some(vec![[2u8; 32].into()]));
- assert_eq!(db.cache().authorities_at(BlockId::Number(250)), Some(vec![[2u8; 32].into()]));
- }
-}
diff --git a/substrate/core/client/db/src/cache/list_cache.rs b/substrate/core/client/db/src/cache/list_cache.rs
new file mode 100644
index 0000000000..50d227c165
--- /dev/null
+++ b/substrate/core/client/db/src/cache/list_cache.rs
@@ -0,0 +1,1383 @@
+// Copyright 2017 Parity Technologies (UK) Ltd.
+// This file is part of Substrate.
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Substrate. If not, see .
+
+//! List-based cache.
+//!
+//! Maintains several lists, containing nodes that are inserted whenever
+//! cached value at new block differs from the value at previous block.
+//! Example:
+//! B1(a) <--- B2(b) <--- B3(b) <--- B4(c)
+//! N1(b) <-------------- N2(c)
+//!
+//! There's single list for all finalized blocks and >= 0 lists for unfinalized
+//! blocks.
+//! When new non-final block is inserted (with value that differs from the value
+//! at parent), it starts new unfinalized fork.
+//! When new final block is inserted (with value that differs from the value at
+//! parent), new entry is appended to the finalized fork.
+//! When existing non-final block is finalized (with value that differs from the
+//! value at parent), new entry is appended to the finalized fork AND unfinalized
+//! fork is dropped.
+//!
+//! Entries from abandoned unfinalized forks (forks that are forking from block B
+//! which is ascendant of the best finalized block) are deleted when block F with
+//! number B.number (i.e. 'parallel' canon block) is finalized.
+//!
+//! Finalized entry E1 is pruned when block B is finalized so that:
+//! EntryAt(B.number - prune_depth).points_to(E1)
+
+use std::collections::BTreeSet;
+
+use client::error::{ErrorKind as ClientErrorKind, Result as ClientResult};
+use runtime_primitives::traits::{Block as BlockT, NumberFor, As, Zero};
+
+use cache::{CacheItemT, ComplexBlockId};
+use cache::list_entry::{Entry, StorageEntry};
+use cache::list_storage::{Storage, StorageTransaction, Metadata};
+
+/// List-based cache.
+pub struct ListCache> {
+ /// Cache storage.
+ storage: S,
+ /// Prune depth.
+ prune_depth: NumberFor,
+ /// Best finalized block.
+ best_finalized_block: ComplexBlockId,
+ /// Best finalized entry (if exists).
+ best_finalized_entry: Option>,
+ /// All unfinalized 'forks'.
+ unfinalized: Vec>,
+}
+
+/// All possible list cache operations that could be performed after transaction is committed.
+#[derive(Debug)]
+#[cfg_attr(test, derive(PartialEq))]
+pub enum CommitOperation {
+ /// New block is appended to the fork without changing the cached value.
+ AppendNewBlock(usize, ComplexBlockId),
+ /// New block is appended to the fork with the different value.
+ AppendNewEntry(usize, Entry),
+ /// New fork is added with the given head entry.
+ AddNewFork(Entry),
+ /// New block is finalized and possibly:
+ /// - new entry is finalized AND/OR
+ /// - some forks are destroyed
+ BlockFinalized(ComplexBlockId, Option>, BTreeSet),
+}
+
+/// Single fork of list-based cache.
+#[derive(Debug)]
+#[cfg_attr(test, derive(PartialEq))]
+pub struct Fork {
+ /// The best block of this fork. We do not save this field in the database to avoid
+ /// extra updates => it could be None after restart. It will be either filled when
+ /// the block is appended to this fork, or the whole fork will be abandoned when the
+ /// block from the other fork is finalized
+ best_block: Option>,
+ /// The head entry of this fork.
+ head: Entry,
+}
+
+/// Outcome of Fork::try_append_or_fork.
+#[derive(Debug)]
+#[cfg_attr(test, derive(PartialEq))]
+pub enum ForkAppendResult {
+ /// New entry should be appended to the end of the fork.
+ Append,
+ /// New entry should be forked from the fork, starting with entry at given block.
+ Fork(ComplexBlockId),
+}
+
+impl> ListCache {
+ /// Create new db list cache entry.
+ pub fn new(storage: S, prune_depth: NumberFor, best_finalized_block: ComplexBlockId) -> Self {
+ let (best_finalized_entry, unfinalized) = storage.read_meta()
+ .and_then(|meta| read_forks(&storage, meta))
+ .unwrap_or_else(|error| {
+ warn!(target: "db", "Unable to initialize list cache: {}. Restarting", error);
+ (None, Vec::new())
+ });
+
+ ListCache {
+ storage,
+ prune_depth,
+ best_finalized_block,
+ best_finalized_entry,
+ unfinalized,
+ }
+ }
+
+ /// Get reference to the storage.
+ pub fn storage(&self) -> &S {
+ &self.storage
+ }
+
+ /// Get value valid at block.
+ pub fn value_at_block(&self, at: &ComplexBlockId) -> ClientResult> {
+ let head = if at.number <= self.best_finalized_block.number {
+ // if the block is older than the best known finalized block
+ // => we're should search for the finalized value
+
+ // BUT since we're not guaranteeing to provide correct values for forks
+ // behind the finalized block, check if the block is finalized first
+ if !chain::is_finalized_block(&self.storage, at, As::sa(::std::u64::MAX))? {
+ return Ok(None);
+ }
+
+ self.best_finalized_entry.as_ref()
+ } else if self.unfinalized.is_empty() {
+ // there are no unfinalized entries
+ // => we should search for the finalized value
+ self.best_finalized_entry.as_ref()
+ } else {
+ // there are unfinalized entries
+ // => find the fork containing given block and read from this fork
+ // IF there's no matching fork, ensure that this isn't a block from a fork that has forked
+ // behind the best finalized block and search at finalized fork
+
+ match self.find_unfinalized_fork(at)? {
+ Some(fork) => Some(&fork.head),
+ None => match self.best_finalized_entry.as_ref() {
+ Some(best_finalized_entry) if chain::is_connected_to_block(&self.storage, &best_finalized_entry.valid_from, at)? =>
+ Some(best_finalized_entry),
+ _ => None,
+ },
+ }
+ };
+
+ match head {
+ Some(head) => head.search_best_before(&self.storage, at.number, true)
+ .map(|e| e.and_then(|e| e.0.value)),
+ None => Ok(None),
+ }
+ }
+
+ /// When new block is inserted into database.
+ pub fn on_block_insert>(
+ &self,
+ tx: &mut Tx,
+ parent: ComplexBlockId,
+ block: ComplexBlockId,
+ value: Option,
+ is_final: bool,
+ ) -> ClientResult>> {
+ // this guarantee is currently provided by LightStorage && we're relying on it here
+ debug_assert!(!is_final || self.best_finalized_block.hash == parent.hash);
+
+ // we do not store any values behind finalized
+ if block.number != Zero::zero() && self.best_finalized_block.number >= block.number {
+ return Ok(None);
+ }
+
+ // if the block is not final, it is possibly appended to/forking from existing unfinalized fork
+ if !is_final {
+ let mut fork_and_action = None;
+
+ // first: try to find fork that is known to has the best block we're appending to
+ for (index, fork) in self.unfinalized.iter().enumerate() {
+ if fork.try_append(&parent) {
+ fork_and_action = Some((index, ForkAppendResult::Append));
+ break;
+ }
+ }
+
+ // if not found, check cases:
+ // - we're appending to the fork for the first time after restart;
+ // - we're forking existing unfinalized fork from the middle;
+ if fork_and_action.is_none() {
+ let best_finalized_entry_block = self.best_finalized_entry.as_ref().map(|f| f.valid_from.number);
+ for (index, fork) in self.unfinalized.iter().enumerate() {
+ if let Some(action) = fork.try_append_or_fork(&self.storage, &parent, best_finalized_entry_block)? {
+ fork_and_action = Some((index, action));
+ break;
+ }
+ }
+ }
+
+ // if we have found matching unfinalized fork => early exit
+ match fork_and_action {
+ // append to unfinalized fork
+ Some((index, ForkAppendResult::Append)) => {
+ let new_storage_entry = match self.unfinalized[index].head.try_update(value) {
+ Some(new_storage_entry) => new_storage_entry,
+ None => return Ok(Some(CommitOperation::AppendNewBlock(index, block))),
+ };
+
+ tx.insert_storage_entry(&block, &new_storage_entry);
+ let operation = CommitOperation::AppendNewEntry(index, new_storage_entry.into_entry(block));
+ tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation);
+ return Ok(Some(operation));
+ },
+ // fork from the middle of unfinalized fork
+ Some((_, ForkAppendResult::Fork(prev_valid_from))) => {
+ // it is possible that we're inserting extra (but still required) fork here
+ let new_storage_entry = StorageEntry {
+ prev_valid_from: Some(prev_valid_from),
+ value,
+ };
+
+ tx.insert_storage_entry(&block, &new_storage_entry);
+ let operation = CommitOperation::AddNewFork(new_storage_entry.into_entry(block));
+ tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation);
+ return Ok(Some(operation));
+ },
+ None => (),
+ }
+ }
+
+ // if we're here, then one of following is true:
+ // - either we're inserting final block => all ancestors are already finalized AND the only thing we can do
+ // is to try to update last finalized entry
+ // - either we're inserting non-final blocks that has no ancestors in any known unfinalized forks
+
+ let new_storage_entry = match self.best_finalized_entry.as_ref() {
+ Some(best_finalized_entry) => best_finalized_entry.try_update(value),
+ None if value.is_some() => Some(StorageEntry { prev_valid_from: None, value }),
+ None => None,
+ };
+
+ if !is_final {
+ return Ok(match new_storage_entry {
+ Some(new_storage_entry) => {
+ tx.insert_storage_entry(&block, &new_storage_entry);
+ let operation = CommitOperation::AddNewFork(new_storage_entry.into_entry(block));
+ tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation);
+ Some(operation)
+ },
+ None => None,
+ });
+ }
+
+ // cleanup database from abandoned unfinalized forks and obsolete finalized entries
+ let abandoned_forks = self.destroy_abandoned_forks(tx, &block);
+ self.prune_finalized_entries(tx, &block);
+
+ match new_storage_entry {
+ Some(new_storage_entry) => {
+ tx.insert_storage_entry(&block, &new_storage_entry);
+ let operation = CommitOperation::BlockFinalized(block.clone(), Some(new_storage_entry.into_entry(block)), abandoned_forks);
+ tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation);
+ Ok(Some(operation))
+ },
+ None => Ok(Some(CommitOperation::BlockFinalized(block, None, abandoned_forks))),
+ }
+ }
+
+ /// When previously inserted block is finalized.
+ pub fn on_block_finalize>(
+ &self,
+ tx: &mut Tx,
+ parent: ComplexBlockId,
+ block: ComplexBlockId,
+ ) -> ClientResult>> {
+ // this guarantee is currently provided by LightStorage && we're relying on it here
+ debug_assert_eq!(self.best_finalized_block.hash, parent.hash);
+
+ // there could be at most one entry that is finalizing
+ let finalizing_entry = self.storage.read_entry(&block)?
+ .map(|entry| entry.into_entry(block.clone()));
+
+ // cleanup database from abandoned unfinalized forks and obsolete finalized entries
+ let abandoned_forks = self.destroy_abandoned_forks(tx, &block);
+ self.prune_finalized_entries(tx, &block);
+
+ let update_meta = finalizing_entry.is_some();
+ let operation = CommitOperation::BlockFinalized(block, finalizing_entry, abandoned_forks);
+ if update_meta {
+ tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation);
+ }
+ Ok(Some(operation))
+ }
+
+ /// When transaction is committed.
+ pub fn on_transaction_commit(&mut self, op: CommitOperation) {
+ match op {
+ CommitOperation::AppendNewBlock(index, best_block) => {
+ let mut fork = self.unfinalized.get_mut(index)
+ .expect("ListCache is a crate-private type;
+ internal clients of ListCache are committing transaction while cache is locked;
+ CommitOperation holds valid references while cache is locked; qed");
+ fork.best_block = Some(best_block);
+ },
+ CommitOperation::AppendNewEntry(index, entry) => {
+ let mut fork = self.unfinalized.get_mut(index)
+ .expect("ListCache is a crate-private type;
+ internal clients of ListCache are committing transaction while cache is locked;
+ CommitOperation holds valid references while cache is locked; qed");
+ fork.best_block = Some(entry.valid_from.clone());
+ fork.head = entry;
+ },
+ CommitOperation::AddNewFork(entry) => {
+ self.unfinalized.push(Fork {
+ best_block: Some(entry.valid_from.clone()),
+ head: entry,
+ });
+ },
+ CommitOperation::BlockFinalized(block, finalizing_entry, forks) => {
+ self.best_finalized_block = block;
+ if let Some(finalizing_entry) = finalizing_entry {
+ self.best_finalized_entry = Some(finalizing_entry);
+ }
+ for fork_index in forks.iter().rev() {
+ self.unfinalized.remove(*fork_index);
+ }
+ },
+ }
+ }
+
+ /// Prune old finalized entries.
+ fn prune_finalized_entries>(
+ &self,
+ tx: &mut Tx,
+ block: &ComplexBlockId
+ ) {
+ let mut do_pruning = || -> ClientResult<()> {
+ // calculate last ancient block number
+ let ancient_block = match block.number.as_().checked_sub(self.prune_depth.as_()) {
+ Some(number) => match self.storage.read_id(As::sa(number))? {
+ Some(hash) => ComplexBlockId::new(hash, As::sa(number)),
+ None => return Ok(()),
+ },
+ None => return Ok(()),
+ };
+
+ // if there's an entry at this block:
+ // - remove reference from this entry to the previous entry
+ // - destroy fork starting with previous entry
+ let current_entry = match self.storage.read_entry(&ancient_block)? {
+ Some(current_entry) => current_entry,
+ None => return Ok(()),
+ };
+ let first_entry_to_truncate = match current_entry.prev_valid_from {
+ Some(prev_valid_from) => prev_valid_from,
+ None => return Ok(()),
+ };
+
+ // truncate ancient entry
+ tx.insert_storage_entry(&ancient_block, &StorageEntry {
+ prev_valid_from: None,
+ value: current_entry.value,
+ });
+
+ // destroy 'fork' ending with previous entry
+ Fork { best_block: None, head: Entry { valid_from: first_entry_to_truncate, value: None } }
+ .destroy(&self.storage, tx, None)
+ };
+
+ if let Err(error) = do_pruning() {
+ warn!(target: "db", "Failed to prune ancient cache entries: {}", error);
+ }
+ }
+
+ /// Try to destroy abandoned forks (forked before best finalized block) when block is finalized.
+ fn destroy_abandoned_forks>(
+ &self,
+ tx: &mut Tx,
+ block: &ComplexBlockId
+ ) -> BTreeSet {
+ let mut destroyed = BTreeSet::new();
+ for (index, fork) in self.unfinalized.iter().enumerate() {
+ if fork.head.valid_from.number == block.number {
+ destroyed.insert(index);
+ if fork.head.valid_from.hash != block.hash {
+ if let Err(error) = fork.destroy(&self.storage, tx, Some(block.number)) {
+ warn!(target: "db", "Failed to destroy abandoned unfinalized cache fork: {}", error);
+ }
+ }
+ }
+ }
+
+ destroyed
+ }
+
+ /// Search unfinalized fork where given block belongs.
+ fn find_unfinalized_fork(&self, block: &ComplexBlockId) -> ClientResult>> {
+ for unfinalized in &self.unfinalized {
+ if unfinalized.matches(&self.storage, block)? {
+ return Ok(Some(&unfinalized));
+ }
+ }
+
+ Ok(None)
+ }
+}
+
+impl Fork {
+ /// Get reference to the head entry of this fork.
+ pub fn head(&self) -> &Entry {
+ &self.head
+ }
+
+ /// Check if the block is the part of the fork.
+ pub fn matches>(
+ &self,
+ storage: &S,
+ block: &ComplexBlockId,
+ ) -> ClientResult {
+ let range = self.head.search_best_range_before(storage, block.number)?;
+ match range {
+ None => Ok(false),
+ Some((begin, end)) => chain::is_connected_to_range(storage, block, (&begin, end.as_ref())),
+ }
+ }
+
+ /// Try to append NEW block to the fork. This method willonly 'work' (return true) when block
+ /// is actually appended to the fork AND the best known block of the fork is known (i.e. some
+ /// block has been already appended to this fork after last restart).
+ pub fn try_append(&self, parent: &ComplexBlockId) -> bool {
+ // when the best block of the fork is known, the check is trivial
+ //
+ // most of calls will hopefully end here, because best_block is only unknown
+ // after restart and until new block is appended to the fork
+ self.best_block.as_ref() == Some(parent)
+ }
+
+ /// Try to append new block to the fork OR fork it.
+ pub fn try_append_or_fork>(
+ &self,
+ storage: &S,
+ parent: &ComplexBlockId,
+ best_finalized_entry_block: Option>,
+ ) -> ClientResult>> {
+ // try to find entries that are (possibly) surrounding the parent block
+ let range = self.head.search_best_range_before(storage, parent.number)?;
+ let begin = match range {
+ Some((begin, _)) => begin,
+ None => return Ok(None),
+ };
+
+ // check if the parent is connected to the beginning of the range
+ if !chain::is_connected_to_block(storage, &parent, &begin)? {
+ return Ok(None);
+ }
+
+ // the block is connected to the begin-entry. If begin is the head entry
+ // => we need to append new block to the fork
+ if begin == self.head.valid_from {
+ return Ok(Some(ForkAppendResult::Append));
+ }
+
+ // the parent block belongs to this fork AND it is located after last finalized entry
+ // => we need to make a new fork
+ if best_finalized_entry_block.map(|f| begin.number > f).unwrap_or(true) {
+ return Ok(Some(ForkAppendResult::Fork(begin)));
+ }
+
+ Ok(None)
+ }
+
+ /// Destroy fork by deleting all unfinalized entries.
+ pub fn destroy, Tx: StorageTransaction>(
+ &self,
+ storage: &S,
+ tx: &mut Tx,
+ best_finalized_block: Option>,
+ ) -> ClientResult<()> {
+ let mut current = self.head.valid_from.clone();
+ loop {
+ // optionally: deletion stops when we found entry at finalized block
+ if let Some(best_finalized_block) = best_finalized_block {
+ if chain::is_finalized_block(storage, ¤t, best_finalized_block)? {
+ return Ok(());
+ }
+ }
+
+ // read pointer to previous entry
+ let entry = storage.require_entry(¤t)?;
+ tx.remove_storage_entry(¤t);
+
+ // deletion stops when there are no more entries in the list
+ current = match entry.prev_valid_from {
+ Some(prev_valid_from) => prev_valid_from,
+ None => return Ok(()),
+ };
+ }
+ }
+}
+
+/// Blockchain related functions.
+mod chain {
+ use runtime_primitives::traits::Header as HeaderT;
+ use super::*;
+
+ /// Is the block1 connected both ends of the range.
+ pub fn is_connected_to_range>(
+ storage: &S,
+ block: &ComplexBlockId,
+ range: (&ComplexBlockId, Option<&ComplexBlockId>),
+ ) -> ClientResult {
+ let (begin, end) = range;
+ Ok(is_connected_to_block(storage, block, begin)?
+ && match end {
+ Some(end) => is_connected_to_block(storage, block, end)?,
+ None => true,
+ })
+ }
+
+ /// Is the block1 directly connected (i.e. part of the same fork) to block2?
+ pub fn is_connected_to_block>(
+ storage: &S,
+ block1: &ComplexBlockId,
+ block2: &ComplexBlockId,
+ ) -> ClientResult {
+ let (begin, end) = if block1 > block2 { (block2, block1) } else { (block1, block2) };
+ let mut current = storage.read_header(&end.hash)?
+ .ok_or_else(|| ClientErrorKind::UnknownBlock(format!("{}", end.hash)))?;
+ while *current.number() > begin.number {
+ current = storage.read_header(current.parent_hash())?
+ .ok_or_else(|| ClientErrorKind::UnknownBlock(format!("{}", current.parent_hash())))?;
+ }
+
+ Ok(begin.hash == current.hash())
+ }
+
+ /// Returns true if the given block is finalized.
+ pub fn is_finalized_block>(
+ storage: &S,
+ block: &ComplexBlockId,
+ best_finalized_block: NumberFor,
+ ) -> ClientResult {
+ if block.number > best_finalized_block {
+ return Ok(false);
+ }
+
+ storage.read_id(block.number)
+ .map(|hash| hash.as_ref() == Some(&block.hash))
+ }
+}
+
+/// Read list cache forks at blocks IDs.
+fn read_forks>(
+ storage: &S,
+ meta: Metadata,
+) -> ClientResult<(Option>, Vec>)> {
+ let finalized = match meta.finalized {
+ Some(finalized) => Some(storage.require_entry(&finalized)?
+ .into_entry(finalized)),
+ None => None,
+ };
+
+ let unfinalized = meta.unfinalized.into_iter()
+ .map(|unfinalized| storage.require_entry(&unfinalized)
+ .map(|storage_entry| Fork {
+ best_block: None,
+ head: storage_entry.into_entry(unfinalized),
+ }))
+ .collect::>()?;
+
+ Ok((finalized, unfinalized))
+}
+
+#[cfg(test)]
+pub mod tests {
+ use runtime_primitives::testing::{Header, Block as RawBlock};
+ use runtime_primitives::traits::Header as HeaderT;
+ use cache::list_storage::tests::{DummyStorage, FaultyStorage, DummyTransaction};
+ use super::*;
+
+ type Block = RawBlock;
+
+ pub fn test_id(number: u64) -> ComplexBlockId {
+ ComplexBlockId::new(From::from(number), number)
+ }
+
+ fn correct_id(number: u64) -> ComplexBlockId {
+ ComplexBlockId::new(test_header(number).hash(), number)
+ }
+
+ fn fork_id(fork_nonce: u64, fork_from: u64, number: u64) -> ComplexBlockId {
+ ComplexBlockId::new(fork_header(fork_nonce, fork_from, number).hash(), number)
+ }
+
+ fn test_header(number: u64) -> Header {
+ Header {
+ parent_hash: if number == 0 { Default::default() } else { test_header(number - 1).hash() },
+ number,
+ state_root: Default::default(),
+ extrinsics_root: Default::default(),
+ digest: Default::default(),
+ }
+ }
+
+ fn fork_header(fork_nonce: u64, fork_from: u64, number: u64) -> Header {
+ if fork_from == number {
+ test_header(number)
+ } else {
+ Header {
+ parent_hash: fork_header(fork_nonce, fork_from, number - 1).hash(),
+ number,
+ state_root: (1 + fork_nonce).into(),
+ extrinsics_root: Default::default(),
+ digest: Default::default(),
+ }
+ }
+ }
+
+ #[test]
+ // Exercises value_at_block() across finalized entries, unfinalized forks and unknown forks.
+ fn list_value_at_block_works() {
+ // when block is earlier than best finalized block AND it is not finalized
+ // --- 50 ---
+ // ----------> [100]
+ assert_eq!(ListCache::<_, u64, _>::new(DummyStorage::new(), 1024, test_id(100))
+ .value_at_block(&test_id(50)).unwrap(), None);
+ // when block is earlier than best finalized block AND it is finalized AND value is empty
+ // [30] ---- 50 ---> [100]
+ assert_eq!(ListCache::new(
+ DummyStorage::new()
+ .with_meta(Some(test_id(100)), Vec::new())
+ .with_id(50, 50.into())
+ .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(100) })
+ .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: None }),
+ 1024, test_id(100)
+ ).value_at_block(&test_id(50)).unwrap(), None);
+ // when block is earlier than best finalized block AND it is finalized AND value is some
+ // [30] ---- 50 ---> [100]
+ assert_eq!(ListCache::new(
+ DummyStorage::new()
+ .with_meta(Some(test_id(100)), Vec::new())
+ .with_id(50, 50.into())
+ .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(100) })
+ .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: Some(30) }),
+ 1024, test_id(100)
+ ).value_at_block(&test_id(50)).unwrap(), Some(30));
+ // when block is the best finalized block AND value is some
+ // ---> [100]
+ assert_eq!(ListCache::new(
+ DummyStorage::new()
+ .with_meta(Some(test_id(100)), Vec::new())
+ .with_id(100, 100.into())
+ .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(100) })
+ .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: Some(30) }),
+ 1024, test_id(100)
+ ).value_at_block(&test_id(100)).unwrap(), Some(100));
+ // when block is parallel to the best finalized block
+ // ---- 100
+ // ---> [100]
+ assert_eq!(ListCache::new(
+ DummyStorage::new()
+ .with_meta(Some(test_id(100)), Vec::new())
+ .with_id(50, 50.into())
+ .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(100) })
+ .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: Some(30) }),
+ 1024, test_id(100)
+ ).value_at_block(&ComplexBlockId::new(2.into(), 100)).unwrap(), None);
+
+ // when block is later than last finalized block AND there are no forks AND finalized value is None
+ // ---> [100] --- 200
+ assert_eq!(ListCache::<_, u64, _>::new(
+ DummyStorage::new()
+ .with_meta(Some(test_id(100)), Vec::new())
+ .with_id(50, 50.into())
+ .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: None }),
+ 1024, test_id(100)
+ ).value_at_block(&test_id(200)).unwrap(), None);
+ // when block is later than last finalized block AND there are no forks AND finalized value is Some
+ // ---> [100] --- 200
+ assert_eq!(ListCache::new(
+ DummyStorage::new()
+ .with_meta(Some(test_id(100)), Vec::new())
+ .with_id(50, 50.into())
+ .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(100) }),
+ 1024, test_id(100)
+ ).value_at_block(&test_id(200)).unwrap(), Some(100));
+
+ // when block is later than last finalized block AND there are no matching forks
+ // AND block is connected to finalized block AND finalized value is None
+ // --- 3
+ // ---> [2] /---------> [4]
+ assert_eq!(ListCache::new(
+ DummyStorage::new()
+ .with_meta(Some(correct_id(2)), vec![correct_id(4)])
+ .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: None })
+ .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) })
+ .with_header(test_header(2))
+ .with_header(test_header(3))
+ .with_header(test_header(4))
+ .with_header(fork_header(0, 2, 3)),
+ 1024, test_id(2)
+ ).value_at_block(&fork_id(0, 2, 3)).unwrap(), None);
+ // when block is later than last finalized block AND there are no matching forks
+ // AND block is connected to finalized block AND finalized value is Some
+ // --- 3
+ // ---> [2] /---------> [4]
+ assert_eq!(ListCache::new(
+ DummyStorage::new()
+ .with_meta(Some(correct_id(2)), vec![correct_id(4)])
+ .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+ .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) })
+ .with_header(test_header(2))
+ .with_header(test_header(3))
+ .with_header(test_header(4))
+ .with_header(fork_header(0, 2, 3)),
+ 1024, test_id(2)
+ ).value_at_block(&fork_id(0, 2, 3)).unwrap(), Some(2));
+ // when block is later than last finalized block AND there are no matching forks
+ // AND block is not connected to finalized block
+ // --- 2 --- 3
+ // 1 /---> [2] ---------> [4]
+ assert_eq!(ListCache::new(
+ DummyStorage::new()
+ .with_meta(Some(correct_id(2)), vec![correct_id(4)])
+ .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+ .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) })
+ .with_header(test_header(1))
+ .with_header(test_header(2))
+ .with_header(test_header(3))
+ .with_header(test_header(4))
+ .with_header(fork_header(0, 1, 3))
+ .with_header(fork_header(0, 1, 2)),
+ 1024, test_id(2)
+ ).value_at_block(&fork_id(0, 1, 3)).unwrap(), None);
+
+ // when block is later than last finalized block AND it appends to unfinalized fork from the end
+ // AND unfinalized value is Some
+ // ---> [2] ---> [4] ---> 5
+ assert_eq!(ListCache::new(
+ DummyStorage::new()
+ .with_meta(Some(correct_id(2)), vec![correct_id(4)])
+ .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+ .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) })
+ .with_header(test_header(4))
+ .with_header(test_header(5)),
+ 1024, test_id(2)
+ ).value_at_block(&correct_id(5)).unwrap(), Some(4));
+ // when block is later than last finalized block AND it appends to unfinalized fork from the end
+ // AND unfinalized value is None
+ // ---> [2] ---> [4] ---> 5
+ assert_eq!(ListCache::new(
+ DummyStorage::new()
+ .with_meta(Some(correct_id(2)), vec![correct_id(4)])
+ .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+ .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: None })
+ .with_header(test_header(4))
+ .with_header(test_header(5)),
+ 1024, test_id(2)
+ ).value_at_block(&correct_id(5)).unwrap(), None);
+ // when block is later than last finalized block AND it fits to the middle of unfinalized fork
+ // AND unfinalized value is Some
+ // ---> [2] ---> [4] ---> 5 ---> [6]
+ assert_eq!(ListCache::new(
+ DummyStorage::new()
+ .with_meta(Some(correct_id(2)), vec![correct_id(6)])
+ .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+ .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) })
+ .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(4)), value: None })
+ .with_header(test_header(4))
+ .with_header(test_header(5))
+ .with_header(test_header(6)),
+ 1024, test_id(2)
+ ).value_at_block(&correct_id(5)).unwrap(), Some(4));
+ // when block is later than last finalized block AND it fits to the middle of unfinalized fork
+ // AND unfinalized value is None
+ // ---> [2] ---> [4] ---> 5 ---> [6]
+ assert_eq!(ListCache::new(
+ DummyStorage::new()
+ .with_meta(Some(correct_id(2)), vec![correct_id(6)])
+ .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+ .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: None })
+ .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(4)), value: Some(4) })
+ .with_header(test_header(4))
+ .with_header(test_header(5))
+ .with_header(test_header(6)),
+ 1024, test_id(2)
+ ).value_at_block(&correct_id(5)).unwrap(), None);
+ // when block is later than last finalized block AND it does not fit unfinalized fork
+ // AND it is connected to the finalized block AND finalized value is Some
+ // ---> [2] ----------> [4]
+ // \--- 3
+ assert_eq!(ListCache::new(
+ DummyStorage::new()
+ .with_meta(Some(correct_id(2)), vec![correct_id(4)])
+ .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) })
+ .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+ .with_header(test_header(2))
+ .with_header(test_header(3))
+ .with_header(test_header(4))
+ .with_header(fork_header(0, 2, 3)),
+ 1024, test_id(2)
+ ).value_at_block(&fork_id(0, 2, 3)).unwrap(), Some(2));
+ // when block is later than last finalized block AND it does not fit unfinalized fork
+ // AND it is connected to the finalized block AND finalized value is None
+ // ---> [2] ----------> [4]
+ // \--- 3
+ assert_eq!(ListCache::new(
+ DummyStorage::new()
+ .with_meta(Some(correct_id(2)), vec![correct_id(4)])
+ .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) })
+ .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: None })
+ .with_header(test_header(2))
+ .with_header(test_header(3))
+ .with_header(test_header(4))
+ .with_header(fork_header(0, 2, 3)),
+ 1024, test_id(2)
+ ).value_at_block(&fork_id(0, 2, 3)).unwrap(), None);
+ }
+
+ #[test]
+ // Exercises on_block_insert(): rejection of pre-finalized blocks, append / new-entry /
+ // new-fork commit operations, and finalizing inserts.
+ fn list_on_block_insert_works() {
+ // when trying to insert block < finalized number
+ assert!(ListCache::new(DummyStorage::new(), 1024, test_id(100))
+ .on_block_insert(&mut DummyTransaction::new(), test_id(49), test_id(50), Some(50), false).unwrap().is_none());
+ // when trying to insert block @ finalized number
+ assert!(ListCache::new(DummyStorage::new(), 1024, test_id(100))
+ .on_block_insert(&mut DummyTransaction::new(), test_id(99), test_id(100), Some(100), false).unwrap().is_none());
+
+ // when trying to insert non-final block AND it appends to the best block of unfinalized fork
+ // AND new value is the same as in the fork's best block
+ let mut cache = ListCache::new(
+ DummyStorage::new()
+ .with_meta(None, vec![test_id(4)])
+ .with_entry(test_id(4), StorageEntry { prev_valid_from: None, value: Some(4) }),
+ 1024, test_id(2)
+ );
+ cache.unfinalized[0].best_block = Some(test_id(4));
+ let mut tx = DummyTransaction::new();
+ assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(4), false).unwrap(),
+ Some(CommitOperation::AppendNewBlock(0, test_id(5))));
+ assert!(tx.inserted_entries().is_empty());
+ assert!(tx.removed_entries().is_empty());
+ assert!(tx.updated_meta().is_none());
+ // when trying to insert non-final block AND it appends to the best block of unfinalized fork
+ // AND new value differs from the one in the fork's best block
+ let mut tx = DummyTransaction::new();
+ assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(5), false).unwrap(),
+ Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: test_id(5), value: Some(5) })));
+ assert_eq!(*tx.inserted_entries(), vec![test_id(5).hash].into_iter().collect());
+ assert!(tx.removed_entries().is_empty());
+ assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: None, unfinalized: vec![test_id(5)] }));
+
+ // when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork
+ // AND new value is the same as in the fork's best block
+ let cache = ListCache::new(
+ DummyStorage::new()
+ .with_meta(None, vec![correct_id(4)])
+ .with_entry(correct_id(4), StorageEntry { prev_valid_from: None, value: Some(4) })
+ .with_header(test_header(4)),
+ 1024, test_id(2)
+ );
+ let mut tx = DummyTransaction::new();
+ assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(4), false).unwrap(),
+ Some(CommitOperation::AppendNewBlock(0, correct_id(5))));
+ assert!(tx.inserted_entries().is_empty());
+ assert!(tx.removed_entries().is_empty());
+ assert!(tx.updated_meta().is_none());
+ // when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork
+ // AND new value differs from the one in the fork's best block
+ let mut tx = DummyTransaction::new();
+ assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(5), false).unwrap(),
+ Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(5), value: Some(5) })));
+ assert_eq!(*tx.inserted_entries(), vec![correct_id(5).hash].into_iter().collect());
+ assert!(tx.removed_entries().is_empty());
+ assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: None, unfinalized: vec![correct_id(5)] }));
+
+ // when trying to insert non-final block AND it forks unfinalized fork
+ let cache = ListCache::new(
+ DummyStorage::new()
+ .with_meta(Some(correct_id(2)), vec![correct_id(4)])
+ .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) })
+ .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+ .with_header(test_header(2))
+ .with_header(test_header(3))
+ .with_header(test_header(4)),
+ 1024, correct_id(2)
+ );
+ let mut tx = DummyTransaction::new();
+ assert_eq!(cache.on_block_insert(&mut tx, correct_id(3), fork_id(0, 3, 4), Some(14), false).unwrap(),
+ Some(CommitOperation::AddNewFork(Entry { valid_from: fork_id(0, 3, 4), value: Some(14) })));
+ assert_eq!(*tx.inserted_entries(), vec![fork_id(0, 3, 4).hash].into_iter().collect());
+ assert!(tx.removed_entries().is_empty());
+ assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(4), fork_id(0, 3, 4)] }));
+
+ // when trying to insert non-final block AND there are no unfinalized forks
+ // AND value is the same as last finalized
+ let cache = ListCache::new(
+ DummyStorage::new()
+ .with_meta(Some(correct_id(2)), vec![])
+ .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }),
+ 1024, correct_id(2)
+ );
+ let mut tx = DummyTransaction::new();
+ assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), false).unwrap(), None);
+ assert!(tx.inserted_entries().is_empty());
+ assert!(tx.removed_entries().is_empty());
+ assert!(tx.updated_meta().is_none());
+ // when trying to insert non-final block AND there are no unfinalized forks
+ // AND value differs from last finalized
+ let cache = ListCache::new(
+ DummyStorage::new()
+ .with_meta(Some(correct_id(2)), vec![])
+ .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }),
+ 1024, correct_id(2)
+ );
+ let mut tx = DummyTransaction::new();
+ assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), false).unwrap(),
+ Some(CommitOperation::AddNewFork(Entry { valid_from: correct_id(3), value: Some(3) })));
+ assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect());
+ assert!(tx.removed_entries().is_empty());
+ assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(3)] }));
+
+ // when inserting finalized entry AND there are no previous finalized entries
+ let cache = ListCache::new(DummyStorage::new(), 1024, correct_id(2));
+ let mut tx = DummyTransaction::new();
+ assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), true).unwrap(),
+ Some(CommitOperation::BlockFinalized(correct_id(3), Some(Entry { valid_from: correct_id(3), value: Some(3) }), Default::default())));
+ assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect());
+ assert!(tx.removed_entries().is_empty());
+ assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(3)), unfinalized: vec![] }));
+ // when inserting finalized entry AND value is the same as in previous finalized
+ let cache = ListCache::new(
+ DummyStorage::new()
+ .with_meta(Some(correct_id(2)), vec![])
+ .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }),
+ 1024, correct_id(2)
+ );
+ let mut tx = DummyTransaction::new();
+ assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), true).unwrap(),
+ Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default())));
+ assert!(tx.inserted_entries().is_empty());
+ assert!(tx.removed_entries().is_empty());
+ assert!(tx.updated_meta().is_none());
+ // when inserting finalized entry AND value differs from previous finalized
+ let mut tx = DummyTransaction::new();
+ assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), true).unwrap(),
+ Some(CommitOperation::BlockFinalized(correct_id(3), Some(Entry { valid_from: correct_id(3), value: Some(3) }), Default::default())));
+ assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect());
+ assert!(tx.removed_entries().is_empty());
+ assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(3)), unfinalized: vec![] }));
+
+ // inserting finalized entry removes abandoned fork EVEN if new entry is not inserted
+ let cache = ListCache::new(
+ DummyStorage::new()
+ .with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)])
+ .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+ .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: Some(13) }),
+ 1024, correct_id(2)
+ );
+ let mut tx = DummyTransaction::new();
+ assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), true).unwrap(),
+ Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect())));
+ }
+
+ #[test]
+ // Exercises on_block_finalize(): no-op finalization, entry finalization and abandoned-fork removal.
+ fn list_on_block_finalized_works() {
+ // finalization does not finalize the entry if it does not exist
+ let cache = ListCache::new(
+ DummyStorage::new()
+ .with_meta(Some(correct_id(2)), vec![correct_id(5)])
+ .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+ .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) }),
+ 1024, correct_id(2)
+ );
+ let mut tx = DummyTransaction::new();
+ assert_eq!(cache.on_block_finalize(&mut tx, correct_id(2), correct_id(3)).unwrap(),
+ Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default())));
+ assert!(tx.inserted_entries().is_empty());
+ assert!(tx.removed_entries().is_empty());
+ assert!(tx.updated_meta().is_none());
+ // finalization finalizes entry
+ let cache = ListCache::new(
+ DummyStorage::new()
+ .with_meta(Some(correct_id(2)), vec![correct_id(5)])
+ .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+ .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) }),
+ 1024, correct_id(4)
+ );
+ let mut tx = DummyTransaction::new();
+ assert_eq!(cache.on_block_finalize(&mut tx, correct_id(4), correct_id(5)).unwrap(),
+ Some(CommitOperation::BlockFinalized(correct_id(5), Some(Entry { valid_from: correct_id(5), value: Some(5) }), vec![0].into_iter().collect())));
+ assert!(tx.inserted_entries().is_empty());
+ assert!(tx.removed_entries().is_empty());
+ assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(5)), unfinalized: vec![] }));
+ // finalization removes abandoned forks
+ let cache = ListCache::new(
+ DummyStorage::new()
+ .with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)])
+ .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+ .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: Some(13) }),
+ 1024, correct_id(2)
+ );
+ let mut tx = DummyTransaction::new();
+ assert_eq!(cache.on_block_finalize(&mut tx, correct_id(2), correct_id(3)).unwrap(),
+ Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect())));
+ }
+
+ #[test]
+ // Verifies that on_transaction_commit applies each CommitOperation to in-memory state.
+ fn list_transaction_commit_works() {
+ let mut cache = ListCache::new(
+ DummyStorage::new()
+ .with_meta(Some(correct_id(2)), vec![correct_id(5), correct_id(6)])
+ .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+ .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) })
+ .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(5)), value: Some(6) }),
+ 1024, correct_id(2)
+ );
+
+ // when new block is appended to unfinalized fork
+ cache.on_transaction_commit(CommitOperation::AppendNewBlock(0, correct_id(6)));
+ assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(6)));
+ // when new entry is appended to unfinalized fork
+ cache.on_transaction_commit(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(7), value: Some(7) }));
+ assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(7)));
+ assert_eq!(cache.unfinalized[0].head, Entry { valid_from: correct_id(7), value: Some(7) });
+ // when new fork is added
+ cache.on_transaction_commit(CommitOperation::AddNewFork(Entry { valid_from: correct_id(10), value: Some(10) }));
+ assert_eq!(cache.unfinalized[2].best_block, Some(correct_id(10)));
+ assert_eq!(cache.unfinalized[2].head, Entry { valid_from: correct_id(10), value: Some(10) });
+ // when block is finalized + entry is finalized + unfinalized forks are deleted
+ cache.on_transaction_commit(CommitOperation::BlockFinalized(correct_id(20), Some(Entry { valid_from: correct_id(20), value: Some(20) }), vec![0, 1, 2].into_iter().collect()));
+ assert_eq!(cache.best_finalized_block, correct_id(20));
+ assert_eq!(cache.best_finalized_entry, Some(Entry { valid_from: correct_id(20), value: Some(20) }));
+ assert!(cache.unfinalized.is_empty());
+ }
+
+ #[test]
+ // Verifies find_unfinalized_fork() matching order and rejection of unrelated forks.
+ fn list_find_unfinalized_fork_works() {
+ // ----------> [3]
+ // --- [2] ---------> 4 ---> [5]
+ assert_eq!(ListCache::new(
+ DummyStorage::new()
+ .with_meta(None, vec![fork_id(0, 1, 3), correct_id(5)])
+ .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: Some(13) })
+ .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) })
+ .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: None })
+ .with_header(test_header(2))
+ .with_header(test_header(3))
+ .with_header(test_header(4))
+ .with_header(test_header(5)),
+ 1024, correct_id(0)
+ ).find_unfinalized_fork(&correct_id(4)).unwrap().unwrap().head.valid_from, correct_id(5));
+ // --- [2] ---------------> [5]
+ // ----------> [3] ---> 4
+ assert_eq!(ListCache::new(
+ DummyStorage::new()
+ .with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)])
+ .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: Some(13) })
+ .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) })
+ .with_entry(correct_id(2), StorageEntry { prev_valid_from: Some(correct_id(1)), value: Some(2) })
+ .with_header(test_header(2))
+ .with_header(test_header(3))
+ .with_header(test_header(4))
+ .with_header(test_header(5))
+ .with_header(fork_header(0, 1, 2))
+ .with_header(fork_header(0, 1, 3))
+ .with_header(fork_header(0, 1, 4)),
+ 1024, correct_id(0)
+ ).find_unfinalized_fork(&fork_id(0, 1, 4)).unwrap().unwrap().head.valid_from, fork_id(0, 1, 3));
+ // --- [2] ---------------> [5]
+ // ----------> [3]
+ // -----------------> 4
+ assert!(ListCache::new(
+ DummyStorage::new()
+ .with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)])
+ .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: Some(13) })
+ .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) })
+ .with_entry(correct_id(2), StorageEntry { prev_valid_from: Some(correct_id(1)), value: Some(2) })
+ .with_header(test_header(2))
+ .with_header(test_header(3))
+ .with_header(test_header(4))
+ .with_header(test_header(5))
+ .with_header(fork_header(0, 1, 3))
+ .with_header(fork_header(0, 1, 4))
+ .with_header(fork_header(1, 1, 2))
+ .with_header(fork_header(1, 1, 3))
+ .with_header(fork_header(1, 1, 4)),
+ 1024, correct_id(0)
+ ).find_unfinalized_fork(&fork_id(1, 1, 4)).unwrap().is_none());
+ }
+
+ #[test]
+ // Verifies Fork::matches() for blocks outside the range, disconnected blocks and connected blocks.
+ fn fork_matches_works() {
+ // when block is not within list range
+ let storage = DummyStorage::new()
+ .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) })
+ .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: Some(50) });
+ assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } }
+ .matches(&storage, &test_id(20)).unwrap(), false);
+ // when block is not connected to the begin block
+ let storage = DummyStorage::new()
+ .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) })
+ .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: Some(200) })
+ .with_header(test_header(5))
+ .with_header(test_header(4))
+ .with_header(test_header(3))
+ .with_header(fork_header(0, 2, 4))
+ .with_header(fork_header(0, 2, 3));
+ assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: Some(100) } }
+ .matches(&storage, &fork_id(0, 2, 4)).unwrap(), false);
+ // when block is not connected to the end block
+ let storage = DummyStorage::new()
+ .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) })
+ .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: Some(200) })
+ .with_header(test_header(5))
+ .with_header(test_header(4))
+ .with_header(test_header(3))
+ .with_header(fork_header(0, 3, 4));
+ assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: Some(100) } }
+ .matches(&storage, &fork_id(0, 3, 4)).unwrap(), false);
+ // when block is connected to the begin block AND end is open
+ let storage = DummyStorage::new()
+ .with_entry(correct_id(5), StorageEntry { prev_valid_from: None, value: Some(100) })
+ .with_header(test_header(5))
+ .with_header(test_header(6));
+ assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: Some(100) } }
+ .matches(&storage, &correct_id(6)).unwrap(), true);
+ // when block is connected to the begin block AND to the end block
+ let storage = DummyStorage::new()
+ .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) })
+ .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: Some(200) })
+ .with_header(test_header(5))
+ .with_header(test_header(4))
+ .with_header(test_header(3));
+ assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: Some(100) } }
+ .matches(&storage, &correct_id(4)).unwrap(), true);
+ }
+
+ #[test]
+ fn fork_try_append_works() {
+ // try_append only succeeds when the fork's recorded best block equals the appended block
+ // when best block is unknown
+ assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } }
+ .try_append(&test_id(100)), false);
+ // when best block is known but different
+ assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } }
+ .try_append(&test_id(101)), false);
+ // when best block is known and the same
+ assert_eq!(Fork::<_, u64> { best_block: Some(test_id(100)), head: Entry { valid_from: test_id(100), value: None } }
+ .try_append(&test_id(100)), true);
+ }
+
+ #[test]
+ fn fork_try_append_or_fork_works() {
+ // exercises all three outcomes of try_append_or_fork: None, Append and Fork
+ // when there's no entry before parent
+ let storage = DummyStorage::new()
+ .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) })
+ .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: Some(50) });
+ assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } }
+ .try_append_or_fork(&storage, &test_id(30), None).unwrap(), None);
+ // when parent does not belong to the fork
+ let storage = DummyStorage::new()
+ .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) })
+ .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: Some(200) })
+ .with_header(test_header(5))
+ .with_header(test_header(4))
+ .with_header(test_header(3))
+ .with_header(fork_header(0, 2, 4))
+ .with_header(fork_header(0, 2, 3));
+ assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: Some(100) } }
+ .try_append_or_fork(&storage, &fork_id(0, 2, 4), None).unwrap(), None);
+ // when the entry before parent is the head entry
+ let storage = DummyStorage::new()
+ .with_entry(ComplexBlockId::new(test_header(5).hash(), 5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) })
+ .with_header(test_header(6))
+ .with_header(test_header(5));
+ assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: Some(100) } }
+ .try_append_or_fork(&storage, &correct_id(6), None).unwrap(), Some(ForkAppendResult::Append));
+ // when the parent is located after last finalized entry
+ let storage = DummyStorage::new()
+ .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) })
+ .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: Some(200) })
+ .with_header(test_header(6))
+ .with_header(test_header(5))
+ .with_header(test_header(4))
+ .with_header(test_header(3))
+ .with_header(fork_header(0, 4, 5));
+ assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(6), value: Some(100) } }
+ .try_append_or_fork(&storage, &fork_id(0, 4, 5), None).unwrap(), Some(ForkAppendResult::Fork(ComplexBlockId::new(test_header(3).hash(), 3))));
+ // when the parent is located before last finalized entry
+ let storage = DummyStorage::new()
+ .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) })
+ .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: Some(200) })
+ .with_header(test_header(6))
+ .with_header(test_header(5))
+ .with_header(test_header(4))
+ .with_header(test_header(3))
+ .with_header(fork_header(0, 4, 5));
+ assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(6), value: Some(100) } }
+ .try_append_or_fork(&storage, &fork_id(0, 4, 5), Some(3)).unwrap(), None);
+ }
+
+ #[test]
+ fn fork_destroy_works() {
+ // destroy walks the fork backwards, removing entries until the finalized block
+ // (or the beginning of the fork) is reached
+ // when we reached finalized entry without iterations
+ let storage = DummyStorage::new().with_id(100, 100.into());
+ let mut tx = DummyTransaction::new();
+ Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } }
+ .destroy(&storage, &mut tx, Some(200)).unwrap();
+ assert!(tx.removed_entries().is_empty());
+ // when we reach finalized entry with iterations
+ let storage = DummyStorage::new()
+ .with_id(10, 10.into())
+ .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) })
+ .with_entry(test_id(50), StorageEntry { prev_valid_from: Some(test_id(20)), value: Some(50) })
+ .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(10)), value: Some(20) })
+ .with_entry(test_id(10), StorageEntry { prev_valid_from: Some(test_id(5)), value: Some(10) })
+ .with_entry(test_id(5), StorageEntry { prev_valid_from: Some(test_id(3)), value: Some(5) })
+ .with_entry(test_id(3), StorageEntry { prev_valid_from: None, value: None });
+ let mut tx = DummyTransaction::new();
+ Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } }
+ .destroy(&storage, &mut tx, Some(200)).unwrap();
+ // entries above the finalized block (10) are removed; 10 and below are kept
+ assert_eq!(*tx.removed_entries(),
+ vec![test_id(100).hash, test_id(50).hash, test_id(20).hash].into_iter().collect());
+ // when we reach beginning of fork before finalized block
+ let storage = DummyStorage::new()
+ .with_id(10, 10.into())
+ .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) })
+ .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: Some(50) });
+ let mut tx = DummyTransaction::new();
+ Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } }
+ .destroy(&storage, &mut tx, Some(200)).unwrap();
+ assert_eq!(*tx.removed_entries(),
+ vec![test_id(100).hash, test_id(50).hash].into_iter().collect());
+ }
+
+ #[test]
+ fn is_connected_to_block_fails() {
+ // storage/header access failures must surface as errors, not as `false`
+ // when storage returns error
+ assert!(chain::is_connected_to_block::<_, u64, _>(&FaultyStorage, &test_id(1), &test_id(100)).is_err());
+ // when there's no header in the storage
+ assert!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new(), &test_id(1), &test_id(100)).is_err());
+ }
+
+ #[test]
+ fn is_connected_to_block_works() {
+ // checks connectivity with zero iterations and with both ascending and
+ // descending traversal directions, for connected and unconnected blocks
+ // when without iterations we end up with different block
+ assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new()
+ .with_header(test_header(1)),
+ &test_id(1), &correct_id(1)).unwrap(), false);
+ // when with ASC iterations we end up with different block
+ assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new()
+ .with_header(test_header(0))
+ .with_header(test_header(1))
+ .with_header(test_header(2)),
+ &test_id(0), &correct_id(2)).unwrap(), false);
+ // when with DESC iterations we end up with different block
+ assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new()
+ .with_header(test_header(0))
+ .with_header(test_header(1))
+ .with_header(test_header(2)),
+ &correct_id(2), &test_id(0)).unwrap(), false);
+ // when without iterations we end up with the same block
+ assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new()
+ .with_header(test_header(1)),
+ &correct_id(1), &correct_id(1)).unwrap(), true);
+ // when with ASC iterations we end up with the same block
+ assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new()
+ .with_header(test_header(0))
+ .with_header(test_header(1))
+ .with_header(test_header(2)),
+ &correct_id(0), &correct_id(2)).unwrap(), true);
+ // when with DESC iterations we end up with the same block
+ assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new()
+ .with_header(test_header(0))
+ .with_header(test_header(1))
+ .with_header(test_header(2)),
+ &correct_id(2), &correct_id(0)).unwrap(), true);
+ }
+
+ #[test]
+ fn is_finalized_block_fails() {
+ // storage failures must surface as errors, not as `false`
+ // when storage returns error
+ assert!(chain::is_finalized_block::<_, u64, _>(&FaultyStorage, &test_id(1), 100).is_err());
+
+ }
+
+ #[test]
+ fn is_finalized_block_works() {
+ // a block is finalized iff its number is <= last finalized number AND the
+ // canonical hash stored for that number matches the block's hash
+ // when number of block is larger than last finalized block
+ assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(100), 1).unwrap(), false);
+ // when there's no hash for this block number in the database
+ assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(1), 100).unwrap(), false);
+ // when there's different hash for this block number in the database
+ assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new()
+ .with_id(1, From::from(2)), &test_id(1), 100).unwrap(), false);
+ // when there's the same hash for this block number in the database
+ assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new()
+ .with_id(1, From::from(1)), &test_id(1), 100).unwrap(), true);
+ }
+
+ #[test]
+ fn read_forks_fails() {
+ // every entry referenced by the metadata must exist; missing entries and
+ // storage errors are both hard failures
+ // when storage returns error during finalized entry read
+ assert!(read_forks::(&FaultyStorage, Metadata {
+ finalized: Some(test_id(1)),
+ unfinalized: vec![],
+ }).is_err());
+ // when storage returns error during unfinalized entry read
+ assert!(read_forks::(&FaultyStorage, Metadata {
+ finalized: None,
+ unfinalized: vec![test_id(1)],
+ }).is_err());
+ // when finalized entry is not found
+ assert!(read_forks::(&DummyStorage::new(), Metadata {
+ finalized: Some(test_id(1)),
+ unfinalized: vec![],
+ }).is_err());
+ // when unfinalized entry is not found
+ assert!(read_forks::(&DummyStorage::new(), Metadata {
+ finalized: None,
+ unfinalized: vec![test_id(1)],
+ }).is_err());
+ }
+
+ #[test]
+ fn read_forks_works() {
+ // the finalized entry is read into an Entry, each unfinalized entry becomes
+ // the head of a Fork (with best_block unset after a fresh read)
+ let storage = DummyStorage::new()
+ .with_entry(test_id(10), StorageEntry { prev_valid_from: Some(test_id(1)), value: Some(11) })
+ .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(2)), value: None })
+ .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: Some(33) });
+ let expected = (
+ Some(Entry { valid_from: test_id(10), value: Some(11) }),
+ vec![
+ Fork { best_block: None, head: Entry { valid_from: test_id(20), value: None } },
+ Fork { best_block: None, head: Entry { valid_from: test_id(30), value: Some(33) } },
+ ],
+ );
+
+ assert_eq!(expected, read_forks(&storage, Metadata {
+ finalized: Some(test_id(10)),
+ unfinalized: vec![test_id(20), test_id(30)],
+ }).unwrap());
+ }
+
+ #[test]
+ fn ancient_entries_are_pruned() {
+ // cache constructed with constant 10 (presumably the pruning depth) and
+ // best finalized block test_id(9) — TODO confirm against ListCache::new signature
+ let cache = ListCache::new(DummyStorage::new()
+ .with_id(10, 10.into())
+ .with_id(20, 20.into())
+ .with_id(30, 30.into())
+ .with_entry(test_id(10), StorageEntry { prev_valid_from: None, value: Some(10) })
+ .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(10)), value: Some(20) })
+ .with_entry(test_id(30), StorageEntry { prev_valid_from: Some(test_id(20)), value: Some(30) }),
+ 10, test_id(9));
+ let mut tx = DummyTransaction::new();
+
+ // when finalizing entry #10: no entries pruned
+ cache.prune_finalized_entries(&mut tx, &test_id(10));
+ assert!(tx.removed_entries().is_empty());
+ assert!(tx.inserted_entries().is_empty());
+ // when finalizing entry #19: no entries pruned
+ cache.prune_finalized_entries(&mut tx, &test_id(19));
+ assert!(tx.removed_entries().is_empty());
+ assert!(tx.inserted_entries().is_empty());
+ // when finalizing entry #20: no entries pruned
+ cache.prune_finalized_entries(&mut tx, &test_id(20));
+ assert!(tx.removed_entries().is_empty());
+ assert!(tx.inserted_entries().is_empty());
+ // when finalizing entry #30: entry 10 pruned + entry 20 is truncated
+ cache.prune_finalized_entries(&mut tx, &test_id(30));
+ assert_eq!(*tx.removed_entries(), vec![test_id(10).hash].into_iter().collect());
+ assert_eq!(*tx.inserted_entries(), vec![test_id(20).hash].into_iter().collect());
+ }
+}
diff --git a/substrate/core/client/db/src/cache/list_entry.rs b/substrate/core/client/db/src/cache/list_entry.rs
new file mode 100644
index 0000000000..bf29885fcf
--- /dev/null
+++ b/substrate/core/client/db/src/cache/list_entry.rs
@@ -0,0 +1,164 @@
+// Copyright 2017 Parity Technologies (UK) Ltd.
+// This file is part of Substrate.
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+
+//! List-cache storage entries.
+
+use client::error::Result as ClientResult;
+use runtime_primitives::traits::{Block as BlockT, NumberFor};
+
+use cache::{CacheItemT, ComplexBlockId};
+use cache::list_storage::{Storage};
+
+/// Single list-based cache entry.
+#[derive(Debug)]
+#[cfg_attr(test, derive(PartialEq))]
+pub struct Entry {
+ /// first block, when this value became actual
+ pub valid_from: ComplexBlockId,
+ /// None means that we do not know the value starting from `valid_from` block
+ pub value: Option,
+}
+
+/// Internal representation of the single list-based cache entry. The entry points to the
+/// previous entry in the cache, allowing us to traverse back in time in list-style.
+/// This is the form serialized to the database (hence the Encode/Decode derives);
+/// `valid_from` is not stored here — it is supplied when converting back via `into_entry`.
+#[derive(Debug, Encode, Decode)]
+#[cfg_attr(test, derive(Clone, PartialEq))]
+pub struct StorageEntry {
+ /// None if valid from the beginning
+ pub prev_valid_from: Option>,
+ /// None means that we do not know the value starting from `valid_from` block
+ pub value: Option,
+}
+
+impl Entry {
+ /// Returns Some if the entry should be updated with the new value.
+ /// Equal values need no new entry; a differing value yields a StorageEntry
+ /// whose `prev_valid_from` links back to this entry.
+ pub fn try_update(&self, value: Option) -> Option> {
+ match self.value == value {
+ true => None,
+ false => Some(StorageEntry {
+ prev_valid_from: Some(self.valid_from.clone()),
+ value,
+ }),
+ }
+ }
+
+ /// Wrapper that calls search_before to get range where the given block fits.
+ pub fn search_best_range_before>(
+ &self,
+ storage: &S,
+ block: NumberFor,
+ ) -> ClientResult, Option>)>> {
+ Ok(self.search_best_before(storage, block, false)?
+ .map(|(entry, next)| (entry.valid_from, next)))
+ }
+
+ /// Searches the list, ending with THIS entry for the best entry preceding (or at)
+ /// given block number.
+ /// If the entry is found, result is the entry and the block id of next entry (if exists).
+ /// NOTE that this function does not check that the passed block is actually linked to
+ /// the blocks it found.
+ pub fn search_best_before>(
+ &self,
+ storage: &S,
+ block: NumberFor,
+ require_value: bool,
+ ) -> ClientResult, Option>)>> {
+ // we're looking for the best value
+ let mut next = None;
+ let mut current = self.valid_from.clone();
+ if block >= self.valid_from.number {
+ // THIS entry is already the best one; only clone the value when requested
+ let value = if require_value { self.value.clone() } else { None };
+ return Ok(Some((Entry { valid_from: current, value }, next)));
+ }
+
+ // else - travel back in time
+ loop {
+ let entry = storage.require_entry(&current)?;
+ if block >= current.number {
+ return Ok(Some((Entry { valid_from: current, value: entry.value }, next)));
+ }
+
+ next = Some(current);
+ current = match entry.prev_valid_from {
+ Some(prev_valid_from) => prev_valid_from,
+ // reached the beginning of the list without finding a match
+ None => return Ok(None),
+ };
+ }
+ }
+}
+
+impl StorageEntry {
+ /// Converts storage entry into an entry, valid from given block.
+ /// The caller supplies `valid_from` because the storage representation does not
+ /// record it (the block id is the key the entry was read under).
+ pub fn into_entry(self, valid_from: ComplexBlockId) -> Entry {
+ Entry {
+ valid_from,
+ value: self.value,
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use cache::list_cache::tests::test_id;
+ use cache::list_storage::tests::{DummyStorage, FaultyStorage};
+ use super::*;
+
+ #[test]
+ fn entry_try_update_works() {
+ // try_update returns None for equal values, Some(StorageEntry) otherwise
+ // when trying to update with the same None value
+ assert_eq!(Entry::<_, u64> { valid_from: test_id(1), value: None }.try_update(None), None);
+ // when trying to update with the same Some value
+ assert_eq!(Entry { valid_from: test_id(1), value: Some(1) }.try_update(Some(1)), None);
+ // when trying to update with different None value
+ assert_eq!(Entry { valid_from: test_id(1), value: Some(1) }.try_update(None),
+ Some(StorageEntry { prev_valid_from: Some(test_id(1)), value: None }));
+ // when trying to update with different Some value
+ assert_eq!(Entry { valid_from: test_id(1), value: Some(1) }.try_update(Some(2)),
+ Some(StorageEntry { prev_valid_from: Some(test_id(1)), value: Some(2) }));
+ }
+
+ #[test]
+ fn entry_search_best_before_fails() {
+ // when storage returns error
+ assert!(Entry::<_, u64> { valid_from: test_id(100), value: None }.search_best_before(&FaultyStorage, 50, false).is_err());
+ }
+
+ #[test]
+ fn entry_search_best_before_works() {
+ // the head entry itself matches without storage reads; the value is only
+ // returned when require_value is true
+ // when block is better than our best block AND value is not required
+ assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: Some(100) }
+ .search_best_before(&DummyStorage::new(), 150, false).unwrap(),
+ Some((Entry::<_, u64> { valid_from: test_id(100), value: None }, None)));
+ // when block is better than our best block AND value is required
+ assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: Some(100) }
+ .search_best_before(&DummyStorage::new(), 150, true).unwrap(),
+ Some((Entry::<_, u64> { valid_from: test_id(100), value: Some(100) }, None)));
+ // when block is found between two entries
+ assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: Some(100) }
+ .search_best_before(&DummyStorage::new()
+ .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) })
+ .with_entry(test_id(50), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(50) }),
+ 75, false).unwrap(),
+ Some((Entry::<_, u64> { valid_from: test_id(50), value: Some(50) }, Some(test_id(100)))));
+ // when block is not found
+ assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: Some(100) }
+ .search_best_before(&DummyStorage::new()
+ .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) })
+ .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: Some(50) }),
+ 30, true).unwrap(),
+ None);
+ }
+}
diff --git a/substrate/core/client/db/src/cache/list_storage.rs b/substrate/core/client/db/src/cache/list_storage.rs
new file mode 100644
index 0000000000..b67fefb301
--- /dev/null
+++ b/substrate/core/client/db/src/cache/list_storage.rs
@@ -0,0 +1,378 @@
+// Copyright 2017 Parity Technologies (UK) Ltd.
+// This file is part of Substrate.
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+
+//! List-cache storage definition and implementation.
+
+use std::sync::Arc;
+
+use kvdb::{KeyValueDB, DBTransaction};
+
+use client::error::{Error as ClientError, ErrorKind as ClientErrorKind, Result as ClientResult};
+use codec::{Encode, Decode};
+use runtime_primitives::generic::BlockId;
+use runtime_primitives::traits::{Block as BlockT, NumberFor};
+use utils::{self, db_err, meta_keys};
+
+use cache::{CacheItemT, ComplexBlockId};
+use cache::list_cache::{CommitOperation, Fork};
+use cache::list_entry::{Entry, StorageEntry};
+
+/// Single list-cache metadata.
+/// Persisted as the tuple `(finalized, unfinalized)` — see `meta::encode`/`meta::decode`.
+#[derive(Debug)]
+#[cfg_attr(test, derive(Clone, PartialEq))]
+pub struct Metadata {
+ /// Block at which best finalized entry is stored.
+ pub finalized: Option>,
+ /// A set of blocks at which best unfinalized entries are stored.
+ pub unfinalized: Vec>,
+}
+
+/// Readonly list-cache storage trait.
+pub trait Storage {
+ /// Reads hash of the block at given number.
+ fn read_id(&self, at: NumberFor) -> ClientResult>;
+
+ /// Reads header of the block with given hash.
+ fn read_header(&self, at: &Block::Hash) -> ClientResult >;
+
+ /// Reads cache metadata: best finalized entry (if some) and the list.
+ fn read_meta(&self) -> ClientResult>;
+
+ /// Reads cache entry from the storage.
+ fn read_entry(&self, at: &ComplexBlockId) -> ClientResult>>;
+
+ /// Reads referenced (and thus existing) cache entry from the storage.
+ /// Default implementation maps a missing entry onto a Backend error, since a
+ /// referenced entry is expected to exist.
+ fn require_entry(&self, at: &ComplexBlockId) -> ClientResult> {
+ self.read_entry(at)
+ .and_then(|entry| entry
+ .ok_or_else(|| ClientError::from(
+ ClientErrorKind::Backend(format!("Referenced cache entry at {:?} is not found", at)))))
+ }
+}
+
+/// List-cache storage transaction.
+/// Mutations are buffered into a transaction; nothing is applied until the caller
+/// commits the underlying database transaction.
+pub trait StorageTransaction {
+ /// Insert storage entry at given block.
+ fn insert_storage_entry(&mut self, at: &ComplexBlockId, entry: &StorageEntry);
+
+ /// Delete storage entry at given block.
+ fn remove_storage_entry(&mut self, at: &ComplexBlockId);
+
+ /// Update metadata of the cache.
+ fn update_meta(
+ &mut self,
+ best_finalized_entry: Option<&Entry>,
+ unfinalized: &[Fork],
+ operation: &CommitOperation,
+ );
+}
+
+/// A set of columns used by the DbStorage.
+#[derive(Debug)]
+pub struct DbColumns {
+ /// Column holding cache metadata.
+ pub meta: Option,
+ /// Column holding the mapping of { block number => block hash } for blocks of the best chain.
+ pub hash_lookup: Option,
+ /// Column holding the mapping of { block hash => block header }.
+ pub header: Option,
+ /// Column holding cache entries.
+ pub cache: Option,
+}
+
+/// Database-backed list cache storage.
+pub struct DbStorage {
+ /// Cache name; used as the key prefix for cache entries (see `encode_block_id`).
+ name: Vec,
+ /// Precomputed metadata key: CACHE_META_PREFIX ++ name (see `meta::key`).
+ meta_key: Vec,
+ /// Underlying key-value database.
+ db: Arc,
+ /// Columns this cache reads from / writes to.
+ columns: DbColumns,
+}
+
+impl DbStorage {
+ /// Create new database-backed list cache storage.
+ /// The metadata key is computed once here and reused for all meta reads/writes.
+ pub fn new(name: Vec, db: Arc, columns: DbColumns) -> Self {
+ let meta_key = meta::key(&name);
+ DbStorage { name, meta_key, db, columns }
+ }
+
+ /// Get reference to the database.
+ pub fn db(&self) -> &Arc { &self.db }
+
+ /// Get reference to the database columns.
+ pub fn columns(&self) -> &DbColumns { &self.columns }
+
+ /// Encode block id for storing as a key in cache column.
+ /// We append prefix to the actual encoding to allow several caches
+ /// store entries in the same column.
+ pub fn encode_block_id(&self, block: &ComplexBlockId) -> Vec {
+ let mut encoded = self.name.clone();
+ encoded.extend(block.hash.as_ref());
+ encoded
+ }
+}
+
+impl Storage for DbStorage {
+ fn read_id(&self, at: NumberFor) -> ClientResult> {
+ utils::read_id::(&*self.db, self.columns.hash_lookup, BlockId::Number(at))
+ }
+
+ fn read_header(&self, at: &Block::Hash) -> ClientResult> {
+ utils::read_header::(&*self.db, self.columns.hash_lookup, self.columns.header, BlockId::Hash(*at))
+ }
+
+ fn read_meta(&self) -> ClientResult> {
+ // missing metadata key means a freshly-created cache => empty metadata
+ self.db.get(self.columns.meta, &self.meta_key)
+ .map_err(db_err)
+ .and_then(|meta| match meta {
+ Some(meta) => meta::decode(&*meta),
+ None => Ok(Metadata {
+ finalized: None,
+ unfinalized: Vec::new(),
+ }),
+ })
+ }
+
+ fn read_entry(&self, at: &ComplexBlockId) -> ClientResult>> {
+ // a present-but-undecodable entry is a Backend error; an absent entry is Ok(None)
+ self.db.get(self.columns.cache, &self.encode_block_id(at))
+ .map_err(db_err)
+ .and_then(|entry| match entry {
+ Some(entry) => StorageEntry::::decode(&mut &entry[..])
+ .ok_or_else(|| ClientErrorKind::Backend("Failed to decode cache entry".into()).into())
+ .map(Some),
+ None => Ok(None),
+ })
+ }
+}
+
+/// Database-backed list cache storage transaction.
+pub struct DbStorageTransaction<'a> {
+ /// Storage the transaction operates on (provides columns and key encoding).
+ storage: &'a DbStorage,
+ /// Underlying database transaction the mutations are buffered into.
+ tx: &'a mut DBTransaction,
+}
+
+impl<'a> DbStorageTransaction<'a> {
+ /// Create new database transaction wrapping the given DBTransaction.
+ pub fn new(storage: &'a DbStorage, tx: &'a mut DBTransaction) -> Self {
+ DbStorageTransaction { storage, tx }
+ }
+}
+
+impl<'a, Block: BlockT, T: CacheItemT> StorageTransaction for DbStorageTransaction<'a> {
+ fn insert_storage_entry(&mut self, at: &ComplexBlockId, entry: &StorageEntry) {
+ self.tx.put(self.storage.columns.cache, &self.storage.encode_block_id(at), &entry.encode());
+ }
+
+ fn remove_storage_entry(&mut self, at: &ComplexBlockId) {
+ self.tx.delete(self.storage.columns.cache, &self.storage.encode_block_id(at));
+ }
+
+ fn update_meta(
+ &mut self,
+ best_finalized_entry: Option<&Entry>,
+ unfinalized: &[Fork],
+ operation: &CommitOperation,
+ ) {
+ // metadata is re-encoded in full, with `operation` applied on top of the
+ // in-memory state (see meta::encode)
+ self.tx.put(
+ self.storage.columns.meta,
+ &self.storage.meta_key,
+ &meta::encode(best_finalized_entry, unfinalized, operation));
+ }
+}
+
+/// Metadata related functions.
+mod meta {
+ use super::*;
+
+ /// Convert cache name into cache metadata key.
+ pub fn key(name: &[u8]) -> Vec {
+ let mut key_name = meta_keys::CACHE_META_PREFIX.to_vec();
+ key_name.extend_from_slice(name);
+ key_name
+ }
+
+ /// Encode cache metadata 'applying' commit operation before encoding.
+ pub fn encode(
+ best_finalized_entry: Option<&Entry>,
+ unfinalized: &[Fork],
+ op: &CommitOperation
+ ) -> Vec {
+ let mut finalized = best_finalized_entry.as_ref().map(|entry| &entry.valid_from);
+ let mut unfinalized = unfinalized.iter().map(|fork| &fork.head().valid_from).collect::>();
+
+ match op {
+ // appending a block to an existing fork does not change the stored heads
+ CommitOperation::AppendNewBlock(_, _) => (),
+ CommitOperation::AppendNewEntry(index, ref entry) => {
+ unfinalized[*index] = &entry.valid_from;
+ },
+ CommitOperation::AddNewFork(ref entry) => {
+ unfinalized.push(&entry.valid_from);
+ },
+ CommitOperation::BlockFinalized(_, ref finalizing_entry, ref forks) => {
+ finalized = finalizing_entry.as_ref().map(|entry| &entry.valid_from);
+ // remove in reverse index order so earlier indices remain valid
+ for fork_index in forks.iter().rev() {
+ unfinalized.remove(*fork_index);
+ }
+ },
+ }
+
+ (finalized, unfinalized).encode()
+ }
+
+ /// Decode meta information.
+ /// Decodes the `(finalized, unfinalized)` tuple written by `encode`; any decode
+ /// failure is reported as a Backend error.
+ pub fn decode(encoded: &[u8]) -> ClientResult> {
+ let input = &mut &*encoded;
+ let finalized: Option> = Decode::decode(input)
+ .ok_or_else(|| ClientError::from(ClientErrorKind::Backend("Error decoding cache meta".into())))?;
+ let unfinalized: Vec> = Decode::decode(input)
+ .ok_or_else(|| ClientError::from(ClientErrorKind::Backend("Error decoding cache meta".into())))?;
+
+ Ok(Metadata { finalized, unfinalized })
+ }
+}
+
+#[cfg(test)]
+pub mod tests {
+ use std::collections::{HashMap, HashSet};
+ use runtime_primitives::traits::Header as HeaderT;
+ use super::*;
+
+ /// Storage implementation that fails every operation with a Backend error;
+ /// used by tests to verify error propagation.
+ pub struct FaultyStorage;
+
+ impl Storage for FaultyStorage {
+ fn read_id(&self, _at: NumberFor) -> ClientResult> {
+ Err(ClientErrorKind::Backend("TestError".into()).into())
+ }
+
+ fn read_header(&self, _at: &Block::Hash) -> ClientResult > {
+ Err(ClientErrorKind::Backend("TestError".into()).into())
+ }
+
+ fn read_meta(&self) -> ClientResult> {
+ Err(ClientErrorKind::Backend("TestError".into()).into())
+ }
+
+ fn read_entry(&self, _at: &ComplexBlockId) -> ClientResult>> {
+ Err(ClientErrorKind::Backend("TestError".into()).into())
+ }
+ }
+
+ /// In-memory storage implementation for tests; populated via the `with_*` builders.
+ pub struct DummyStorage {
+ /// Metadata returned by read_meta.
+ meta: Metadata,
+ /// { number => hash } lookups returned by read_id.
+ ids: HashMap, Block::Hash>,
+ /// { hash => header } lookups returned by read_header.
+ headers: HashMap,
+ /// Cache entries returned by read_entry, keyed by block hash.
+ entries: HashMap>,
+ }
+
+ impl DummyStorage {
+ /// Creates an empty storage (no ids, headers, entries; empty metadata).
+ pub fn new() -> Self {
+ DummyStorage {
+ meta: Metadata {
+ finalized: None,
+ unfinalized: Vec::new(),
+ },
+ ids: HashMap::new(),
+ headers: HashMap::new(),
+ entries: HashMap::new(),
+ }
+ }
+
+ /// Builder: sets the metadata returned by read_meta.
+ pub fn with_meta(mut self, finalized: Option>, unfinalized: Vec>) -> Self {
+ self.meta.finalized = finalized;
+ self.meta.unfinalized = unfinalized;
+ self
+ }
+
+ /// Builder: adds a { number => hash } mapping.
+ pub fn with_id(mut self, at: NumberFor, id: Block::Hash) -> Self {
+ self.ids.insert(at, id);
+ self
+ }
+
+ /// Builder: adds a header, keyed by its own hash.
+ pub fn with_header(mut self, header: Block::Header) -> Self {
+ self.headers.insert(header.hash(), header);
+ self
+ }
+
+ /// Builder: adds a cache entry, keyed by the block hash of `at`.
+ pub fn with_entry(mut self, at: ComplexBlockId, entry: StorageEntry) -> Self {
+ self.entries.insert(at.hash, entry);
+ self
+ }
+ }
+
+ impl Storage for DummyStorage {
+ // all reads are infallible lookups into the in-memory maps
+ fn read_id(&self, at: NumberFor) -> ClientResult> {
+ Ok(self.ids.get(&at).cloned())
+ }
+
+ fn read_header(&self, at: &Block::Hash) -> ClientResult > {
+ Ok(self.headers.get(&at).cloned())
+ }
+
+ fn read_meta(&self) -> ClientResult> {
+ Ok(self.meta.clone())
+ }
+
+ fn read_entry(&self, at: &ComplexBlockId) -> ClientResult>> {
+ Ok(self.entries.get(&at.hash).cloned())
+ }
+ }
+
+ pub struct DummyTransaction