Introduce notion of finality to substrate (#760)

* finalization for in_mem

* fetch last finalized block

* pruning: use canonical term instead of final

* finalize blocks in full node

* begin to port light client DB

* add tree-route

* keep number index consistent in full nodes

* fix tests

* disable cache and finish porting light client

* add AsMut to system module

* final leaf is always best

* fix all tests

* Fix comment and trace

* removed unused Into call

* add comment on behavior of `finalize_block`
This commit is contained in:
Robert Habermeier
2018-09-21 15:56:21 +02:00
committed by Gav Wood
parent 28cc4d0fd6
commit b7d095a2e0
19 changed files with 976 additions and 370 deletions
+37 -12
View File
@@ -27,12 +27,13 @@ use codec::{Codec, Encode, Decode};
use primitives::AuthorityId;
use runtime_primitives::generic::BlockId;
use runtime_primitives::traits::{Block as BlockT, As, NumberFor};
use utils::{COLUMN_META, BlockKey, db_err, meta_keys, read_id, db_key_to_number, number_to_db_key};
use utils::{COLUMN_META, BlockLookupKey, db_err, meta_keys, lookup_key_to_number, number_to_lookup_key};
/// Database-backed cache of blockchain data.
pub struct DbCache<Block: BlockT> {
db: Arc<KeyValueDB>,
block_index_column: Option<u32>,
header_column: Option<u32>,
authorities_at: DbCacheList<Block, Vec<AuthorityId>>,
}
@@ -45,16 +46,19 @@ impl<Block> DbCache<Block>
pub fn new(
db: Arc<KeyValueDB>,
block_index_column: Option<u32>,
header_column: Option<u32>,
authorities_column: Option<u32>
) -> ClientResult<Self> {
Ok(DbCache {
db: db.clone(),
block_index_column,
header_column,
authorities_at: DbCacheList::new(db, meta_keys::BEST_AUTHORITIES, authorities_column)?,
})
}
/// Get authorities_cache.
#[allow(unused)]
pub fn authorities_at_cache(&self) -> &DbCacheList<Block, Vec<AuthorityId>> {
&self.authorities_at
}
@@ -66,10 +70,27 @@ impl<Block> BlockchainCache<Block> for DbCache<Block>
NumberFor<Block>: As<u64>,
{
fn authorities_at(&self, at: BlockId<Block>) -> Option<Vec<AuthorityId>> {
let authorities_at = read_id(&*self.db, self.block_index_column, at).and_then(|at| match at {
Some(at) => self.authorities_at.value_at_key(at),
None => Ok(None),
});
use runtime_primitives::traits::Header as HeaderT;
let number = match at {
BlockId::Number(n) => Ok(number_to_lookup_key(n)),
BlockId::Hash(h) => {
let maybe_header = ::utils::read_header::<Block>(
&*self.db,
self.block_index_column,
self.header_column,
BlockId::Hash(h),
);
match maybe_header {
Ok(Some(hdr)) => Ok(number_to_lookup_key(*hdr.number())),
Ok(None) => return None, // no such block.
Err(e) => Err(e),
}
}
};
let authorities_at = number.and_then(|at| self.authorities_at.value_at_key(at));
match authorities_at {
Ok(authorities) => authorities,
@@ -128,7 +149,7 @@ impl<Block, T> DbCacheList<Block, T>
.map_err(db_err)
.and_then(|block| match block {
Some(block) => {
let valid_from = db_key_to_number(&block)?;
let valid_from = lookup_key_to_number(&block)?;
read_storage_entry::<Block, T>(&*db, column, valid_from)
.map(|entry| Some(Entry {
valid_from,
@@ -155,6 +176,7 @@ impl<Block, T> DbCacheList<Block, T>
/// Commits the new best pending value to the database. Returns Some if best entry must
/// be updated after transaction is committed.
#[allow(unused)]
pub fn commit_best_entry(
&self,
transaction: &mut DBTransaction,
@@ -174,7 +196,7 @@ impl<Block, T> DbCacheList<Block, T>
return None;
}
let valid_from_key = number_to_db_key(valid_from);
let valid_from_key = number_to_lookup_key(valid_from);
transaction.put(COLUMN_META, self.meta_key, &valid_from_key);
transaction.put(self.column, &valid_from_key, &StorageEntry {
prev_valid_from: best_entry.map(|b| b.valid_from),
@@ -189,12 +211,14 @@ impl<Block, T> DbCacheList<Block, T>
/// Updates the best in-memory cache entry. Must be called after transaction with changes
/// from commit_best_entry has been committed.
#[allow(unused)]
pub fn update_best_entry(&self, best_entry: Option<Entry<NumberFor<Block>, T>>) {
*self.best_entry.write() = best_entry;
}
/// Prune all entries from the beginning up to the block (including entry at the number). Returns
/// the number of pruned entries. Pruning never deletes the latest entry in the cache.
#[allow(unused)]
pub fn prune_entries(
&self,
transaction: &mut DBTransaction,
@@ -228,7 +252,7 @@ impl<Block, T> DbCacheList<Block, T>
.expect("referenced entry exists; entry_to_remove is a reference to the entry; qed");
if current_entry != last_entry_to_keep {
transaction.delete(self.column, &number_to_db_key(current_entry));
transaction.delete(self.column, &number_to_lookup_key(current_entry));
pruned += 1;
}
entry_to_remove = entry.prev_valid_from;
@@ -237,15 +261,15 @@ impl<Block, T> DbCacheList<Block, T>
let mut entry = read_storage_entry::<Block, T>(&*self.db, self.column, last_entry_to_keep)?
.expect("last_entry_to_keep >= first_entry_to_remove; that means that we're leaving this entry in the db; qed");
entry.prev_valid_from = None;
transaction.put(self.column, &number_to_db_key(last_entry_to_keep), &entry.encode());
transaction.put(self.column, &number_to_lookup_key(last_entry_to_keep), &entry.encode());
Ok(pruned)
}
/// Reads the cached value, actual at given block. Returns None if the value was not cached
/// or if it has been pruned.
fn value_at_key(&self, key: BlockKey) -> ClientResult<Option<T>> {
let at = db_key_to_number::<NumberFor<Block>>(&key)?;
fn value_at_key(&self, key: BlockLookupKey) -> ClientResult<Option<T>> {
let at = lookup_key_to_number::<NumberFor<Block>>(&key)?;
let best_valid_from = match self.best_entry() {
// there are entries in cache
Some(best_entry) => {
@@ -291,7 +315,7 @@ fn read_storage_entry<Block, T>(
NumberFor<Block>: As<u64>,
T: Codec,
{
db.get(column, &number_to_db_key(number))
db.get(column, &number_to_lookup_key(number))
.and_then(|entry| match entry {
Some(entry) => Ok(StorageEntry::<NumberFor<Block>, T>::decode(&mut &entry[..])),
None => Ok(None),
@@ -324,6 +348,7 @@ mod tests {
}
#[test]
#[ignore] // TODO: unignore when cache reinstated.
fn best_authorities_are_updated() {
let db = LightStorage::new_test();
let authorities_at: Vec<(usize, Option<Entry<u64, Vec<AuthorityId>>>)> = vec![
+226 -84
View File
@@ -49,6 +49,7 @@ use std::sync::Arc;
use std::path::PathBuf;
use std::io;
use client::backend::NewBlockState;
use codec::{Decode, Encode};
use hashdb::Hasher;
use kvdb::{KeyValueDB, DBTransaction};
@@ -57,14 +58,12 @@ use parking_lot::RwLock;
use primitives::{H256, AuthorityId, Blake2Hasher, RlpCodec};
use runtime_primitives::generic::BlockId;
use runtime_primitives::bft::Justification;
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, As, Hash, HashFor,
NumberFor, Zero, Digest, DigestItem};
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, As, NumberFor, Zero, Digest, DigestItem};
use runtime_primitives::BuildStorage;
use state_machine::backend::Backend as StateBackend;
use executor::RuntimeInfo;
use state_machine::{CodeExecutor, DBValue, ExecutionStrategy};
use utils::{Meta, db_err, meta_keys, number_to_db_key, db_key_to_number, open_database,
read_db, read_id, read_meta};
use utils::{Meta, db_err, meta_keys, open_database, read_db, read_id, read_meta};
use state_db::StateDb;
pub use state_db::PruningMode;
@@ -104,7 +103,7 @@ mod columns {
pub const META: Option<u32> = Some(0);
pub const STATE: Option<u32> = Some(1);
pub const STATE_META: Option<u32> = Some(2);
pub const BLOCK_INDEX: Option<u32> = Some(3);
pub const HASH_LOOKUP: Option<u32> = Some(3);
pub const HEADER: Option<u32> = Some(4);
pub const BODY: Option<u32> = Some(5);
pub const JUSTIFICATION: Option<u32> = Some(6);
@@ -115,7 +114,7 @@ struct PendingBlock<Block: BlockT> {
header: Block::Header,
justification: Option<Justification<Block::Hash>>,
body: Option<Vec<Block::Extrinsic>>,
is_best: bool,
leaf_state: NewBlockState,
}
// wrapper that implements trait required for state_db
@@ -144,27 +143,33 @@ impl<Block: BlockT> BlockchainDb<Block> {
})
}
fn update_meta(&self, hash: Block::Hash, number: <Block::Header as HeaderT>::Number, is_best: bool) {
fn update_meta(
&self,
hash: Block::Hash,
number: <Block::Header as HeaderT>::Number,
is_best: bool,
is_finalized: bool
) {
let mut meta = self.meta.write();
if number == Zero::zero() {
meta.genesis_hash = hash;
}
if is_best {
let mut meta = self.meta.write();
if number == Zero::zero() {
meta.genesis_hash = hash;
}
meta.best_number = number;
meta.best_hash = hash;
}
if is_finalized {
meta.finalized_number = number;
meta.finalized_hash = hash;
}
}
}
impl<Block: BlockT> client::blockchain::HeaderBackend<Block> for BlockchainDb<Block> {
fn header(&self, id: BlockId<Block>) -> Result<Option<Block::Header>, client::error::Error> {
match read_db(&*self.db, columns::BLOCK_INDEX, columns::HEADER, id)? {
Some(header) => match Block::Header::decode(&mut &header[..]) {
Some(header) => Ok(Some(header)),
None => return Err(client::error::ErrorKind::Backend("Error decoding header".into()).into()),
}
None => Ok(None),
}
::utils::read_header(&*self.db, columns::HASH_LOOKUP, columns::HEADER, id)
}
fn info(&self) -> Result<client::blockchain::Info<Block>, client::error::Error> {
@@ -178,7 +183,12 @@ impl<Block: BlockT> client::blockchain::HeaderBackend<Block> for BlockchainDb<Bl
fn status(&self, id: BlockId<Block>) -> Result<client::blockchain::BlockStatus, client::error::Error> {
let exists = match id {
BlockId::Hash(_) => read_id(&*self.db, columns::BLOCK_INDEX, id)?.is_some(),
BlockId::Hash(_) => read_db(
&*self.db,
columns::HASH_LOOKUP,
columns::HEADER,
id
)?.is_some(),
BlockId::Number(n) => n <= self.meta.read().best_number,
};
match exists {
@@ -188,23 +198,20 @@ impl<Block: BlockT> client::blockchain::HeaderBackend<Block> for BlockchainDb<Bl
}
fn number(&self, hash: Block::Hash) -> Result<Option<<Block::Header as HeaderT>::Number>, client::error::Error> {
read_id::<Block>(&*self.db, columns::BLOCK_INDEX, BlockId::Hash(hash))
.and_then(|key| match key {
Some(key) => Ok(Some(db_key_to_number(&key)?)),
None => Ok(None),
})
self.header(BlockId::Hash(hash)).and_then(|key| match key {
Some(hdr) => Ok(Some(hdr.number().clone())),
None => Ok(None),
})
}
fn hash(&self, number: <Block::Header as HeaderT>::Number) -> Result<Option<Block::Hash>, client::error::Error> {
read_db::<Block>(&*self.db, columns::BLOCK_INDEX, columns::HEADER, BlockId::Number(number)).map(|x|
x.map(|raw| HashFor::<Block>::hash(&raw[..])).map(Into::into)
)
read_id::<Block>(&*self.db, columns::HASH_LOOKUP, BlockId::Number(number))
}
}
impl<Block: BlockT> client::blockchain::Backend<Block> for BlockchainDb<Block> {
fn body(&self, id: BlockId<Block>) -> Result<Option<Vec<Block::Extrinsic>>, client::error::Error> {
match read_db(&*self.db, columns::BLOCK_INDEX, columns::BODY, id)? {
match read_db(&*self.db, columns::HASH_LOOKUP, columns::BODY, id)? {
Some(body) => match Decode::decode(&mut &body[..]) {
Some(body) => Ok(Some(body)),
None => return Err(client::error::ErrorKind::Backend("Error decoding body".into()).into()),
@@ -214,7 +221,7 @@ impl<Block: BlockT> client::blockchain::Backend<Block> for BlockchainDb<Block> {
}
fn justification(&self, id: BlockId<Block>) -> Result<Option<Justification<Block::Hash>>, client::error::Error> {
match read_db(&*self.db, columns::BLOCK_INDEX, columns::JUSTIFICATION, id)? {
match read_db(&*self.db, columns::HASH_LOOKUP, columns::JUSTIFICATION, id)? {
Some(justification) => match Decode::decode(&mut &justification[..]) {
Some(justification) => Ok(Some(justification)),
None => return Err(client::error::ErrorKind::Backend("Error decoding justification".into()).into()),
@@ -223,6 +230,10 @@ impl<Block: BlockT> client::blockchain::Backend<Block> for BlockchainDb<Block> {
}
}
fn last_finalized(&self) -> Result<Block::Hash, client::error::Error> {
Ok(self.meta.read().finalized_hash.clone())
}
fn cache(&self) -> Option<&client::blockchain::Cache<Block>> {
None
}
@@ -246,13 +257,19 @@ where Block: BlockT,
Ok(Some(&self.old_state))
}
fn set_block_data(&mut self, header: Block::Header, body: Option<Vec<Block::Extrinsic>>, justification: Option<Justification<Block::Hash>>, is_best: bool) -> Result<(), client::error::Error> {
fn set_block_data(
&mut self,
header: Block::Header,
body: Option<Vec<Block::Extrinsic>>,
justification: Option<Justification<Block::Hash>>,
leaf_state: NewBlockState,
) -> Result<(), client::error::Error> {
assert!(self.pending_block.is_none(), "Only one block per operation is allowed");
self.pending_block = Some(PendingBlock {
header,
body,
justification,
is_best,
leaf_state,
});
Ok(())
}
@@ -324,7 +341,7 @@ pub struct DbChangesTrieStorage<Block: BlockT> {
impl<Block: BlockT> state_machine::ChangesTrieStorage<Blake2Hasher> for DbChangesTrieStorage<Block> {
fn root(&self, block: u64) -> Result<Option<H256>, String> {
Ok(read_db::<Block>(&*self.db, columns::BLOCK_INDEX, columns::HEADER, BlockId::Number(As::sa(block)))
Ok(read_db::<Block>(&*self.db, columns::HASH_LOOKUP, columns::HEADER, BlockId::Number(As::sa(block)))
.map_err(|err| format!("{}", err))
.and_then(|header| match header {
Some(header) => Block::Header::decode(&mut &header[..])
@@ -345,20 +362,22 @@ impl<Block: BlockT> state_machine::ChangesTrieStorage<Blake2Hasher> for DbChange
}
/// Disk backend. Keeps data in a key-value store. In archive mode, trie nodes are kept from all blocks.
/// Otherwise, trie nodes are kept only from the most recent block.
/// Otherwise, trie nodes are kept only from some recent blocks.
pub struct Backend<Block: BlockT> {
storage: Arc<StorageDb<Block>>,
tries_change_storage: DbChangesTrieStorage<Block>,
blockchain: BlockchainDb<Block>,
finalization_window: u64,
pruning_window: u64,
}
impl<Block: BlockT> Backend<Block> {
/// Create a new instance of database backend.
pub fn new(config: DatabaseSettings, finalization_window: u64) -> Result<Self, client::error::Error> {
///
/// The pruning window is how old a block must be before the state is pruned.
pub fn new(config: DatabaseSettings, pruning_window: u64) -> Result<Self, client::error::Error> {
let db = open_database(&config, "full")?;
Backend::from_kvdb(db as Arc<_>, config.pruning, finalization_window)
Backend::from_kvdb(db as Arc<_>, config.pruning, pruning_window)
}
#[cfg(test)]
@@ -370,7 +389,7 @@ impl<Block: BlockT> Backend<Block> {
Backend::from_kvdb(db as Arc<_>, PruningMode::keep_blocks(keep_blocks), 0).expect("failed to create test-db")
}
fn from_kvdb(db: Arc<KeyValueDB>, pruning: PruningMode, finalization_window: u64) -> Result<Self, client::error::Error> {
fn from_kvdb(db: Arc<KeyValueDB>, pruning: PruningMode, pruning_window: u64) -> Result<Self, client::error::Error> {
let blockchain = BlockchainDb::new(db.clone())?;
let map_e = |e: state_db::Error<io::Error>| ::client::error::Error::from(format!("State database error: {:?}", e));
let state_db: StateDb<Block::Hash, H256> = StateDb::new(pruning, &StateMetaDb(&*db)).map_err(map_e)?;
@@ -387,9 +406,68 @@ impl<Block: BlockT> Backend<Block> {
storage: Arc::new(storage_db),
tries_change_storage: tries_change_storage,
blockchain,
finalization_window,
pruning_window,
})
}
// write stuff to a transaction after a new block is finalized.
//
// this manages state pruning and ensuring reorgs don't occur.
// this function should only be called if the finalized block is contained
// in the best chain.
fn note_finalized(&self, transaction: &mut DBTransaction, f_header: &Block::Header, f_hash: Block::Hash) -> Result<(), client::error::Error> {
const NOTEWORTHY_FINALIZATION_GAP: u64 = 32;
// TODO: ensure this doesn't conflict with old finalized block.
let meta = self.blockchain.meta.read();
let f_num = f_header.number().clone();
transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, f_hash.as_ref());
let (last_finalized_hash, last_finalized_number)
= (meta.finalized_hash.clone(), meta.finalized_number);
let finalized_gap = f_num - last_finalized_number;
if finalized_gap.as_() >= NOTEWORTHY_FINALIZATION_GAP {
info!(target: "db", "Finalizing large run of blocks from {:?} to {:?}",
(&last_finalized_hash, last_finalized_number), (&f_hash, f_num));
} else {
debug!(target: "db", "Finalizing blocks from {:?} to {:?}",
(&last_finalized_hash, last_finalized_number), (&f_hash, f_num));
}
let mut canonicalize_state = |canonical_hash| {
let commit = self.storage.state_db.canonicalize_block(&canonical_hash);
apply_state_commit(transaction, commit);
};
// when finalizing a block, we must also implicitly finalize all the blocks
// in between the last finalized block and this one. That means canonicalizing
// all their states in order.
let number_u64 = f_num.as_();
if number_u64 > self.pruning_window {
let new_canonical = number_u64 - self.pruning_window;
let best_canonical = self.storage.state_db.best_canonical();
for uncanonicalized_number in (best_canonical..new_canonical).map(|x| x + 1) {
let hash = if uncanonicalized_number == number_u64 {
f_hash
} else {
read_id::<Block>(
&*self.blockchain.db,
columns::HASH_LOOKUP,
BlockId::Number(As::sa(uncanonicalized_number))
)?.expect("existence of block with number `new_canonical` \
implies existence of blocks with all numbers before it; qed")
};
trace!(target: "db", "Canonicalize block #{} ({:?})", uncanonicalized_number, hash);
canonicalize_state(hash);
}
};
Ok(())
}
}
fn apply_state_commit(transaction: &mut DBTransaction, commit: state_db::CommitSet<H256>) {
@@ -430,23 +508,69 @@ impl<Block> client::backend::Backend<Block, Blake2Hasher, RlpCodec> for Backend<
}
fn commit_operation(&self, mut operation: Self::BlockImportOperation) -> Result<(), client::error::Error> {
use client::blockchain::HeaderBackend;
let mut transaction = DBTransaction::new();
if let Some(pending_block) = operation.pending_block {
let hash = pending_block.header.hash();
let number = pending_block.header.number().clone();
let key = number_to_db_key(number.clone());
transaction.put(columns::HEADER, &key, &pending_block.header.encode());
transaction.put(columns::HEADER, hash.as_ref(), &pending_block.header.encode());
if let Some(body) = pending_block.body {
transaction.put(columns::BODY, &key, &body.encode());
transaction.put(columns::BODY, hash.as_ref(), &body.encode());
}
if let Some(justification) = pending_block.justification {
transaction.put(columns::JUSTIFICATION, &key, &justification.encode());
transaction.put(columns::JUSTIFICATION, hash.as_ref(), &justification.encode());
}
transaction.put(columns::BLOCK_INDEX, hash.as_ref(), &key);
if pending_block.is_best {
transaction.put(columns::META, meta_keys::BEST_BLOCK, &key);
if pending_block.leaf_state.is_best() {
let meta = self.blockchain.meta.read();
// cannot find tree route with empty DB.
if meta.best_hash != Default::default() {
let parent_hash = *pending_block.header.parent_hash();
let tree_route = ::utils::tree_route::<Block>(
&*self.blockchain.db,
columns::HEADER,
meta.best_hash,
parent_hash,
)?;
// update block number to hash lookup entries.
for retracted in tree_route.retracted() {
if retracted.hash == meta.finalized_hash {
// TODO: can we recover here?
warn!("Safety failure: reverting finalized block {:?}",
(&retracted.number, &retracted.hash));
}
transaction.delete(
columns::HASH_LOOKUP,
&::utils::number_to_lookup_key(retracted.number)
);
}
for enacted in tree_route.enacted() {
let hash: &Block::Hash = &enacted.hash;
transaction.put(
columns::HASH_LOOKUP,
&::utils::number_to_lookup_key(enacted.number),
hash.as_ref(),
)
}
}
transaction.put(
columns::HASH_LOOKUP,
&::utils::number_to_lookup_key(number),
hash.as_ref()
);
transaction.put(columns::META, meta_keys::BEST_BLOCK, hash.as_ref());
}
if number == Zero::zero() {
transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, hash.as_ref());
transaction.put(columns::META, meta_keys::GENESIS_HASH, hash.as_ref());
}
let mut changeset: state_db::ChangeSet<H256> = state_db::ChangeSet::default();
for (key, (val, rc)) in operation.updates.drain() {
if rc > 0 {
@@ -455,37 +579,51 @@ impl<Block> client::backend::Backend<Block, Blake2Hasher, RlpCodec> for Backend<
changeset.deleted.push(key.0.into());
}
}
let number_u64 = number.as_().into();
let number_u64 = number.as_();
let commit = self.storage.state_db.insert_block(&hash, number_u64, &pending_block.header.parent_hash(), changeset);
apply_state_commit(&mut transaction, commit);
apply_changes_trie_commit(&mut transaction, operation.changes_trie_updates);
//finalize an older block
if number_u64 > self.finalization_window {
let finalizing_hash = if self.finalization_window == 0 {
Some(hash)
} else {
let finalizing = number_u64 - self.finalization_window;
if finalizing > self.storage.state_db.best_finalized() {
self.blockchain.hash(As::sa(finalizing))?
} else {
None
}
};
if let Some(finalizing_hash) = finalizing_hash {
trace!(target: "db", "Finalizing block #{} ({:?})", number_u64 - self.finalization_window, finalizing_hash);
let commit = self.storage.state_db.finalize_block(&finalizing_hash);
apply_state_commit(&mut transaction, commit);
}
let finalized = match pending_block.leaf_state {
NewBlockState::Final => true,
_ => false,
};
if finalized {
// TODO: ensure best chain contains this block.
self.note_finalized(&mut transaction, &pending_block.header, hash)?;
}
debug!(target: "db", "DB Commit {:?} ({}), best = {}", hash, number, pending_block.is_best);
debug!(target: "db", "DB Commit {:?} ({}), best = {}", hash, number,
pending_block.leaf_state.is_best());
self.storage.db.write(transaction).map_err(db_err)?;
self.blockchain.update_meta(hash, number, pending_block.is_best);
self.blockchain.update_meta(
hash,
number,
pending_block.leaf_state.is_best(),
finalized,
);
}
Ok(())
}
fn finalize_block(&self, block: BlockId<Block>) -> Result<(), client::error::Error> {
use runtime_primitives::traits::Header;
if let Some(header) = ::client::blockchain::HeaderBackend::header(&self.blockchain, block)? {
let mut transaction = DBTransaction::new();
// TODO: ensure best chain contains this block.
let hash = header.hash();
self.note_finalized(&mut transaction, &header, hash.clone())?;
self.storage.db.write(transaction).map_err(db_err)?;
self.blockchain.update_meta(hash, header.number().clone(), false, true);
Ok(())
} else {
Err(client::error::ErrorKind::UnknownBlock(format!("Cannot finalize block {:?}", block)).into())
}
}
fn changes_trie_storage(&self) -> Option<&Self::ChangesTrieStorage> {
Some(&self.tries_change_storage)
}
@@ -501,18 +639,16 @@ impl<Block> client::backend::Backend<Block, Blake2Hasher, RlpCodec> for Backend<
match self.storage.state_db.revert_one() {
Some(commit) => {
apply_state_commit(&mut transaction, commit);
let removed = self.blockchain.hash(best)?.ok_or_else(
|| client::error::ErrorKind::UnknownBlock(
format!("Error reverting to {}. Block hash not found.", best)))?;
let removed = best.clone();
best -= As::sa(1);
let key = number_to_db_key(best.clone());
let hash = self.blockchain.hash(best)?.ok_or_else(
|| client::error::ErrorKind::UnknownBlock(
format!("Error reverting to {}. Block hash not found.", best)))?;
transaction.put(columns::META, meta_keys::BEST_BLOCK, &key);
transaction.delete(columns::BLOCK_INDEX, removed.as_ref());
transaction.put(columns::META, meta_keys::BEST_BLOCK, hash.as_ref());
transaction.delete(columns::HASH_LOOKUP, &::utils::number_to_lookup_key(removed));
self.storage.db.write(transaction).map_err(db_err)?;
self.blockchain.update_meta(hash, best, true);
self.blockchain.update_meta(hash, best, true, false);
}
None => return Ok(As::sa(c))
}
@@ -593,7 +729,7 @@ mod tests {
header,
Some(vec![]),
None,
true,
NewBlockState::Best,
).unwrap();
db.commit_operation(op).unwrap();
}
@@ -605,7 +741,7 @@ mod tests {
#[test]
fn set_state_data() {
let db = Backend::<Block>::new_test(2);
{
let hash = {
let mut op = db.begin_operation(BlockId::Hash(Default::default())).unwrap();
let mut header = Header {
number: 0,
@@ -625,13 +761,14 @@ mod tests {
.cloned()
.map(|(x, y)| (x, Some(y)))
).0.into();
let hash = header.hash();
op.reset_storage(storage.iter().cloned()).unwrap();
op.set_block_data(
header,
Some(vec![]),
None,
true
NewBlockState::Best,
).unwrap();
db.commit_operation(op).unwrap();
@@ -642,13 +779,14 @@ mod tests {
assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9]));
assert_eq!(state.storage(&[5, 5, 5]).unwrap(), None);
}
hash
};
{
let mut op = db.begin_operation(BlockId::Number(0)).unwrap();
let mut header = Header {
number: 1,
parent_hash: Default::default(),
parent_hash: hash,
state_root: Default::default(),
digest: Default::default(),
extrinsics_root: Default::default(),
@@ -667,7 +805,7 @@ mod tests {
header,
Some(vec![]),
None,
true
NewBlockState::Best,
).unwrap();
db.commit_operation(op).unwrap();
@@ -711,7 +849,7 @@ mod tests {
header,
Some(vec![]),
None,
true
NewBlockState::Best,
).unwrap();
backend.commit_operation(op).unwrap();
@@ -745,7 +883,7 @@ mod tests {
header,
Some(vec![]),
None,
true
NewBlockState::Best,
).unwrap();
backend.commit_operation(op).unwrap();
@@ -777,13 +915,17 @@ mod tests {
header,
Some(vec![]),
None,
true
NewBlockState::Best,
).unwrap();
backend.commit_operation(op).unwrap();
assert!(backend.storage.db.get(::columns::STATE, &key.0[..]).unwrap().is_none());
// block not yet finalized, so state not pruned.
assert!(backend.storage.db.get(::columns::STATE, &key.0[..]).unwrap().is_some());
}
backend.finalize_block(BlockId::Number(2)).unwrap();
assert!(backend.storage.db.get(::columns::STATE, &key.0[..]).unwrap().is_none());
}
#[test]
@@ -831,7 +973,7 @@ mod tests {
BlockId::Number(number - 1)
};
let mut op = backend.begin_operation(block_id).unwrap();
op.set_block_data(header, None, None, true).unwrap();
op.set_block_data(header, None, None, NewBlockState::Best).unwrap();
op.update_changes_trie(changes_trie_update).unwrap();
backend.commit_operation(op).unwrap();
+205 -74
View File
@@ -21,6 +21,7 @@ use parking_lot::RwLock;
use kvdb::{KeyValueDB, DBTransaction};
use client::backend::NewBlockState;
use client::blockchain::{BlockStatus, Cache as BlockchainCache,
HeaderBackend as BlockchainHeaderBackend, Info as BlockchainInfo};
use client::cht;
@@ -29,29 +30,30 @@ use client::light::blockchain::Storage as LightBlockchainStorage;
use codec::{Decode, Encode};
use primitives::{AuthorityId, H256, Blake2Hasher};
use runtime_primitives::generic::BlockId;
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Hash, HashFor,
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT,
Zero, One, As, NumberFor};
use cache::DbCache;
use utils::{meta_keys, Meta, db_err, number_to_db_key, db_key_to_number, open_database,
use utils::{meta_keys, Meta, db_err, number_to_lookup_key, open_database,
read_db, read_id, read_meta};
use DatabaseSettings;
pub(crate) mod columns {
pub const META: Option<u32> = ::utils::COLUMN_META;
pub const BLOCK_INDEX: Option<u32> = Some(1);
pub const HASH_LOOKUP: Option<u32> = Some(1);
pub const HEADER: Option<u32> = Some(2);
pub const AUTHORITIES: Option<u32> = Some(3);
pub const CHT: Option<u32> = Some(4);
}
/// Keep authorities for last 'AUTHORITIES_ENTRIES_TO_KEEP' blocks.
#[allow(unused)]
pub(crate) const AUTHORITIES_ENTRIES_TO_KEEP: u64 = cht::SIZE;
/// Light blockchain storage. Stores most recent headers + CHTs for older headers.
pub struct LightStorage<Block: BlockT> {
db: Arc<KeyValueDB>,
meta: RwLock<Meta<<<Block as BlockT>::Header as HeaderT>::Number, Block::Hash>>,
cache: DbCache<Block>,
_cache: DbCache<Block>,
}
#[derive(Clone, PartialEq, Debug)]
@@ -83,13 +85,18 @@ impl<Block> LightStorage<Block>
}
fn from_kvdb(db: Arc<KeyValueDB>) -> ClientResult<Self> {
let cache = DbCache::new(db.clone(), columns::BLOCK_INDEX, columns::AUTHORITIES)?;
let cache = DbCache::new(
db.clone(),
columns::HASH_LOOKUP,
columns::HEADER,
columns::AUTHORITIES
)?;
let meta = RwLock::new(read_meta::<Block>(&*db, columns::HEADER)?);
Ok(LightStorage {
db,
meta,
cache,
_cache: cache,
})
}
@@ -100,12 +107,18 @@ impl<Block> LightStorage<Block>
#[cfg(test)]
pub(crate) fn cache(&self) -> &DbCache<Block> {
&self.cache
&self._cache
}
fn update_meta(&self, hash: Block::Hash, number: <<Block as BlockT>::Header as HeaderT>::Number, is_best: bool) {
fn update_meta(
&self,
hash: Block::Hash,
number: <<Block as BlockT>::Header as HeaderT>::Number,
is_best: bool,
is_finalized: bool,
) {
let mut meta = self.meta.write();
if is_best {
let mut meta = self.meta.write();
if number == <<Block as BlockT>::Header as HeaderT>::Number::zero() {
meta.genesis_hash = hash;
}
@@ -113,6 +126,15 @@ impl<Block> LightStorage<Block>
meta.best_number = number;
meta.best_hash = hash;
}
if is_finalized {
if number == <<Block as BlockT>::Header as HeaderT>::Number::zero() {
meta.genesis_hash = hash;
}
meta.finalized_number = number;
meta.finalized_hash = hash;
}
}
}
@@ -121,13 +143,7 @@ impl<Block> BlockchainHeaderBackend<Block> for LightStorage<Block>
Block: BlockT,
{
fn header(&self, id: BlockId<Block>) -> ClientResult<Option<Block::Header>> {
match read_db(&*self.db, columns::BLOCK_INDEX, columns::HEADER, id)? {
Some(header) => match Block::Header::decode(&mut &header[..]) {
Some(header) => Ok(Some(header)),
None => return Err(ClientErrorKind::Backend("Error decoding header".into()).into()),
}
None => Ok(None),
}
::utils::read_header(&*self.db, columns::HASH_LOOKUP, columns::HEADER, id)
}
fn info(&self) -> ClientResult<BlockchainInfo<Block>> {
@@ -141,7 +157,12 @@ impl<Block> BlockchainHeaderBackend<Block> for LightStorage<Block>
fn status(&self, id: BlockId<Block>) -> ClientResult<BlockStatus> {
let exists = match id {
BlockId::Hash(_) => read_id(&*self.db, columns::BLOCK_INDEX, id)?.is_some(),
BlockId::Hash(_) => read_db(
&*self.db,
columns::HASH_LOOKUP,
columns::HEADER,
id
)?.is_some(),
BlockId::Number(n) => n <= self.meta.read().best_number,
};
match exists {
@@ -151,17 +172,90 @@ impl<Block> BlockchainHeaderBackend<Block> for LightStorage<Block>
}
fn number(&self, hash: Block::Hash) -> ClientResult<Option<<<Block as BlockT>::Header as HeaderT>::Number>> {
read_id::<Block>(&*self.db, columns::BLOCK_INDEX, BlockId::Hash(hash))
.and_then(|key| match key {
Some(key) => Ok(Some(db_key_to_number(&key)?)),
None => Ok(None),
})
self.header(BlockId::Hash(hash)).and_then(|key| match key {
Some(hdr) => Ok(Some(hdr.number().clone())),
None => Ok(None),
})
}
fn hash(&self, number: <<Block as BlockT>::Header as HeaderT>::Number) -> ClientResult<Option<Block::Hash>> {
read_db::<Block>(&*self.db, columns::BLOCK_INDEX, columns::HEADER, BlockId::Number(number)).map(|x|
x.map(|raw| HashFor::<Block>::hash(&raw[..])).map(Into::into)
)
read_id::<Block>(&*self.db, columns::HASH_LOOKUP, BlockId::Number(number))
}
}
impl<Block: BlockT> LightStorage<Block> where Block::Hash: From<H256> {
	/// Note in the given transaction that a block has been finalized:
	/// record it under the `FINALIZED_BLOCK` meta key, log the finalization,
	/// and prune newly finalized headers into CHTs where a full CHT range
	/// has become final.
	///
	/// The caller must ensure the best chain contains the finalized
	/// block number first.
	fn note_finalized(&self, transaction: &mut DBTransaction, header: &Block::Header, hash: Block::Hash) -> ClientResult<()> {
		// Gap size (in blocks) at or above which finalization is logged at
		// `info` level instead of `debug`.
		const NOTEWORTHY_FINALIZATION_GAP: u64 = 32;

		// TODO: ensure this doesn't conflict with old finalized block.
		let meta = self.meta.read();
		let f_num = header.number().clone();
		let number_u64: u64 = f_num.as_();

		// Persist the new last-finalized hash in the meta column.
		transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, hash.as_ref());

		let (last_finalized_hash, last_finalized_number)
			= (meta.finalized_hash.clone(), meta.finalized_number);

		let finalized_gap = f_num - last_finalized_number;

		if finalized_gap.as_() >= NOTEWORTHY_FINALIZATION_GAP {
			info!(target: "db", "Finalizing large run of blocks from {:?} to {:?}",
				(&last_finalized_hash, last_finalized_number), (&hash, f_num));
		} else {
			debug!(target: "db", "Finalizing blocks from {:?} to {:?}",
				(&last_finalized_hash, last_finalized_number), (&hash, f_num));
		}

		// build new CHT if required: when finalizing `header` completes a CHT
		// range, compute the CHT root, store it, and delete the per-block
		// header and number->hash lookup entries the CHT now replaces.
		let mut build_cht = |header: &Block::Header| -> ClientResult<()> {
			if let Some(new_cht_number) = cht::is_build_required(cht::SIZE, *header.number()) {
				let new_cht_start: NumberFor<Block> = cht::start_number(cht::SIZE, new_cht_number);

				// Missing hashes fall back to the default hash via `unwrap_or_default`.
				let new_cht_root: Option<Block::Hash> = cht::compute_root::<Block::Header, Blake2Hasher, _>(
					cht::SIZE, new_cht_number, (new_cht_start.as_()..)
						.map(|num| self.hash(As::sa(num)).unwrap_or_default())
				);

				if let Some(new_cht_root) = new_cht_root {
					transaction.put(columns::CHT, &number_to_lookup_key(new_cht_start), new_cht_root.as_ref());

					let mut prune_block = new_cht_start;
					let new_cht_end = cht::end_number(cht::SIZE, new_cht_number);
					trace!(target: "db", "Replacing blocks [{}..{}] with CHT#{}", new_cht_start, new_cht_end, new_cht_number);

					// Remove both the number->hash lookup entry and the header
					// itself for every block now covered by this CHT.
					while prune_block <= new_cht_end {
						let id = read_id::<Block>(&*self.db, columns::HASH_LOOKUP, BlockId::Number(prune_block))?;
						if let Some(hash) = id {
							let lookup_key = number_to_lookup_key(prune_block);
							transaction.delete(columns::HASH_LOOKUP, &lookup_key);
							transaction.delete(columns::HEADER, hash.as_ref());
						}
						prune_block += <<Block as BlockT>::Header as HeaderT>::Number::one();
					}
				}
			}
			Ok(())
		};

		// attempt to build CHT for all newly finalized blocks: every number in
		// the half-open range (last_finalized, f_num] is now final and any of
		// them may be the block that completes a CHT range.
		let last_finalized_u64 = last_finalized_number.as_();
		for num in (last_finalized_u64..number_u64).map(|x| x + 1) {
			let num = As::sa(num);
			if num == f_num {
				// The header being finalized is already in hand.
				build_cht(header)?;
			} else {
				let old_header = match self.header(BlockId::Number(num))? {
					Some(x) => x,
					None => panic!("finalizing block {} implies existence of block {}; qed", f_num, num),
				};

				build_cht(&old_header)?;
			}
		}

		Ok(())
	}
}
@@ -170,63 +264,75 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
Block: BlockT,
Block::Hash: From<H256>,
{
fn import_header(&self, is_new_best: bool, header: Block::Header, authorities: Option<Vec<AuthorityId>>) -> ClientResult<()> {
fn import_header(
&self,
header: Block::Header,
_authorities: Option<Vec<AuthorityId>>,
leaf_state: NewBlockState,
) -> ClientResult<()> {
let mut transaction = DBTransaction::new();
let hash = header.hash();
let number = *header.number();
let key = number_to_db_key(number);
transaction.put(columns::HEADER, &key, &header.encode());
transaction.put(columns::BLOCK_INDEX, hash.as_ref(), &key);
transaction.put(columns::HEADER, hash.as_ref(), &header.encode());
transaction.put(columns::HASH_LOOKUP, &number_to_lookup_key(number), hash.as_ref());
let best_authorities = if is_new_best {
transaction.put(columns::META, meta_keys::BEST_BLOCK, &key);
if leaf_state.is_best() {
transaction.put(columns::META, meta_keys::BEST_BLOCK, hash.as_ref());
// cache authorities for previous block
let number: u64 = number.as_();
let previous_number = number.checked_sub(1);
let best_authorities = previous_number
.and_then(|previous_number| self.cache.authorities_at_cache()
.commit_best_entry(&mut transaction, As::sa(previous_number), authorities));
// handle reorg.
{
let meta = self.meta.read();
if meta.best_hash != Default::default() {
let parent_hash = *header.parent_hash();
let tree_route = ::utils::tree_route::<Block>(
&*self.db,
columns::HEADER,
meta.best_hash,
parent_hash,
)?;
// prune authorities from 'ancient' blocks
if let Some(ancient_number) = number.checked_sub(AUTHORITIES_ENTRIES_TO_KEEP) {
self.cache.authorities_at_cache().prune_entries(&mut transaction, As::sa(ancient_number))?;
}
// update block number to hash lookup entries.
for retracted in tree_route.retracted() {
if retracted.hash == meta.finalized_hash {
// TODO: can we recover here?
warn!("Safety failure: reverting finalized block {:?}",
(&retracted.number, &retracted.hash));
}
best_authorities
} else {
None
};
transaction.delete(
columns::HASH_LOOKUP,
&::utils::number_to_lookup_key(retracted.number)
);
}
// build new CHT if required
if let Some(new_cht_number) = cht::is_build_required(cht::SIZE, *header.number()) {
let new_cht_start: NumberFor<Block> = cht::start_number(cht::SIZE, new_cht_number);
let new_cht_root: Option<Block::Hash> = cht::compute_root::<Block::Header, Blake2Hasher, _>(
cht::SIZE, new_cht_number, (new_cht_start.as_()..)
.map(|num| self.hash(As::sa(num)).unwrap_or_default()));
if let Some(new_cht_root) = new_cht_root {
transaction.put(columns::CHT, &number_to_db_key(new_cht_start), new_cht_root.as_ref());
let mut prune_block = new_cht_start;
let new_cht_end = cht::end_number(cht::SIZE, new_cht_number);
trace!(target: "db", "Replacing blocks [{}..{}] with CHT#{}", new_cht_start, new_cht_end, new_cht_number);
while prune_block <= new_cht_end {
transaction.delete(columns::HEADER, &number_to_db_key(prune_block));
prune_block += <<Block as BlockT>::Header as HeaderT>::Number::one();
for enacted in tree_route.enacted() {
let hash: &Block::Hash = &enacted.hash;
transaction.put(
columns::HASH_LOOKUP,
&::utils::number_to_lookup_key(enacted.number),
hash.as_ref(),
)
}
}
}
// TODO: cache authorities for previous block, accounting for reorgs.
}
let finalized = match leaf_state {
NewBlockState::Final => true,
_ => false,
};
if finalized {
self.note_finalized(&mut transaction, &header, hash)?;
}
debug!("Light DB Commit {:?} ({})", hash, number);
self.db.write(transaction).map_err(db_err)?;
self.update_meta(hash, number, is_new_best);
if let Some(best_authorities) = best_authorities {
self.cache.authorities_at_cache().update_best_entry(Some(best_authorities));
}
self.update_meta(hash, number, leaf_state.is_best(), finalized);
Ok(())
}
@@ -236,13 +342,31 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
let cht_number = cht::block_to_cht_number(cht_size, block).ok_or_else(no_cht_for_block)?;
let cht_start = cht::start_number(cht_size, cht_number);
self.db.get(columns::CHT, &number_to_db_key(cht_start)).map_err(db_err)?
self.db.get(columns::CHT, &number_to_lookup_key(cht_start)).map_err(db_err)?
.ok_or_else(no_cht_for_block)
.and_then(|hash| Block::Hash::decode(&mut &*hash).ok_or_else(no_cht_for_block))
}
fn finalize_header(&self, id: BlockId<Block>) -> ClientResult<()> {
if let Some(header) = self.header(id)? {
let mut transaction = DBTransaction::new();
// TODO: ensure best chain contains this block.
let hash = header.hash();
self.note_finalized(&mut transaction, &header, hash.clone())?;
self.db.write(transaction).map_err(db_err)?;
self.update_meta(hash, header.number().clone(), false, true);
Ok(())
} else {
Err(ClientErrorKind::UnknownBlock(format!("Cannot finalize block {:?}", id)).into())
}
}
	/// Return the hash of the last finalized block, read from the
	/// in-memory meta (kept in sync with the DB by `update_meta`).
	fn last_finalized(&self) -> ClientResult<Block::Hash> {
		Ok(self.meta.read().finalized_hash.clone())
	}
fn cache(&self) -> Option<&BlockchainCache<Block>> {
Some(&self.cache)
None
}
}
@@ -269,7 +393,7 @@ pub(crate) mod tests {
};
let hash = header.hash();
db.import_header(true, header, authorities).unwrap();
db.import_header(header, authorities, NewBlockState::Best).unwrap();
hash
}
@@ -328,15 +452,15 @@ pub(crate) mod tests {
let genesis_hash = insert_block(&db, &Default::default(), 0, None);
assert_eq!(db.db.iter(columns::HEADER).count(), 1);
assert_eq!(db.db.iter(columns::BLOCK_INDEX).count(), 1);
assert_eq!(db.db.iter(columns::HASH_LOOKUP).count(), 1);
let _ = insert_block(&db, &genesis_hash, 1, None);
assert_eq!(db.db.iter(columns::HEADER).count(), 2);
assert_eq!(db.db.iter(columns::BLOCK_INDEX).count(), 2);
assert_eq!(db.db.iter(columns::HASH_LOOKUP).count(), 2);
}
#[test]
fn ancient_headers_are_replaced_with_cht() {
fn finalized_ancient_headers_are_replaced_with_cht() {
let db = LightStorage::new_test();
// insert genesis block header (never pruned)
@@ -357,10 +481,16 @@ pub(crate) mod tests {
assert_eq!(db.db.iter(columns::CHT).count(), 0);
// insert block #{2 * cht::SIZE + 1} && check that new CHT is created + headers of this CHT are pruned
insert_block(&db, &prev_hash, 1 + cht::SIZE + cht::SIZE, None);
// nothing is yet finalized, so nothing is pruned.
prev_hash = insert_block(&db, &prev_hash, 1 + cht::SIZE + cht::SIZE, None);
assert_eq!(db.db.iter(columns::HEADER).count(), (2 + cht::SIZE + cht::SIZE) as usize);
assert_eq!(db.db.iter(columns::CHT).count(), 0);
// now finalize the block.
db.finalize_header(BlockId::Hash(prev_hash)).unwrap();
assert_eq!(db.db.iter(columns::HEADER).count(), (1 + cht::SIZE + 1) as usize);
assert_eq!(db.db.iter(columns::CHT).count(), 1);
assert!((0..cht::SIZE).all(|i| db.db.get(columns::HEADER, &number_to_db_key(1 + i)).unwrap().is_none()));
assert!((0..cht::SIZE).all(|i| db.db.get(columns::HEADER, &number_to_lookup_key(1 + i)).unwrap().is_none()));
}
#[test]
@@ -383,6 +513,7 @@ pub(crate) mod tests {
prev_hash = insert_block(&db, &prev_hash, i as u64, None);
}
db.finalize_header(BlockId::Hash(prev_hash)).unwrap();
let cht_root_1 = db.cht_root(cht::SIZE, cht::start_number(cht::SIZE, 0)).unwrap();
let cht_root_2 = db.cht_root(cht::SIZE, (cht::start_number(cht::SIZE, 0) + cht::SIZE / 2) as u64).unwrap();
let cht_root_3 = db.cht_root(cht::SIZE, cht::end_number(cht::SIZE, 0)).unwrap();
+214 -35
View File
@@ -27,7 +27,7 @@ use client;
use codec::Decode;
use hashdb::DBValue;
use runtime_primitives::generic::BlockId;
use runtime_primitives::traits::{As, Block as BlockT, Header as HeaderT, Hash, HashFor, Zero};
use runtime_primitives::traits::{As, Block as BlockT, Header as HeaderT, Zero};
use DatabaseSettings;
/// Number of columns in the db. Must be the same for both full && light dbs.
@@ -42,8 +42,12 @@ pub mod meta_keys {
pub const TYPE: &[u8; 4] = b"type";
/// Best block key.
pub const BEST_BLOCK: &[u8; 4] = b"best";
/// Last finalized block key.
pub const FINALIZED_BLOCK: &[u8; 5] = b"final";
/// Best authorities block key.
pub const BEST_AUTHORITIES: &[u8; 4] = b"auth";
/// Genesis block hash.
pub const GENESIS_HASH: &[u8; 3] = b"gen";
}
/// Database metadata.
@@ -52,15 +56,19 @@ pub struct Meta<N, H> {
pub best_hash: H,
/// Number of the best known block.
pub best_number: N,
/// Hash of the best finalized block.
pub finalized_hash: H,
/// Number of the best finalized block.
pub finalized_number: N,
/// Hash of the genesis block.
pub genesis_hash: H,
}
/// Type of block key in the database (LE block number).
pub type BlockKey = [u8; 4];
/// A block lookup key: used for canonical lookup from block number to hash
pub type BlockLookupKey = [u8; 4];
/// Convert block number into key (LE representation).
pub fn number_to_db_key<N>(n: N) -> BlockKey where N: As<u64> {
/// Convert block number into lookup key (LE representation).
pub fn number_to_lookup_key<N>(n: N) -> BlockLookupKey where N: As<u64> {
let n: u64 = n.as_();
assert!(n & 0xffffffff00000000 == 0);
@@ -72,8 +80,8 @@ pub fn number_to_db_key<N>(n: N) -> BlockKey where N: As<u64> {
]
}
/// Convert block key into block number.
pub fn db_key_to_number<N>(key: &[u8]) -> client::error::Result<N> where N: As<u64> {
/// Convert block lookup key into block number.
pub fn lookup_key_to_number<N>(key: &[u8]) -> client::error::Result<N> where N: As<u64> {
match key.len() {
4 => Ok((key[0] as u64) << 24
| (key[1] as u64) << 16
@@ -114,19 +122,24 @@ pub fn open_database(config: &DatabaseSettings, db_type: &str) -> client::error:
Ok(Arc::new(db))
}
/// Convert block id to block key, reading number from db if required.
pub fn read_id<Block>(db: &KeyValueDB, col_index: Option<u32>, id: BlockId<Block>) -> Result<Option<BlockKey>, client::error::Error>
/// Convert block id to block key, looking up canonical hash by number from DB as necessary.
pub fn read_id<Block>(db: &KeyValueDB, col_index: Option<u32>, id: BlockId<Block>) -> Result<Option<Block::Hash>, client::error::Error>
where
Block: BlockT,
{
match id {
BlockId::Hash(h) => db.get(col_index, h.as_ref())
.map(|v| v.map(|v| {
let mut key: [u8; 4] = [0; 4];
key.copy_from_slice(&v);
key
})).map_err(db_err),
BlockId::Number(n) => Ok(Some(number_to_db_key(n))),
BlockId::Hash(h) => Ok(Some(h)),
BlockId::Number(n) => db.get(col_index, &number_to_lookup_key(n)).map(|v|
v.map(|v| {
let mut h = <Block::Hash>::default();
{
let h = h.as_mut();
let len = ::std::cmp::min(v.len(), h.len());
h.as_mut().copy_from_slice(&v[..len]);
}
h
})
).map_err(db_err),
}
}
@@ -136,39 +149,205 @@ pub fn read_db<Block>(db: &KeyValueDB, col_index: Option<u32>, col: Option<u32>,
Block: BlockT,
{
read_id(db, col_index, id).and_then(|key| match key {
Some(key) => db.get(col, &key).map_err(db_err),
Some(key) => db.get(col, key.as_ref()).map_err(db_err),
None => Ok(None),
})
}
/// Read a header from the database.
pub fn read_header<Block: BlockT>(
db: &KeyValueDB,
col_index: Option<u32>,
col: Option<u32>,
id: BlockId<Block>,
) -> client::error::Result<Option<Block::Header>> {
match read_db(db, col_index, col, id)? {
Some(header) => match Block::Header::decode(&mut &header[..]) {
Some(header) => Ok(Some(header)),
None => return Err(
client::error::ErrorKind::Backend("Error decoding header".into()).into()
),
}
None => Ok(None),
}
}
/// Read meta from the database.
pub fn read_meta<Block>(db: &KeyValueDB, col_header: Option<u32>) -> Result<Meta<<<Block as BlockT>::Header as HeaderT>::Number, Block::Hash>, client::error::Error>
pub fn read_meta<Block>(db: &KeyValueDB, col_header: Option<u32>) -> Result<
Meta<<<Block as BlockT>::Header as HeaderT>::Number, Block::Hash>,
client::error::Error,
>
where
Block: BlockT,
{
let genesis_number = <<Block as BlockT>::Header as HeaderT>::Number::zero();
let (best_hash, best_number) = if let Some(Some(header)) = db.get(COLUMN_META, meta_keys::BEST_BLOCK).and_then(|id|
match id {
Some(id) => db.get(col_header, &id).map(|h| h.map(|b| Block::Header::decode(&mut &b[..]))),
None => Ok(None),
}).map_err(db_err)?
{
let hash = header.hash();
debug!("DB Opened blockchain db, best {:?} ({})", hash, header.number());
(hash, *header.number())
} else {
(Default::default(), genesis_number)
let genesis_hash: Block::Hash = match db.get(COLUMN_META, meta_keys::GENESIS_HASH).map_err(db_err)? {
Some(h) => match Decode::decode(&mut &h[..]) {
Some(h) => h,
None => return Err(client::error::ErrorKind::Backend("Error decoding genesis hash".into()).into()),
},
None => return Ok(Meta {
best_hash: Default::default(),
best_number: Zero::zero(),
finalized_hash: Default::default(),
finalized_number: Zero::zero(),
genesis_hash: Default::default(),
}),
};
let genesis_hash = db.get(col_header, &number_to_db_key(genesis_number))
.map_err(db_err)?
.map(|raw| HashFor::<Block>::hash(&raw[..]))
.unwrap_or_default()
.into();
let load_meta_block = |desc, key| -> Result<_, client::error::Error> {
if let Some(Some(header)) = db.get(COLUMN_META, key).and_then(|id|
match id {
Some(id) => db.get(col_header, &id).map(|h| h.map(|b| Block::Header::decode(&mut &b[..]))),
None => Ok(None),
}).map_err(db_err)?
{
let hash = header.hash();
debug!("DB Opened blockchain db, fetched {} = {:?} ({})", desc, hash, header.number());
Ok((hash, *header.number()))
} else {
Ok((genesis_hash.clone(), Zero::zero()))
}
};
let (best_hash, best_number) = load_meta_block("best", meta_keys::BEST_BLOCK)?;
let (finalized_hash, finalized_number) = load_meta_block("final", meta_keys::FINALIZED_BLOCK)?;
Ok(Meta {
best_hash,
best_number,
finalized_hash,
finalized_number,
genesis_hash,
})
}
/// An entry in a tree route: identifies one block on the path by
/// its number and hash.
#[derive(Debug)]
pub struct RouteEntry<Block: BlockT> {
	/// The number of the block.
	pub number: <Block::Header as HeaderT>::Number,
	/// The hash of the block.
	pub hash: Block::Hash,
}
/// A tree-route from one block to another in the chain.
///
/// All blocks prior to the pivot in the deque is the reverse-order unique ancestry
/// of the first block, the block at the pivot index is the common ancestor,
/// and all blocks after the pivot is the ancestry of the second block, in
/// order.
///
/// The ancestry sets will include the given blocks, and thus the tree-route is
/// never empty.
///
/// ```ignore
/// Tree route from R1 to E2. Retracted is [R1, R2, R3], Common is C, enacted [E1, E2]
///   <- R3 <- R2 <- R1
///  /
/// C
///  \-> E1 -> E2
/// ```
///
/// ```ignore
/// Tree route from C to E2. Retracted empty. Common is C, enacted [E1, E2]
/// C -> E1 -> E2
/// ```
#[derive(Debug)]
pub struct TreeRoute<Block: BlockT> {
	// Full route: retracted blocks, then the common ancestor, then enacted blocks.
	route: Vec<RouteEntry<Block>>,
	// Index of the common ancestor within `route`.
	pivot: usize,
}
impl<Block: BlockT> TreeRoute<Block> {
	/// Get an iterator of all retracted blocks in reverse order (towards common ancestor).
	/// These are the entries stored before the pivot index.
	pub fn retracted(&self) -> impl Iterator<Item=&RouteEntry<Block>> {
		self.route.iter().take(self.pivot)
	}

	/// Get the common ancestor block. This might be one of the two blocks of the
	/// route.
	#[allow(unused)]
	pub fn common_block(&self) -> &RouteEntry<Block> {
		self.route.get(self.pivot).expect("tree-routes are computed between blocks; \
			which are included in the route; \
			thus it is never empty; qed")
	}

	/// Get an iterator of enacted blocks (descendents of the common ancestor).
	/// These are the entries stored after the pivot index.
	pub fn enacted(&self) -> impl Iterator<Item=&RouteEntry<Block>> {
		self.route.iter().skip(self.pivot + 1)
	}
}
/// Compute a tree-route between two blocks. See tree-route docs for more details.
///
/// Walks both blocks back toward the genesis, loading each header from the
/// `col_header` column by hash, until a common ancestor is found. Errors with
/// `UnknownBlock` if any header on either path is missing, or `Backend` if a
/// stored header fails to decode.
pub fn tree_route<Block: BlockT>(
	db: &KeyValueDB,
	col_header: Option<u32>,
	from: Block::Hash,
	to: Block::Hash,
) -> Result<TreeRoute<Block>, client::error::Error> {
	use runtime_primitives::traits::Header;

	// Load and decode a header by hash; missing or undecodable headers are errors.
	let load_header = |hash: &Block::Hash| {
		match db.get(col_header, hash.as_ref()).map_err(db_err) {
			Ok(Some(b)) => match <Block::Header>::decode(&mut &b[..]) {
				Some(hdr) => Ok(hdr),
				None => Err(client::error::ErrorKind::Backend("Error decoding header".into()).into()),
			}
			Ok(None) => Err(client::error::ErrorKind::UnknownBlock(format!("Unknown block {:?}", hash)).into()),
			Err(e) => Err(e),
		}
	};

	let mut from = load_header(&from)?;
	let mut to = load_header(&to)?;

	let mut from_branch = Vec::new();
	let mut to_branch = Vec::new();

	// First, walk the deeper side up until both sides are at the same height.
	while to.number() > from.number() {
		to_branch.push(RouteEntry {
			number: to.number().clone(),
			hash: to.hash(),
		});

		to = load_header(to.parent_hash())?;
	}

	while from.number() > to.number() {
		from_branch.push(RouteEntry {
			number: from.number().clone(),
			hash: from.hash(),
		});
		from = load_header(from.parent_hash())?;
	}

	// numbers are equal now. walk backwards until the block is the same
	while to != from {
		to_branch.push(RouteEntry {
			number: to.number().clone(),
			hash: to.hash(),
		});
		to = load_header(to.parent_hash())?;

		from_branch.push(RouteEntry {
			number: from.number().clone(),
			hash: from.hash(),
		});
		from = load_header(from.parent_hash())?;
	}

	// add the pivot block. and append the reversed to-branch (note that it's in
	// reverse order originally, walking away from the common ancestor).
	let pivot = from_branch.len();
	from_branch.push(RouteEntry {
		number: to.number().clone(),
		hash: to.hash(),
	});
	from_branch.extend(to_branch.into_iter().rev());

	Ok(TreeRoute {
		route: from_branch,
		pivot,
	})
}