Introduce notion of finality to substrate (#760)

* finalization for in_mem

* fetch last finalized block

* pruning: use canonical term instead of final

* finalize blocks in full node

* begin to port light client DB

* add tree-route

* keep number index consistent in full nodes

* fix tests

* disable cache and finish porting light client

* add AsMut to system module

* final leaf is always best

* fix all tests

* Fix comment and trace

* removed unused Into call

* add comment on behavior of `finalize_block`
Robert Habermeier
2018-09-21 15:56:21 +02:00
committed by Gav Wood
parent 28cc4d0fd6
commit b7d095a2e0
19 changed files with 976 additions and 370 deletions
+37 -12
@@ -27,12 +27,13 @@ use codec::{Codec, Encode, Decode};
use primitives::AuthorityId;
use runtime_primitives::generic::BlockId;
use runtime_primitives::traits::{Block as BlockT, As, NumberFor};
use utils::{COLUMN_META, BlockKey, db_err, meta_keys, read_id, db_key_to_number, number_to_db_key};
use utils::{COLUMN_META, BlockLookupKey, db_err, meta_keys, lookup_key_to_number, number_to_lookup_key};
/// Database-backed cache of blockchain data.
pub struct DbCache<Block: BlockT> {
db: Arc<KeyValueDB>,
block_index_column: Option<u32>,
header_column: Option<u32>,
authorities_at: DbCacheList<Block, Vec<AuthorityId>>,
}
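The extra `header_column` lets the cache resolve a block hash to its number via the stored header (see `authorities_at` below). A construction sketch, mirroring the light client's `from_kvdb` later in this commit:

// Sketch: constructing the cache with the new header column
// (column names as used by the light client in this commit).
let cache = DbCache::new(
    db.clone(),
    columns::HASH_LOOKUP,
    columns::HEADER,
    columns::AUTHORITIES,
)?;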
@@ -45,16 +46,19 @@ impl<Block> DbCache<Block>
pub fn new(
db: Arc<KeyValueDB>,
block_index_column: Option<u32>,
header_column: Option<u32>,
authorities_column: Option<u32>
) -> ClientResult<Self> {
Ok(DbCache {
db: db.clone(),
block_index_column,
header_column,
authorities_at: DbCacheList::new(db, meta_keys::BEST_AUTHORITIES, authorities_column)?,
})
}
/// Get the authorities cache.
#[allow(unused)]
pub fn authorities_at_cache(&self) -> &DbCacheList<Block, Vec<AuthorityId>> {
&self.authorities_at
}
@@ -66,10 +70,27 @@ impl<Block> BlockchainCache<Block> for DbCache<Block>
NumberFor<Block>: As<u64>,
{
fn authorities_at(&self, at: BlockId<Block>) -> Option<Vec<AuthorityId>> {
let authorities_at = read_id(&*self.db, self.block_index_column, at).and_then(|at| match at {
Some(at) => self.authorities_at.value_at_key(at),
None => Ok(None),
});
use runtime_primitives::traits::Header as HeaderT;
let number = match at {
BlockId::Number(n) => Ok(number_to_lookup_key(n)),
BlockId::Hash(h) => {
let maybe_header = ::utils::read_header::<Block>(
&*self.db,
self.block_index_column,
self.header_column,
BlockId::Hash(h),
);
match maybe_header {
Ok(Some(hdr)) => Ok(number_to_lookup_key(*hdr.number())),
Ok(None) => return None, // no such block.
Err(e) => Err(e),
}
}
};
let authorities_at = number.and_then(|at| self.authorities_at.value_at_key(at));
match authorities_at {
Ok(authorities) => authorities,
@@ -128,7 +149,7 @@ impl<Block, T> DbCacheList<Block, T>
.map_err(db_err)
.and_then(|block| match block {
Some(block) => {
let valid_from = db_key_to_number(&block)?;
let valid_from = lookup_key_to_number(&block)?;
read_storage_entry::<Block, T>(&*db, column, valid_from)
.map(|entry| Some(Entry {
valid_from,
@@ -155,6 +176,7 @@ impl<Block, T> DbCacheList<Block, T>
/// Commits the new best pending value to the database. Returns Some if best entry must
/// be updated after transaction is committed.
#[allow(unused)]
pub fn commit_best_entry(
&self,
transaction: &mut DBTransaction,
@@ -174,7 +196,7 @@ impl<Block, T> DbCacheList<Block, T>
return None;
}
let valid_from_key = number_to_db_key(valid_from);
let valid_from_key = number_to_lookup_key(valid_from);
transaction.put(COLUMN_META, self.meta_key, &valid_from_key);
transaction.put(self.column, &valid_from_key, &StorageEntry {
prev_valid_from: best_entry.map(|b| b.valid_from),
@@ -189,12 +211,14 @@ impl<Block, T> DbCacheList<Block, T>
/// Updates the best in-memory cache entry. Must be called after transaction with changes
/// from commit_best_entry has been committed.
#[allow(unused)]
pub fn update_best_entry(&self, best_entry: Option<Entry<NumberFor<Block>, T>>) {
*self.best_entry.write() = best_entry;
}
/// Prune all entries from the beginning up to the block (including entry at the number). Returns
/// the number of pruned entries. Pruning never deletes the latest entry in the cache.
#[allow(unused)]
pub fn prune_entries(
&self,
transaction: &mut DBTransaction,
@@ -228,7 +252,7 @@ impl<Block, T> DbCacheList<Block, T>
.expect("referenced entry exists; entry_to_remove is a reference to the entry; qed");
if current_entry != last_entry_to_keep {
transaction.delete(self.column, &number_to_db_key(current_entry));
transaction.delete(self.column, &number_to_lookup_key(current_entry));
pruned += 1;
}
entry_to_remove = entry.prev_valid_from;
@@ -237,15 +261,15 @@ impl<Block, T> DbCacheList<Block, T>
let mut entry = read_storage_entry::<Block, T>(&*self.db, self.column, last_entry_to_keep)?
.expect("last_entry_to_keep >= first_entry_to_remove; that means that we're leaving this entry in the db; qed");
entry.prev_valid_from = None;
transaction.put(self.column, &number_to_db_key(last_entry_to_keep), &entry.encode());
transaction.put(self.column, &number_to_lookup_key(last_entry_to_keep), &entry.encode());
Ok(pruned)
}
/// Reads the cached value as of the given block. Returns None if the value was not cached
/// or if it has been pruned.
fn value_at_key(&self, key: BlockKey) -> ClientResult<Option<T>> {
let at = db_key_to_number::<NumberFor<Block>>(&key)?;
fn value_at_key(&self, key: BlockLookupKey) -> ClientResult<Option<T>> {
let at = lookup_key_to_number::<NumberFor<Block>>(&key)?;
let best_valid_from = match self.best_entry() {
// there are entries in cache
Some(best_entry) => {
@@ -291,7 +315,7 @@ fn read_storage_entry<Block, T>(
NumberFor<Block>: As<u64>,
T: Codec,
{
db.get(column, &number_to_db_key(number))
db.get(column, &number_to_lookup_key(number))
.and_then(|entry| match entry {
Some(entry) => Ok(StorageEntry::<NumberFor<Block>, T>::decode(&mut &entry[..])),
None => Ok(None),
@@ -324,6 +348,7 @@ mod tests {
}
#[test]
#[ignore] // TODO: unignore when cache reinstated.
fn best_authorities_are_updated() {
let db = LightStorage::new_test();
let authorities_at: Vec<(usize, Option<Entry<u64, Vec<AuthorityId>>>)> = vec![
+226 -84
@@ -49,6 +49,7 @@ use std::sync::Arc;
use std::path::PathBuf;
use std::io;
use client::backend::NewBlockState;
use codec::{Decode, Encode};
use hashdb::Hasher;
use kvdb::{KeyValueDB, DBTransaction};
@@ -57,14 +58,12 @@ use parking_lot::RwLock;
use primitives::{H256, AuthorityId, Blake2Hasher, RlpCodec};
use runtime_primitives::generic::BlockId;
use runtime_primitives::bft::Justification;
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, As, Hash, HashFor,
NumberFor, Zero, Digest, DigestItem};
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, As, NumberFor, Zero, Digest, DigestItem};
use runtime_primitives::BuildStorage;
use state_machine::backend::Backend as StateBackend;
use executor::RuntimeInfo;
use state_machine::{CodeExecutor, DBValue, ExecutionStrategy};
use utils::{Meta, db_err, meta_keys, number_to_db_key, db_key_to_number, open_database,
read_db, read_id, read_meta};
use utils::{Meta, db_err, meta_keys, open_database, read_db, read_id, read_meta};
use state_db::StateDb;
pub use state_db::PruningMode;
@@ -104,7 +103,7 @@ mod columns {
pub const META: Option<u32> = Some(0);
pub const STATE: Option<u32> = Some(1);
pub const STATE_META: Option<u32> = Some(2);
pub const BLOCK_INDEX: Option<u32> = Some(3);
pub const HASH_LOOKUP: Option<u32> = Some(3);
pub const HEADER: Option<u32> = Some(4);
pub const BODY: Option<u32> = Some(5);
pub const JUSTIFICATION: Option<u32> = Some(6);
@@ -115,7 +114,7 @@ struct PendingBlock<Block: BlockT> {
header: Block::Header,
justification: Option<Justification<Block::Hash>>,
body: Option<Vec<Block::Extrinsic>>,
is_best: bool,
leaf_state: NewBlockState,
}
// wrapper that implements trait required for state_db
@@ -144,27 +143,33 @@ impl<Block: BlockT> BlockchainDb<Block> {
})
}
fn update_meta(&self, hash: Block::Hash, number: <Block::Header as HeaderT>::Number, is_best: bool) {
fn update_meta(
&self,
hash: Block::Hash,
number: <Block::Header as HeaderT>::Number,
is_best: bool,
is_finalized: bool
) {
let mut meta = self.meta.write();
if number == Zero::zero() {
meta.genesis_hash = hash;
}
if is_best {
let mut meta = self.meta.write();
if number == Zero::zero() {
meta.genesis_hash = hash;
}
meta.best_number = number;
meta.best_hash = hash;
}
if is_finalized {
meta.finalized_number = number;
meta.finalized_hash = hash;
}
}
}
impl<Block: BlockT> client::blockchain::HeaderBackend<Block> for BlockchainDb<Block> {
fn header(&self, id: BlockId<Block>) -> Result<Option<Block::Header>, client::error::Error> {
match read_db(&*self.db, columns::BLOCK_INDEX, columns::HEADER, id)? {
Some(header) => match Block::Header::decode(&mut &header[..]) {
Some(header) => Ok(Some(header)),
None => return Err(client::error::ErrorKind::Backend("Error decoding header".into()).into()),
}
None => Ok(None),
}
::utils::read_header(&*self.db, columns::HASH_LOOKUP, columns::HEADER, id)
}
fn info(&self) -> Result<client::blockchain::Info<Block>, client::error::Error> {
@@ -178,7 +183,12 @@ impl<Block: BlockT> client::blockchain::HeaderBackend<Block> for BlockchainDb<Bl
fn status(&self, id: BlockId<Block>) -> Result<client::blockchain::BlockStatus, client::error::Error> {
let exists = match id {
BlockId::Hash(_) => read_id(&*self.db, columns::BLOCK_INDEX, id)?.is_some(),
BlockId::Hash(_) => read_db(
&*self.db,
columns::HASH_LOOKUP,
columns::HEADER,
id
)?.is_some(),
BlockId::Number(n) => n <= self.meta.read().best_number,
};
match exists {
@@ -188,23 +198,20 @@ impl<Block: BlockT> client::blockchain::HeaderBackend<Block> for BlockchainDb<Bl
}
fn number(&self, hash: Block::Hash) -> Result<Option<<Block::Header as HeaderT>::Number>, client::error::Error> {
read_id::<Block>(&*self.db, columns::BLOCK_INDEX, BlockId::Hash(hash))
.and_then(|key| match key {
Some(key) => Ok(Some(db_key_to_number(&key)?)),
None => Ok(None),
})
self.header(BlockId::Hash(hash)).and_then(|key| match key {
Some(hdr) => Ok(Some(hdr.number().clone())),
None => Ok(None),
})
}
fn hash(&self, number: <Block::Header as HeaderT>::Number) -> Result<Option<Block::Hash>, client::error::Error> {
read_db::<Block>(&*self.db, columns::BLOCK_INDEX, columns::HEADER, BlockId::Number(number)).map(|x|
x.map(|raw| HashFor::<Block>::hash(&raw[..])).map(Into::into)
)
read_id::<Block>(&*self.db, columns::HASH_LOOKUP, BlockId::Number(number))
}
}
impl<Block: BlockT> client::blockchain::Backend<Block> for BlockchainDb<Block> {
fn body(&self, id: BlockId<Block>) -> Result<Option<Vec<Block::Extrinsic>>, client::error::Error> {
match read_db(&*self.db, columns::BLOCK_INDEX, columns::BODY, id)? {
match read_db(&*self.db, columns::HASH_LOOKUP, columns::BODY, id)? {
Some(body) => match Decode::decode(&mut &body[..]) {
Some(body) => Ok(Some(body)),
None => return Err(client::error::ErrorKind::Backend("Error decoding body".into()).into()),
@@ -214,7 +221,7 @@ impl<Block: BlockT> client::blockchain::Backend<Block> for BlockchainDb<Block> {
}
fn justification(&self, id: BlockId<Block>) -> Result<Option<Justification<Block::Hash>>, client::error::Error> {
match read_db(&*self.db, columns::BLOCK_INDEX, columns::JUSTIFICATION, id)? {
match read_db(&*self.db, columns::HASH_LOOKUP, columns::JUSTIFICATION, id)? {
Some(justification) => match Decode::decode(&mut &justification[..]) {
Some(justification) => Ok(Some(justification)),
None => return Err(client::error::ErrorKind::Backend("Error decoding justification".into()).into()),
@@ -223,6 +230,10 @@ impl<Block: BlockT> client::blockchain::Backend<Block> for BlockchainDb<Block> {
}
}
fn last_finalized(&self) -> Result<Block::Hash, client::error::Error> {
Ok(self.meta.read().finalized_hash.clone())
}
fn cache(&self) -> Option<&client::blockchain::Cache<Block>> {
None
}
@@ -246,13 +257,19 @@ where Block: BlockT,
Ok(Some(&self.old_state))
}
fn set_block_data(&mut self, header: Block::Header, body: Option<Vec<Block::Extrinsic>>, justification: Option<Justification<Block::Hash>>, is_best: bool) -> Result<(), client::error::Error> {
fn set_block_data(
&mut self,
header: Block::Header,
body: Option<Vec<Block::Extrinsic>>,
justification: Option<Justification<Block::Hash>>,
leaf_state: NewBlockState,
) -> Result<(), client::error::Error> {
assert!(self.pending_block.is_none(), "Only one block per operation is allowed");
self.pending_block = Some(PendingBlock {
header,
body,
justification,
is_best,
leaf_state,
});
Ok(())
}
@@ -324,7 +341,7 @@ pub struct DbChangesTrieStorage<Block: BlockT> {
impl<Block: BlockT> state_machine::ChangesTrieStorage<Blake2Hasher> for DbChangesTrieStorage<Block> {
fn root(&self, block: u64) -> Result<Option<H256>, String> {
Ok(read_db::<Block>(&*self.db, columns::BLOCK_INDEX, columns::HEADER, BlockId::Number(As::sa(block)))
Ok(read_db::<Block>(&*self.db, columns::HASH_LOOKUP, columns::HEADER, BlockId::Number(As::sa(block)))
.map_err(|err| format!("{}", err))
.and_then(|header| match header {
Some(header) => Block::Header::decode(&mut &header[..])
@@ -345,20 +362,22 @@ impl<Block: BlockT> state_machine::ChangesTrieStorage<Blake2Hasher> for DbChange
}
/// Disk backend. Keeps data in a key-value store. In archive mode, trie nodes are kept from all blocks.
/// Otherwise, trie nodes are kept only from the most recent block.
/// Otherwise, trie nodes are kept only from some recent blocks.
pub struct Backend<Block: BlockT> {
storage: Arc<StorageDb<Block>>,
tries_change_storage: DbChangesTrieStorage<Block>,
blockchain: BlockchainDb<Block>,
finalization_window: u64,
pruning_window: u64,
}
impl<Block: BlockT> Backend<Block> {
/// Create a new instance of database backend.
pub fn new(config: DatabaseSettings, finalization_window: u64) -> Result<Self, client::error::Error> {
///
/// The pruning window is how old a block must be before the state is pruned.
pub fn new(config: DatabaseSettings, pruning_window: u64) -> Result<Self, client::error::Error> {
let db = open_database(&config, "full")?;
Backend::from_kvdb(db as Arc<_>, config.pruning, finalization_window)
Backend::from_kvdb(db as Arc<_>, config.pruning, pruning_window)
}
#[cfg(test)]
@@ -370,7 +389,7 @@ impl<Block: BlockT> Backend<Block> {
Backend::from_kvdb(db as Arc<_>, PruningMode::keep_blocks(keep_blocks), 0).expect("failed to create test-db")
}
fn from_kvdb(db: Arc<KeyValueDB>, pruning: PruningMode, finalization_window: u64) -> Result<Self, client::error::Error> {
fn from_kvdb(db: Arc<KeyValueDB>, pruning: PruningMode, pruning_window: u64) -> Result<Self, client::error::Error> {
let blockchain = BlockchainDb::new(db.clone())?;
let map_e = |e: state_db::Error<io::Error>| ::client::error::Error::from(format!("State database error: {:?}", e));
let state_db: StateDb<Block::Hash, H256> = StateDb::new(pruning, &StateMetaDb(&*db)).map_err(map_e)?;
@@ -387,9 +406,68 @@ impl<Block: BlockT> Backend<Block> {
storage: Arc::new(storage_db),
tries_change_storage: tries_change_storage,
blockchain,
finalization_window,
pruning_window,
})
}
// write stuff to a transaction after a new block is finalized.
//
// this manages state pruning and ensures that reorgs don't occur.
// this function should only be called if the finalized block is contained
// in the best chain.
fn note_finalized(&self, transaction: &mut DBTransaction, f_header: &Block::Header, f_hash: Block::Hash) -> Result<(), client::error::Error> {
const NOTEWORTHY_FINALIZATION_GAP: u64 = 32;
// TODO: ensure this doesn't conflict with old finalized block.
let meta = self.blockchain.meta.read();
let f_num = f_header.number().clone();
transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, f_hash.as_ref());
let (last_finalized_hash, last_finalized_number)
= (meta.finalized_hash.clone(), meta.finalized_number);
let finalized_gap = f_num - last_finalized_number;
if finalized_gap.as_() >= NOTEWORTHY_FINALIZATION_GAP {
info!(target: "db", "Finalizing large run of blocks from {:?} to {:?}",
(&last_finalized_hash, last_finalized_number), (&f_hash, f_num));
} else {
debug!(target: "db", "Finalizing blocks from {:?} to {:?}",
(&last_finalized_hash, last_finalized_number), (&f_hash, f_num));
}
let mut canonicalize_state = |canonical_hash| {
let commit = self.storage.state_db.canonicalize_block(&canonical_hash);
apply_state_commit(transaction, commit);
};
// when finalizing a block, we must also implicitly finalize all the blocks
// in between the last finalized block and this one. That means canonicalizing
// all their states in order.
let number_u64 = f_num.as_();
if number_u64 > self.pruning_window {
let new_canonical = number_u64 - self.pruning_window;
let best_canonical = self.storage.state_db.best_canonical();
for uncanonicalized_number in (best_canonical..new_canonical).map(|x| x + 1) {
let hash = if uncanonicalized_number == number_u64 {
f_hash
} else {
read_id::<Block>(
&*self.blockchain.db,
columns::HASH_LOOKUP,
BlockId::Number(As::sa(uncanonicalized_number))
)?.expect("existence of block with number `new_canonical` \
implies existence of blocks with all numbers before it; qed")
};
trace!(target: "db", "Canonicalize block #{} ({:?})", uncanonicalized_number, hash);
canonicalize_state(hash);
}
};
Ok(())
}
}
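A small worked sketch of the canonicalization window arithmetic above (the values are hypothetical, not from the diff): with `pruning_window = 2` and the last canonical state at block #1, finalizing block #5 canonicalizes the states of blocks #2 and #3, oldest first.

// Hypothetical, runnable illustration of the window arithmetic in note_finalized.
fn main() {
    let pruning_window = 2u64; // how far behind finality canonicalization trails
    let best_canonical = 1u64; // highest block whose state is already canonical
    let f_num = 5u64;          // number of the block being finalized
    if f_num > pruning_window {
        let new_canonical = f_num - pruning_window; // = 3
        for n in (best_canonical..new_canonical).map(|x| x + 1) {
            // visits #2, then #3: states are canonicalized in order
            println!("canonicalize state of block #{}", n);
        }
    }
}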
fn apply_state_commit(transaction: &mut DBTransaction, commit: state_db::CommitSet<H256>) {
@@ -430,23 +508,69 @@ impl<Block> client::backend::Backend<Block, Blake2Hasher, RlpCodec> for Backend<
}
fn commit_operation(&self, mut operation: Self::BlockImportOperation) -> Result<(), client::error::Error> {
use client::blockchain::HeaderBackend;
let mut transaction = DBTransaction::new();
if let Some(pending_block) = operation.pending_block {
let hash = pending_block.header.hash();
let number = pending_block.header.number().clone();
let key = number_to_db_key(number.clone());
transaction.put(columns::HEADER, &key, &pending_block.header.encode());
transaction.put(columns::HEADER, hash.as_ref(), &pending_block.header.encode());
if let Some(body) = pending_block.body {
transaction.put(columns::BODY, &key, &body.encode());
transaction.put(columns::BODY, hash.as_ref(), &body.encode());
}
if let Some(justification) = pending_block.justification {
transaction.put(columns::JUSTIFICATION, &key, &justification.encode());
transaction.put(columns::JUSTIFICATION, hash.as_ref(), &justification.encode());
}
transaction.put(columns::BLOCK_INDEX, hash.as_ref(), &key);
if pending_block.is_best {
transaction.put(columns::META, meta_keys::BEST_BLOCK, &key);
if pending_block.leaf_state.is_best() {
let meta = self.blockchain.meta.read();
// cannot find tree route with empty DB.
if meta.best_hash != Default::default() {
let parent_hash = *pending_block.header.parent_hash();
let tree_route = ::utils::tree_route::<Block>(
&*self.blockchain.db,
columns::HEADER,
meta.best_hash,
parent_hash,
)?;
// update block number to hash lookup entries.
for retracted in tree_route.retracted() {
if retracted.hash == meta.finalized_hash {
// TODO: can we recover here?
warn!("Safety failure: reverting finalized block {:?}",
(&retracted.number, &retracted.hash));
}
transaction.delete(
columns::HASH_LOOKUP,
&::utils::number_to_lookup_key(retracted.number)
);
}
for enacted in tree_route.enacted() {
let hash: &Block::Hash = &enacted.hash;
transaction.put(
columns::HASH_LOOKUP,
&::utils::number_to_lookup_key(enacted.number),
hash.as_ref(),
)
}
}
transaction.put(
columns::HASH_LOOKUP,
&::utils::number_to_lookup_key(number),
hash.as_ref()
);
transaction.put(columns::META, meta_keys::BEST_BLOCK, hash.as_ref());
}
if number == Zero::zero() {
transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, hash.as_ref());
transaction.put(columns::META, meta_keys::GENESIS_HASH, hash.as_ref());
}
let mut changeset: state_db::ChangeSet<H256> = state_db::ChangeSet::default();
for (key, (val, rc)) in operation.updates.drain() {
if rc > 0 {
@@ -455,37 +579,51 @@ impl<Block> client::backend::Backend<Block, Blake2Hasher, RlpCodec> for Backend<
changeset.deleted.push(key.0.into());
}
}
let number_u64 = number.as_().into();
let number_u64 = number.as_();
let commit = self.storage.state_db.insert_block(&hash, number_u64, &pending_block.header.parent_hash(), changeset);
apply_state_commit(&mut transaction, commit);
apply_changes_trie_commit(&mut transaction, operation.changes_trie_updates);
//finalize an older block
if number_u64 > self.finalization_window {
let finalizing_hash = if self.finalization_window == 0 {
Some(hash)
} else {
let finalizing = number_u64 - self.finalization_window;
if finalizing > self.storage.state_db.best_finalized() {
self.blockchain.hash(As::sa(finalizing))?
} else {
None
}
};
if let Some(finalizing_hash) = finalizing_hash {
trace!(target: "db", "Finalizing block #{} ({:?})", number_u64 - self.finalization_window, finalizing_hash);
let commit = self.storage.state_db.finalize_block(&finalizing_hash);
apply_state_commit(&mut transaction, commit);
}
let finalized = match pending_block.leaf_state {
NewBlockState::Final => true,
_ => false,
};
if finalized {
// TODO: ensure best chain contains this block.
self.note_finalized(&mut transaction, &pending_block.header, hash)?;
}
debug!(target: "db", "DB Commit {:?} ({}), best = {}", hash, number, pending_block.is_best);
debug!(target: "db", "DB Commit {:?} ({}), best = {}", hash, number,
pending_block.leaf_state.is_best());
self.storage.db.write(transaction).map_err(db_err)?;
self.blockchain.update_meta(hash, number, pending_block.is_best);
self.blockchain.update_meta(
hash,
number,
pending_block.leaf_state.is_best(),
finalized,
);
}
Ok(())
}
fn finalize_block(&self, block: BlockId<Block>) -> Result<(), client::error::Error> {
use runtime_primitives::traits::Header;
if let Some(header) = ::client::blockchain::HeaderBackend::header(&self.blockchain, block)? {
let mut transaction = DBTransaction::new();
// TODO: ensure best chain contains this block.
let hash = header.hash();
self.note_finalized(&mut transaction, &header, hash.clone())?;
self.storage.db.write(transaction).map_err(db_err)?;
self.blockchain.update_meta(hash, header.number().clone(), false, true);
Ok(())
} else {
Err(client::error::ErrorKind::UnknownBlock(format!("Cannot finalize block {:?}", block)).into())
}
}
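A usage sketch of the new method, in the spirit of the pruning test below (the hash binding is hypothetical):

// Sketch: explicit finalization, which canonicalizes (and may prune) old state.
backend.finalize_block(BlockId::Number(2))?;
assert_eq!(backend.blockchain().last_finalized()?, hash_of_block_2);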
fn changes_trie_storage(&self) -> Option<&Self::ChangesTrieStorage> {
Some(&self.tries_change_storage)
}
@@ -501,18 +639,16 @@ impl<Block> client::backend::Backend<Block, Blake2Hasher, RlpCodec> for Backend<
match self.storage.state_db.revert_one() {
Some(commit) => {
apply_state_commit(&mut transaction, commit);
let removed = self.blockchain.hash(best)?.ok_or_else(
|| client::error::ErrorKind::UnknownBlock(
format!("Error reverting to {}. Block hash not found.", best)))?;
let removed = best.clone();
best -= As::sa(1);
let key = number_to_db_key(best.clone());
let hash = self.blockchain.hash(best)?.ok_or_else(
|| client::error::ErrorKind::UnknownBlock(
format!("Error reverting to {}. Block hash not found.", best)))?;
transaction.put(columns::META, meta_keys::BEST_BLOCK, &key);
transaction.delete(columns::BLOCK_INDEX, removed.as_ref());
transaction.put(columns::META, meta_keys::BEST_BLOCK, hash.as_ref());
transaction.delete(columns::HASH_LOOKUP, &::utils::number_to_lookup_key(removed));
self.storage.db.write(transaction).map_err(db_err)?;
self.blockchain.update_meta(hash, best, true);
self.blockchain.update_meta(hash, best, true, false);
}
None => return Ok(As::sa(c))
}
@@ -593,7 +729,7 @@ mod tests {
header,
Some(vec![]),
None,
true,
NewBlockState::Best,
).unwrap();
db.commit_operation(op).unwrap();
}
@@ -605,7 +741,7 @@ mod tests {
#[test]
fn set_state_data() {
let db = Backend::<Block>::new_test(2);
{
let hash = {
let mut op = db.begin_operation(BlockId::Hash(Default::default())).unwrap();
let mut header = Header {
number: 0,
@@ -625,13 +761,14 @@ mod tests {
.cloned()
.map(|(x, y)| (x, Some(y)))
).0.into();
let hash = header.hash();
op.reset_storage(storage.iter().cloned()).unwrap();
op.set_block_data(
header,
Some(vec![]),
None,
true
NewBlockState::Best,
).unwrap();
db.commit_operation(op).unwrap();
@@ -642,13 +779,14 @@ mod tests {
assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9]));
assert_eq!(state.storage(&[5, 5, 5]).unwrap(), None);
}
hash
};
{
let mut op = db.begin_operation(BlockId::Number(0)).unwrap();
let mut header = Header {
number: 1,
parent_hash: Default::default(),
parent_hash: hash,
state_root: Default::default(),
digest: Default::default(),
extrinsics_root: Default::default(),
@@ -667,7 +805,7 @@ mod tests {
header,
Some(vec![]),
None,
true
NewBlockState::Best,
).unwrap();
db.commit_operation(op).unwrap();
@@ -711,7 +849,7 @@ mod tests {
header,
Some(vec![]),
None,
true
NewBlockState::Best,
).unwrap();
backend.commit_operation(op).unwrap();
@@ -745,7 +883,7 @@ mod tests {
header,
Some(vec![]),
None,
true
NewBlockState::Best,
).unwrap();
backend.commit_operation(op).unwrap();
@@ -777,13 +915,17 @@ mod tests {
header,
Some(vec![]),
None,
true
NewBlockState::Best,
).unwrap();
backend.commit_operation(op).unwrap();
assert!(backend.storage.db.get(::columns::STATE, &key.0[..]).unwrap().is_none());
// block not yet finalized, so state not pruned.
assert!(backend.storage.db.get(::columns::STATE, &key.0[..]).unwrap().is_some());
}
backend.finalize_block(BlockId::Number(2)).unwrap();
assert!(backend.storage.db.get(::columns::STATE, &key.0[..]).unwrap().is_none());
}
#[test]
@@ -831,7 +973,7 @@ mod tests {
BlockId::Number(number - 1)
};
let mut op = backend.begin_operation(block_id).unwrap();
op.set_block_data(header, None, None, true).unwrap();
op.set_block_data(header, None, None, NewBlockState::Best).unwrap();
op.update_changes_trie(changes_trie_update).unwrap();
backend.commit_operation(op).unwrap();
+205 -74
@@ -21,6 +21,7 @@ use parking_lot::RwLock;
use kvdb::{KeyValueDB, DBTransaction};
use client::backend::NewBlockState;
use client::blockchain::{BlockStatus, Cache as BlockchainCache,
HeaderBackend as BlockchainHeaderBackend, Info as BlockchainInfo};
use client::cht;
@@ -29,29 +30,30 @@ use client::light::blockchain::Storage as LightBlockchainStorage;
use codec::{Decode, Encode};
use primitives::{AuthorityId, H256, Blake2Hasher};
use runtime_primitives::generic::BlockId;
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Hash, HashFor,
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT,
Zero, One, As, NumberFor};
use cache::DbCache;
use utils::{meta_keys, Meta, db_err, number_to_db_key, db_key_to_number, open_database,
use utils::{meta_keys, Meta, db_err, number_to_lookup_key, open_database,
read_db, read_id, read_meta};
use DatabaseSettings;
pub(crate) mod columns {
pub const META: Option<u32> = ::utils::COLUMN_META;
pub const BLOCK_INDEX: Option<u32> = Some(1);
pub const HASH_LOOKUP: Option<u32> = Some(1);
pub const HEADER: Option<u32> = Some(2);
pub const AUTHORITIES: Option<u32> = Some(3);
pub const CHT: Option<u32> = Some(4);
}
/// Keep authorities for last 'AUTHORITIES_ENTRIES_TO_KEEP' blocks.
#[allow(unused)]
pub(crate) const AUTHORITIES_ENTRIES_TO_KEEP: u64 = cht::SIZE;
/// Light blockchain storage. Stores most recent headers + CHTs for older headers.
pub struct LightStorage<Block: BlockT> {
db: Arc<KeyValueDB>,
meta: RwLock<Meta<<<Block as BlockT>::Header as HeaderT>::Number, Block::Hash>>,
cache: DbCache<Block>,
_cache: DbCache<Block>,
}
#[derive(Clone, PartialEq, Debug)]
@@ -83,13 +85,18 @@ impl<Block> LightStorage<Block>
}
fn from_kvdb(db: Arc<KeyValueDB>) -> ClientResult<Self> {
let cache = DbCache::new(db.clone(), columns::BLOCK_INDEX, columns::AUTHORITIES)?;
let cache = DbCache::new(
db.clone(),
columns::HASH_LOOKUP,
columns::HEADER,
columns::AUTHORITIES
)?;
let meta = RwLock::new(read_meta::<Block>(&*db, columns::HEADER)?);
Ok(LightStorage {
db,
meta,
cache,
_cache: cache,
})
}
@@ -100,12 +107,18 @@ impl<Block> LightStorage<Block>
#[cfg(test)]
pub(crate) fn cache(&self) -> &DbCache<Block> {
&self.cache
&self._cache
}
fn update_meta(&self, hash: Block::Hash, number: <<Block as BlockT>::Header as HeaderT>::Number, is_best: bool) {
fn update_meta(
&self,
hash: Block::Hash,
number: <<Block as BlockT>::Header as HeaderT>::Number,
is_best: bool,
is_finalized: bool,
) {
let mut meta = self.meta.write();
if is_best {
let mut meta = self.meta.write();
if number == <<Block as BlockT>::Header as HeaderT>::Number::zero() {
meta.genesis_hash = hash;
}
@@ -113,6 +126,15 @@ impl<Block> LightStorage<Block>
meta.best_number = number;
meta.best_hash = hash;
}
if is_finalized {
if number == <<Block as BlockT>::Header as HeaderT>::Number::zero() {
meta.genesis_hash = hash;
}
meta.finalized_number = number;
meta.finalized_hash = hash;
}
}
}
@@ -121,13 +143,7 @@ impl<Block> BlockchainHeaderBackend<Block> for LightStorage<Block>
Block: BlockT,
{
fn header(&self, id: BlockId<Block>) -> ClientResult<Option<Block::Header>> {
match read_db(&*self.db, columns::BLOCK_INDEX, columns::HEADER, id)? {
Some(header) => match Block::Header::decode(&mut &header[..]) {
Some(header) => Ok(Some(header)),
None => return Err(ClientErrorKind::Backend("Error decoding header".into()).into()),
}
None => Ok(None),
}
::utils::read_header(&*self.db, columns::HASH_LOOKUP, columns::HEADER, id)
}
fn info(&self) -> ClientResult<BlockchainInfo<Block>> {
@@ -141,7 +157,12 @@ impl<Block> BlockchainHeaderBackend<Block> for LightStorage<Block>
fn status(&self, id: BlockId<Block>) -> ClientResult<BlockStatus> {
let exists = match id {
BlockId::Hash(_) => read_id(&*self.db, columns::BLOCK_INDEX, id)?.is_some(),
BlockId::Hash(_) => read_db(
&*self.db,
columns::HASH_LOOKUP,
columns::HEADER,
id
)?.is_some(),
BlockId::Number(n) => n <= self.meta.read().best_number,
};
match exists {
@@ -151,17 +172,90 @@ impl<Block> BlockchainHeaderBackend<Block> for LightStorage<Block>
}
fn number(&self, hash: Block::Hash) -> ClientResult<Option<<<Block as BlockT>::Header as HeaderT>::Number>> {
read_id::<Block>(&*self.db, columns::BLOCK_INDEX, BlockId::Hash(hash))
.and_then(|key| match key {
Some(key) => Ok(Some(db_key_to_number(&key)?)),
None => Ok(None),
})
self.header(BlockId::Hash(hash)).and_then(|key| match key {
Some(hdr) => Ok(Some(hdr.number().clone())),
None => Ok(None),
})
}
fn hash(&self, number: <<Block as BlockT>::Header as HeaderT>::Number) -> ClientResult<Option<Block::Hash>> {
read_db::<Block>(&*self.db, columns::BLOCK_INDEX, columns::HEADER, BlockId::Number(number)).map(|x|
x.map(|raw| HashFor::<Block>::hash(&raw[..])).map(Into::into)
)
read_id::<Block>(&*self.db, columns::HASH_LOOKUP, BlockId::Number(number))
}
}
impl<Block: BlockT> LightStorage<Block> where Block::Hash: From<H256> {
// note that a block is finalized. ensure that best chain contains the finalized
// block number first.
fn note_finalized(&self, transaction: &mut DBTransaction, header: &Block::Header, hash: Block::Hash) -> ClientResult<()> {
const NOTEWORTHY_FINALIZATION_GAP: u64 = 32;
// TODO: ensure this doesn't conflict with old finalized block.
let meta = self.meta.read();
let f_num = header.number().clone();
let number_u64: u64 = f_num.as_();
transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, hash.as_ref());
let (last_finalized_hash, last_finalized_number)
= (meta.finalized_hash.clone(), meta.finalized_number);
let finalized_gap = f_num - last_finalized_number;
if finalized_gap.as_() >= NOTEWORTHY_FINALIZATION_GAP {
info!(target: "db", "Finalizing large run of blocks from {:?} to {:?}",
(&last_finalized_hash, last_finalized_number), (&hash, f_num));
} else {
debug!(target: "db", "Finalizing blocks from {:?} to {:?}",
(&last_finalized_hash, last_finalized_number), (&hash, f_num));
}
// build new CHT if required
let mut build_cht = |header: &Block::Header| -> ClientResult<()> {
if let Some(new_cht_number) = cht::is_build_required(cht::SIZE, *header.number()) {
let new_cht_start: NumberFor<Block> = cht::start_number(cht::SIZE, new_cht_number);
let new_cht_root: Option<Block::Hash> = cht::compute_root::<Block::Header, Blake2Hasher, _>(
cht::SIZE, new_cht_number, (new_cht_start.as_()..)
.map(|num| self.hash(As::sa(num)).unwrap_or_default())
);
if let Some(new_cht_root) = new_cht_root {
transaction.put(columns::CHT, &number_to_lookup_key(new_cht_start), new_cht_root.as_ref());
let mut prune_block = new_cht_start;
let new_cht_end = cht::end_number(cht::SIZE, new_cht_number);
trace!(target: "db", "Replacing blocks [{}..{}] with CHT#{}", new_cht_start, new_cht_end, new_cht_number);
while prune_block <= new_cht_end {
let id = read_id::<Block>(&*self.db, columns::HASH_LOOKUP, BlockId::Number(prune_block))?;
if let Some(hash) = id {
let lookup_key = number_to_lookup_key(prune_block);
transaction.delete(columns::HASH_LOOKUP, &lookup_key);
transaction.delete(columns::HEADER, hash.as_ref());
}
prune_block += <<Block as BlockT>::Header as HeaderT>::Number::one();
}
}
}
Ok(())
};
// attempt to build CHT for all newly finalized blocks.
let last_finalized_u64 = last_finalized_number.as_();
for num in (last_finalized_u64..number_u64).map(|x| x + 1) {
let num = As::sa(num);
if num == f_num {
build_cht(header)?;
} else {
let old_header = match self.header(BlockId::Number(num))? {
Some(x) => x,
None => panic!("finalizing block {} implies existence of block {}; qed", f_num, num),
};
build_cht(&old_header)?;
}
}
Ok(())
}
}
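To make the CHT boundary concrete, here is a sketch using the `cht` helpers named in the diff (the concrete `cht::SIZE` value is not shown here):

// Sketch: once a finalized header makes CHT #N buildable, every header in
// [start, end] is answerable from the single CHT root keyed by `start`,
// so the full headers and their HASH_LOOKUP entries can be deleted.
if let Some(new_cht_number) = cht::is_build_required(cht::SIZE, *header.number()) {
    let start = cht::start_number(cht::SIZE, new_cht_number);
    let end = cht::end_number(cht::SIZE, new_cht_number);
    trace!(target: "db", "blocks [{}..{}] now covered by CHT#{}", start, end, new_cht_number);
}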
@@ -170,63 +264,75 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
Block: BlockT,
Block::Hash: From<H256>,
{
fn import_header(&self, is_new_best: bool, header: Block::Header, authorities: Option<Vec<AuthorityId>>) -> ClientResult<()> {
fn import_header(
&self,
header: Block::Header,
_authorities: Option<Vec<AuthorityId>>,
leaf_state: NewBlockState,
) -> ClientResult<()> {
let mut transaction = DBTransaction::new();
let hash = header.hash();
let number = *header.number();
let key = number_to_db_key(number);
transaction.put(columns::HEADER, &key, &header.encode());
transaction.put(columns::BLOCK_INDEX, hash.as_ref(), &key);
transaction.put(columns::HEADER, hash.as_ref(), &header.encode());
transaction.put(columns::HASH_LOOKUP, &number_to_lookup_key(number), hash.as_ref());
let best_authorities = if is_new_best {
transaction.put(columns::META, meta_keys::BEST_BLOCK, &key);
if leaf_state.is_best() {
transaction.put(columns::META, meta_keys::BEST_BLOCK, hash.as_ref());
// cache authorities for previous block
let number: u64 = number.as_();
let previous_number = number.checked_sub(1);
let best_authorities = previous_number
.and_then(|previous_number| self.cache.authorities_at_cache()
.commit_best_entry(&mut transaction, As::sa(previous_number), authorities));
// handle reorg.
{
let meta = self.meta.read();
if meta.best_hash != Default::default() {
let parent_hash = *header.parent_hash();
let tree_route = ::utils::tree_route::<Block>(
&*self.db,
columns::HEADER,
meta.best_hash,
parent_hash,
)?;
// prune authorities from 'ancient' blocks
if let Some(ancient_number) = number.checked_sub(AUTHORITIES_ENTRIES_TO_KEEP) {
self.cache.authorities_at_cache().prune_entries(&mut transaction, As::sa(ancient_number))?;
}
// update block number to hash lookup entries.
for retracted in tree_route.retracted() {
if retracted.hash == meta.finalized_hash {
// TODO: can we recover here?
warn!("Safety failure: reverting finalized block {:?}",
(&retracted.number, &retracted.hash));
}
best_authorities
} else {
None
};
transaction.delete(
columns::HASH_LOOKUP,
&::utils::number_to_lookup_key(retracted.number)
);
}
// build new CHT if required
if let Some(new_cht_number) = cht::is_build_required(cht::SIZE, *header.number()) {
let new_cht_start: NumberFor<Block> = cht::start_number(cht::SIZE, new_cht_number);
let new_cht_root: Option<Block::Hash> = cht::compute_root::<Block::Header, Blake2Hasher, _>(
cht::SIZE, new_cht_number, (new_cht_start.as_()..)
.map(|num| self.hash(As::sa(num)).unwrap_or_default()));
if let Some(new_cht_root) = new_cht_root {
transaction.put(columns::CHT, &number_to_db_key(new_cht_start), new_cht_root.as_ref());
let mut prune_block = new_cht_start;
let new_cht_end = cht::end_number(cht::SIZE, new_cht_number);
trace!(target: "db", "Replacing blocks [{}..{}] with CHT#{}", new_cht_start, new_cht_end, new_cht_number);
while prune_block <= new_cht_end {
transaction.delete(columns::HEADER, &number_to_db_key(prune_block));
prune_block += <<Block as BlockT>::Header as HeaderT>::Number::one();
for enacted in tree_route.enacted() {
let hash: &Block::Hash = &enacted.hash;
transaction.put(
columns::HASH_LOOKUP,
&::utils::number_to_lookup_key(enacted.number),
hash.as_ref(),
)
}
}
}
// TODO: cache authorities for previous block, accounting for reorgs.
}
let finalized = match leaf_state {
NewBlockState::Final => true,
_ => false,
};
if finalized {
self.note_finalized(&mut transaction, &header, hash)?;
}
debug!("Light DB Commit {:?} ({})", hash, number);
self.db.write(transaction).map_err(db_err)?;
self.update_meta(hash, number, is_new_best);
if let Some(best_authorities) = best_authorities {
self.cache.authorities_at_cache().update_best_entry(Some(best_authorities));
}
self.update_meta(hash, number, leaf_state.is_best(), finalized);
Ok(())
}
@@ -236,13 +342,31 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
let cht_number = cht::block_to_cht_number(cht_size, block).ok_or_else(no_cht_for_block)?;
let cht_start = cht::start_number(cht_size, cht_number);
self.db.get(columns::CHT, &number_to_db_key(cht_start)).map_err(db_err)?
self.db.get(columns::CHT, &number_to_lookup_key(cht_start)).map_err(db_err)?
.ok_or_else(no_cht_for_block)
.and_then(|hash| Block::Hash::decode(&mut &*hash).ok_or_else(no_cht_for_block))
}
fn finalize_header(&self, id: BlockId<Block>) -> ClientResult<()> {
if let Some(header) = self.header(id)? {
let mut transaction = DBTransaction::new();
// TODO: ensure best chain contains this block.
let hash = header.hash();
self.note_finalized(&mut transaction, &header, hash.clone())?;
self.db.write(transaction).map_err(db_err)?;
self.update_meta(hash, header.number().clone(), false, true);
Ok(())
} else {
Err(ClientErrorKind::UnknownBlock(format!("Cannot finalize block {:?}", id)).into())
}
}
fn last_finalized(&self) -> ClientResult<Block::Hash> {
Ok(self.meta.read().finalized_hash.clone())
}
fn cache(&self) -> Option<&BlockchainCache<Block>> {
Some(&self.cache)
None
}
}
@@ -269,7 +393,7 @@ pub(crate) mod tests {
};
let hash = header.hash();
db.import_header(true, header, authorities).unwrap();
db.import_header(header, authorities, NewBlockState::Best).unwrap();
hash
}
@@ -328,15 +452,15 @@ pub(crate) mod tests {
let genesis_hash = insert_block(&db, &Default::default(), 0, None);
assert_eq!(db.db.iter(columns::HEADER).count(), 1);
assert_eq!(db.db.iter(columns::BLOCK_INDEX).count(), 1);
assert_eq!(db.db.iter(columns::HASH_LOOKUP).count(), 1);
let _ = insert_block(&db, &genesis_hash, 1, None);
assert_eq!(db.db.iter(columns::HEADER).count(), 2);
assert_eq!(db.db.iter(columns::BLOCK_INDEX).count(), 2);
assert_eq!(db.db.iter(columns::HASH_LOOKUP).count(), 2);
}
#[test]
fn ancient_headers_are_replaced_with_cht() {
fn finalized_ancient_headers_are_replaced_with_cht() {
let db = LightStorage::new_test();
// insert genesis block header (never pruned)
@@ -357,10 +481,16 @@ pub(crate) mod tests {
assert_eq!(db.db.iter(columns::CHT).count(), 0);
// insert block #{2 * cht::SIZE + 1} && check that new CHT is created + headers of this CHT are pruned
insert_block(&db, &prev_hash, 1 + cht::SIZE + cht::SIZE, None);
// nothing is yet finalized, so nothing is pruned.
prev_hash = insert_block(&db, &prev_hash, 1 + cht::SIZE + cht::SIZE, None);
assert_eq!(db.db.iter(columns::HEADER).count(), (2 + cht::SIZE + cht::SIZE) as usize);
assert_eq!(db.db.iter(columns::CHT).count(), 0);
// now finalize the block.
db.finalize_header(BlockId::Hash(prev_hash)).unwrap();
assert_eq!(db.db.iter(columns::HEADER).count(), (1 + cht::SIZE + 1) as usize);
assert_eq!(db.db.iter(columns::CHT).count(), 1);
assert!((0..cht::SIZE).all(|i| db.db.get(columns::HEADER, &number_to_db_key(1 + i)).unwrap().is_none()));
assert!((0..cht::SIZE).all(|i| db.db.get(columns::HEADER, &number_to_lookup_key(1 + i)).unwrap().is_none()));
}
#[test]
@@ -383,6 +513,7 @@ pub(crate) mod tests {
prev_hash = insert_block(&db, &prev_hash, i as u64, None);
}
db.finalize_header(BlockId::Hash(prev_hash)).unwrap();
let cht_root_1 = db.cht_root(cht::SIZE, cht::start_number(cht::SIZE, 0)).unwrap();
let cht_root_2 = db.cht_root(cht::SIZE, (cht::start_number(cht::SIZE, 0) + cht::SIZE / 2) as u64).unwrap();
let cht_root_3 = db.cht_root(cht::SIZE, cht::end_number(cht::SIZE, 0)).unwrap();
+214 -35
@@ -27,7 +27,7 @@ use client;
use codec::Decode;
use hashdb::DBValue;
use runtime_primitives::generic::BlockId;
use runtime_primitives::traits::{As, Block as BlockT, Header as HeaderT, Hash, HashFor, Zero};
use runtime_primitives::traits::{As, Block as BlockT, Header as HeaderT, Zero};
use DatabaseSettings;
/// Number of columns in the db. Must be the same for both full && light dbs.
@@ -42,8 +42,12 @@ pub mod meta_keys {
pub const TYPE: &[u8; 4] = b"type";
/// Best block key.
pub const BEST_BLOCK: &[u8; 4] = b"best";
/// Last finalized block key.
pub const FINALIZED_BLOCK: &[u8; 5] = b"final";
/// Best authorities block key.
pub const BEST_AUTHORITIES: &[u8; 4] = b"auth";
/// Genesis block hash.
pub const GENESIS_HASH: &[u8; 3] = b"gen";
}
/// Database metadata.
@@ -52,15 +56,19 @@ pub struct Meta<N, H> {
pub best_hash: H,
/// Number of the best known block.
pub best_number: N,
/// Hash of the best finalized block.
pub finalized_hash: H,
/// Number of the best finalized block.
pub finalized_number: N,
/// Hash of the genesis block.
pub genesis_hash: H,
}
/// Type of block key in the database (LE block number).
pub type BlockKey = [u8; 4];
/// A block lookup key: used for canonical lookup from block number to hash
pub type BlockLookupKey = [u8; 4];
/// Convert block number into key (LE representation).
pub fn number_to_db_key<N>(n: N) -> BlockKey where N: As<u64> {
/// Convert block number into lookup key (big-endian representation).
pub fn number_to_lookup_key<N>(n: N) -> BlockLookupKey where N: As<u64> {
let n: u64 = n.as_();
assert!(n & 0xffffffff00000000 == 0);
@@ -72,8 +80,8 @@ pub fn number_to_db_key<N>(n: N) -> BlockKey where N: As<u64> {
]
}
/// Convert block key into block number.
pub fn db_key_to_number<N>(key: &[u8]) -> client::error::Result<N> where N: As<u64> {
/// Convert block lookup key into block number.
pub fn lookup_key_to_number<N>(key: &[u8]) -> client::error::Result<N> where N: As<u64> {
match key.len() {
4 => Ok((key[0] as u64) << 24
| (key[1] as u64) << 16
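A self-contained sketch of the round trip implied by the decode above; the most significant byte comes first, so keys iterate in ascending block-number order:

// Hypothetical standalone round trip for the 4-byte lookup key.
fn to_lookup_key(n: u64) -> [u8; 4] {
    assert!(n & 0xffffffff00000000 == 0); // only 32-bit block numbers fit
    [(n >> 24) as u8, (n >> 16) as u8, (n >> 8) as u8, n as u8]
}
fn from_lookup_key(key: &[u8; 4]) -> u64 {
    (key[0] as u64) << 24 | (key[1] as u64) << 16 | (key[2] as u64) << 8 | (key[3] as u64)
}
fn main() {
    assert_eq!(from_lookup_key(&to_lookup_key(0xABCD)), 0xABCD);
}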
@@ -114,19 +122,24 @@ pub fn open_database(config: &DatabaseSettings, db_type: &str) -> client::error:
Ok(Arc::new(db))
}
/// Convert block id to block key, reading number from db if required.
pub fn read_id<Block>(db: &KeyValueDB, col_index: Option<u32>, id: BlockId<Block>) -> Result<Option<BlockKey>, client::error::Error>
/// Convert block id to block hash, looking up canonical hash by number from DB as necessary.
pub fn read_id<Block>(db: &KeyValueDB, col_index: Option<u32>, id: BlockId<Block>) -> Result<Option<Block::Hash>, client::error::Error>
where
Block: BlockT,
{
match id {
BlockId::Hash(h) => db.get(col_index, h.as_ref())
.map(|v| v.map(|v| {
let mut key: [u8; 4] = [0; 4];
key.copy_from_slice(&v);
key
})).map_err(db_err),
BlockId::Number(n) => Ok(Some(number_to_db_key(n))),
BlockId::Hash(h) => Ok(Some(h)),
BlockId::Number(n) => db.get(col_index, &number_to_lookup_key(n)).map(|v|
v.map(|v| {
let mut h = <Block::Hash>::default();
{
let h = h.as_mut();
let len = ::std::cmp::min(v.len(), h.len());
h.as_mut().copy_from_slice(&v[..len]);
}
h
})
).map_err(db_err),
}
}
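In short: a hash id resolves to itself without touching the database, while a number id is resolved through the canonical number-to-hash index. A sketch (`db`, `h` and `n` are hypothetical bindings):

// Sketch: the two arms of read_id after this change.
let by_hash = read_id::<Block>(&*db, columns::HASH_LOOKUP, BlockId::Hash(h))?;     // Ok(Some(h)), no DB read
let by_number = read_id::<Block>(&*db, columns::HASH_LOOKUP, BlockId::Number(n))?; // reads hash via lookup key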
@@ -136,39 +149,205 @@ pub fn read_db<Block>(db: &KeyValueDB, col_index: Option<u32>, col: Option<u32>,
Block: BlockT,
{
read_id(db, col_index, id).and_then(|key| match key {
Some(key) => db.get(col, &key).map_err(db_err),
Some(key) => db.get(col, key.as_ref()).map_err(db_err),
None => Ok(None),
})
}
/// Read a header from the database.
pub fn read_header<Block: BlockT>(
db: &KeyValueDB,
col_index: Option<u32>,
col: Option<u32>,
id: BlockId<Block>,
) -> client::error::Result<Option<Block::Header>> {
match read_db(db, col_index, col, id)? {
Some(header) => match Block::Header::decode(&mut &header[..]) {
Some(header) => Ok(Some(header)),
None => return Err(
client::error::ErrorKind::Backend("Error decoding header".into()).into()
),
}
None => Ok(None),
}
}
/// Read meta from the database.
pub fn read_meta<Block>(db: &KeyValueDB, col_header: Option<u32>) -> Result<Meta<<<Block as BlockT>::Header as HeaderT>::Number, Block::Hash>, client::error::Error>
pub fn read_meta<Block>(db: &KeyValueDB, col_header: Option<u32>) -> Result<
Meta<<<Block as BlockT>::Header as HeaderT>::Number, Block::Hash>,
client::error::Error,
>
where
Block: BlockT,
{
let genesis_number = <<Block as BlockT>::Header as HeaderT>::Number::zero();
let (best_hash, best_number) = if let Some(Some(header)) = db.get(COLUMN_META, meta_keys::BEST_BLOCK).and_then(|id|
match id {
Some(id) => db.get(col_header, &id).map(|h| h.map(|b| Block::Header::decode(&mut &b[..]))),
None => Ok(None),
}).map_err(db_err)?
{
let hash = header.hash();
debug!("DB Opened blockchain db, best {:?} ({})", hash, header.number());
(hash, *header.number())
} else {
(Default::default(), genesis_number)
let genesis_hash: Block::Hash = match db.get(COLUMN_META, meta_keys::GENESIS_HASH).map_err(db_err)? {
Some(h) => match Decode::decode(&mut &h[..]) {
Some(h) => h,
None => return Err(client::error::ErrorKind::Backend("Error decoding genesis hash".into()).into()),
},
None => return Ok(Meta {
best_hash: Default::default(),
best_number: Zero::zero(),
finalized_hash: Default::default(),
finalized_number: Zero::zero(),
genesis_hash: Default::default(),
}),
};
let genesis_hash = db.get(col_header, &number_to_db_key(genesis_number))
.map_err(db_err)?
.map(|raw| HashFor::<Block>::hash(&raw[..]))
.unwrap_or_default()
.into();
let load_meta_block = |desc, key| -> Result<_, client::error::Error> {
if let Some(Some(header)) = db.get(COLUMN_META, key).and_then(|id|
match id {
Some(id) => db.get(col_header, &id).map(|h| h.map(|b| Block::Header::decode(&mut &b[..]))),
None => Ok(None),
}).map_err(db_err)?
{
let hash = header.hash();
debug!("DB Opened blockchain db, fetched {} = {:?} ({})", desc, hash, header.number());
Ok((hash, *header.number()))
} else {
Ok((genesis_hash.clone(), Zero::zero()))
}
};
let (best_hash, best_number) = load_meta_block("best", meta_keys::BEST_BLOCK)?;
let (finalized_hash, finalized_number) = load_meta_block("final", meta_keys::FINALIZED_BLOCK)?;
Ok(Meta {
best_hash,
best_number,
finalized_hash,
finalized_number,
genesis_hash,
})
}
/// An entry in a tree route.
#[derive(Debug)]
pub struct RouteEntry<Block: BlockT> {
/// The number of the block.
pub number: <Block::Header as HeaderT>::Number,
/// The hash of the block.
pub hash: Block::Hash,
}
/// A tree-route from one block to another in the chain.
///
/// All blocks prior to the pivot in the deque are the reverse-order unique ancestry
/// of the first block, the block at the pivot index is the common ancestor,
/// and all blocks after the pivot are the ancestry of the second block, in
/// order.
///
/// The ancestry sets will include the given blocks, and thus the tree-route is
/// never empty.
///
/// ```ignore
/// Tree route from R1 to E2. Retracted is [R1, R2, R3], Common is C, enacted [E1, E2]
/// <- R3 <- R2 <- R1
/// /
/// C
/// \-> E1 -> E2
/// ```
///
/// ```ignore
/// Tree route from C to E2. Retracted empty. Common is C, enacted [E1, E2]
/// C -> E1 -> E2
/// ```
#[derive(Debug)]
pub struct TreeRoute<Block: BlockT> {
route: Vec<RouteEntry<Block>>,
pivot: usize,
}
impl<Block: BlockT> TreeRoute<Block> {
/// Get an iterator of all retracted blocks in reverse order (towards common ancestor)
pub fn retracted(&self) -> impl Iterator<Item=&RouteEntry<Block>> {
self.route.iter().take(self.pivot)
}
/// Get the common ancestor block. This might be one of the two blocks of the
/// route.
#[allow(unused)]
pub fn common_block(&self) -> &RouteEntry<Block> {
self.route.get(self.pivot).expect("tree-routes are computed between blocks; \
which are included in the route; \
thus it is never empty; qed")
}
/// Get an iterator of enacted blocks (descendants of the common ancestor)
pub fn enacted(&self) -> impl Iterator<Item=&RouteEntry<Block>> {
self.route.iter().skip(self.pivot + 1)
}
}
/// Compute a tree-route between two blocks. See tree-route docs for more details.
pub fn tree_route<Block: BlockT>(
db: &KeyValueDB,
col_header: Option<u32>,
from: Block::Hash,
to: Block::Hash,
) -> Result<TreeRoute<Block>, client::error::Error> {
use runtime_primitives::traits::Header;
let load_header = |hash: &Block::Hash| {
match db.get(col_header, hash.as_ref()).map_err(db_err) {
Ok(Some(b)) => match <Block::Header>::decode(&mut &b[..]) {
Some(hdr) => Ok(hdr),
None => Err(client::error::ErrorKind::Backend("Error decoding header".into()).into()),
}
Ok(None) => Err(client::error::ErrorKind::UnknownBlock(format!("Unknown block {:?}", hash)).into()),
Err(e) => Err(e),
}
};
let mut from = load_header(&from)?;
let mut to = load_header(&to)?;
let mut from_branch = Vec::new();
let mut to_branch = Vec::new();
while to.number() > from.number() {
to_branch.push(RouteEntry {
number: to.number().clone(),
hash: to.hash(),
});
to = load_header(to.parent_hash())?;
}
while from.number() > to.number() {
from_branch.push(RouteEntry {
number: from.number().clone(),
hash: from.hash(),
});
from = load_header(from.parent_hash())?;
}
// numbers are equal now. walk backwards until the block is the same
while to != from {
to_branch.push(RouteEntry {
number: to.number().clone(),
hash: to.hash(),
});
to = load_header(to.parent_hash())?;
from_branch.push(RouteEntry {
number: from.number().clone(),
hash: from.hash(),
});
from = load_header(from.parent_hash())?;
}
// add the pivot block, and append the reversed to-branch (note that it was in reverse order originally)
let pivot = from_branch.len();
from_branch.push(RouteEntry {
number: to.number().clone(),
hash: to.hash(),
});
from_branch.extend(to_branch.into_iter().rev());
Ok(TreeRoute {
route: from_branch,
pivot,
})
}
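This is the routine both backends now use for reorg handling; a condensed sketch of the caller side, with bindings (`transaction`, `best_hash`, `parent_hash`) as in `commit_operation` above:

// Sketch: repoint the canonical number->hash index across a reorg.
let route = tree_route::<Block>(&*db, col_header, best_hash, parent_hash)?;
for retracted in route.retracted() {
    // abandoned branch: drop its number->hash entries
    transaction.delete(columns::HASH_LOOKUP, &number_to_lookup_key(retracted.number));
}
for enacted in route.enacted() {
    // newly canonical branch: point each number at the new branch's hash
    transaction.put(columns::HASH_LOOKUP, &number_to_lookup_key(enacted.number), enacted.hash.as_ref());
}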
+28 -1
@@ -27,6 +27,27 @@ use patricia_trie::NodeCodec;
use hashdb::Hasher;
use memorydb::MemoryDB;
/// State of a new block.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum NewBlockState {
/// Normal block.
Normal,
/// New best block.
Best,
/// Newly finalized block (implicitly best).
Final,
}
impl NewBlockState {
/// Whether this block is the new best block.
pub fn is_best(self) -> bool {
match self {
NewBlockState::Best | NewBlockState::Final => true,
NewBlockState::Normal => false,
}
}
}
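The client maps its import flags onto this enum (see `execute_and_import_block` later in this commit); a minimal sketch of that mapping:

// Sketch: choosing a NewBlockState during import, as the client does.
fn leaf_state(finalized: bool, is_new_best: bool) -> NewBlockState {
    if finalized {
        NewBlockState::Final // Final also reports is_best() == true
    } else if is_new_best {
        NewBlockState::Best
    } else {
        NewBlockState::Normal
    }
}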
/// Block insertion operation. Keeps hold of the inserted block state and data.
pub trait BlockImportOperation<Block, H, C>
where
@@ -45,7 +66,7 @@ where
header: Block::Header,
body: Option<Vec<Block::Extrinsic>>,
justification: Option<Justification<Block::Hash>>,
is_new_best: bool
state: NewBlockState,
) -> error::Result<()>;
/// Append authorities set to the transaction. This is a set of parent block (set which
@@ -87,6 +108,12 @@ where
fn begin_operation(&self, block: BlockId<Block>) -> error::Result<Self::BlockImportOperation>;
/// Commit block insertion.
fn commit_operation(&self, transaction: Self::BlockImportOperation) -> error::Result<()>;
/// Finalize block with given Id. This should also implicitly finalize all ancestors.
///
/// If the finalized block is not an ancestor of the current "best block", then
/// the chain will be implicitly reorganized to the best chain containing the newly
/// finalized block.
fn finalize_block(&self, block: BlockId<Block>) -> error::Result<()>;
/// Returns reference to blockchain backend.
fn blockchain(&self) -> &Self::Blockchain;
/// Returns reference to changes trie storage.
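Putting the trait changes together, a typical import-then-finalize flow reads as follows (a sketch; `backend`, `parent_hash`, `header`, `body` and `block_hash` are hypothetical bindings):

// Sketch: import a block as the new best, then finalize it explicitly.
let mut op = backend.begin_operation(BlockId::Hash(parent_hash))?;
op.set_block_data(header, Some(body), None, NewBlockState::Best)?;
backend.commit_operation(op)?;
// later, once consensus decides; this implicitly finalizes all ancestors:
backend.finalize_block(BlockId::Hash(block_hash))?;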
+2
@@ -48,6 +48,8 @@ pub trait Backend<Block: BlockT>: HeaderBackend<Block> {
fn body(&self, id: BlockId<Block>) -> Result<Option<Vec<<Block as BlockT>::Extrinsic>>>;
/// Get block justification. Returns `None` if justification does not exist.
fn justification(&self, id: BlockId<Block>) -> Result<Option<Justification<Block::Hash>>>;
/// Get last finalized block hash.
fn last_finalized(&self) -> Result<Block::Hash>;
/// Returns data cache reference, if it is enabled on this backend.
fn cache(&self) -> Option<&Cache<Block>>;
+43 -5
@@ -195,7 +195,12 @@ impl<B, E, Block> Client<B, E, Block> where
info!("Initialising Genesis block/state (state: {}, header-hash: {})", genesis_block.header().state_root(), genesis_block.header().hash());
let mut op = backend.begin_operation(BlockId::Hash(Default::default()))?;
op.reset_storage(genesis_storage.into_iter())?;
op.set_block_data(genesis_block.deconstruct().0, Some(vec![]), None, true)?;
op.set_block_data(
genesis_block.deconstruct().0,
Some(vec![]),
None,
::backend::NewBlockState::Final
)?;
backend.commit_operation(op)?;
}
Ok(Client {
@@ -383,6 +388,7 @@ impl<B, E, Block> Client<B, E, Block> where
origin: BlockOrigin,
header: JustifiedHeader<Block>,
body: Option<Vec<<Block as BlockT>::Extrinsic>>,
finalized: bool,
) -> error::Result<ImportResult> {
let (header, justification, authorities) = header.into_inner();
let parent_hash = header.parent_hash().clone();
@@ -394,7 +400,17 @@ impl<B, E, Block> Client<B, E, Block> where
let _import_lock = self.import_lock.lock();
let height: u64 = header.number().as_();
*self.importing_block.write() = Some(hash);
let result = self.execute_and_import_block(origin, hash, header, justification, body, authorities);
let result = self.execute_and_import_block(
origin,
hash,
header,
justification,
body,
authorities,
finalized
);
*self.importing_block.write() = None;
telemetry!("block.import";
"height" => height,
@@ -412,6 +428,7 @@ impl<B, E, Block> Client<B, E, Block> where
justification: bft::Justification<Block::Hash>,
body: Option<Vec<Block::Extrinsic>>,
authorities: Vec<AuthorityId>,
finalized: bool,
) -> error::Result<ImportResult> {
let parent_hash = header.parent_hash().clone();
match self.backend.blockchain().status(BlockId::Hash(hash))? {
@@ -454,9 +471,24 @@ impl<B, E, Block> Client<B, E, Block> where
};
let is_new_best = header.number() == &(self.backend.blockchain().info()?.best_number + One::one());
let leaf_state = if finalized {
::backend::NewBlockState::Final
} else if is_new_best {
::backend::NewBlockState::Best
} else {
::backend::NewBlockState::Normal
};
trace!("Imported {}, (#{}), best={}, origin={:?}", hash, header.number(), is_new_best, origin);
let unchecked: bft::UncheckedJustification<_> = justification.uncheck().into();
transaction.set_block_data(header.clone(), body, Some(unchecked.into()), is_new_best)?;
transaction.set_block_data(
header.clone(),
body,
Some(unchecked.into()),
leaf_state,
)?;
transaction.update_authorities(authorities);
if let Some(storage_update) = storage_update {
transaction.update_storage(storage_update)?;
@@ -601,7 +633,7 @@ impl<B, E, Block> bft::BlockImport<Block> for Client<B, E, Block>
&self,
block: Block,
justification: ::bft::Justification<Block::Hash>,
authorities: &[AuthorityId]
authorities: &[AuthorityId],
) -> bool {
let (header, extrinsics) = block.deconstruct();
let justified_header = JustifiedHeader {
@@ -610,7 +642,13 @@ impl<B, E, Block> bft::BlockImport<Block> for Client<B, E, Block>
authorities: authorities.to_vec(),
};
self.import_block(BlockOrigin::ConsensusBroadcast, justified_header, Some(extrinsics)).is_ok()
// TODO [rob]: non-instant finality.
self.import_block(
BlockOrigin::ConsensusBroadcast,
justified_header,
Some(extrinsics),
true
).is_ok()
}
}
+50 -14
@@ -20,14 +20,14 @@ use std::collections::HashMap;
use std::sync::Arc;
use parking_lot::RwLock;
use error;
use backend;
use backend::{self, NewBlockState};
use light;
use primitives::AuthorityId;
use runtime_primitives::generic::BlockId;
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Zero,
NumberFor, As, Digest, DigestItem};
use runtime_primitives::bft::Justification;
use blockchain::{self, BlockStatus};
use blockchain::{self, BlockStatus, HeaderBackend};
use state_machine::backend::{Backend as StateBackend, InMemory};
use state_machine::InMemoryChangesTrieStorage;
use patricia_trie::NodeCodec;
@@ -37,7 +37,7 @@ use memorydb::MemoryDB;
struct PendingBlock<B: BlockT> {
block: StoredBlock<B>,
is_best: bool,
state: NewBlockState,
}
#[derive(PartialEq, Eq, Clone)]
@@ -91,6 +91,7 @@ struct BlockchainStorage<Block: BlockT> {
hashes: HashMap<NumberFor<Block>, Block::Hash>,
best_hash: Block::Hash,
best_number: NumberFor<Block>,
finalized_hash: Block::Hash,
genesis_hash: Block::Hash,
cht_roots: HashMap<NumberFor<Block>, Block::Hash>,
}
@@ -136,6 +137,7 @@ impl<Block: BlockT> Blockchain<Block> {
hashes: HashMap::new(),
best_hash: Default::default(),
best_number: Zero::zero(),
finalized_hash: Default::default(),
genesis_hash: Default::default(),
cht_roots: HashMap::new(),
}));
@@ -155,16 +157,21 @@ impl<Block: BlockT> Blockchain<Block> {
header: <Block as BlockT>::Header,
justification: Option<Justification<Block::Hash>>,
body: Option<Vec<<Block as BlockT>::Extrinsic>>,
is_new_best: bool
new_state: NewBlockState,
) {
let number = header.number().clone();
let mut storage = self.storage.write();
storage.blocks.insert(hash.clone(), StoredBlock::new(header, body, justification));
storage.hashes.insert(number, hash.clone());
if is_new_best {
if new_state.is_best() {
storage.best_hash = hash.clone();
storage.best_number = number.clone();
}
if let NewBlockState::Final = new_state {
storage.finalized_hash = hash;
}
if number == Zero::zero() {
storage.genesis_hash = hash;
}
@@ -189,9 +196,19 @@ impl<Block: BlockT> Blockchain<Block> {
pub fn insert_cht_root(&self, block: NumberFor<Block>, cht_root: Block::Hash) {
self.storage.write().cht_roots.insert(block, cht_root);
}
fn finalize_header(&self, id: BlockId<Block>) -> error::Result<()> {
let hash = match self.header(id)? {
Some(h) => h.hash(),
None => return Err(error::ErrorKind::UnknownBlock(format!("{}", id)).into()),
};
self.storage.write().finalized_hash = hash;
Ok(())
}
}
impl<Block: BlockT> blockchain::HeaderBackend<Block> for Blockchain<Block> {
impl<Block: BlockT> HeaderBackend<Block> for Blockchain<Block> {
fn header(&self, id: BlockId<Block>) -> error::Result<Option<<Block as BlockT>::Header>> {
Ok(self.id(id).and_then(|hash| {
self.storage.read().blocks.get(&hash).map(|b| b.header().clone())
@@ -238,6 +255,10 @@ impl<Block: BlockT> blockchain::Backend<Block> for Blockchain<Block> {
))
}
fn last_finalized(&self) -> error::Result<Block::Hash> {
Ok(self.storage.read().finalized_hash.clone())
}
fn cache(&self) -> Option<&blockchain::Cache<Block>> {
Some(&self.cache)
}
@@ -249,19 +270,28 @@ impl<Block: BlockT> light::blockchain::Storage<Block> for Blockchain<Block>
{
fn import_header(
&self,
is_new_best: bool,
header: Block::Header,
authorities: Option<Vec<AuthorityId>>
authorities: Option<Vec<AuthorityId>>,
state: NewBlockState,
) -> error::Result<()> {
let hash = header.hash();
let parent_hash = *header.parent_hash();
self.insert(hash, header, None, None, is_new_best);
if is_new_best {
self.insert(hash, header, None, None, state);
if state.is_best() {
self.cache.insert(parent_hash, authorities);
}
Ok(())
}
fn last_finalized(&self) -> error::Result<Block::Hash> {
Ok(self.storage.read().finalized_hash.clone())
}
fn finalize_header(&self, id: BlockId<Block>) -> error::Result<()> {
Blockchain::finalize_header(self, id)
}
fn cht_root(&self, _cht_size: u64, block: NumberFor<Block>) -> error::Result<Block::Hash> {
self.storage.read().cht_roots.get(&block).cloned()
.ok_or_else(|| error::ErrorKind::Backend(format!("CHT for block {} does not exist", block)).into())
@@ -299,12 +329,12 @@ where
header: <Block as BlockT>::Header,
body: Option<Vec<<Block as BlockT>::Extrinsic>>,
justification: Option<Justification<Block::Hash>>,
is_new_best: bool
state: NewBlockState,
) -> error::Result<()> {
assert!(self.pending_block.is_none(), "Only one block per operation is allowed");
self.pending_block = Some(PendingBlock {
block: StoredBlock::new(header, body, justification),
is_best: is_new_best,
state,
});
Ok(())
}
@@ -395,6 +425,7 @@ where
let parent_hash = *header.parent_hash();
self.states.write().insert(hash, operation.new_state.unwrap_or_else(|| old_state.clone()));
let changes_trie_root = header.digest().logs().iter()
.find(|log| log.as_changes_trie_root().is_some())
.and_then(DigestItem::as_changes_trie_root)
@@ -405,15 +436,20 @@ where
self.changes_trie_storage.insert(header.number().as_(), changes_trie_root, changes_trie_update);
}
}
self.blockchain.insert(hash, header, justification, body, pending_block.is_best);
self.blockchain.insert(hash, header, justification, body, pending_block.state);
// dumb implementation - store value for each block
if pending_block.is_best {
if pending_block.state.is_best() {
self.blockchain.cache.insert(parent_hash, operation.pending_authorities);
}
}
Ok(())
}
fn finalize_block(&self, block: BlockId<Block>) -> error::Result<()> {
self.blockchain.finalize_header(block)
}
fn blockchain(&self) -> &Self::Blockchain {
&self.blockchain
}
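The in-memory rules above amount to pointer bookkeeping: `insert` may move the best and finalized pointers depending on the new block's state, while `finalize_header` moves only the finalized pointer. A self-contained toy, with `u64` standing in for hashes and headers:

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq)]
enum NewBlockState { Normal, Best, Final }

struct Chain {
    blocks: HashMap<u64, u64>, // hash -> number
    best_hash: u64,
    finalized_hash: u64,
}

impl Chain {
    fn insert(&mut self, hash: u64, number: u64, state: NewBlockState) {
        self.blocks.insert(hash, number);
        // Best and Final both move the best pointer (assumption, see the sketch above).
        if state == NewBlockState::Best || state == NewBlockState::Final {
            self.best_hash = hash;
        }
        if state == NewBlockState::Final {
            self.finalized_hash = hash;
        }
    }

    // Mirrors `finalize_header`: unknown blocks are an error, otherwise
    // only the finalized pointer moves.
    fn finalize(&mut self, hash: u64) -> Result<(), String> {
        if !self.blocks.contains_key(&hash) {
            return Err(format!("unknown block: {}", hash));
        }
        self.finalized_hash = hash;
        Ok(())
    }
}

fn main() {
    let mut chain = Chain { blocks: HashMap::new(), best_hash: 0, finalized_hash: 0 };
    chain.insert(1, 1, NewBlockState::Best);
    chain.insert(2, 2, NewBlockState::Normal);
    chain.finalize(1).unwrap();
    assert_eq!((chain.best_hash, chain.finalized_hash), (1, 1));
}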
+14 -6
@@ -26,7 +26,7 @@ use runtime_primitives::{bft::Justification, generic::BlockId};
use runtime_primitives::traits::{Block as BlockT, NumberFor};
use state_machine::{Backend as StateBackend, InMemoryChangesTrieStorage, TrieBackend};
use backend::{Backend as ClientBackend, BlockImportOperation, RemoteBackend};
use backend::{Backend as ClientBackend, BlockImportOperation, RemoteBackend, NewBlockState};
use blockchain::HeaderBackend as BlockchainHeaderBackend;
use error::{Error as ClientError, ErrorKind as ClientErrorKind, Result as ClientResult};
use light::blockchain::{Blockchain, Storage as BlockchainStorage};
@@ -43,9 +43,9 @@ pub struct Backend<S, F> {
/// Light block (header and justification) import operation.
pub struct ImportOperation<Block: BlockT, S, F> {
is_new_best: bool,
header: Option<Block::Header>,
authorities: Option<Vec<AuthorityId>>,
leaf_state: NewBlockState,
_phantom: ::std::marker::PhantomData<(S, F)>,
}
@@ -84,16 +84,24 @@ impl<S, F, Block, H, C> ClientBackend<Block, H, C> for Backend<S, F> where
fn begin_operation(&self, _block: BlockId<Block>) -> ClientResult<Self::BlockImportOperation> {
Ok(ImportOperation {
is_new_best: false,
header: None,
authorities: None,
leaf_state: NewBlockState::Normal,
_phantom: Default::default(),
})
}
fn commit_operation(&self, operation: Self::BlockImportOperation) -> ClientResult<()> {
let header = operation.header.expect("commit is called after set_block_data; set_block_data sets header; qed");
self.blockchain.storage().import_header(operation.is_new_best, header, operation.authorities)
self.blockchain.storage().import_header(
header,
operation.authorities,
operation.leaf_state,
)
}
fn finalize_block(&self, block: BlockId<Block>) -> ClientResult<()> {
self.blockchain.storage().finalize_header(block)
}
fn blockchain(&self) -> &Blockchain<S, F> {
@@ -153,9 +161,9 @@ where
header: Block::Header,
_body: Option<Vec<Block::Extrinsic>>,
_justification: Option<Justification<Block::Hash>>,
is_new_best: bool
state: NewBlockState,
) -> ClientResult<()> {
self.is_new_best = is_new_best;
self.leaf_state = state;
self.header = Some(header);
Ok(())
}
+14 -3
@@ -25,6 +25,7 @@ use primitives::AuthorityId;
use runtime_primitives::{bft::Justification, generic::BlockId};
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero};
use backend::NewBlockState;
use blockchain::{Backend as BlockchainBackend, BlockStatus, Cache as BlockchainCache,
HeaderBackend as BlockchainHeaderBackend, Info as BlockchainInfo};
use cht;
@@ -33,14 +34,20 @@ use light::fetcher::{Fetcher, RemoteHeaderRequest};
/// Light client blockchain storage.
pub trait Storage<Block: BlockT>: BlockchainHeaderBackend<Block> {
/// Store new header.
/// Store new header. Should refuse to revert any finalized blocks.
fn import_header(
&self,
is_new_best: bool,
header: Block::Header,
authorities: Option<Vec<AuthorityId>>
authorities: Option<Vec<AuthorityId>>,
state: NewBlockState,
) -> ClientResult<()>;
/// Mark historic header as finalized.
fn finalize_header(&self, block: BlockId<Block>) -> ClientResult<()>;
/// Get last finalized header.
fn last_finalized(&self) -> ClientResult<Block::Hash>;
/// Get CHT root for given block. Fails if the block is not pruned (not a part of any CHT).
fn cht_root(&self, cht_size: u64, block: NumberFor<Block>) -> ClientResult<Block::Hash>;
@@ -136,6 +143,10 @@ impl<S, F, Block> BlockchainBackend<Block> for Blockchain<S, F> where Block: Blo
Ok(None)
}
fn last_finalized(&self) -> ClientResult<Block::Hash> {
self.storage.last_finalized()
}
fn cache(&self) -> Option<&BlockchainCache<Block>> {
self.storage.cache()
}
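The new doc comment states that `import_header` should refuse to revert finalized blocks, but leaves the enforcement to implementations. One possible shape, as a toy where block numbers stand in for headers (the rejection strategy here is an assumption):

struct LightStore {
    finalized: u64, // number of the last finalized block
    best: u64,
}

impl LightStore {
    // Reject a new best header at or below the finalized number: accepting
    // it would revert a finalized block.
    fn import_header(&mut self, number: u64, is_best: bool) -> Result<(), String> {
        if is_best && number <= self.finalized {
            return Err(format!("refusing to revert finalized block #{}", self.finalized));
        }
        if is_best {
            self.best = number;
        }
        Ok(())
    }

    fn finalize_header(&mut self, number: u64) -> Result<(), String> {
        if number < self.finalized {
            return Err("cannot finalize below the last finalized block".into());
        }
        self.finalized = number;
        Ok(())
    }

    fn last_finalized(&self) -> u64 {
        self.finalized
    }
}

fn main() {
    let mut store = LightStore { finalized: 0, best: 0 };
    store.import_header(1, true).unwrap();
    store.finalize_header(1).unwrap();
    assert!(store.import_header(1, true).is_err()); // would revert finality
    assert_eq!(store.last_finalized(), 1);
}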
+7 -1
@@ -230,7 +230,13 @@ pub mod tests {
// check remote read proof locally
let local_storage = InMemoryBlockchain::<Block>::new();
local_storage.insert(remote_block_hash, remote_block_header.clone(), None, None, true);
local_storage.insert(
remote_block_hash,
remote_block_header.clone(),
None,
None,
::backend::NewBlockState::Final,
);
let local_executor = test_client::LocalExecutor::new();
let local_checker = LightDataChecker::new(local_executor);
(local_checker, remote_block_header, remote_read_proof, authorities_len)
+2 -2
@@ -62,9 +62,9 @@ impl<B, E, Block> Client<Block> for SubstrateClient<B, E, Block> where
Block: BlockT,
{
fn import(&self, origin: BlockOrigin, header: Block::Header, justification: Justification<Block::Hash>, body: Option<Vec<Block::Extrinsic>>) -> Result<ImportResult, Error> {
// TODO: defer justification check.
// TODO: defer justification check and non-instant finality.
let justified_header = self.check_justification(header, justification.into())?;
(self as &SubstrateClient<B, E, Block>).import_block(origin, justified_header, body)
(self as &SubstrateClient<B, E, Block>).import_block(origin, justified_header, body, true)
}
fn info(&self) -> Result<ClientInfo<Block>, Error> {
+2 -1
@@ -102,8 +102,9 @@ pub fn import_blocks<F, E, R>(config: FactoryFullConfiguration<F>, exit: E, mut
}
match SignedBlock::decode(&mut input) {
Some(block) => {
// TODO: non-instant finality.
let header = client.check_justification(block.block.header, block.justification.into())?;
client.import_block(BlockOrigin::File, header, Some(block.block.extrinsics))?;
client.import_block(BlockOrigin::File, header, Some(block.block.extrinsics), true)?;
},
None => {
warn!("Error reading block data.");
+3 -3
@@ -242,7 +242,7 @@ tuple_impl!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W,
pub trait Hash: 'static + MaybeSerializeDebug + Clone + Eq + PartialEq { // Stupid bug in the Rust compiler believes derived
// traits must be fulfilled by all type parameters.
/// The hash type produced.
type Output: Member + AsRef<[u8]>;
type Output: Member + AsRef<[u8]> + AsMut<[u8]>;
/// Produce the hash of some byte-slice.
fn hash(s: &[u8]) -> Self::Output;
@@ -373,7 +373,7 @@ impl<T: Send + Sync + Sized + MaybeSerializeDebug + Eq + PartialEq + Clone + 'st
/// You can also create a `new` one from those fields.
pub trait Header: Clone + Send + Sync + Codec + Eq + MaybeSerializeDebug + 'static {
type Number: Member + ::rstd::hash::Hash + Copy + MaybeDisplay + SimpleArithmetic + Codec;
type Hash: Member + ::rstd::hash::Hash + Copy + MaybeDisplay + Default + SimpleBitOps + Codec + AsRef<[u8]>;
type Hash: Member + ::rstd::hash::Hash + Copy + MaybeDisplay + Default + SimpleBitOps + Codec + AsRef<[u8]> + AsMut<[u8]>;
type Hashing: Hash<Output = Self::Hash>;
type Digest: Digest<Hash = Self::Hash>;
@@ -412,7 +412,7 @@ pub trait Header: Clone + Send + Sync + Codec + Eq + MaybeSerializeDebug + 'stat
pub trait Block: Clone + Send + Sync + Codec + Eq + MaybeSerializeDebug + 'static {
type Extrinsic: Member + Codec;
type Header: Header<Hash=Self::Hash>;
type Hash: Member + ::rstd::hash::Hash + Copy + MaybeDisplay + Default + SimpleBitOps + Codec + AsRef<[u8]>;
type Hash: Member + ::rstd::hash::Hash + Copy + MaybeDisplay + Default + SimpleBitOps + Codec + AsRef<[u8]> + AsMut<[u8]>;
fn header(&self) -> &Self::Header;
fn extrinsics(&self) -> &[Self::Extrinsic];
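The diff does not show why the hash types gain `AsMut<[u8]>`, so the motivation sketched here is an assumption: the bound lets generic code write into a hash value in place, for example to synthesize one from raw bytes. A standalone illustration with a hypothetical `fill_with` helper:

// Hypothetical helper: build an `Output`-like value by filling its bytes.
fn fill_with<H: Default + AsMut<[u8]>>(byte: u8) -> H {
    let mut h = H::default();
    for b in h.as_mut().iter_mut() {
        *b = byte;
    }
    h
}

fn main() {
    // `[u8; 4]` stands in for a hash type satisfying the new bounds.
    let h: [u8; 4] = fill_with(0xab);
    assert_eq!(h, [0xab; 4]);
}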
+35 -35
@@ -15,19 +15,19 @@
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
// tag::description[]
//! State database maintenance. Handles finalization and pruning in the database. The input to
//! State database maintenance. Handles canonicalization and pruning in the database. The input to
//! this module is a `ChangeSet` which is basically a list of key-value pairs (trie nodes) that
//! were added or deleted during block execution.
//!
//! # Finalization.
//! Finalization window tracks a tree of blocks identified by header hash. The in-memory
//! # Canonicalization.
//! Canonicalization window tracks a tree of blocks identified by header hash. The in-memory
//! overlay allows getting any node that was inserted in any of the blocks within the window.
//! The tree is journaled to the backing database and rebuilt on startup.
//! Finalization function select one root from the top of the tree and discards all other roots and
//! The canonicalization function selects one root from the top of the tree and discards all other roots and
//! their subtrees.
//!
//! # Pruning.
//! See `RefWindow` for pruning algorithm details. `StateDb` prunes on each finalization until pruning
//! See `RefWindow` for pruning algorithm details. `StateDb` prunes on each canonicalization until pruning
//! constraints are satisfied.
//!
// end::description[]
@@ -38,7 +38,7 @@ extern crate parking_lot;
extern crate parity_codec as codec;
extern crate substrate_primitives as primitives;
mod unfinalized;
mod noncanonical;
mod pruning;
#[cfg(test)] mod test;
@@ -46,7 +46,7 @@ use std::fmt;
use parking_lot::RwLock;
use codec::Codec;
use std::collections::HashSet;
use unfinalized::UnfinalizedOverlay;
use noncanonical::NonCanonicalOverlay;
use pruning::RefWindow;
/// Database value type.
@@ -113,7 +113,7 @@ pub struct CommitSet<H: Hash> {
/// Pruning constraints. If none are specified pruning is
#[derive(Default, Debug, Clone)]
pub struct Constraints {
/// Maximum blocks. Defaults to 0 when unspecified, effectively keeping only unfinalized states.
/// Maximum blocks. Defaults to 0 when unspecified, effectively keeping only non-canonical states.
pub max_blocks: Option<u32>,
/// Maximum memory in the pruning overlay.
pub max_mem: Option<usize>,
@@ -124,9 +124,9 @@ pub struct Constraints {
pub enum PruningMode {
/// Maintain a pruning window.
Constrained(Constraints),
/// No pruning. Finalization is a no-op.
/// No pruning. Canonicalization is a no-op.
ArchiveAll,
/// Finalization discards unfinalized nodes. All the finalized nodes are kept in the DB.
/// Canonicalization discards non-canonical nodes. All the canonical nodes are kept in the DB.
ArchiveCanonical,
}
@@ -154,7 +154,7 @@ fn to_meta_key<S: Codec>(suffix: &[u8], data: &S) -> Vec<u8> {
struct StateDbSync<BlockHash: Hash, Key: Hash> {
mode: PruningMode,
unfinalized: UnfinalizedOverlay<BlockHash, Key>,
non_canonical: NonCanonicalOverlay<BlockHash, Key>,
pruning: Option<RefWindow<BlockHash, Key>>,
pinned: HashSet<BlockHash>,
}
@@ -162,7 +162,7 @@ struct StateDbSync<BlockHash: Hash, Key: Hash> {
impl<BlockHash: Hash, Key: Hash> StateDbSync<BlockHash, Key> {
pub fn new<D: MetaDb>(mode: PruningMode, db: &D) -> Result<StateDbSync<BlockHash, Key>, Error<D::Error>> {
trace!("StateDb settings: {:?}", mode);
let unfinalized: UnfinalizedOverlay<BlockHash, Key> = UnfinalizedOverlay::new(db)?;
let non_canonical: NonCanonicalOverlay<BlockHash, Key> = NonCanonicalOverlay::new(db)?;
let pruning: Option<RefWindow<BlockHash, Key>> = match mode {
PruningMode::Constrained(Constraints {
max_mem: Some(_),
@@ -173,7 +173,7 @@ impl<BlockHash: Hash, Key: Hash> StateDbSync<BlockHash, Key> {
};
Ok(StateDbSync {
mode,
unfinalized,
non_canonical,
pruning: pruning,
pinned: Default::default(),
})
@@ -196,36 +196,36 @@ impl<BlockHash: Hash, Key: Hash> StateDbSync<BlockHash, Key> {
}
},
PruningMode::Constrained(_) | PruningMode::ArchiveCanonical => {
self.unfinalized.insert(hash, number, parent_hash, changeset)
self.non_canonical.insert(hash, number, parent_hash, changeset)
}
}
}
pub fn finalize_block(&mut self, hash: &BlockHash) -> CommitSet<Key> {
// clear the temporary overlay from the previous finalization.
self.unfinalized.clear_overlay();
pub fn canonicalize_block(&mut self, hash: &BlockHash) -> CommitSet<Key> {
// clear the temporary overlay from the previous canonicalization.
self.non_canonical.clear_overlay();
let mut commit = match self.mode {
PruningMode::ArchiveAll => {
CommitSet::default()
},
PruningMode::ArchiveCanonical => {
let mut commit = self.unfinalized.finalize(hash);
let mut commit = self.non_canonical.canonicalize(hash);
commit.data.deleted.clear();
commit
},
PruningMode::Constrained(_) => {
self.unfinalized.finalize(hash)
self.non_canonical.canonicalize(hash)
},
};
if let Some(ref mut pruning) = self.pruning {
pruning.note_finalized(hash, &mut commit);
pruning.note_canonical(hash, &mut commit);
}
self.prune(&mut commit);
commit
}
pub fn best_finalized(&self) -> u64 {
return self.unfinalized.last_finalized_block_number()
pub fn best_canonical(&self) -> u64 {
return self.non_canonical.last_canonicalized_block_number()
}
pub fn is_pruned(&self, number: u64) -> bool {
@@ -252,7 +252,7 @@ impl<BlockHash: Hash, Key: Hash> StateDbSync<BlockHash, Key> {
}
}
/// Revert all unfinalized blocks with the best block number.
/// Revert all non-canonical blocks with the best block number.
/// Returns a database commit or `None` if not possible.
/// For archive an empty commit set is returned.
pub fn revert_one(&mut self) -> Option<CommitSet<Key>> {
@@ -261,7 +261,7 @@ impl<BlockHash: Hash, Key: Hash> StateDbSync<BlockHash, Key> {
Some(CommitSet::default())
},
PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => {
self.unfinalized.revert_one()
self.non_canonical.revert_one()
},
}
}
@@ -275,7 +275,7 @@ impl<BlockHash: Hash, Key: Hash> StateDbSync<BlockHash, Key> {
}
pub fn get<D: HashDb<Hash=Key>>(&self, key: &Key, db: &D) -> Result<Option<DBValue>, Error<D::Error>> {
if let Some(value) = self.unfinalized.get(key) {
if let Some(value) = self.non_canonical.get(key) {
return Ok(Some(value));
}
db.get(key).map_err(|e| Error::Db(e))
@@ -296,14 +296,14 @@ impl<BlockHash: Hash, Key: Hash> StateDb<BlockHash, Key> {
})
}
/// Add a new unfinalized block.
/// Add a new non-canonical block.
pub fn insert_block(&self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet<Key>) -> CommitSet<Key> {
self.db.write().insert_block(hash, number, parent_hash, changeset)
}
/// Canonicalize a previously inserted block.
pub fn finalize_block(&self, hash: &BlockHash) -> CommitSet<Key> {
self.db.write().finalize_block(hash)
pub fn canonicalize_block(&self, hash: &BlockHash) -> CommitSet<Key> {
self.db.write().canonicalize_block(hash)
}
/// Prevents pruning of specified block and its descendants.
@@ -316,12 +316,12 @@ impl<BlockHash: Hash, Key: Hash> StateDb<BlockHash, Key> {
self.db.write().unpin(hash)
}
/// Get a value from unfinalized/pruning overlay or the backing DB.
/// Get a value from non-canonical/pruning overlay or the backing DB.
pub fn get<D: HashDb<Hash=Key>>(&self, key: &Key, db: &D) -> Result<Option<DBValue>, Error<D::Error>> {
self.db.read().get(key, db)
}
/// Revert all unfinalized blocks with the best block number.
/// Revert all non-canonical blocks with the best block number.
/// Returns a database commit or `None` if not possible.
/// For archive an empty commit set is returned.
pub fn revert_one(&self) -> Option<CommitSet<Key>> {
@@ -329,8 +329,8 @@ impl<BlockHash: Hash, Key: Hash> StateDb<BlockHash, Key> {
}
/// Returns the last canonicalized block number.
pub fn best_finalized(&self) -> u64 {
return self.db.read().best_finalized()
pub fn best_canonical(&self) -> u64 {
return self.db.read().best_canonical()
}
/// Check if block is pruned away.
@@ -353,10 +353,10 @@ mod tests {
db.commit(&state_db.insert_block(&H256::from(21), 2, &H256::from(1), make_changeset(&[21], &[921, 1])));
db.commit(&state_db.insert_block(&H256::from(22), 2, &H256::from(1), make_changeset(&[22], &[922])));
db.commit(&state_db.insert_block(&H256::from(3), 3, &H256::from(21), make_changeset(&[3], &[93])));
db.commit(&state_db.finalize_block(&H256::from(1)));
db.commit(&state_db.canonicalize_block(&H256::from(1)));
db.commit(&state_db.insert_block(&H256::from(4), 4, &H256::from(3), make_changeset(&[4], &[94])));
db.commit(&state_db.finalize_block(&H256::from(21)));
db.commit(&state_db.finalize_block(&H256::from(3)));
db.commit(&state_db.canonicalize_block(&H256::from(21)));
db.commit(&state_db.canonicalize_block(&H256::from(3)));
(db, state_db)
}
@@ -14,25 +14,25 @@
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! Finalization window.
//! Canonicalization window.
//! Maintains trees of block overlays and allows discarding trees/roots
//! The overlays are added in `insert` and removed in `finalize`.
//! Last finalized overlay is kept in memory until next call to `finalize` or
//! The overlays are added in `insert` and removed in `canonicalize`.
//! Last canonicalized overlay is kept in memory until next call to `canonicalize` or
//! `clear_overlay`
use std::collections::{HashMap, VecDeque};
use super::{Error, DBValue, ChangeSet, CommitSet, MetaDb, Hash, to_meta_key};
use codec::{Decode, Encode};
const UNFINALIZED_JOURNAL: &[u8] = b"unfinalized_journal";
const LAST_FINALIZED: &[u8] = b"last_finalized";
const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal";
const LAST_CANONICAL: &[u8] = b"last_canonical";
/// See module documentation.
pub struct UnfinalizedOverlay<BlockHash: Hash, Key: Hash> {
last_finalized: Option<(BlockHash, u64)>,
pub struct NonCanonicalOverlay<BlockHash: Hash, Key: Hash> {
last_canonicalized: Option<(BlockHash, u64)>,
levels: VecDeque<Vec<BlockOverlay<BlockHash, Key>>>,
parents: HashMap<BlockHash, BlockHash>,
last_finalized_overlay: HashMap<Key, DBValue>,
last_canonicalized_overlay: HashMap<Key, DBValue>,
}
#[derive(Encode, Decode)]
@@ -44,7 +44,7 @@ struct JournalRecord<BlockHash: Hash, Key: Hash> {
}
fn to_journal_key(block: u64, index: u64) -> Vec<u8> {
to_meta_key(UNFINALIZED_JOURNAL, &(block, index))
to_meta_key(NON_CANONICAL_JOURNAL, &(block, index))
}
#[cfg_attr(test, derive(PartialEq, Debug))]
@@ -55,20 +55,20 @@ struct BlockOverlay<BlockHash: Hash, Key: Hash> {
deleted: Vec<Key>,
}
impl<BlockHash: Hash, Key: Hash> UnfinalizedOverlay<BlockHash, Key> {
impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
/// Creates a new instance. Does not expect any metadata to be present in the DB.
pub fn new<D: MetaDb>(db: &D) -> Result<UnfinalizedOverlay<BlockHash, Key>, Error<D::Error>> {
let last_finalized = db.get_meta(&to_meta_key(LAST_FINALIZED, &()))
pub fn new<D: MetaDb>(db: &D) -> Result<NonCanonicalOverlay<BlockHash, Key>, Error<D::Error>> {
let last_canonicalized = db.get_meta(&to_meta_key(LAST_CANONICAL, &()))
.map_err(|e| Error::Db(e))?;
let last_finalized = match last_finalized {
let last_canonicalized = match last_canonicalized {
Some(buffer) => Some(<(BlockHash, u64)>::decode(&mut buffer.as_slice()).ok_or(Error::Decoding)?),
None => None,
};
let mut levels = VecDeque::new();
let mut parents = HashMap::new();
if let Some((ref hash, mut block)) = last_finalized {
if let Some((ref hash, mut block)) = last_canonicalized {
// read the journal
trace!(target: "state-db", "Reading unfinalized journal. Last finalized #{} ({:?})", block, hash);
trace!(target: "state-db", "Reading uncanonicalized journal. Last canonicalized #{} ({:?})", block, hash);
let mut total: u64 = 0;
block += 1;
loop {
@@ -85,7 +85,7 @@ impl<BlockHash: Hash, Key: Hash> UnfinalizedOverlay<BlockHash, Key> {
values: record.inserted.into_iter().collect(),
deleted: record.deleted,
};
trace!(target: "state-db", "Unfinalized journal entry {}.{} ({} inserted, {} deleted)", block, index, overlay.values.len(), overlay.deleted.len());
trace!(target: "state-db", "Uncanonicalized journal entry {}.{} ({} inserted, {} deleted)", block, index, overlay.values.len(), overlay.deleted.len());
level.push(overlay);
parents.insert(record.hash, record.parent_hash);
index += 1;
@@ -100,29 +100,29 @@ impl<BlockHash: Hash, Key: Hash> UnfinalizedOverlay<BlockHash, Key> {
levels.push_back(level);
block += 1;
}
trace!(target: "state-db", "Finished reading unfinalized journal, {} entries", total);
trace!(target: "state-db", "Finished reading uncanonicalized journal, {} entries", total);
}
Ok(UnfinalizedOverlay {
last_finalized: last_finalized,
Ok(NonCanonicalOverlay {
last_canonicalized: last_canonicalized,
levels,
parents,
last_finalized_overlay: Default::default(),
last_canonicalized_overlay: Default::default(),
})
}
/// Insert a new block into the overlay. If inserted on the second level or lower, expects the parent to be present in the window.
pub fn insert(&mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet<Key>) -> CommitSet<Key> {
let mut commit = CommitSet::default();
if self.levels.is_empty() && self.last_finalized.is_none() {
// assume that parent was finalized
let last_finalized = (parent_hash.clone(), number - 1);
commit.meta.inserted.push((to_meta_key(LAST_FINALIZED, &()), last_finalized.encode()));
self.last_finalized = Some(last_finalized);
} else if self.last_finalized.is_some() {
if self.levels.is_empty() && self.last_canonicalized.is_none() {
// assume that parent was canonicalized
let last_canonicalized = (parent_hash.clone(), number - 1);
commit.meta.inserted.push((to_meta_key(LAST_CANONICAL, &()), last_canonicalized.encode()));
self.last_canonicalized = Some(last_canonicalized);
} else if self.last_canonicalized.is_some() {
assert!(number >= self.front_block_number() && number < (self.front_block_number() + self.levels.len() as u64 + 1));
// check for valid parent if inserting on second level or higher
if number == self.front_block_number() {
assert!(self.last_finalized.as_ref().map_or(false, |&(ref h, n)| h == parent_hash && n == number - 1));
assert!(self.last_canonicalized.as_ref().map_or(false, |&(ref h, n)| h == parent_hash && n == number - 1));
} else {
assert!(self.parents.contains_key(&parent_hash));
}
@@ -153,7 +153,7 @@ impl<BlockHash: Hash, Key: Hash> UnfinalizedOverlay<BlockHash, Key> {
inserted: changeset.inserted,
deleted: changeset.deleted,
};
trace!(target: "state-db", "Inserted unfinalized changeset {}.{} ({} inserted, {} deleted)", number, index, journal_record.inserted.len(), journal_record.deleted.len());
trace!(target: "state-db", "Inserted uncanonicalized changeset {}.{} ({} inserted, {} deleted)", number, index, journal_record.inserted.len(), journal_record.deleted.len());
let journal_record = journal_record.encode();
commit.meta.inserted.push((journal_key, journal_record));
commit
@@ -182,34 +182,34 @@ impl<BlockHash: Hash, Key: Hash> UnfinalizedOverlay<BlockHash, Key> {
}
fn front_block_number(&self) -> u64 {
self.last_finalized.as_ref().map(|&(_, n)| n + 1).unwrap_or(0)
self.last_canonicalized.as_ref().map(|&(_, n)| n + 1).unwrap_or(0)
}
pub fn last_finalized_block_number(&self) -> u64 {
self.last_finalized.as_ref().map(|&(_, n)| n).unwrap_or(0)
pub fn last_canonicalized_block_number(&self) -> u64 {
self.last_canonicalized.as_ref().map(|&(_, n)| n).unwrap_or(0)
}
/// This may be called when the last canonicalization commit was applied to the database.
pub fn clear_overlay(&mut self) {
self.last_finalized_overlay.clear();
self.last_canonicalized_overlay.clear();
}
/// Select a top-level root and finalized it. Discards all sibling subtrees and the root.
/// Select a top-level root and canonicalize it. Discards all sibling subtrees and the root.
/// Returns a set of changes that need to be added to the DB.
pub fn finalize(&mut self, hash: &BlockHash) -> CommitSet<Key> {
trace!(target: "state-db", "Finalizing {:?}", hash);
let level = self.levels.pop_front().expect("no blocks to finalize");
pub fn canonicalize(&mut self, hash: &BlockHash) -> CommitSet<Key> {
trace!(target: "state-db", "Canonicalizing {:?}", hash);
let level = self.levels.pop_front().expect("no blocks to canonicalize");
let index = level.iter().position(|overlay| overlay.hash == *hash)
.expect("attempting to finalize unknown block");
.expect("attempting to canonicalize unknown block");
let mut commit = CommitSet::default();
let mut discarded_journals = Vec::new();
for (i, overlay) in level.into_iter().enumerate() {
self.parents.remove(&overlay.hash);
if i == index {
self.last_finalized_overlay = overlay.values;
// that's the one we need to finalize
commit.data.inserted = self.last_finalized_overlay.iter().map(|(k, v)| (k.clone(), v.clone())).collect();
self.last_canonicalized_overlay = overlay.values;
// that's the one we need to canonicalize
commit.data.inserted = self.last_canonicalized_overlay.iter().map(|(k, v)| (k.clone(), v.clone())).collect();
commit.data.deleted = overlay.deleted;
} else {
// TODO: borrow checker won't allow us to split out mutable references
@@ -223,16 +223,16 @@ impl<BlockHash: Hash, Key: Hash> UnfinalizedOverlay<BlockHash, Key> {
discarded_journals.push(overlay.journal_key);
}
commit.meta.deleted.append(&mut discarded_journals);
let last_finalized = (hash.clone(), self.front_block_number());
commit.meta.inserted.push((to_meta_key(LAST_FINALIZED, &()), last_finalized.encode()));
self.last_finalized = Some(last_finalized);
let last_canonicalized = (hash.clone(), self.front_block_number());
commit.meta.inserted.push((to_meta_key(LAST_CANONICAL, &()), last_canonicalized.encode()));
self.last_canonicalized = Some(last_canonicalized);
trace!(target: "state-db", "Discarded {} records", commit.meta.deleted.len());
commit
}
/// Get a value from the node overlay. This searches in every existing changeset.
pub fn get(&self, key: &Key) -> Option<DBValue> {
if let Some(value) = self.last_finalized_overlay.get(&key) {
if let Some(value) = self.last_canonicalized_overlay.get(&key) {
return Some(value.clone());
}
for level in self.levels.iter() {
@@ -260,30 +260,30 @@ impl<BlockHash: Hash, Key: Hash> UnfinalizedOverlay<BlockHash, Key> {
#[cfg(test)]
mod tests {
use super::UnfinalizedOverlay;
use super::NonCanonicalOverlay;
use {ChangeSet};
use primitives::H256;
use test::{make_db, make_changeset};
fn contains(overlay: &UnfinalizedOverlay<H256, H256>, key: u64) -> bool {
fn contains(overlay: &NonCanonicalOverlay<H256, H256>, key: u64) -> bool {
overlay.get(&H256::from(key)) == Some(H256::from(key).to_vec())
}
#[test]
fn created_from_empty_db() {
let db = make_db(&[]);
let overlay: UnfinalizedOverlay<H256, H256> = UnfinalizedOverlay::new(&db).unwrap();
assert_eq!(overlay.last_finalized, None);
let overlay: NonCanonicalOverlay<H256, H256> = NonCanonicalOverlay::new(&db).unwrap();
assert_eq!(overlay.last_canonicalized, None);
assert!(overlay.levels.is_empty());
assert!(overlay.parents.is_empty());
}
#[test]
#[should_panic]
fn finalize_empty_panics() {
fn canonicalize_empty_panics() {
let db = make_db(&[]);
let mut overlay = UnfinalizedOverlay::<H256, H256>::new(&db).unwrap();
overlay.finalize(&H256::default());
let mut overlay = NonCanonicalOverlay::<H256, H256>::new(&db).unwrap();
overlay.canonicalize(&H256::default());
}
#[test]
@@ -292,7 +292,7 @@ mod tests {
let db = make_db(&[]);
let h1 = H256::random();
let h2 = H256::random();
let mut overlay = UnfinalizedOverlay::<H256, H256>::new(&db).unwrap();
let mut overlay = NonCanonicalOverlay::<H256, H256>::new(&db).unwrap();
overlay.insert(&h1, 2, &H256::default(), ChangeSet::default());
overlay.insert(&h2, 1, &h1, ChangeSet::default());
}
@@ -303,7 +303,7 @@ mod tests {
let h1 = H256::random();
let h2 = H256::random();
let db = make_db(&[]);
let mut overlay = UnfinalizedOverlay::<H256, H256>::new(&db).unwrap();
let mut overlay = NonCanonicalOverlay::<H256, H256>::new(&db).unwrap();
overlay.insert(&h1, 1, &H256::default(), ChangeSet::default());
overlay.insert(&h2, 3, &h1, ChangeSet::default());
}
@@ -314,27 +314,27 @@ mod tests {
let db = make_db(&[]);
let h1 = H256::random();
let h2 = H256::random();
let mut overlay = UnfinalizedOverlay::<H256, H256>::new(&db).unwrap();
let mut overlay = NonCanonicalOverlay::<H256, H256>::new(&db).unwrap();
overlay.insert(&h1, 1, &H256::default(), ChangeSet::default());
overlay.insert(&h2, 2, &H256::default(), ChangeSet::default());
}
#[test]
#[should_panic]
fn finalize_unknown_panics() {
fn canonicalize_unknown_panics() {
let h1 = H256::random();
let h2 = H256::random();
let db = make_db(&[]);
let mut overlay = UnfinalizedOverlay::<H256, H256>::new(&db).unwrap();
let mut overlay = NonCanonicalOverlay::<H256, H256>::new(&db).unwrap();
overlay.insert(&h1, 1, &H256::default(), ChangeSet::default());
overlay.finalize(&h2);
overlay.canonicalize(&h2);
}
#[test]
fn insert_finalize_one() {
fn insert_canonicalize_one() {
let h1 = H256::random();
let mut db = make_db(&[1, 2]);
let mut overlay = UnfinalizedOverlay::<H256, H256>::new(&db).unwrap();
let mut overlay = NonCanonicalOverlay::<H256, H256>::new(&db).unwrap();
let changeset = make_changeset(&[3, 4], &[2]);
let insertion = overlay.insert(&h1, 1, &H256::default(), changeset.clone());
assert_eq!(insertion.data.inserted.len(), 0);
@@ -342,7 +342,7 @@ mod tests {
assert_eq!(insertion.meta.inserted.len(), 2);
assert_eq!(insertion.meta.deleted.len(), 0);
db.commit(&insertion);
let finalization = overlay.finalize(&h1);
let finalization = overlay.canonicalize(&h1);
assert_eq!(finalization.data.inserted.len(), changeset.inserted.len());
assert_eq!(finalization.data.deleted.len(), changeset.deleted.len());
assert_eq!(finalization.meta.inserted.len(), 1);
@@ -356,40 +356,40 @@ mod tests {
let h1 = H256::random();
let h2 = H256::random();
let mut db = make_db(&[1, 2]);
let mut overlay = UnfinalizedOverlay::<H256, H256>::new(&db).unwrap();
let mut overlay = NonCanonicalOverlay::<H256, H256>::new(&db).unwrap();
db.commit(&overlay.insert(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])));
db.commit(&overlay.insert(&h2, 11, &h1, make_changeset(&[5], &[3])));
assert_eq!(db.meta.len(), 3);
let overlay2 = UnfinalizedOverlay::<H256, H256>::new(&db).unwrap();
let overlay2 = NonCanonicalOverlay::<H256, H256>::new(&db).unwrap();
assert_eq!(overlay.levels, overlay2.levels);
assert_eq!(overlay.parents, overlay2.parents);
assert_eq!(overlay.last_finalized, overlay2.last_finalized);
assert_eq!(overlay.last_canonicalized, overlay2.last_canonicalized);
}
#[test]
fn restore_from_journal_after_finalize() {
fn restore_from_journal_after_canonicalize() {
let h1 = H256::random();
let h2 = H256::random();
let mut db = make_db(&[1, 2]);
let mut overlay = UnfinalizedOverlay::<H256, H256>::new(&db).unwrap();
let mut overlay = NonCanonicalOverlay::<H256, H256>::new(&db).unwrap();
db.commit(&overlay.insert(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])));
db.commit(&overlay.insert(&h2, 11, &h1, make_changeset(&[5], &[3])));
db.commit(&overlay.finalize(&h1));
db.commit(&overlay.canonicalize(&h1));
assert_eq!(overlay.levels.len(), 1);
let overlay2 = UnfinalizedOverlay::<H256, H256>::new(&db).unwrap();
let overlay2 = NonCanonicalOverlay::<H256, H256>::new(&db).unwrap();
assert_eq!(overlay.levels, overlay2.levels);
assert_eq!(overlay.parents, overlay2.parents);
assert_eq!(overlay.last_finalized, overlay2.last_finalized);
assert_eq!(overlay.last_canonicalized, overlay2.last_canonicalized);
}
#[test]
fn insert_finalize_two() {
fn insert_canonicalize_two() {
let h1 = H256::random();
let h2 = H256::random();
let mut db = make_db(&[1, 2, 3, 4]);
let mut overlay = UnfinalizedOverlay::<H256, H256>::new(&db).unwrap();
let mut overlay = NonCanonicalOverlay::<H256, H256>::new(&db).unwrap();
let changeset1 = make_changeset(&[5, 6], &[2]);
let changeset2 = make_changeset(&[7, 8], &[5, 3]);
db.commit(&overlay.insert(&h1, 1, &H256::default(), changeset1));
@@ -399,14 +399,14 @@ mod tests {
assert!(contains(&overlay, 5));
assert_eq!(overlay.levels.len(), 2);
assert_eq!(overlay.parents.len(), 2);
db.commit(&overlay.finalize(&h1));
db.commit(&overlay.canonicalize(&h1));
assert_eq!(overlay.levels.len(), 1);
assert_eq!(overlay.parents.len(), 1);
assert!(contains(&overlay, 5));
overlay.clear_overlay();
assert!(!contains(&overlay, 5));
assert!(contains(&overlay, 7));
db.commit(&overlay.finalize(&h2));
db.commit(&overlay.canonicalize(&h2));
overlay.clear_overlay();
assert_eq!(overlay.levels.len(), 0);
assert_eq!(overlay.parents.len(), 0);
@@ -442,7 +442,7 @@ mod tests {
let (h_1_2_3, c_1_2_3) = (H256::random(), make_changeset(&[123], &[]));
let (h_2_1_1, c_2_1_1) = (H256::random(), make_changeset(&[211], &[]));
let mut overlay = UnfinalizedOverlay::<H256, H256>::new(&db).unwrap();
let mut overlay = NonCanonicalOverlay::<H256, H256>::new(&db).unwrap();
db.commit(&overlay.insert(&h_1, 1, &H256::default(), c_1));
db.commit(&overlay.insert(&h_1_1, 2, &h_1, c_1_1));
@@ -467,16 +467,16 @@ mod tests {
assert!(contains(&overlay, 211));
assert_eq!(overlay.levels.len(), 3);
assert_eq!(overlay.parents.len(), 11);
assert_eq!(overlay.last_finalized, Some((H256::default(), 0)));
assert_eq!(overlay.last_canonicalized, Some((H256::default(), 0)));
// check if restoration from journal results in the same tree
let overlay2 = UnfinalizedOverlay::<H256, H256>::new(&db).unwrap();
let overlay2 = NonCanonicalOverlay::<H256, H256>::new(&db).unwrap();
assert_eq!(overlay.levels, overlay2.levels);
assert_eq!(overlay.parents, overlay2.parents);
assert_eq!(overlay.last_finalized, overlay2.last_finalized);
assert_eq!(overlay.last_canonicalized, overlay2.last_canonicalized);
// finalize 1. 2 and all its children should be discarded
db.commit(&overlay.finalize(&h_1));
// canonicalize 1. 2 and all its children should be discarded
db.commit(&overlay.canonicalize(&h_1));
overlay.clear_overlay();
assert_eq!(overlay.levels.len(), 2);
assert_eq!(overlay.parents.len(), 6);
@@ -487,8 +487,8 @@ mod tests {
assert!(!contains(&overlay, 211));
assert!(contains(&overlay, 111));
// finalize 1_2. 1_1 and all its children should be discarded
db.commit(&overlay.finalize(&h_1_2));
// canonicalize 1_2. 1_1 and all its children should be discarded
db.commit(&overlay.canonicalize(&h_1_2));
overlay.clear_overlay();
assert_eq!(overlay.levels.len(), 1);
assert_eq!(overlay.parents.len(), 3);
@@ -498,13 +498,13 @@ mod tests {
assert!(contains(&overlay, 122));
assert!(contains(&overlay, 123));
// finalize 1_2_2
db.commit(&overlay.finalize(&h_1_2_2));
// canonicalize 1_2_2
db.commit(&overlay.canonicalize(&h_1_2_2));
overlay.clear_overlay();
assert_eq!(overlay.levels.len(), 0);
assert_eq!(overlay.parents.len(), 0);
assert!(db.data_eq(&make_db(&[1, 12, 122])));
assert_eq!(overlay.last_finalized, Some((h_1_2_2, 3)));
assert_eq!(overlay.last_canonicalized, Some((h_1_2_2, 3)));
}
#[test]
@@ -512,7 +512,7 @@ mod tests {
let h1 = H256::random();
let h2 = H256::random();
let mut db = make_db(&[1, 2, 3, 4]);
let mut overlay = UnfinalizedOverlay::<H256, H256>::new(&db).unwrap();
let mut overlay = NonCanonicalOverlay::<H256, H256>::new(&db).unwrap();
assert!(overlay.revert_one().is_none());
let changeset1 = make_changeset(&[5, 6], &[2]);
let changeset2 = make_changeset(&[7, 8], &[5, 3]);
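The tests above drive the window through a database; the core structure is easier to see in isolation. A self-contained toy of the canonicalization window, with one level of sibling overlays per block number, `u64` standing in for hashes and keys, and the sibling-subtree discarding simplified away:

use std::collections::{HashMap, VecDeque};

struct Window {
    // One entry per block number: the sibling overlays at that height.
    levels: VecDeque<Vec<(u64, HashMap<u64, u64>)>>, // (hash, inserted key -> value)
    // The winning overlay, kept readable until `clear_overlay`.
    last_overlay: HashMap<u64, u64>,
}

impl Window {
    fn insert(&mut self, level: usize, hash: u64, values: HashMap<u64, u64>) {
        while self.levels.len() <= level {
            self.levels.push_back(Vec::new());
        }
        self.levels[level].push((hash, values));
    }

    // Pop the front level, keep the chosen block's values, drop the siblings.
    fn canonicalize(&mut self, hash: u64) -> HashMap<u64, u64> {
        let level = self.levels.pop_front().expect("no blocks to canonicalize");
        let (_, values) = level
            .into_iter()
            .find(|(h, _)| *h == hash)
            .expect("attempting to canonicalize unknown block");
        self.last_overlay = values.clone();
        values // the caller commits these to the backing database
    }

    fn clear_overlay(&mut self) {
        self.last_overlay.clear();
    }

    // Search the kept overlay first, then every remaining level.
    fn get(&self, key: u64) -> Option<u64> {
        if let Some(v) = self.last_overlay.get(&key) {
            return Some(*v);
        }
        self.levels
            .iter()
            .flat_map(|level| level.iter())
            .find_map(|(_, values)| values.get(&key).copied())
    }
}

fn main() {
    let mut w = Window { levels: VecDeque::new(), last_overlay: HashMap::new() };
    w.insert(0, 1, HashMap::from([(10, 100)]));
    w.insert(0, 2, HashMap::from([(20, 200)])); // sibling of block 1
    let committed = w.canonicalize(1);
    assert_eq!(committed.get(&10), Some(&100));
    assert_eq!(w.get(10), Some(100)); // still readable until the commit lands
    w.clear_overlay();
    assert_eq!(w.get(10), None);
    assert_eq!(w.get(20), None); // sibling was discarded
}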
+7 -7
@@ -137,7 +137,7 @@ impl<BlockHash: Hash, Key: Hash> RefWindow<BlockHash, Key> {
}
/// Add a change set to the window. Creates a journal record and pushes it to `commit`
pub fn note_finalized(&mut self, hash: &BlockHash, commit: &mut CommitSet<Key>) {
pub fn note_canonical(&mut self, hash: &BlockHash, commit: &mut CommitSet<Key>) {
trace!(target: "state-db", "Adding to pruning window: {:?} ({} inserted, {} deleted)", hash, commit.data.inserted.len(), commit.data.deleted.len());
let inserted = commit.data.inserted.iter().map(|(k, _)| k.clone()).collect();
let deleted = ::std::mem::replace(&mut commit.data.deleted, Vec::new());
@@ -192,7 +192,7 @@ mod tests {
let mut pruning: RefWindow<H256, H256> = RefWindow::new(&db).unwrap();
let mut commit = make_commit(&[4, 5], &[1, 3]);
let h = H256::random();
pruning.note_finalized(&h, &mut commit);
pruning.note_canonical(&h, &mut commit);
db.commit(&commit);
assert!(commit.data.deleted.is_empty());
assert_eq!(pruning.death_rows.len(), 1);
@@ -214,10 +214,10 @@ mod tests {
let mut db = make_db(&[1, 2, 3]);
let mut pruning: RefWindow<H256, H256> = RefWindow::new(&db).unwrap();
let mut commit = make_commit(&[4], &[1]);
pruning.note_finalized(&H256::random(), &mut commit);
pruning.note_canonical(&H256::random(), &mut commit);
db.commit(&commit);
let mut commit = make_commit(&[5], &[2]);
pruning.note_finalized(&H256::random(), &mut commit);
pruning.note_canonical(&H256::random(), &mut commit);
db.commit(&commit);
assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5])));
@@ -239,13 +239,13 @@ mod tests {
let mut db = make_db(&[1, 2, 3]);
let mut pruning: RefWindow<H256, H256> = RefWindow::new(&db).unwrap();
let mut commit = make_commit(&[], &[2]);
pruning.note_finalized(&H256::random(), &mut commit);
pruning.note_canonical(&H256::random(), &mut commit);
db.commit(&commit);
let mut commit = make_commit(&[2], &[]);
pruning.note_finalized(&H256::random(), &mut commit);
pruning.note_canonical(&H256::random(), &mut commit);
db.commit(&commit);
let mut commit = make_commit(&[], &[2]);
pruning.note_finalized(&H256::random(), &mut commit);
pruning.note_canonical(&H256::random(), &mut commit);
db.commit(&commit);
assert!(db.data_eq(&make_db(&[1, 2, 3])));
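A reduced model of the pruning window that `note_canonical` feeds: each canonicalized block contributes a death row of keys, which become deletable once the block leaves the window. The fixed `max_blocks` eviction rule is an assumption for illustration; the real `RefWindow` also journals its records to the database:

use std::collections::VecDeque;

struct PruningWindow {
    max_blocks: usize,
    death_rows: VecDeque<(u64, Vec<u64>)>, // (block hash, keys to delete)
}

impl PruningWindow {
    fn note_canonical(&mut self, hash: u64, deleted: Vec<u64>) {
        self.death_rows.push_back((hash, deleted));
    }

    // Pop blocks past the window and return the keys now safe to delete.
    fn prune(&mut self) -> Vec<u64> {
        let mut to_delete = Vec::new();
        while self.death_rows.len() > self.max_blocks {
            if let Some((_, mut keys)) = self.death_rows.pop_front() {
                to_delete.append(&mut keys);
            }
        }
        to_delete
    }
}

fn main() {
    let mut w = PruningWindow { max_blocks: 1, death_rows: VecDeque::new() };
    w.note_canonical(1, vec![10]);
    w.note_canonical(2, vec![20]);
    assert_eq!(w.prune(), vec![10]); // block 1 left the window
}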
+2 -2
@@ -30,7 +30,7 @@ pub trait TestClient {
/// Crates new client instance for tests.
fn new_for_tests() -> Self;
/// Justify and import block to the chain.
/// Justify and import a block into the chain. Instant finality.
fn justify_and_import(&self, origin: client::BlockOrigin, block: runtime::Block) -> client::error::Result<()>;
/// Returns hash of the genesis block.
@@ -45,7 +45,7 @@ impl TestClient for Client<Backend, Executor, runtime::Block> {
fn justify_and_import(&self, origin: client::BlockOrigin, block: runtime::Block) -> client::error::Result<()> {
let justification = fake_justify(&block.header);
let justified = self.check_justification(block.header, justification)?;
self.import_block(origin, justified, Some(block.extrinsics))?;
self.import_block(origin, justified, Some(block.extrinsics), true)?;
Ok(())
}
+1 -1
@@ -73,7 +73,7 @@ pub trait Trait: Eq + Clone {
type Origin: Into<Option<RawOrigin<Self::AccountId>>> + From<RawOrigin<Self::AccountId>>;
type Index: Parameter + Member + Default + MaybeDisplay + SimpleArithmetic + Copy;
type BlockNumber: Parameter + Member + MaybeDisplay + SimpleArithmetic + Default + Bounded + Copy + rstd::hash::Hash;
type Hash: Parameter + Member + MaybeDisplay + SimpleBitOps + Default + Copy + CheckEqual + rstd::hash::Hash + AsRef<[u8]>;
type Hash: Parameter + Member + MaybeDisplay + SimpleBitOps + Default + Copy + CheckEqual + rstd::hash::Hash + AsRef<[u8]> + AsMut<[u8]>;
type Hashing: Hash<Output = Self::Hash>;
type Digest: Parameter + Member + Default + traits::Digest<Hash = Self::Hash>;
type AccountId: Parameter + Member + MaybeDisplay + Ord + Default;