diff --git a/substrate/core/client/db/src/lib.rs b/substrate/core/client/db/src/lib.rs index 8d3b30c13d..521781fc64 100644 --- a/substrate/core/client/db/src/lib.rs +++ b/substrate/core/client/db/src/lib.rs @@ -153,6 +153,7 @@ impl BlockchainDb { let mut meta = self.meta.write(); if number == Zero::zero() { meta.genesis_hash = hash; + meta.finalized_hash = hash; } if is_best { @@ -178,6 +179,7 @@ impl client::blockchain::HeaderBackend for BlockchainDb Backend { } // write stuff to a transaction after a new block is finalized. - // - // this manages state pruning and ensuring reorgs don't occur. - // this function should only be called if the finalized block is contained - // in the best chain. - fn note_finalized(&self, transaction: &mut DBTransaction, f_header: &Block::Header, f_hash: Block::Hash) -> Result<(), client::error::Error> { - const NOTEWORTHY_FINALIZATION_GAP: u64 = 32; - - // TODO: ensure this doesn't conflict with old finalized block. + // this manages state pruning. Fails if called with a block which + // was not a child of the last finalized block. 
+ fn note_finalized( + &self, + transaction: &mut DBTransaction, + f_header: &Block::Header, + f_hash: Block::Hash, + ) -> Result<(), client::error::Error> { let meta = self.blockchain.meta.read(); let f_num = f_header.number().clone(); + if &meta.finalized_hash != f_header.parent_hash() { + return Err(::client::error::ErrorKind::NonSequentialFinalization( + format!("Last finalized {:?} not parent of {:?}", + meta.finalized_hash, f_hash), + ).into()) + } transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, f_hash.as_ref()); - let (last_finalized_hash, last_finalized_number) - = (meta.finalized_hash.clone(), meta.finalized_number); - - let finalized_gap = f_num - last_finalized_number; - - if finalized_gap.as_() >= NOTEWORTHY_FINALIZATION_GAP { - info!(target: "db", "Finalizing large run of blocks from {:?} to {:?}", - (&last_finalized_hash, last_finalized_number), (&f_hash, f_num)); - } else { - debug!(target: "db", "Finalizing blocks from {:?} to {:?}", - (&last_finalized_hash, last_finalized_number), (&f_hash, f_num)); - } - - let mut canonicalize_state = |canonical_hash| { - let commit = self.storage.state_db.canonicalize_block(&canonical_hash); - apply_state_commit(transaction, commit); - }; - - // when finalizing a block, we must also implicitly finalize all the blocks - // in between the last finalized block and this one. That means canonicalizing - // all their states in order. 
let number_u64 = f_num.as_(); if number_u64 > self.pruning_window { let new_canonical = number_u64 - self.pruning_window; - let best_canonical = self.storage.state_db.best_canonical(); - for uncanonicalized_number in (best_canonical..new_canonical).map(|x| x + 1) { - let hash = if uncanonicalized_number == number_u64 { - f_hash - } else { - read_id::( - &*self.blockchain.db, - columns::HASH_LOOKUP, - BlockId::Number(As::sa(uncanonicalized_number)) - )?.expect("existence of block with number `new_canonical` \ - implies existence of blocks with all nubmers before it; qed") - }; + let hash = if new_canonical == number_u64 { + f_hash + } else { + read_id::( + &*self.blockchain.db, + columns::HASH_LOOKUP, + BlockId::Number(As::sa(new_canonical)) + )?.expect("existence of block with number `new_canonical` \ + implies existence of blocks with all numbers before it; qed") + }; - trace!(target: "db", "Canonicalize block #{} ({:?})", uncanonicalized_number, hash); - canonicalize_state(hash); - } + trace!(target: "db", "Canonicalize block #{} ({:?})", new_canonical, hash); + let commit = self.storage.state_db.canonicalize_block(&hash); + apply_state_commit(transaction, commit); }; Ok(()) @@ -507,7 +492,9 @@ impl client::backend::Backend for Backend< }) } - fn commit_operation(&self, mut operation: Self::BlockImportOperation) -> Result<(), client::error::Error> { + fn commit_operation(&self, mut operation: Self::BlockImportOperation) + -> Result<(), client::error::Error> + { let mut transaction = DBTransaction::new(); if let Some(pending_block) = operation.pending_block { let hash = pending_block.header.hash(); @@ -527,19 +514,19 @@ impl client::backend::Backend for Backend< // cannot find tree route with empty DB.
if meta.best_hash != Default::default() { let parent_hash = *pending_block.header.parent_hash(); - let tree_route = ::utils::tree_route::( - &*self.blockchain.db, - columns::HEADER, - meta.best_hash, - parent_hash, + let tree_route = ::client::blockchain::tree_route( + &self.blockchain, + BlockId::Hash(meta.best_hash), + BlockId::Hash(parent_hash), )?; // update block number to hash lookup entries. for retracted in tree_route.retracted() { if retracted.hash == meta.finalized_hash { - // TODO: can we recover here? - warn!("Safety failure: reverting finalized block {:?}", + warn!("Potential safety failure: reverting finalized block {:?}", (&retracted.number, &retracted.hash)); + + return Err(::client::error::ErrorKind::NotInFinalizedChain.into()); } transaction.delete( @@ -699,6 +686,60 @@ mod tests { type Block = RawBlock; + fn prepare_changes(changes: Vec<(Vec, Vec)>) -> (H256, MemoryDB) { + let mut changes_root = H256::default(); + let mut changes_trie_update = MemoryDB::::new(); + { + let mut trie = TrieDBMut::::new( + &mut changes_trie_update, + &mut changes_root + ); + for (key, value) in changes { + trie.insert(&key, &value).unwrap(); + } + } + + (changes_root, changes_trie_update) + } + + fn insert_header( + backend: &Backend, + number: u64, + parent_hash: H256, + changes: Vec<(Vec, Vec)>, + extrinsics_root: H256, + ) -> H256 { + use runtime_primitives::generic::DigestItem; + use runtime_primitives::testing::Digest; + + let (changes_root, changes_trie_update) = prepare_changes(changes); + let digest = Digest { + logs: vec![ + DigestItem::ChangesTrieRoot(changes_root), + ], + }; + let header = Header { + number, + parent_hash, + state_root: Default::default(), + digest, + extrinsics_root, + }; + let header_hash = header.hash(); + + let block_id = if number == 0 { + BlockId::Hash(Default::default()) + } else { + BlockId::Number(number - 1) + }; + let mut op = backend.begin_operation(block_id).unwrap(); + op.set_block_data(header, None, None, 
NewBlockState::Best).unwrap(); + op.update_changes_trie(changes_trie_update).unwrap(); + backend.commit_operation(op).unwrap(); + + header_hash + } + #[test] fn block_hash_inserted_correctly() { let db = Backend::::new_test(1); @@ -924,6 +965,7 @@ mod tests { assert!(backend.storage.db.get(::columns::STATE, &key.0[..]).unwrap().is_some()); } + backend.finalize_block(BlockId::Number(1)).unwrap(); backend.finalize_block(BlockId::Number(2)).unwrap(); assert!(backend.storage.db.get(::columns::STATE, &key.0[..]).unwrap().is_none()); } @@ -932,54 +974,6 @@ mod tests { fn changes_trie_storage_works() { let backend = Backend::::new_test(1000); - let prepare_changes = |changes: Vec<(Vec, Vec)>| { - let mut changes_root = H256::default(); - let mut changes_trie_update = MemoryDB::::new(); - { - let mut trie = TrieDBMut::::new( - &mut changes_trie_update, - &mut changes_root - ); - for (key, value) in changes { - trie.insert(&key, &value).unwrap(); - } - } - - (changes_root, changes_trie_update) - }; - - let insert_header = |number: u64, parent_hash: H256, changes: Vec<(Vec, Vec)>| { - use runtime_primitives::generic::DigestItem; - use runtime_primitives::testing::Digest; - - let (changes_root, changes_trie_update) = prepare_changes(changes); - let digest = Digest { - logs: vec![ - DigestItem::ChangesTrieRoot(changes_root), - ], - }; - let header = Header { - number, - parent_hash, - state_root: Default::default(), - digest, - extrinsics_root: Default::default(), - }; - let header_hash = header.hash(); - - let block_id = if number == 0 { - BlockId::Hash(Default::default()) - } else { - BlockId::Number(number - 1) - }; - let mut op = backend.begin_operation(block_id).unwrap(); - op.set_block_data(header, None, None, NewBlockState::Best).unwrap(); - op.update_changes_trie(changes_trie_update).unwrap(); - backend.commit_operation(op).unwrap(); - - header_hash - }; - let check_changes = |backend: &Backend, block: u64, changes: Vec<(Vec, Vec)>| { let (changes_root, mut 
changes_trie_update) = prepare_changes(changes); assert_eq!(backend.tries_change_storage.root(block), Ok(Some(changes_root))); @@ -996,13 +990,76 @@ mod tests { ]; let changes2 = vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())]; - let block0 = insert_header(0, Default::default(), changes0.clone()); - let block1 = insert_header(1, block0, changes1.clone()); - let _ = insert_header(2, block1, changes2.clone()); + let block0 = insert_header(&backend, 0, Default::default(), changes0.clone(), Default::default()); + let block1 = insert_header(&backend, 1, block0, changes1.clone(), Default::default()); + let _ = insert_header(&backend, 2, block1, changes2.clone(), Default::default()); // check that the storage contains tries for all blocks check_changes(&backend, 0, changes0); check_changes(&backend, 1, changes1); check_changes(&backend, 2, changes2); } + + #[test] + fn tree_route_works() { + let backend = Backend::::new_test(1000); + let block0 = insert_header(&backend, 0, Default::default(), Vec::new(), Default::default()); + + // fork from genesis: 3 prong. + let a1 = insert_header(&backend, 1, block0, Vec::new(), Default::default()); + let a2 = insert_header(&backend, 2, a1, Vec::new(), Default::default()); + let a3 = insert_header(&backend, 3, a2, Vec::new(), Default::default()); + + // fork from genesis: 2 prong. 
+ let b1 = insert_header(&backend, 1, block0, Vec::new(), H256::from([1; 32])); + let b2 = insert_header(&backend, 2, b1, Vec::new(), Default::default()); + + { + let tree_route = ::client::blockchain::tree_route( + backend.blockchain(), + BlockId::Hash(a3), + BlockId::Hash(b2) + ).unwrap(); + + assert_eq!(tree_route.common_block().hash, block0); + assert_eq!(tree_route.retracted().iter().map(|r| r.hash).collect::>(), vec![a3, a2, a1]); + assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::>(), vec![b1, b2]); + } + + { + let tree_route = ::client::blockchain::tree_route( + backend.blockchain(), + BlockId::Hash(a1), + BlockId::Hash(a3), + ).unwrap(); + + assert_eq!(tree_route.common_block().hash, a1); + assert!(tree_route.retracted().is_empty()); + assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::>(), vec![a2, a3]); + } + + { + let tree_route = ::client::blockchain::tree_route( + backend.blockchain(), + BlockId::Hash(a3), + BlockId::Hash(a1), + ).unwrap(); + + assert_eq!(tree_route.common_block().hash, a1); + assert_eq!(tree_route.retracted().iter().map(|r| r.hash).collect::>(), vec![a3, a2]); + assert!(tree_route.enacted().is_empty()); + } + + { + let tree_route = ::client::blockchain::tree_route( + backend.blockchain(), + BlockId::Hash(a2), + BlockId::Hash(a2), + ).unwrap(); + + assert_eq!(tree_route.common_block().hash, a2); + assert!(tree_route.retracted().is_empty()); + assert!(tree_route.enacted().is_empty()); + } + } } diff --git a/substrate/core/client/db/src/light.rs b/substrate/core/client/db/src/light.rs index 7d9d298833..cac32b8e6e 100644 --- a/substrate/core/client/db/src/light.rs +++ b/substrate/core/client/db/src/light.rs @@ -118,20 +118,18 @@ impl LightStorage is_finalized: bool, ) { let mut meta = self.meta.write(); - if is_best { - if number == <::Header as HeaderT>::Number::zero() { - meta.genesis_hash = hash; - } + if number == Zero::zero() { + meta.genesis_hash = hash; + meta.finalized_hash = hash; + } + + if is_best { 
meta.best_number = number; meta.best_hash = hash; } if is_finalized { - if number == <::Header as HeaderT>::Number::zero() { - meta.genesis_hash = hash; - } - meta.finalized_number = number; meta.finalized_hash = hash; } @@ -152,6 +150,7 @@ impl BlockchainHeaderBackend for LightStorage best_hash: meta.best_hash, best_number: meta.best_number, genesis_hash: meta.genesis_hash, + finalized_hash: meta.finalized_hash, }) } @@ -184,75 +183,43 @@ impl BlockchainHeaderBackend for LightStorage } impl LightStorage { - // note that a block is finalized. ensure that best chain contains the finalized - // block number first. + // note that a block is finalized. only call with child of last finalized block. fn note_finalized(&self, transaction: &mut DBTransaction, header: &Block::Header, hash: Block::Hash) -> ClientResult<()> { - const NOTEWORTHY_FINALIZATION_GAP: u64 = 32; - - // TODO: ensure this doesn't conflict with old finalized block. let meta = self.meta.read(); - let f_num = header.number().clone(); - let number_u64: u64 = f_num.as_(); - transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, hash.as_ref()); - - let (last_finalized_hash, last_finalized_number) - = (meta.finalized_hash.clone(), meta.finalized_number); - - let finalized_gap = f_num - last_finalized_number; - - if finalized_gap.as_() >= NOTEWORTHY_FINALIZATION_GAP { - info!(target: "db", "Finalizing large run of blocks from {:?} to {:?}", - (&last_finalized_hash, last_finalized_number), (&hash, f_num)); - } else { - debug!(target: "db", "Finalizing blocks from {:?} to {:?}", - (&last_finalized_hash, last_finalized_number), (&hash, f_num)); + if &meta.finalized_hash != header.parent_hash() { + return Err(::client::error::ErrorKind::NonSequentialFinalization( + format!("Last finalized {:?} not parent of {:?}", + meta.finalized_hash, hash), + ).into()) } + transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, hash.as_ref()); + // build new CHT if required - let mut build_cht = |header: &Block::Header| 
-> ClientResult<()> { - if let Some(new_cht_number) = cht::is_build_required(cht::SIZE, *header.number()) { - let new_cht_start: NumberFor = cht::start_number(cht::SIZE, new_cht_number); - let new_cht_root = cht::compute_root::( - cht::SIZE, new_cht_number, (new_cht_start.as_()..) - .map(|num| self.hash(As::sa(num)).unwrap_or_default()) - ); + if let Some(new_cht_number) = cht::is_build_required(cht::SIZE, *header.number()) { + let new_cht_start: NumberFor = cht::start_number(cht::SIZE, new_cht_number); + let new_cht_root = cht::compute_root::( + cht::SIZE, new_cht_number, (new_cht_start.as_()..) + .map(|num| self.hash(As::sa(num)).unwrap_or_default()) + ); - if let Some(new_cht_root) = new_cht_root { - transaction.put(columns::CHT, &number_to_lookup_key(new_cht_start), new_cht_root.as_ref()); + if let Some(new_cht_root) = new_cht_root { + transaction.put(columns::CHT, &number_to_lookup_key(new_cht_start), new_cht_root.as_ref()); - let mut prune_block = new_cht_start; - let new_cht_end = cht::end_number(cht::SIZE, new_cht_number); - trace!(target: "db", "Replacing blocks [{}..{}] with CHT#{}", new_cht_start, new_cht_end, new_cht_number); + let mut prune_block = new_cht_start; + let new_cht_end = cht::end_number(cht::SIZE, new_cht_number); + trace!(target: "db", "Replacing blocks [{}..{}] with CHT#{}", new_cht_start, new_cht_end, new_cht_number); - while prune_block <= new_cht_end { - let id = read_id::(&*self.db, columns::HASH_LOOKUP, BlockId::Number(prune_block))?; - if let Some(hash) = id { - let lookup_key = number_to_lookup_key(prune_block); - transaction.delete(columns::HASH_LOOKUP, &lookup_key); - transaction.delete(columns::HEADER, hash.as_ref()); - } - prune_block += <::Header as HeaderT>::Number::one(); + while prune_block <= new_cht_end { + let id = read_id::(&*self.db, columns::HASH_LOOKUP, BlockId::Number(prune_block))?; + if let Some(hash) = id { + let lookup_key = number_to_lookup_key(prune_block); + transaction.delete(columns::HASH_LOOKUP, 
&lookup_key); + transaction.delete(columns::HEADER, hash.as_ref()); } + prune_block += <::Header as HeaderT>::Number::one(); } } - - Ok(()) - }; - - // attempt to build CHT for all newly finalized blocks. - let last_finalized_u64 = last_finalized_number.as_(); - for num in (last_finalized_u64..number_u64).map(|x| x + 1) { - let num = As::sa(num); - if num == f_num { - build_cht(header)?; - } else { - let old_header = match self.header(BlockId::Number(num))? { - Some(x) => x, - None => panic!("finalizing block {} implies existence of block {}; qed", f_num, num), - }; - - build_cht(&old_header)?; - } } Ok(()) @@ -260,8 +227,7 @@ impl LightStorage { } impl LightBlockchainStorage for LightStorage - where - Block: BlockT, + where Block: BlockT, { fn import_header( &self, @@ -285,11 +251,10 @@ impl LightBlockchainStorage for LightStorage let meta = self.meta.read(); if meta.best_hash != Default::default() { let parent_hash = *header.parent_hash(); - let tree_route = ::utils::tree_route::( - &*self.db, - columns::HEADER, - meta.best_hash, - parent_hash, + let tree_route = ::client::blockchain::tree_route( + self, + BlockId::Hash(meta.best_hash), + BlockId::Hash(parent_hash), )?; // update block number to hash lookup entries. @@ -377,6 +342,26 @@ pub(crate) mod tests { type Block = RawBlock; + pub fn insert_block_with_extrinsics_root( + db: &LightStorage, + parent: &Hash, + number: u64, + authorities: Option>, + extrinsics_root: Hash, + ) -> Hash { + let header = Header { + number: number.into(), + parent_hash: *parent, + state_root: Default::default(), + digest: Default::default(), + extrinsics_root, + }; + + let hash = header.hash(); + db.import_header(header, authorities, NewBlockState::Best).unwrap(); + hash + } + pub fn insert_block( db: &LightStorage, parent: &Hash, @@ -486,6 +471,9 @@ pub(crate) mod tests { assert_eq!(db.db.iter(columns::CHT).count(), 0); // now finalize the block. 
+ for i in (0..(cht::SIZE + cht::SIZE)).map(|i| i + 1) { + db.finalize_header(BlockId::Number(i)).unwrap(); + } db.finalize_header(BlockId::Hash(prev_hash)).unwrap(); assert_eq!(db.db.iter(columns::HEADER).count(), (1 + cht::SIZE + 1) as usize); assert_eq!(db.db.iter(columns::CHT).count(), 1); @@ -507,16 +495,79 @@ pub(crate) mod tests { let db = LightStorage::new_test(); // insert 1 + SIZE + SIZE + 1 blocks so that CHT#0 is created - let mut prev_hash = Default::default(); - for i in 0..1 + cht::SIZE + cht::SIZE + 1 { + let mut prev_hash = insert_block(&db, &Default::default(), 0, None); + for i in 1..1 + cht::SIZE + cht::SIZE + 1 { prev_hash = insert_block(&db, &prev_hash, i as u64, None); + db.finalize_header(BlockId::Hash(prev_hash)).unwrap(); } - db.finalize_header(BlockId::Hash(prev_hash)).unwrap(); let cht_root_1 = db.cht_root(cht::SIZE, cht::start_number(cht::SIZE, 0)).unwrap(); let cht_root_2 = db.cht_root(cht::SIZE, (cht::start_number(cht::SIZE, 0) + cht::SIZE / 2) as u64).unwrap(); let cht_root_3 = db.cht_root(cht::SIZE, cht::end_number(cht::SIZE, 0)).unwrap(); assert_eq!(cht_root_1, cht_root_2); assert_eq!(cht_root_2, cht_root_3); } + + #[test] + fn tree_route_works() { + let db = LightStorage::new_test(); + let block0 = insert_block(&db, &Default::default(), 0, None); + + // fork from genesis: 3 prong. + let a1 = insert_block(&db, &block0, 1, None); + let a2 = insert_block(&db, &a1, 2, None); + let a3 = insert_block(&db, &a2, 3, None); + + // fork from genesis: 2 prong. 
+ let b1 = insert_block_with_extrinsics_root(&db, &block0, 1, None, Hash::from([1; 32])); + let b2 = insert_block(&db, &b1, 2, None); + + { + let tree_route = ::client::blockchain::tree_route( + &db, + BlockId::Hash(a3), + BlockId::Hash(b2) + ).unwrap(); + + assert_eq!(tree_route.common_block().hash, block0); + assert_eq!(tree_route.retracted().iter().map(|r| r.hash).collect::>(), vec![a3, a2, a1]); + assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::>(), vec![b1, b2]); + } + + { + let tree_route = ::client::blockchain::tree_route( + &db, + BlockId::Hash(a1), + BlockId::Hash(a3), + ).unwrap(); + + assert_eq!(tree_route.common_block().hash, a1); + assert!(tree_route.retracted().is_empty()); + assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::>(), vec![a2, a3]); + } + + { + let tree_route = ::client::blockchain::tree_route( + &db, + BlockId::Hash(a3), + BlockId::Hash(a1), + ).unwrap(); + + assert_eq!(tree_route.common_block().hash, a1); + assert_eq!(tree_route.retracted().iter().map(|r| r.hash).collect::>(), vec![a3, a2]); + assert!(tree_route.enacted().is_empty()); + } + + { + let tree_route = ::client::blockchain::tree_route( + &db, + BlockId::Hash(a2), + BlockId::Hash(a2), + ).unwrap(); + + assert_eq!(tree_route.common_block().hash, a2); + assert!(tree_route.retracted().is_empty()); + assert!(tree_route.enacted().is_empty()); + } + } } diff --git a/substrate/core/client/src/backend.rs b/substrate/core/client/src/backend.rs index 0a55a44949..aec4572b6c 100644 --- a/substrate/core/client/src/backend.rs +++ b/substrate/core/client/src/backend.rs @@ -108,11 +108,8 @@ where fn begin_operation(&self, block: BlockId) -> error::Result; /// Commit block insertion. fn commit_operation(&self, transaction: Self::BlockImportOperation) -> error::Result<()>; - /// Finalize block with given Id. This should also implicitly finalize all ancestors. 
- /// - /// If the finalized block is not an ancestor of the current "best block", then - /// the chain will be implicitly reorganized to the best chain containing the newly - /// finalized block. + /// Finalize block with given Id. This should only be called if the parent of the given + /// block has been finalized. fn finalize_block(&self, block: BlockId) -> error::Result<()>; /// Returns reference to blockchain backend. fn blockchain(&self) -> &Self::Blockchain; diff --git a/substrate/core/client/src/blockchain.rs b/substrate/core/client/src/blockchain.rs index d1af15a586..7a9904f8a0 100644 --- a/substrate/core/client/src/blockchain.rs +++ b/substrate/core/client/src/blockchain.rs @@ -77,11 +77,13 @@ pub enum ImportResult { #[derive(Debug)] pub struct Info { /// Best block hash. - pub best_hash: <::Header as HeaderT>::Hash, + pub best_hash: Block::Hash, /// Best block number. pub best_number: <::Header as HeaderT>::Number, /// Genesis block hash. - pub genesis_hash: <::Header as HeaderT>::Hash, + pub genesis_hash: Block::Hash, + /// The head of the finalized chain. + pub finalized_hash: Block::Hash, } /// Block status. @@ -92,3 +94,129 @@ pub enum BlockStatus { /// Not in the queue or the blockchain. Unknown, } + +/// An entry in a tree route. +#[derive(Debug)] +pub struct RouteEntry { + /// The number of the block. + pub number: ::Number, + /// The hash of the block. + pub hash: Block::Hash, +} + +/// A tree-route from one block to another in the chain. +/// +/// All blocks prior to the pivot in the deque is the reverse-order unique ancestry +/// of the first block, the block at the pivot index is the common ancestor, +/// and all blocks after the pivot is the ancestry of the second block, in +/// order. +/// +/// The ancestry sets will include the given blocks, and thus the tree-route is +/// never empty. +/// +/// ```ignore +/// Tree route from R1 to E2. 
Retracted is [R1, R2, R3], Common is C, enacted [E1, E2] +/// <- R3 <- R2 <- R1 +/// / +/// C +/// \-> E1 -> E2 +/// ``` +/// +/// ```ignore +/// Tree route from C to E2. Retracted empty. Common is C, enacted [E1, E2] +/// C -> E1 -> E2 +/// ``` +#[derive(Debug)] +pub struct TreeRoute { + route: Vec>, + pivot: usize, +} + +impl TreeRoute { + /// Get a slice of all retracted blocks in reverse order (towards common ancestor) + pub fn retracted(&self) -> &[RouteEntry] { + &self.route[..self.pivot] + } + + /// Get the common ancestor block. This might be one of the two blocks of the + /// route. + pub fn common_block(&self) -> &RouteEntry { + self.route.get(self.pivot).expect("tree-routes are computed between blocks; \ + which are included in the route; \ + thus it is never empty; qed") + } + + /// Get a slice of enacted blocks (descendants of the common ancestor) + pub fn enacted(&self) -> &[RouteEntry] { + &self.route[self.pivot + 1 ..] + } +} + +/// Compute a tree-route between two blocks. See tree-route docs for more details. +pub fn tree_route>( + backend: &Backend, + from: BlockId, + to: BlockId, +) -> Result> { + use runtime_primitives::traits::Header; + + let load_header = |id: BlockId| { + match backend.header(id) { + Ok(Some(hdr)) => Ok(hdr), + Ok(None) => Err(ErrorKind::UnknownBlock(format!("Unknown block {:?}", id)).into()), + Err(e) => Err(e), + } + }; + + let mut from = load_header(from)?; + let mut to = load_header(to)?; + + let mut from_branch = Vec::new(); + let mut to_branch = Vec::new(); + + while to.number() > from.number() { + to_branch.push(RouteEntry { + number: to.number().clone(), + hash: to.hash(), + }); + + to = load_header(BlockId::Hash(*to.parent_hash()))?; + } + + while from.number() > to.number() { + from_branch.push(RouteEntry { + number: from.number().clone(), + hash: from.hash(), + }); + from = load_header(BlockId::Hash(*from.parent_hash()))?; + } + + // numbers are equal now.
walk backwards until the block is the same + + while to != from { + to_branch.push(RouteEntry { + number: to.number().clone(), + hash: to.hash(), + }); + to = load_header(BlockId::Hash(*to.parent_hash()))?; + + from_branch.push(RouteEntry { + number: from.number().clone(), + hash: from.hash(), + }); + from = load_header(BlockId::Hash(*from.parent_hash()))?; + } + + // add the pivot block, and append the reversed to-branch (note that it's in reverse order originally) + let pivot = from_branch.len(); + from_branch.push(RouteEntry { + number: to.number().clone(), + hash: to.hash(), + }); + from_branch.extend(to_branch.into_iter().rev()); + + Ok(TreeRoute { + route: from_branch, + pivot, + }) +} diff --git a/substrate/core/client/src/client.rs b/substrate/core/client/src/client.rs index a2128aad93..43d04c2b72 100644 --- a/substrate/core/client/src/client.rs +++ b/substrate/core/client/src/client.rs @@ -21,7 +21,7 @@ use futures::sync::mpsc; use parking_lot::{Mutex, RwLock}; use primitives::AuthorityId; use runtime_primitives::{bft::Justification, generic::{BlockId, SignedBlock, Block as RuntimeBlock}}; -use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Zero, One, As, NumberFor, CurrentHeight, BlockNumberToHash}; +use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Zero, As, NumberFor, CurrentHeight, BlockNumberToHash}; use runtime_primitives::BuildStorage; use primitives::{Blake2Hasher, RlpCodec, H256}; use primitives::storage::{StorageKey, StorageData}; @@ -40,7 +40,10 @@ use notifications::{StorageNotifications, StorageEventStream}; use {cht, error, in_mem, block_builder, bft, genesis}; /// Type that implements `futures::Stream` of block import events. -pub type BlockchainEventStream = mpsc::UnboundedReceiver>; +pub type ImportNotifications = mpsc::UnboundedReceiver>; + +/// A stream of block finality notifications.
+pub type FinalityNotifications = mpsc::UnboundedReceiver>; /// Substrate Client pub struct Client where Block: BlockT { @@ -48,6 +51,7 @@ pub struct Client where Block: BlockT { executor: E, storage_notifications: Mutex>, import_notification_sinks: Mutex>>>, + finality_notification_sinks: Mutex>>>, import_lock: Mutex<()>, importing_block: RwLock>, // holds the block hash currently being imported. TODO: replace this with block queue execution_strategy: ExecutionStrategy, @@ -55,8 +59,13 @@ pub struct Client where Block: BlockT { /// A source of blockchain evenets. pub trait BlockchainEvents { - /// Get block import event stream. - fn import_notification_stream(&self) -> BlockchainEventStream; + /// Get block import event stream. Not guaranteed to be fired for every + /// imported block. + fn import_notification_stream(&self) -> ImportNotifications; + + /// Get a stream of finality notifications. Not guaranteed to be fired for every + /// finalized block. + fn finality_notification_stream(&self) -> FinalityNotifications; /// Get storage changes event stream. /// @@ -146,6 +155,15 @@ pub struct BlockImportNotification { pub is_new_best: bool, } +/// Summary of a finalized block. +#[derive(Clone, Debug)] +pub struct FinalityNotification { + /// Imported block header hash. + pub hash: Block::Hash, + /// Imported block header. + pub header: Block::Header, +} + /// A header paired with a justification which has already been checked. 
#[derive(Debug, PartialEq, Eq, Clone)] pub struct JustifiedHeader { @@ -208,6 +226,7 @@ impl Client where executor, storage_notifications: Default::default(), import_notification_sinks: Default::default(), + finality_notification_sinks: Default::default(), import_lock: Default::default(), importing_block: Default::default(), execution_strategy, @@ -436,6 +455,25 @@ impl Client where blockchain::BlockStatus::Unknown => {}, } + let (last_best, last_best_number) = { + let info = self.backend.blockchain().info()?; + (info.best_hash, info.best_number) + }; + + // this is a fairly arbitrary choice of where to draw the line on making notifications, + // but the general goal is to only make notifications when we are already fully synced + // and get a new chain head. + let make_notifications = match origin { + BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => true, + BlockOrigin::Genesis | BlockOrigin::NetworkInitialSync | BlockOrigin::File => false, + }; + + // ensure parent block is finalized to maintain invariant that + // finality is called sequentially. + if finalized { + self.apply_finality(parent_hash, last_best, make_notifications)?; + } + let mut transaction = self.backend.begin_operation(BlockId::Hash(parent_hash))?; let (storage_update, changes_update, storage_changes) = match transaction.state()? { Some(transaction_state) => { @@ -470,7 +508,8 @@ impl Client where None => (None, None, None) }; - let is_new_best = header.number() == &(self.backend.blockchain().info()?.best_number + One::one()); + // TODO: non longest-chain rule. 
+ let is_new_best = finalized || header.number() > &last_best_number; let leaf_state = if finalized { ::backend::NewBlockState::Final } else if is_new_best { @@ -498,26 +537,112 @@ impl Client where } self.backend.commit_operation(transaction)?; - if origin == BlockOrigin::NetworkBroadcast || origin == BlockOrigin::Own || origin == BlockOrigin::ConsensusBroadcast { - + if make_notifications { if let Some(storage_changes) = storage_changes { // TODO [ToDr] How to handle re-orgs? Should we re-emit all storage changes? self.storage_notifications.lock() .trigger(&hash, storage_changes); } + if finalized { + let notification = FinalityNotification:: { + hash, + header: header.clone(), + }; + + self.finality_notification_sinks.lock() + .retain(|sink| sink.unbounded_send(notification.clone()).is_ok()); + } + let notification = BlockImportNotification:: { - hash: hash, - origin: origin, - header: header, - is_new_best: is_new_best, + hash, + origin, + header, + is_new_best, }; + self.import_notification_sinks.lock() .retain(|sink| sink.unbounded_send(notification.clone()).is_ok()); } + Ok(ImportResult::Queued) } + /// Finalizes all blocks up to given. + fn apply_finality(&self, block: Block::Hash, best_block: Block::Hash, notify: bool) -> error::Result<()> { + // find tree route from last finalized to given block. 
+ let last_finalized = self.backend.blockchain().last_finalized()?; + + if block == last_finalized { return Ok(()) } + let route_from_finalized = ::blockchain::tree_route( + self.backend.blockchain(), + BlockId::Hash(last_finalized), + BlockId::Hash(block), + )?; + + if let Some(retracted) = route_from_finalized.retracted().get(0) { + warn!("Safety violation: attempted to revert finalized block {:?} which is not in the \ + same chain as last finalized {:?}", retracted, last_finalized); + + bail!(error::ErrorKind::NotInFinalizedChain); + } + + let route_from_best = ::blockchain::tree_route( + self.backend.blockchain(), + BlockId::Hash(best_block), + BlockId::Hash(block), + )?; + + // if the block is not a direct ancestor of the current best chain, + // then some other block is the common ancestor. + if route_from_best.common_block().hash != block { + // TODO: reorganize best block to be the best chain containing + // `block`. + } + + for finalize_new in route_from_finalized.enacted() { + self.backend.finalize_block(BlockId::Hash(finalize_new.hash))?; + } + + if notify { + // sometimes when syncing, tons of blocks can be finalized at once. + // we'll send notifications spuriously in that case. + const MAX_TO_NOTIFY: usize = 256; + let enacted = route_from_finalized.enacted(); + let start = enacted.len() - ::std::cmp::min(enacted.len(), MAX_TO_NOTIFY); + let mut sinks = self.finality_notification_sinks.lock(); + for finalized in &enacted[start..] { + let header = self.header(&BlockId::Hash(finalized.hash))? + .expect("header already known to exist in DB because it is indicated in the tree route; qed"); + let notification = FinalityNotification { + header, + hash: finalized.hash, + }; + + sinks.retain(|sink| sink.unbounded_send(notification.clone()).is_ok()); + } + } + + Ok(()) + } + + /// Finalize a block. This will implicitly finalize all blocks up to it and + /// fire finality notifications. 
+ /// + /// Pass a flag to indicate whether finality notifications should be propagated. + /// This is usually tied to some synchronization state, where we don't send notifications + /// while performing major synchronization work. + pub fn finalize_block(&self, id: BlockId, notify: bool) -> error::Result<()> { + let last_best = self.backend.blockchain().info()?.best_hash; + let to_finalize_hash = match id { + BlockId::Hash(h) => h, + BlockId::Number(n) => self.backend.blockchain().hash(n)? + .ok_or_else(|| error::ErrorKind::UnknownBlock(format!("No block with number {:?}", n)))?, + }; + + self.apply_finality(to_finalize_hash, last_best, notify) + } + /// Attempts to revert the chain by `n` blocks. Returns the number of blocks that were /// successfully reverted. pub fn revert(&self, n: NumberFor) -> error::Result> { @@ -681,12 +806,18 @@ where Block: BlockT, { /// Get block import event stream. - fn import_notification_stream(&self) -> BlockchainEventStream { + fn import_notification_stream(&self) -> ImportNotifications { let (sink, stream) = mpsc::unbounded(); self.import_notification_sinks.lock().push(sink); stream } + fn finality_notification_stream(&self) -> FinalityNotifications { + let (sink, stream) = mpsc::unbounded(); + self.finality_notification_sinks.lock().push(sink); + stream + } + /// Get storage changes event stream. fn storage_changes_notification_stream(&self, filter_keys: Option<&[StorageKey]>) -> error::Result> { Ok(self.storage_notifications.lock().listen(filter_keys)) diff --git a/substrate/core/client/src/error.rs b/substrate/core/client/src/error.rs index 05429b9566..3dd79d79e6 100644 --- a/substrate/core/client/src/error.rs +++ b/substrate/core/client/src/error.rs @@ -99,6 +99,18 @@ error_chain! { description("Error decoding call result") display("Error decoding call result of {}", method) } + + /// Last finalized block not parent of current. 
+ NonSequentialFinalization(s: String) { + description("Did not finalize blocks in sequential order."), + display("Did not finalize blocks in sequential order."), + } + + /// Safety violation: new best block not descendent of last finalized. + NotInFinalizedChain { + description("Potential long-range attack: block not in finalized chain."), + display("Potential long-range attack: block not in finalized chain."), + } } } diff --git a/substrate/core/client/src/in_mem.rs b/substrate/core/client/src/in_mem.rs index ef1ea954df..4c321db09c 100644 --- a/substrate/core/client/src/in_mem.rs +++ b/substrate/core/client/src/in_mem.rs @@ -221,6 +221,7 @@ impl HeaderBackend for Blockchain { best_hash: storage.best_hash, best_number: storage.best_number, genesis_hash: storage.genesis_hash, + finalized_hash: storage.finalized_hash, }) } diff --git a/substrate/core/client/src/lib.rs b/substrate/core/client/src/lib.rs index 37f1f2ba6b..c2467bc492 100644 --- a/substrate/core/client/src/lib.rs +++ b/substrate/core/client/src/lib.rs @@ -61,7 +61,7 @@ pub use blockchain::Info as ChainInfo; pub use call_executor::{CallResult, CallExecutor, LocalCallExecutor}; pub use client::{ new_in_mem, - BlockBody, BlockStatus, BlockOrigin, BlockchainEventStream, BlockchainEvents, + BlockBody, BlockStatus, BlockOrigin, ImportNotifications, FinalityNotifications, BlockchainEvents, Client, ClientInfo, ChainHead, ImportResult, JustifiedHeader, }; diff --git a/substrate/core/network/src/chain.rs b/substrate/core/network/src/chain.rs index 5d162cef78..be8e4ba18d 100644 --- a/substrate/core/network/src/chain.rs +++ b/substrate/core/network/src/chain.rs @@ -26,7 +26,14 @@ use primitives::{Blake2Hasher, RlpCodec}; /// Local client abstraction for the network. pub trait Client: Send + Sync { /// Import a new block. Parent is supposed to be existing in the blockchain. 
- fn import(&self, origin: BlockOrigin, header: Block::Header, justification: Justification, body: Option>) -> Result; + fn import( + &self, + origin: BlockOrigin, + header: Block::Header, + justification: Justification, + body: Option>, + finalized: bool, + ) -> Result; /// Get blockchain info. fn info(&self) -> Result, Error>; @@ -61,10 +68,17 @@ impl Client for SubstrateClient where E: CallExecutor + Send + Sync + 'static, Block: BlockT, { - fn import(&self, origin: BlockOrigin, header: Block::Header, justification: Justification, body: Option>) -> Result { - // TODO: defer justification check and non-instant finality. + fn import( + &self, + origin: BlockOrigin, + header: Block::Header, + justification: Justification, + body: Option>, + finalized: bool, + ) -> Result { + // TODO: defer justification check and add finality. let justified_header = self.check_justification(header, justification.into())?; - (self as &SubstrateClient).import_block(origin, justified_header, body, true) + (self as &SubstrateClient).import_block(origin, justified_header, body, finalized) } fn info(&self) -> Result, Error> { diff --git a/substrate/core/network/src/import_queue.rs b/substrate/core/network/src/import_queue.rs index 647070383b..1e1149dc6f 100644 --- a/substrate/core/network/src/import_queue.rs +++ b/substrate/core/network/src/import_queue.rs @@ -59,6 +59,7 @@ pub struct ImportQueueStatus { pub struct AsyncImportQueue { handle: Mutex>>, data: Arc>, + instant_finality: bool, } /// Locks order: queue, queue_blocks, best_importing_number @@ -71,10 +72,11 @@ struct AsyncImportQueueData { } impl AsyncImportQueue { - pub fn new() -> Self { + pub fn new(instant_finality: bool) -> Self { Self { handle: Mutex::new(None), data: Arc::new(AsyncImportQueueData::new()), + instant_finality, } } @@ -82,8 +84,9 @@ impl AsyncImportQueue { debug_assert!(self.handle.lock().is_none()); let qdata = self.data.clone(); + let instant_finality = self.instant_finality; *self.handle.lock() = 
Some(::std::thread::Builder::new().name("ImportQueue".into()).spawn(move || { - import_thread(sync, service, chain, qdata) + import_thread(sync, service, chain, qdata, instant_finality) }).map_err(|err| Error::from(ErrorKind::Io(err)))?); Ok(()) } @@ -159,7 +162,13 @@ impl Drop for AsyncImportQueue { } /// Blocks import thread. -fn import_thread>(sync: Weak>>, service: Weak, chain: Weak>, qdata: Arc>) { +fn import_thread>( + sync: Weak>>, + service: Weak, + chain: Weak>, + qdata: Arc>, + instant_finality: bool, +) { trace!(target: "sync", "Starting import thread"); loop { if qdata.is_stopping.load(Ordering::SeqCst) { @@ -181,7 +190,12 @@ fn import_thread>(sync: Weak { let blocks_hashes: Vec = new_blocks.1.iter().map(|b| b.block.hash.clone()).collect(); - if !import_many_blocks(&mut SyncLink::Indirect(&sync, &*chain, &*service), Some(&*qdata), new_blocks) { + if !import_many_blocks( + &mut SyncLink::Indirect(&sync, &*chain, &*service), + Some(&*qdata), + new_blocks, + instant_finality, + ) { break; } @@ -246,7 +260,8 @@ enum BlockImportError { fn import_many_blocks<'a, B: BlockT>( link: &mut SyncLinkApi, qdata: Option<&AsyncImportQueueData>, - blocks: (BlockOrigin, Vec>) + blocks: (BlockOrigin, Vec>), + instant_finality: bool, ) -> bool { let (blocks_origin, blocks) = blocks; @@ -265,7 +280,12 @@ fn import_many_blocks<'a, B: BlockT>( // Blocks in the response/drain should be in ascending order. 
for block in blocks { - let import_result = import_single_block(link.chain(), blocks_origin.clone(), block); + let import_result = import_single_block( + link.chain(), + blocks_origin.clone(), + block, + instant_finality, + ); let is_import_failed = import_result.is_err(); imported += process_import_result(link, import_result); if is_import_failed { @@ -287,7 +307,8 @@ fn import_single_block( chain: &Client, block_origin: BlockOrigin, - block: BlockData + block: BlockData, + instant_finality: bool, ) -> Result::Header as HeaderT>::Number>, BlockImportError> { let origin = block.origin; @@ -303,6 +324,7 @@ fn import_single_block( header, justification, block.body, + instant_finality, ); match result { Ok(ImportResult::AlreadyInChain) => { @@ -436,7 +458,8 @@ pub mod tests { use super::*; /// Blocks import queue that is importing blocks in the same thread. - pub struct SyncImportQueue; + /// The boolean value indicates whether blocks should be imported with instant finality.
+ pub struct SyncImportQueue(pub bool); struct DummyExecuteInContext; impl ExecuteInContext for DummyExecuteInContext { @@ -460,7 +483,7 @@ pub mod tests { } fn import_blocks(&self, sync: &mut ChainSync, protocol: &mut Context, blocks: (BlockOrigin, Vec>)) { - import_many_blocks(&mut SyncLink::Direct::<_, DummyExecuteInContext>(sync, protocol), None, blocks); + import_many_blocks(&mut SyncLink::Direct::<_, DummyExecuteInContext>(sync, protocol), None, blocks, self.0); } } @@ -518,27 +541,39 @@ pub mod tests { #[test] fn import_single_good_block_works() { let (_, hash, number, block) = prepare_good_block(); - assert_eq!(import_single_block(&test_client::new(), BlockOrigin::File, block), Ok(BlockImportResult::ImportedUnknown(hash, number))); + assert_eq!( + import_single_block(&test_client::new(), BlockOrigin::File, block, true), + Ok(BlockImportResult::ImportedUnknown(hash, number)) + ); } #[test] fn import_single_good_known_block_is_ignored() { let (client, hash, number, block) = prepare_good_block(); - assert_eq!(import_single_block(&client, BlockOrigin::File, block), Ok(BlockImportResult::ImportedKnown(hash, number))); + assert_eq!( + import_single_block(&client, BlockOrigin::File, block, true), + Ok(BlockImportResult::ImportedKnown(hash, number)) + ); } #[test] fn import_single_good_block_without_header_fails() { let (_, _, _, mut block) = prepare_good_block(); block.block.header = None; - assert_eq!(import_single_block(&test_client::new(), BlockOrigin::File, block), Err(BlockImportError::Disconnect(0))); + assert_eq!( + import_single_block(&test_client::new(), BlockOrigin::File, block, true), + Err(BlockImportError::Disconnect(0)) + ); } #[test] fn import_single_good_block_without_justification_fails() { let (_, _, _, mut block) = prepare_good_block(); block.block.justification = None; - assert_eq!(import_single_block(&test_client::new(), BlockOrigin::File, block), Err(BlockImportError::Disconnect(0))); + assert_eq!( + import_single_block(&test_client::new(), 
BlockOrigin::File, block, true), + Err(BlockImportError::Disconnect(0)) + ); } #[test] @@ -579,12 +614,17 @@ pub mod tests { let (_, _, _, block) = prepare_good_block(); let qdata = AsyncImportQueueData::new(); qdata.is_stopping.store(true, Ordering::SeqCst); - assert!(!import_many_blocks(&mut TestLink::new(), Some(&qdata), (BlockOrigin::File, vec![block.clone(), block]))); + assert!(!import_many_blocks( + &mut TestLink::new(), + Some(&qdata), + (BlockOrigin::File, vec![block.clone(), block]), + true + )); } #[test] fn async_import_queue_drops() { - let queue = AsyncImportQueue::new(); + let queue = AsyncImportQueue::new(true); let service = Arc::new(DummyExecutor); let chain = Arc::new(test_client::new()); queue.start(Weak::new(), Arc::downgrade(&service), Arc::downgrade(&chain) as Weak>).unwrap(); diff --git a/substrate/core/network/src/service.rs b/substrate/core/network/src/service.rs index 4b27c843d9..c900cae928 100644 --- a/substrate/core/network/src/service.rs +++ b/substrate/core/network/src/service.rs @@ -165,7 +165,8 @@ impl, H: ExHashT> Service { /// Creates and register protocol with the network service pub fn new(params: Params, protocol_id: ProtocolId) -> Result>, Error> { let chain = params.chain.clone(); - let import_queue = Arc::new(AsyncImportQueue::new()); + // TODO: non-instant finality. + let import_queue = Arc::new(AsyncImportQueue::new(true)); let handler = Arc::new(ProtocolHandler { protocol: Protocol::new( params.config, diff --git a/substrate/core/network/src/specialization.rs b/substrate/core/network/src/specialization.rs index 3c04a36705..1c2d2d7274 100644 --- a/substrate/core/network/src/specialization.rs +++ b/substrate/core/network/src/specialization.rs @@ -44,5 +44,6 @@ pub trait Specialization: Send + Sync + 'static { fn maintain_peers(&mut self, _ctx: &mut Context) { } /// Called when a block is _imported_ at the head of the chain (not during major sync).
+ /// Not guaranteed to be called for every block, but will be most of the time after major sync. fn on_block_imported(&mut self, _ctx: &mut Context, _hash: B::Hash, _header: &B::Header) { } } diff --git a/substrate/core/network/src/test/mod.rs b/substrate/core/network/src/test/mod.rs index 7a78dd28fc..0fa5196a03 100644 --- a/substrate/core/network/src/test/mod.rs +++ b/substrate/core/network/src/test/mod.rs @@ -173,8 +173,8 @@ impl Peer { fn flush(&self) { } - fn generate_blocks(&self, count: usize, mut edit_block: F) - where F: FnMut(&mut BlockBuilder) + fn generate_blocks(&self, count: usize, mut edit_block: F) + where F: FnMut(&mut BlockBuilder) { for _ in 0 .. count { let mut builder = self.client.new_block().unwrap(); @@ -246,7 +246,7 @@ impl TestNet { pub fn add_peer(&mut self, config: &ProtocolConfig) { let client = Arc::new(test_client::new()); let tx_pool = Arc::new(EmptyTransactionPool); - let import_queue = Arc::new(SyncImportQueue); + let import_queue = Arc::new(SyncImportQueue(false)); let sync = Protocol::new(config.clone(), client.clone(), import_queue, None, tx_pool, DummySpecialization).unwrap(); self.peers.push(Arc::new(Peer { sync: sync, diff --git a/substrate/core/test-client/src/client_ext.rs b/substrate/core/test-client/src/client_ext.rs index 624b360f0b..7d1a9b433f 100644 --- a/substrate/core/test-client/src/client_ext.rs +++ b/substrate/core/test-client/src/client_ext.rs @@ -18,7 +18,7 @@ use client::{self, Client}; use keyring::Keyring; -use runtime_primitives::StorageMap; +use runtime_primitives::{generic::BlockId, StorageMap}; use runtime::genesismap::{GenesisConfig, additional_storage_with_genesis}; use executor::NativeExecutor; use runtime; @@ -30,9 +30,12 @@ pub trait TestClient { /// Crates new client instance for tests. fn new_for_tests() -> Self; - /// Justify and import block to the chain. Instant finality. 
fn justify_and_import(&self, origin: client::BlockOrigin, block: runtime::Block) -> client::error::Result<()>; + /// Finalize a block. + fn finalize_block(&self, id: BlockId) -> client::error::Result<()>; + /// Returns hash of the genesis block. fn genesis_hash(&self) -> runtime::Hash; } @@ -45,11 +48,15 @@ impl TestClient for Client { fn justify_and_import(&self, origin: client::BlockOrigin, block: runtime::Block) -> client::error::Result<()> { let justification = fake_justify(&block.header); let justified = self.check_justification(block.header, justification)?; - self.import_block(origin, justified, Some(block.extrinsics), true)?; + self.import_block(origin, justified, Some(block.extrinsics), false)?; Ok(()) } + fn finalize_block(&self, id: BlockId) -> client::error::Result<()> { + self.finalize_block(id, true) + } + fn genesis_hash(&self) -> runtime::Hash { self.block_hash(0).unwrap().unwrap() }