Finalized block event triggers tx maintenance (#12305)

* finalized block event triggers tx maintenance

* tx-pool: enactment helper introduced

* tx-pool: ChainApi: added tree_route method

* enactment logic implemented + tests

Signed-off-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com>

* Some additional tests

* minor improvements

* trigger CI job

* fix compilation errors

ChainApi::tree_route return type changed to Result<Option<..>>, as some
implementations (tests) are not required to provide this tree route.

* formatting

* trait removed

* implementation slightly simplified

(thanks to @koute)

* get rid of Arc<> in EnactmentState return value

* minor improvement

* Apply suggestions from code review

Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com>

* Apply suggestions from code review

* comment updated + formatting

* Apply suggestions from code review

Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com>
Co-authored-by: Davide Galassi <davxy@datawok.net>

* formatting

* finalization notification bug fix

+ new test case
+ log::warn message when finalized block is being retracted by new event

* added error message on tree_route failure

* Apply suggestions from code review

Co-authored-by: Bastian Köcher <git@kchr.de>

* use provided tree_route in Finalized event

* Option removed from ChainApi::tree_route

* doc added, test and logs improved

* handle_enactment aligned with original implementation

* use async-await

* Apply suggestions from code review

Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com>

* Apply suggestions from code review

Co-authored-by: Bastian Köcher <git@kchr.de>

* formatting + warn->debug

* compilation error fix

* enactment_state initializers added

* enactment_state: Option removed

* manual-seal: compilation & tests fix

* manual-seal: tests fixed

* tests cleanup

* another compilation error fixed

* TreeRoute::new added

* get rid of pub hack

* one more test added

* formatting

* TreeRoute::new doc added + formatting

* Apply suggestions from code review

Co-authored-by: Davide Galassi <davxy@datawok.net>

* (bool,Option) simplified to Option

* log message improved

* yet another review suggestions applied

* get rid of hash in handle_enactment

* Apply suggestions from code review

Co-authored-by: Bastian Köcher <git@kchr.de>

* Update client/transaction-pool/src/lib.rs

Co-authored-by: Bastian Köcher <git@kchr.de>

* minor corrections

* EnactmentState moved to new file

* File header corrected

* error formatting aligned with codebase

* Apply suggestions from code review

Co-authored-by: Bastian Köcher <git@kchr.de>

* remove commented code

* small nits

Signed-off-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com>
Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com>
Co-authored-by: Davide Galassi <davxy@datawok.net>
Co-authored-by: Bastian Köcher <git@kchr.de>
Co-authored-by: André Silva <andrerfosilva@gmail.com>
This commit is contained in:
Michal Kucharczyk
2022-10-11 22:20:13 +02:00
committed by GitHub
parent 023aa03fea
commit 62bca87f3a
14 changed files with 1470 additions and 195 deletions
+2
View File
@@ -9044,6 +9044,7 @@ version = "4.0.0-dev"
dependencies = [
"array-bytes",
"assert_matches",
"async-trait",
"criterion",
"futures",
"futures-timer",
@@ -9075,6 +9076,7 @@ dependencies = [
name = "sc-transaction-pool-api"
version = "4.0.0-dev"
dependencies = [
"async-trait",
"futures",
"log",
"serde",
@@ -305,9 +305,8 @@ pub async fn run_instant_seal_and_finalize<B, BI, CB, E, C, TP, SC, CIDP, P>(
mod tests {
use super::*;
use sc_basic_authorship::ProposerFactory;
use sc_client_api::BlockBackend;
use sc_consensus::ImportedAux;
use sc_transaction_pool::{BasicPool, Options, RevalidationType};
use sc_transaction_pool::{BasicPool, FullChainApi, Options, RevalidationType};
use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionSource};
use sp_inherents::InherentData;
use sp_runtime::generic::{BlockId, Digest, DigestItem};
@@ -359,6 +358,7 @@ mod tests {
let (client, select_chain) = builder.build_with_longest_chain();
let client = Arc::new(client);
let spawner = sp_core::testing::TaskExecutor::new();
let genesis_hash = client.header(&BlockId::Number(0)).unwrap().unwrap().hash();
let pool = Arc::new(BasicPool::with_revalidation_type(
Options::default(),
true.into(),
@@ -367,6 +367,8 @@ mod tests {
RevalidationType::Full,
spawner.clone(),
0,
genesis_hash,
genesis_hash,
));
let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None);
// this test checks that blocks are created as soon as transactions are imported into the
@@ -429,6 +431,7 @@ mod tests {
let (client, select_chain) = builder.build_with_longest_chain();
let client = Arc::new(client);
let spawner = sp_core::testing::TaskExecutor::new();
let genesis_hash = client.header(&BlockId::Number(0)).unwrap().unwrap().hash();
let pool = Arc::new(BasicPool::with_revalidation_type(
Options::default(),
true.into(),
@@ -437,6 +440,8 @@ mod tests {
RevalidationType::Full,
spawner.clone(),
0,
genesis_hash,
genesis_hash,
));
let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None);
// this test checks that blocks are created as soon as an engine command is sent over the
@@ -505,8 +510,13 @@ mod tests {
let builder = TestClientBuilder::new();
let (client, select_chain) = builder.build_with_longest_chain();
let client = Arc::new(client);
let pool_api = api();
let pool_api = Arc::new(FullChainApi::new(
client.clone(),
None,
&sp_core::testing::TaskExecutor::new(),
));
let spawner = sp_core::testing::TaskExecutor::new();
let genesis_hash = client.header(&BlockId::Number(0)).unwrap().unwrap().hash();
let pool = Arc::new(BasicPool::with_revalidation_type(
Options::default(),
true.into(),
@@ -515,6 +525,8 @@ mod tests {
RevalidationType::Full,
spawner.clone(),
0,
genesis_hash,
genesis_hash,
));
let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None);
// this test checks that blocks are created as soon as an engine command is sent over the
@@ -550,7 +562,6 @@ mod tests {
.await
.unwrap();
let created_block = rx.await.unwrap().unwrap();
pool_api.increment_nonce(Alice.into());
// assert that the background task returns ok
assert_eq!(
@@ -566,8 +577,7 @@ mod tests {
}
}
);
let block = client.block(&BlockId::Number(1)).unwrap().unwrap().block;
pool_api.add_block(block, true);
assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Alice, 1)).await.is_ok());
let header = client.header(&BlockId::Number(1)).expect("db error").expect("imported above");
@@ -588,9 +598,6 @@ mod tests {
.await
.is_ok());
assert_matches::assert_matches!(rx1.await.expect("should be no error receiving"), Ok(_));
let block = client.block(&BlockId::Number(2)).unwrap().unwrap().block;
pool_api.add_block(block, true);
pool_api.increment_nonce(Alice.into());
assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Bob, 0)).await.is_ok());
let (tx2, rx2) = futures::channel::oneshot::channel();
@@ -614,6 +621,7 @@ mod tests {
let (client, select_chain) = builder.build_with_longest_chain();
let client = Arc::new(client);
let spawner = sp_core::testing::TaskExecutor::new();
let genesis_hash = client.header(&BlockId::Number(0)).unwrap().unwrap().hash();
let pool = Arc::new(BasicPool::with_revalidation_type(
Options::default(),
true.into(),
@@ -622,6 +630,8 @@ mod tests {
RevalidationType::Full,
spawner.clone(),
0,
genesis_hash,
genesis_hash,
));
let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None);
@@ -13,6 +13,7 @@ readme = "README.md"
targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
async-trait = "0.1.57"
codec = { package = "parity-scale-codec", version = "3.0.0" }
futures = "0.3.21"
futures-timer = "3.0.2"
@@ -9,6 +9,7 @@ repository = "https://github.com/paritytech/substrate/"
description = "Transaction pool client facing API."
[dependencies]
async-trait = "0.1.57"
futures = "0.3.21"
log = "0.4.17"
serde = { version = "1.0.136", features = ["derive"] }
@@ -21,6 +21,7 @@
pub mod error;
use async_trait::async_trait;
use futures::{Future, Stream};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use sp_runtime::{
@@ -303,9 +304,10 @@ pub enum ChainEvent<B: BlockT> {
}
/// Trait for transaction pool maintenance.
#[async_trait]
pub trait MaintainedTransactionPool: TransactionPool {
/// Perform maintenance
fn maintain(&self, event: ChainEvent<Self::Block>) -> Pin<Box<dyn Future<Output = ()> + Send>>;
async fn maintain(&self, event: ChainEvent<Self::Block>);
}
/// Transaction pool interface for submitting local transactions that exposes a
@@ -121,6 +121,14 @@ impl ChainApi for TestApi {
) -> Result<Option<<Self::Block as BlockT>::Header>, Self::Error> {
Ok(None)
}
/// Tree-route computation is not exercised by this test API.
// NOTE(review): panics via `unimplemented!()` if ever called — acceptable only
// as long as no test path reaches it; confirm before reusing this API.
fn tree_route(
	&self,
	_from: <Self::Block as BlockT>::Hash,
	_to: <Self::Block as BlockT>::Hash,
) -> Result<sp_blockchain::TreeRoute<Self::Block>, Self::Error> {
	unimplemented!()
}
}
fn uxt(transfer: Transfer) -> Extrinsic {
+24 -6
View File
@@ -30,6 +30,7 @@ use std::{marker::PhantomData, pin::Pin, sync::Arc};
use prometheus_endpoint::Registry as PrometheusRegistry;
use sc_client_api::{blockchain::HeaderBackend, BlockBackend};
use sp_api::{ApiExt, ProvideRuntimeApi};
use sp_blockchain::{HeaderMetadata, TreeRoute};
use sp_core::traits::SpawnEssentialNamed;
use sp_runtime::{
generic::BlockId,
@@ -111,8 +112,11 @@ impl<Client, Block> FullChainApi<Client, Block> {
impl<Client, Block> graph::ChainApi for FullChainApi<Client, Block>
where
Block: BlockT,
Client:
ProvideRuntimeApi<Block> + BlockBackend<Block> + BlockIdTo<Block> + HeaderBackend<Block>,
Client: ProvideRuntimeApi<Block>
+ BlockBackend<Block>
+ BlockIdTo<Block>
+ HeaderBackend<Block>
+ HeaderMetadata<Block, Error = sp_blockchain::Error>,
Client: Send + Sync + 'static,
Client::Api: TaggedTransactionQueue<Block>,
{
@@ -190,6 +194,14 @@ where
) -> Result<Option<<Self::Block as BlockT>::Header>, Self::Error> {
self.client.header(*at).map_err(Into::into)
}
/// Compute the tree route (retracted/enacted path through the common
/// ancestor) between two blocks, delegating to `sp_blockchain::tree_route`
/// over the client's header metadata.
fn tree_route(
	&self,
	from: <Self::Block as BlockT>::Hash,
	to: <Self::Block as BlockT>::Hash,
) -> Result<TreeRoute<Self::Block>, Self::Error> {
	sp_blockchain::tree_route::<Block, Client>(&*self.client, from, to).map_err(Into::into)
}
}
/// Helper function to validate a transaction using a full chain API.
@@ -202,8 +214,11 @@ fn validate_transaction_blocking<Client, Block>(
) -> error::Result<TransactionValidity>
where
Block: BlockT,
Client:
ProvideRuntimeApi<Block> + BlockBackend<Block> + BlockIdTo<Block> + HeaderBackend<Block>,
Client: ProvideRuntimeApi<Block>
+ BlockBackend<Block>
+ BlockIdTo<Block>
+ HeaderBackend<Block>
+ HeaderMetadata<Block, Error = sp_blockchain::Error>,
Client: Send + Sync + 'static,
Client::Api: TaggedTransactionQueue<Block>,
{
@@ -264,8 +279,11 @@ where
impl<Client, Block> FullChainApi<Client, Block>
where
Block: BlockT,
Client:
ProvideRuntimeApi<Block> + BlockBackend<Block> + BlockIdTo<Block> + HeaderBackend<Block>,
Client: ProvideRuntimeApi<Block>
+ BlockBackend<Block>
+ BlockIdTo<Block>
+ HeaderBackend<Block>
+ HeaderMetadata<Block, Error = sp_blockchain::Error>,
Client: Send + Sync + 'static,
Client::Api: TaggedTransactionQueue<Block>,
{
@@ -0,0 +1,579 @@
// This file is part of Substrate.
// Copyright (C) 2022 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Substrate transaction pool implementation.
use sc_transaction_pool_api::ChainEvent;
use sp_blockchain::TreeRoute;
use sp_runtime::traits::Block as BlockT;
/// Helper struct for keeping track of the current state of processed new best
/// block and finalized events. The main purpose of keeping track of this state
/// is to figure out if a transaction pool enactment is needed or not.
///
/// Given the following chain:
///
///   B1-C1-D1-E1
///  /
/// A
///  \
///   B2-C2-D2-E2
///
/// Some scenarios and expected behavior for sequence of `NewBestBlock` (`nbb`) and `Finalized`
/// (`f`) events:
///
/// - `nbb(C1)`, `f(C1)` -> false (enactment was already performed in `nbb(C1)`)
/// - `f(C1)`, `nbb(C1)` -> false (enactment was already performed in `f(C1)`)
/// - `f(C1)`, `nbb(D2)` -> false (enactment was already performed in `f(C1)`,
///   we should not retract finalized block)
/// - `f(C1)`, `f(C2)`, `nbb(C1)` -> false
/// - `nbb(C1)`, `nbb(C2)` -> true (switching fork is OK)
/// - `nbb(B1)`, `nbb(B2)` -> true
/// - `nbb(B1)`, `nbb(C1)`, `f(C1)` -> false (enactment was already performed in `nbb(C1)`)
/// - `nbb(C1)`, `f(B1)` -> false (enactment was already performed in `nbb(C1)`)
pub struct EnactmentState<Block>
where
	Block: BlockT,
{
	// Hash of the block for which enactment was most recently performed.
	recent_best_block: Block::Hash,
	// Hash of the most recently finalized block; never retracted.
	recent_finalized_block: Block::Hash,
}
impl<Block> EnactmentState<Block>
where
	Block: BlockT,
{
	/// Returns a new `EnactmentState` initialized with the given parameters.
	pub fn new(recent_best_block: Block::Hash, recent_finalized_block: Block::Hash) -> Self {
		EnactmentState { recent_best_block, recent_finalized_block }
	}
	/// Returns the recently finalized block.
	pub fn recent_finalized_block(&self) -> Block::Hash {
		self.recent_finalized_block
	}
	/// Updates the state according to the given `ChainEvent`, returning
	/// `Some(tree_route)` with a tree route including the blocks that need to
	/// be enacted/retracted. If no enactment is needed then `None` is returned.
	///
	/// `tree_route` is a callback computing the route between two block
	/// hashes; it is invoked here instead of trusting any route attached to
	/// the event, so the result is always relative to `recent_best_block`.
	pub fn update<F>(
		&mut self,
		event: &ChainEvent<Block>,
		tree_route: &F,
	) -> Result<Option<TreeRoute<Block>>, String>
	where
		F: Fn(Block::Hash, Block::Hash) -> Result<TreeRoute<Block>, String>,
	{
		// `finalized` distinguishes the two event kinds for the logic below.
		let (new_hash, finalized) = match event {
			ChainEvent::NewBestBlock { hash, .. } => (*hash, false),
			ChainEvent::Finalized { hash, .. } => (*hash, true),
		};
		// block was already finalized
		if self.recent_finalized_block == new_hash {
			log::debug!(target: "txpool", "handle_enactment: block already finalized");
			return Ok(None)
		}
		// compute actual tree route from best_block to notified block, and use
		// it instead of tree_route provided with event
		let tree_route = tree_route(self.recent_best_block, new_hash)?;
		log::debug!(
			target: "txpool",
			"resolve hash:{:?} finalized:{:?} tree_route:{:?} best_block:{:?} finalized_block:{:?}",
			new_hash, finalized, tree_route, self.recent_best_block, self.recent_finalized_block
		);
		// check if recently finalized block is on retracted path. this could be
		// happening if we first received a finalization event and then a new
		// best event for some old stale best head.
		if tree_route.retracted().iter().any(|x| x.hash == self.recent_finalized_block) {
			log::debug!(
				target: "txpool",
				"Recently finalized block {} would be retracted by ChainEvent {}, skipping",
				self.recent_finalized_block, new_hash
			);
			return Ok(None)
		}
		if finalized {
			self.recent_finalized_block = new_hash;
			// if there are no enacted blocks in best_block -> hash tree_route,
			// it means that block being finalized was already enacted (this
			// case also covers best_block == new_hash), recent_best_block
			// remains valid.
			if tree_route.enacted().is_empty() {
				log::trace!(
					target: "txpool",
					"handle_enactment: no newly enacted blocks since recent best block"
				);
				return Ok(None)
			}
			// otherwise enacted finalized block becomes best block...
		}
		self.recent_best_block = new_hash;
		Ok(Some(tree_route))
	}
}
#[cfg(test)]
mod enactment_state_tests {
use super::EnactmentState;
use sc_transaction_pool_api::ChainEvent;
use sp_blockchain::{HashAndNumber, TreeRoute};
use std::sync::Arc;
use substrate_test_runtime_client::runtime::{Block, Hash};
// Some helpers for convenient naming of the test chain's blocks by hash.
// Each returns the `HashAndNumber` of one block of the two-fork diagram;
// the hash bytes mirror the block's name (e.g. `b1` -> 0xB1) and the number
// is the block's height (A = 1).
fn a() -> HashAndNumber<Block> {
	HashAndNumber { number: 1, hash: Hash::from([0xAA; 32]) }
}
fn b1() -> HashAndNumber<Block> {
	HashAndNumber { number: 2, hash: Hash::from([0xB1; 32]) }
}
fn c1() -> HashAndNumber<Block> {
	HashAndNumber { number: 3, hash: Hash::from([0xC1; 32]) }
}
fn d1() -> HashAndNumber<Block> {
	HashAndNumber { number: 4, hash: Hash::from([0xD1; 32]) }
}
fn e1() -> HashAndNumber<Block> {
	HashAndNumber { number: 5, hash: Hash::from([0xE1; 32]) }
}
fn b2() -> HashAndNumber<Block> {
	HashAndNumber { number: 2, hash: Hash::from([0xB2; 32]) }
}
fn c2() -> HashAndNumber<Block> {
	HashAndNumber { number: 3, hash: Hash::from([0xC2; 32]) }
}
fn d2() -> HashAndNumber<Block> {
	HashAndNumber { number: 4, hash: Hash::from([0xD2; 32]) }
}
fn e2() -> HashAndNumber<Block> {
	HashAndNumber { number: 5, hash: Hash::from([0xE2; 32]) }
}
/// Mock `tree_route` computing function for the simple two-fork test chain:
///
///   B1-C1-D1-E1
///  /
/// A
///  \
///   B2-C2-D2-E2
///
/// The chain is modelled as the flat array `[E1 D1 C1 B1 A B2 C2 D2 E2]`
/// with the common ancestor `A` at index 4 (`pivot`).
///
/// Returns the route from `from` to `to` (inclusive), or an `Err` if either
/// hash is not part of the chain.
fn tree_route(from: Hash, to: Hash) -> Result<TreeRoute<Block>, String> {
	let chain = vec![e1(), d1(), c1(), b1(), a(), b2(), c2(), d2(), e2()];
	let pivot = 4_usize;
	let from = chain
		.iter()
		.position(|bn| bn.hash == from)
		.ok_or("existing block should be given")?;
	let to = chain
		.iter()
		.position(|bn| bn.hash == to)
		.ok_or("existing block should be given")?;
	// Blocks on the route, ordered from `from` to `to`.
	let vec: Vec<HashAndNumber<Block>> = if from < to {
		chain.into_iter().skip(from).take(to - from + 1).collect()
	} else {
		chain.into_iter().skip(to).take(from - to + 1).rev().collect()
	};
	// Index of the common ancestor within `vec`: when both blocks lie on the
	// same side of `A` the ancestor is whichever endpoint is closer to `A`,
	// otherwise the route crosses the fork point and the ancestor is `A`.
	let pivot = if from <= pivot && to <= pivot {
		if from < to {
			to - from
		} else {
			0
		}
	} else if from >= pivot && to >= pivot {
		if from < to {
			0
		} else {
			from - to
		}
	} else if from < to {
		pivot - from
	} else {
		from - pivot
	};
	Ok(TreeRoute::new(vec, pivot))
}
// Sanity tests for the mock `tree_route` function itself.
mod mock_tree_route_tests {
	use super::*;
	/// Asserts that two tree routes are equal: same common (pivot) block and
	/// identical sequences of enacted / retracted block hashes.
	///
	/// NOTE(review): every call site below passes `(result, expected)` while
	/// the signature reads `(expected, result)`; the comparisons are
	/// symmetric so outcomes are unaffected, but the order is worth
	/// unifying.
	fn assert_treeroute_eq(expected: TreeRoute<Block>, result: TreeRoute<Block>) {
		assert_eq!(result.common_block().hash, expected.common_block().hash);
		assert_eq!(result.enacted().len(), expected.enacted().len());
		assert_eq!(result.retracted().len(), expected.retracted().len());
		assert!(result
			.enacted()
			.iter()
			.zip(expected.enacted().iter())
			.all(|(a, b)| a.hash == b.hash));
		assert!(result
			.retracted()
			.iter()
			.zip(expected.retracted().iter())
			.all(|(a, b)| a.hash == b.hash));
	}
	// some tests for mock tree_route function
	// Descending route on fork 1 — everything retracted, pivot at the end.
	#[test]
	fn tree_route_mock_test_01() {
		let result = tree_route(b1().hash, a().hash).expect("tree route exists");
		let expected = TreeRoute::new(vec![b1(), a()], 1);
		assert_treeroute_eq(result, expected);
	}
	// Ascending route — everything enacted, pivot at the start.
	#[test]
	fn tree_route_mock_test_02() {
		let result = tree_route(a().hash, b1().hash).expect("tree route exists");
		let expected = TreeRoute::new(vec![a(), b1()], 0);
		assert_treeroute_eq(result, expected);
	}
	#[test]
	fn tree_route_mock_test_03() {
		let result = tree_route(a().hash, c2().hash).expect("tree route exists");
		let expected = TreeRoute::new(vec![a(), b2(), c2()], 0);
		assert_treeroute_eq(result, expected);
	}
	#[test]
	fn tree_route_mock_test_04() {
		let result = tree_route(e2().hash, a().hash).expect("tree route exists");
		let expected = TreeRoute::new(vec![e2(), d2(), c2(), b2(), a()], 4);
		assert_treeroute_eq(result, expected);
	}
	#[test]
	fn tree_route_mock_test_05() {
		let result = tree_route(d1().hash, b1().hash).expect("tree route exists");
		let expected = TreeRoute::new(vec![d1(), c1(), b1()], 2);
		assert_treeroute_eq(result, expected);
	}
	#[test]
	fn tree_route_mock_test_06() {
		let result = tree_route(d2().hash, b2().hash).expect("tree route exists");
		let expected = TreeRoute::new(vec![d2(), c2(), b2()], 2);
		assert_treeroute_eq(result, expected);
	}
	#[test]
	fn tree_route_mock_test_07() {
		let result = tree_route(b1().hash, d1().hash).expect("tree route exists");
		let expected = TreeRoute::new(vec![b1(), c1(), d1()], 0);
		assert_treeroute_eq(result, expected);
	}
	#[test]
	fn tree_route_mock_test_08() {
		let result = tree_route(b2().hash, d2().hash).expect("tree route exists");
		let expected = TreeRoute::new(vec![b2(), c2(), d2()], 0);
		assert_treeroute_eq(result, expected);
	}
	// Full cross-fork routes — pivot is the common ancestor `A`.
	#[test]
	fn tree_route_mock_test_09() {
		let result = tree_route(e2().hash, e1().hash).expect("tree route exists");
		let expected =
			TreeRoute::new(vec![e2(), d2(), c2(), b2(), a(), b1(), c1(), d1(), e1()], 4);
		assert_treeroute_eq(result, expected);
	}
	#[test]
	fn tree_route_mock_test_10() {
		let result = tree_route(e1().hash, e2().hash).expect("tree route exists");
		let expected =
			TreeRoute::new(vec![e1(), d1(), c1(), b1(), a(), b2(), c2(), d2(), e2()], 4);
		assert_treeroute_eq(result, expected);
	}
	#[test]
	fn tree_route_mock_test_11() {
		let result = tree_route(b1().hash, c2().hash).expect("tree route exists");
		let expected = TreeRoute::new(vec![b1(), a(), b2(), c2()], 1);
		assert_treeroute_eq(result, expected);
	}
	#[test]
	fn tree_route_mock_test_12() {
		let result = tree_route(d2().hash, b1().hash).expect("tree route exists");
		let expected = TreeRoute::new(vec![d2(), c2(), b2(), a(), b1()], 3);
		assert_treeroute_eq(result, expected);
	}
	#[test]
	fn tree_route_mock_test_13() {
		let result = tree_route(c2().hash, e1().hash).expect("tree route exists");
		let expected = TreeRoute::new(vec![c2(), b2(), a(), b1(), c1(), d1(), e1()], 2);
		assert_treeroute_eq(result, expected);
	}
	// Degenerate single-block routes (from == to).
	#[test]
	fn tree_route_mock_test_14() {
		let result = tree_route(b1().hash, b1().hash).expect("tree route exists");
		let expected = TreeRoute::new(vec![b1()], 0);
		assert_treeroute_eq(result, expected);
	}
	#[test]
	fn tree_route_mock_test_15() {
		let result = tree_route(b2().hash, b2().hash).expect("tree route exists");
		let expected = TreeRoute::new(vec![b2()], 0);
		assert_treeroute_eq(result, expected);
	}
	#[test]
	fn tree_route_mock_test_16() {
		let result = tree_route(a().hash, a().hash).expect("tree route exists");
		let expected = TreeRoute::new(vec![a()], 0);
		assert_treeroute_eq(result, expected);
	}
}
/// Feeds a `NewBestBlock` event for `acted_on` (with a route computed from
/// `from`) into `state`, returning `true` when `update` reports that
/// enactment is needed.
fn trigger_new_best_block(
	state: &mut EnactmentState<Block>,
	from: HashAndNumber<Block>,
	acted_on: HashAndNumber<Block>,
) -> bool {
	let (from, acted_on) = (from.hash, acted_on.hash);
	let event_tree_route = tree_route(from, acted_on).expect("Tree route exists");
	state
		.update(
			&ChainEvent::NewBestBlock {
				hash: acted_on,
				tree_route: Some(Arc::new(event_tree_route)),
			},
			&tree_route,
		)
		.unwrap()
		.is_some()
}
/// Feeds a `Finalized` event for `acted_on` into `state`, returning `true`
/// when `update` reports that enactment is needed.
fn trigger_finalized(
	state: &mut EnactmentState<Block>,
	from: HashAndNumber<Block>,
	acted_on: HashAndNumber<Block>,
) -> bool {
	let (from, acted_on) = (from.hash, acted_on.hash);
	// The `Finalized` event carries only the enacted part of the route,
	// as plain hashes.
	let v = tree_route(from, acted_on)
		.expect("Tree route exists")
		.enacted()
		.iter()
		.map(|h| h.hash)
		.collect::<Vec<_>>();
	state
		.update(&ChainEvent::Finalized { hash: acted_on, tree_route: v.into() }, &tree_route)
		.unwrap()
		.is_some()
}
/// Asserts that the internal state of `es` matches the expected best and
/// finalized blocks.
fn assert_es_eq(
	es: &EnactmentState<Block>,
	expected_best_block: HashAndNumber<Block>,
	expected_finalized_block: HashAndNumber<Block>,
) {
	assert_eq!(es.recent_best_block, expected_best_block.hash);
	assert_eq!(es.recent_finalized_block, expected_finalized_block.hash);
}
// Full scenario walk over the two-fork chain: fork switching via
// finalization, and rejection of stale events afterwards.
#[test]
fn test_enactment_helper() {
	sp_tracing::try_init_simple();
	let mut es = EnactmentState::new(a().hash, a().hash);
	//   B1-C1-D1-E1
	//  /
	// A
	//  \
	//   B2-C2-D2-E2
	// advance best block along fork 1
	let result = trigger_new_best_block(&mut es, a(), d1());
	assert!(result);
	assert_es_eq(&es, d1(), a());
	let result = trigger_new_best_block(&mut es, d1(), e1());
	assert!(result);
	assert_es_eq(&es, e1(), a());
	// finalizing D2 switches to fork 2
	let result = trigger_finalized(&mut es, a(), d2());
	assert!(result);
	assert_es_eq(&es, d2(), d2());
	// E1 would retract finalized D2 -> ignored
	let result = trigger_new_best_block(&mut es, d2(), e1());
	assert_eq!(result, false);
	assert_es_eq(&es, d2(), d2());
	// stale / already-covered events are all ignored
	let result = trigger_finalized(&mut es, a(), b2());
	assert_eq!(result, false);
	assert_es_eq(&es, d2(), d2());
	let result = trigger_finalized(&mut es, a(), b1());
	assert_eq!(result, false);
	assert_es_eq(&es, d2(), d2());
	let result = trigger_new_best_block(&mut es, a(), d2());
	assert_eq!(result, false);
	assert_es_eq(&es, d2(), d2());
	let result = trigger_finalized(&mut es, a(), d2());
	assert_eq!(result, false);
	assert_es_eq(&es, d2(), d2());
	let result = trigger_new_best_block(&mut es, a(), c2());
	assert_eq!(result, false);
	assert_es_eq(&es, d2(), d2());
	let result = trigger_new_best_block(&mut es, a(), c1());
	assert_eq!(result, false);
	assert_es_eq(&es, d2(), d2());
	// advancing past the finalized block on fork 2 still enacts
	let result = trigger_new_best_block(&mut es, d2(), e2());
	assert!(result);
	assert_es_eq(&es, e2(), d2());
	let result = trigger_finalized(&mut es, d2(), e2());
	assert_eq!(result, false);
	assert_es_eq(&es, e2(), e2());
}
// Finalizing blocks already enacted via `NewBestBlock` requires no new
// enactment; only the finalized pointer advances.
#[test]
fn test_enactment_helper_2() {
	sp_tracing::try_init_simple();
	let mut es = EnactmentState::new(a().hash, a().hash);
	// A-B1-C1-D1-E1
	let result = trigger_new_best_block(&mut es, a(), b1());
	assert!(result);
	assert_es_eq(&es, b1(), a());
	let result = trigger_new_best_block(&mut es, b1(), c1());
	assert!(result);
	assert_es_eq(&es, c1(), a());
	let result = trigger_new_best_block(&mut es, c1(), d1());
	assert!(result);
	assert_es_eq(&es, d1(), a());
	let result = trigger_new_best_block(&mut es, d1(), e1());
	assert!(result);
	assert_es_eq(&es, e1(), a());
	let result = trigger_finalized(&mut es, a(), c1());
	assert_eq!(result, false);
	assert_es_eq(&es, e1(), c1());
	let result = trigger_finalized(&mut es, c1(), e1());
	assert_eq!(result, false);
	assert_es_eq(&es, e1(), e1());
}
// Finalizing an ancestor of the current best block is a no-op for enactment.
#[test]
fn test_enactment_helper_3() {
	sp_tracing::try_init_simple();
	let mut es = EnactmentState::new(a().hash, a().hash);
	// A-B1-C1-D1-E1
	let result = trigger_new_best_block(&mut es, a(), e1());
	assert!(result);
	assert_es_eq(&es, e1(), a());
	let result = trigger_finalized(&mut es, a(), b1());
	assert_eq!(result, false);
	assert_es_eq(&es, e1(), b1());
}
// A finalization event for an already-finalized ancestor is ignored.
#[test]
fn test_enactment_helper_4() {
	sp_tracing::try_init_simple();
	let mut es = EnactmentState::new(a().hash, a().hash);
	// A-B1-C1-D1-E1
	let result = trigger_finalized(&mut es, a(), e1());
	assert!(result);
	assert_es_eq(&es, e1(), e1());
	let result = trigger_finalized(&mut es, e1(), b1());
	assert_eq!(result, false);
	assert_es_eq(&es, e1(), e1());
}
// Finalization on a competing fork that would retract the finalized block
// is ignored.
#[test]
fn test_enactment_helper_5() {
	sp_tracing::try_init_simple();
	let mut es = EnactmentState::new(a().hash, a().hash);
	//   B1-C1-D1-E1
	//  /
	// A
	//  \
	//   B2-C2-D2-E2
	let result = trigger_finalized(&mut es, a(), e1());
	assert!(result);
	assert_es_eq(&es, e1(), e1());
	let result = trigger_finalized(&mut es, e1(), e2());
	assert_eq!(result, false);
	assert_es_eq(&es, e1(), e1());
}
// Mixed new-best / finalization sequence on a single fork; stale new-best
// events behind the finalized block are rejected.
#[test]
fn test_enactment_helper_6() {
	sp_tracing::try_init_simple();
	let mut es = EnactmentState::new(a().hash, a().hash);
	// A-B1-C1-D1-E1
	let result = trigger_new_best_block(&mut es, a(), b1());
	assert!(result);
	assert_es_eq(&es, b1(), a());
	let result = trigger_finalized(&mut es, a(), d1());
	assert!(result);
	assert_es_eq(&es, d1(), d1());
	let result = trigger_new_best_block(&mut es, a(), e1());
	assert!(result);
	assert_es_eq(&es, e1(), d1());
	let result = trigger_new_best_block(&mut es, a(), c1());
	assert_eq!(result, false);
	assert_es_eq(&es, e1(), d1());
}
}
@@ -20,6 +20,7 @@ use std::{collections::HashMap, sync::Arc, time::Duration};
use futures::{channel::mpsc::Receiver, Future};
use sc_transaction_pool_api::error;
use sp_blockchain::TreeRoute;
use sp_runtime::{
generic::BlockId,
traits::{self, Block as BlockT, SaturatedConversion},
@@ -97,6 +98,13 @@ pub trait ChainApi: Send + Sync {
&self,
at: &BlockId<Self::Block>,
) -> Result<Option<<Self::Block as BlockT>::Header>, Self::Error>;
/// Compute a tree-route between two blocks. See [`TreeRoute`] for more details.
fn tree_route(
&self,
from: <Self::Block as BlockT>::Hash,
to: <Self::Block as BlockT>::Hash,
) -> Result<TreeRoute<Self::Block>, Self::Error>;
}
/// Pool configuration options.
+201 -153
View File
@@ -23,6 +23,7 @@
#![warn(unused_extern_crates)]
mod api;
mod enactment_state;
pub mod error;
mod graph;
mod metrics;
@@ -31,6 +32,8 @@ mod revalidation;
mod tests;
pub use crate::api::FullChainApi;
use async_trait::async_trait;
use enactment_state::EnactmentState;
use futures::{
channel::oneshot,
future::{self, ready},
@@ -62,6 +65,8 @@ use std::time::Instant;
use crate::metrics::MetricsLink as PrometheusMetrics;
use prometheus_endpoint::Registry as PrometheusRegistry;
use sp_blockchain::{HashAndNumber, TreeRoute};
type BoxedReadyIterator<Hash, Data> =
Box<dyn ReadyTransactions<Item = Arc<graph::base_pool::Transaction<Hash, Data>>> + Send>;
@@ -85,6 +90,7 @@ where
revalidation_queue: Arc<revalidation::RevalidationQueue<PoolApi>>,
ready_poll: Arc<Mutex<ReadyPoll<ReadyIteratorFor<PoolApi>, Block>>>,
metrics: PrometheusMetrics,
enactment_state: Arc<Mutex<EnactmentState<Block>>>,
}
struct ReadyPoll<T, Block: BlockT> {
@@ -163,7 +169,11 @@ where
PoolApi: graph::ChainApi<Block = Block> + 'static,
{
/// Create new basic transaction pool with provided api, for tests.
pub fn new_test(pool_api: Arc<PoolApi>) -> (Self, Pin<Box<dyn Future<Output = ()> + Send>>) {
pub fn new_test(
pool_api: Arc<PoolApi>,
best_block_hash: Block::Hash,
finalized_hash: Block::Hash,
) -> (Self, Pin<Box<dyn Future<Output = ()> + Send>>) {
let pool = Arc::new(graph::Pool::new(Default::default(), true.into(), pool_api.clone()));
let (revalidation_queue, background_task) =
revalidation::RevalidationQueue::new_background(pool_api.clone(), pool.clone());
@@ -175,6 +185,10 @@ where
revalidation_strategy: Arc::new(Mutex::new(RevalidationStrategy::Always)),
ready_poll: Default::default(),
metrics: Default::default(),
enactment_state: Arc::new(Mutex::new(EnactmentState::new(
best_block_hash,
finalized_hash,
))),
},
background_task,
)
@@ -190,6 +204,8 @@ where
revalidation_type: RevalidationType,
spawner: impl SpawnEssentialNamed,
best_block_number: NumberFor<Block>,
best_block_hash: Block::Hash,
finalized_hash: Block::Hash,
) -> Self {
let pool = Arc::new(graph::Pool::new(options, is_validator, pool_api.clone()));
let (revalidation_queue, background_task) = match revalidation_type {
@@ -217,6 +233,10 @@ where
})),
ready_poll: Arc::new(Mutex::new(ReadyPoll::new(best_block_number))),
metrics: PrometheusMetrics::new(prometheus),
enactment_state: Arc::new(Mutex::new(EnactmentState::new(
best_block_hash,
finalized_hash,
))),
}
}
@@ -358,6 +378,7 @@ where
+ sp_runtime::traits::BlockIdTo<Block>
+ sc_client_api::ExecutorProvider<Block>
+ sc_client_api::UsageProvider<Block>
+ sp_blockchain::HeaderMetadata<Block, Error = sp_blockchain::Error>
+ Send
+ Sync
+ 'static,
@@ -380,6 +401,8 @@ where
RevalidationType::Full,
spawner,
client.usage_info().chain.best_number,
client.usage_info().chain.best_hash,
client.usage_info().chain.finalized_hash,
));
// make transaction pool available for off-chain runtime calls.
@@ -396,7 +419,8 @@ where
Client: sp_api::ProvideRuntimeApi<Block>
+ sc_client_api::BlockBackend<Block>
+ sc_client_api::blockchain::HeaderBackend<Block>
+ sp_runtime::traits::BlockIdTo<Block>,
+ sp_runtime::traits::BlockIdTo<Block>
+ sp_blockchain::HeaderMetadata<Block, Error = sp_blockchain::Error>,
Client: Send + Sync + 'static,
Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>,
{
@@ -563,166 +587,190 @@ async fn prune_known_txs_for_block<Block: BlockT, Api: graph::ChainApi<Block = B
hashes
}
impl<PoolApi, Block> BasicPool<PoolApi, Block>
where
	Block: BlockT,
	PoolApi: 'static + graph::ChainApi<Block = Block>,
{
	/// Handles enactment and retraction of blocks, prunes stale transactions
	/// (that have already been enacted) and resubmits transactions that were
	/// retracted.
	///
	/// The `tree_route` describes the chain reorganisation to process: its
	/// `retracted()` blocks are no longer part of the best chain, its
	/// `enacted()` blocks are, and `last()` is the new tip. If the route is
	/// empty there is nothing to do and the event is skipped with a warning.
	async fn handle_enactment(&self, tree_route: TreeRoute<Block>) {
		log::trace!(target: "txpool", "handle_enactment tree_route: {tree_route:?}");
		let pool = self.pool.clone();
		let api = self.api.clone();
		// The new tip of the (best or finalized) chain; everything below keys
		// off this block's hash and number.
		let (hash, block_number) = match tree_route.last() {
			Some(HashAndNumber { hash, number }) => (hash, number),
			None => {
				log::warn!(
					target: "txpool",
					"Skipping ChainEvent - no last block in tree route {:?}",
					tree_route,
				);
				return
			},
		};
		// Ask the revalidation strategy whether a resubmit and/or a background
		// revalidation round is due at this height (time/offset thresholds:
		// 60s, 20 blocks).
		let next_action = self.revalidation_strategy.lock().next(
			*block_number,
			Some(std::time::Duration::from_secs(60)),
			Some(20u32.into()),
		);
		// We keep track of everything we prune so that later we won't add
		// transactions with those hashes from the retracted blocks.
		let mut pruned_log = HashSet::<ExtrinsicHash<PoolApi>>::new();
		// If there is a tree route, we use this to prune known tx based on the enacted
		// blocks. Before pruning enacted transactions, we inform the listeners about
		// retracted blocks and their transactions. This order is important, because
		// if we enact and retract the same transaction at the same time, we want to
		// send first the retract and than the prune event.
		for retracted in tree_route.retracted() {
			// notify txs awaiting finality that it has been retracted
			pool.validated_pool().on_block_retracted(retracted.hash);
		}
		// Prune transactions contained in all enacted blocks concurrently and
		// record their hashes so retracted copies are not resubmitted below.
		future::join_all(
			tree_route
				.enacted()
				.iter()
				.map(|h| prune_known_txs_for_block(BlockId::Hash(h.hash), &*api, &*pool)),
		)
		.await
		.into_iter()
		.for_each(|enacted_log| {
			pruned_log.extend(enacted_log);
		});
		self.metrics
			.report(|metrics| metrics.block_transactions_pruned.inc_by(pruned_log.len() as u64));
		if next_action.resubmit {
			let mut resubmit_transactions = Vec::new();
			for retracted in tree_route.retracted() {
				let hash = retracted.hash;
				// Fetching the block body is best-effort: on failure we log and
				// treat the block as empty rather than aborting maintenance.
				let block_transactions = api
					.block_body(&BlockId::hash(hash))
					.await
					.unwrap_or_else(|e| {
						log::warn!("Failed to fetch block body: {}", e);
						None
					})
					.unwrap_or_default()
					.into_iter()
					.filter(|tx| tx.is_signed().unwrap_or(true));
				let mut resubmitted_to_report = 0;
				// Resubmit only transactions that were not just pruned as part
				// of an enacted block (i.e. ones that really fell out of the
				// chain with the retraction).
				resubmit_transactions.extend(block_transactions.into_iter().filter(|tx| {
					let tx_hash = pool.hash_of(tx);
					let contains = pruned_log.contains(&tx_hash);
					// need to count all transactions, not just filtered, here
					resubmitted_to_report += 1;
					if !contains {
						log::debug!(
							target: "txpool",
							"[{:?}]: Resubmitting from retracted block {:?}",
							tx_hash,
							hash,
						);
					}
					!contains
				}));
				self.metrics.report(|metrics| {
					metrics.block_transactions_resubmitted.inc_by(resubmitted_to_report)
				});
			}
			if let Err(e) = pool
				.resubmit_at(
					&BlockId::Hash(*hash),
					// These transactions are coming from retracted blocks, we should
					// simply consider them external.
					TransactionSource::External,
					resubmit_transactions,
				)
				.await
			{
				log::debug!(
					target: "txpool",
					"[{:?}] Error re-submitting transactions: {}",
					hash,
					e,
				)
			}
		}
		let extra_pool = pool.clone();
		// After #5200 lands, this arguably might be moved to the
		// handler of "all blocks notification".
		self.ready_poll
			.lock()
			.trigger(*block_number, move || Box::new(extra_pool.validated_pool().ready()));
		if next_action.revalidate {
			// Queue a background revalidation of everything currently ready,
			// then reset the strategy's counters.
			let hashes = pool.validated_pool().ready().map(|tx| tx.hash).collect();
			self.revalidation_queue.revalidate_later(*block_number, hashes).await;
			self.revalidation_strategy.lock().clear();
		}
	}
}
#[async_trait]
impl<PoolApi, Block> MaintainedTransactionPool for BasicPool<PoolApi, Block>
where
Block: BlockT,
PoolApi: 'static + graph::ChainApi<Block = Block>,
{
fn maintain(&self, event: ChainEvent<Self::Block>) -> Pin<Box<dyn Future<Output = ()> + Send>> {
match event {
ChainEvent::NewBestBlock { hash, tree_route } => {
let pool = self.pool.clone();
let api = self.api.clone();
async fn maintain(&self, event: ChainEvent<Self::Block>) {
let prev_finalized_block = self.enactment_state.lock().recent_finalized_block();
let compute_tree_route = |from, to| -> Result<TreeRoute<Block>, String> {
match self.api.tree_route(from, to) {
Ok(tree_route) => Ok(tree_route),
Err(e) =>
return Err(format!(
"Error occurred while computing tree_route from {from:?} to {to:?}: {e}"
)),
}
};
let id = BlockId::hash(hash);
let block_number = match api.block_id_to_number(&id) {
Ok(Some(number)) => number,
_ => {
log::trace!(
target: "txpool",
"Skipping chain event - no number for that block {:?}",
id,
);
return Box::pin(ready(()))
},
};
let result = self.enactment_state.lock().update(&event, &compute_tree_route);
let next_action = self.revalidation_strategy.lock().next(
block_number,
Some(std::time::Duration::from_secs(60)),
Some(20u32.into()),
);
let revalidation_strategy = self.revalidation_strategy.clone();
let revalidation_queue = self.revalidation_queue.clone();
let ready_poll = self.ready_poll.clone();
let metrics = self.metrics.clone();
async move {
// We keep track of everything we prune so that later we won't add
// transactions with those hashes from the retracted blocks.
let mut pruned_log = HashSet::<ExtrinsicHash<PoolApi>>::new();
// If there is a tree route, we use this to prune known tx based on the enacted
// blocks. Before pruning enacted transactions, we inform the listeners about
// retracted blocks and their transactions. This order is important, because
// if we enact and retract the same transaction at the same time, we want to
// send first the retract and than the prune event.
if let Some(ref tree_route) = tree_route {
for retracted in tree_route.retracted() {
// notify txs awaiting finality that it has been retracted
pool.validated_pool().on_block_retracted(retracted.hash);
}
future::join_all(tree_route.enacted().iter().map(|h| {
prune_known_txs_for_block(BlockId::Hash(h.hash), &*api, &*pool)
}))
.await
.into_iter()
.for_each(|enacted_log| {
pruned_log.extend(enacted_log);
})
}
pruned_log.extend(prune_known_txs_for_block(id, &*api, &*pool).await);
metrics.report(|metrics| {
metrics.block_transactions_pruned.inc_by(pruned_log.len() as u64)
});
if let (true, Some(tree_route)) = (next_action.resubmit, tree_route) {
let mut resubmit_transactions = Vec::new();
for retracted in tree_route.retracted() {
let hash = retracted.hash;
let block_transactions = api
.block_body(&BlockId::hash(hash))
.await
.unwrap_or_else(|e| {
log::warn!("Failed to fetch block body: {}", e);
None
})
.unwrap_or_default()
.into_iter()
.filter(|tx| tx.is_signed().unwrap_or(true));
let mut resubmitted_to_report = 0;
resubmit_transactions.extend(block_transactions.into_iter().filter(
|tx| {
let tx_hash = pool.hash_of(tx);
let contains = pruned_log.contains(&tx_hash);
// need to count all transactions, not just filtered, here
resubmitted_to_report += 1;
if !contains {
log::debug!(
target: "txpool",
"[{:?}]: Resubmitting from retracted block {:?}",
tx_hash,
hash,
);
}
!contains
},
));
metrics.report(|metrics| {
metrics.block_transactions_resubmitted.inc_by(resubmitted_to_report)
});
}
if let Err(e) = pool
.resubmit_at(
&id,
// These transactions are coming from retracted blocks, we should
// simply consider them external.
TransactionSource::External,
resubmit_transactions,
)
.await
{
log::debug!(
target: "txpool",
"[{:?}] Error re-submitting transactions: {}",
id,
e,
)
}
}
let extra_pool = pool.clone();
// After #5200 lands, this arguably might be moved to the
// handler of "all blocks notification".
ready_poll.lock().trigger(block_number, move || {
Box::new(extra_pool.validated_pool().ready())
});
if next_action.revalidate {
let hashes = pool.validated_pool().ready().map(|tx| tx.hash).collect();
revalidation_queue.revalidate_later(block_number, hashes).await;
revalidation_strategy.lock().clear();
}
}
.boxed()
match result {
Err(msg) => {
log::warn!(target: "txpool", "{msg}");
return
},
ChainEvent::Finalized { hash, tree_route } => {
let pool = self.pool.clone();
async move {
for hash in tree_route.iter().chain(&[hash]) {
if let Err(e) = pool.validated_pool().on_block_finalized(*hash).await {
log::warn!(
target: "txpool",
"Error [{}] occurred while attempting to notify watchers of finalization {}",
e, hash
)
}
}
}
.boxed()
Ok(None) => {},
Ok(Some(tree_route)) => {
self.handle_enactment(tree_route).await;
},
};
if let ChainEvent::Finalized { hash, tree_route } = event {
log::trace!(
target: "txpool",
"on-finalized enacted: {tree_route:?}, previously finalized: \
{prev_finalized_block:?}",
);
for hash in tree_route.iter().chain(std::iter::once(&hash)) {
if let Err(e) = self.pool.validated_pool().on_block_finalized(*hash).await {
log::warn!(
target: "txpool",
"Error occurred while attempting to notify watchers about finalization {}: {}",
hash, e
)
}
}
}
}
}
@@ -22,6 +22,7 @@ use crate::graph::{BlockHash, ChainApi, ExtrinsicFor, NumberFor, Pool};
use codec::Encode;
use parking_lot::Mutex;
use sc_transaction_pool_api::error;
use sp_blockchain::TreeRoute;
use sp_runtime::{
generic::BlockId,
traits::{Block as BlockT, Hash},
@@ -173,6 +174,14 @@ impl ChainApi for TestApi {
) -> Result<Option<<Self::Block as BlockT>::Header>, Self::Error> {
Ok(None)
}
	// This `TestApi` is used by tests that never trigger enactment handling,
	// so computing a real tree route is not required here.
	fn tree_route(
		&self,
		_from: <Self::Block as BlockT>::Hash,
		_to: <Self::Block as BlockT>::Hash,
	) -> Result<TreeRoute<Self::Block>, Self::Error> {
		unimplemented!()
	}
}
pub(crate) fn uxt(transfer: Transfer) -> Extrinsic {
+599 -25
View File
@@ -30,13 +30,14 @@ use sc_transaction_pool::*;
use sc_transaction_pool_api::{
ChainEvent, MaintainedTransactionPool, TransactionPool, TransactionStatus,
};
use sp_blockchain::HeaderBackend;
use sp_consensus::BlockOrigin;
use sp_runtime::{
generic::BlockId,
traits::Block as _,
transaction_validity::{InvalidTransaction, TransactionSource, ValidTransaction},
};
use std::{collections::BTreeSet, sync::Arc};
use std::{collections::BTreeSet, pin::Pin, sync::Arc};
use substrate_test_runtime_client::{
runtime::{Block, Extrinsic, Hash, Header, Index, Transfer},
AccountKeyring::*,
@@ -50,13 +51,32 @@ fn pool() -> Pool<TestApi> {
fn maintained_pool() -> (BasicPool<TestApi, Block>, Arc<TestApi>, futures::executor::ThreadPool) {
let api = Arc::new(TestApi::with_alice_nonce(209));
let (pool, background_task) = BasicPool::new_test(api.clone());
let (pool, background_task) = create_basic_pool_with_genesis(api.clone());
let thread_pool = futures::executor::ThreadPool::new().unwrap();
thread_pool.spawn_ok(background_task);
(pool, api, thread_pool)
}
/// Builds a `BasicPool` on top of `test_api`, seeding the pool's enactment
/// state with the genesis hash as both the best and the finalized block.
///
/// Returns the pool together with its background maintenance future.
fn create_basic_pool_with_genesis(
	test_api: Arc<TestApi>,
) -> (BasicPool<TestApi, Block>, Pin<Box<dyn Future<Output = ()> + Send>>) {
	let genesis_hash = {
		// Block #0 is always present in `TestApi`; take the hash of its
		// (single) header. The read guard is confined to this scope so that
		// `test_api` can be moved below.
		let chain = test_api.chain();
		let guard = chain.read();
		let blocks = guard.block_by_number.get(&0).expect("there is block 0. qed");
		blocks[0].0.header.hash()
	};
	BasicPool::new_test(test_api, genesis_hash, genesis_hash)
}
/// Convenience wrapper around [`create_basic_pool_with_genesis`]: wraps the
/// given `TestApi` in an `Arc` and returns only the pool, dropping the
/// background maintenance future.
fn create_basic_pool(test_api: TestApi) -> BasicPool<TestApi, Block> {
	let (pool, _background) = create_basic_pool_with_genesis(Arc::from(test_api));
	pool
}
const SOURCE: TransactionSource = TransactionSource::External;
#[test]
@@ -436,7 +456,7 @@ fn finalization() {
let xt = uxt(Alice, 209);
let api = TestApi::with_alice_nonce(209);
api.push_block(1, vec![], true);
let (pool, _background) = BasicPool::new_test(api.into());
let pool = create_basic_pool(api);
let watcher = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone()))
.expect("1. Imported");
pool.api().push_block(2, vec![xt.clone()], true);
@@ -459,9 +479,9 @@ fn finalization() {
fn fork_aware_finalization() {
let api = TestApi::empty();
// starting block A1 (last finalized.)
api.push_block(1, vec![], true);
let a_header = api.push_block(1, vec![], true);
let (pool, _background) = BasicPool::new_test(api.into());
let pool = create_basic_pool(api);
let mut canon_watchers = vec![];
let from_alice = uxt(Alice, 1);
@@ -476,10 +496,13 @@ fn fork_aware_finalization() {
let from_dave_watcher;
let from_bob_watcher;
let b1;
let c1;
let d1;
let c2;
let d2;
block_on(pool.maintain(block_event(a_header)));
// block B1
{
let watcher =
@@ -489,6 +512,7 @@ fn fork_aware_finalization() {
canon_watchers.push((watcher, header.hash()));
assert_eq!(pool.status().ready, 1);
log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header);
let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None };
b1 = header.hash();
block_on(pool.maintain(event));
@@ -504,6 +528,7 @@ fn fork_aware_finalization() {
block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_dave.clone()))
.expect("1. Imported");
assert_eq!(pool.status().ready, 1);
log::trace!(target:"txpool", ">> C2: {:?} {:?}", header.hash(), header);
let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None };
c2 = header.hash();
block_on(pool.maintain(event));
@@ -518,6 +543,7 @@ fn fork_aware_finalization() {
assert_eq!(pool.status().ready, 1);
let header = pool.api().push_block_with_parent(c2, vec![from_bob.clone()], true);
log::trace!(target:"txpool", ">> D2: {:?} {:?}", header.hash(), header);
let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None };
d2 = header.hash();
block_on(pool.maintain(event));
@@ -530,8 +556,9 @@ fn fork_aware_finalization() {
block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_charlie.clone()))
.expect("1.Imported");
assert_eq!(pool.status().ready, 1);
let header = pool.api().push_block(3, vec![from_charlie.clone()], true);
let header = pool.api().push_block_with_parent(b1, vec![from_charlie.clone()], true);
log::trace!(target:"txpool", ">> C1: {:?} {:?}", header.hash(), header);
c1 = header.hash();
canon_watchers.push((watcher, header.hash()));
let event = block_event_with_retracted(header.clone(), d2, pool.api());
block_on(pool.maintain(event));
@@ -547,11 +574,12 @@ fn fork_aware_finalization() {
let w = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone()))
.expect("1. Imported");
assert_eq!(pool.status().ready, 3);
let header = pool.api().push_block(4, vec![xt.clone()], true);
let header = pool.api().push_block_with_parent(c1, vec![xt.clone()], true);
log::trace!(target:"txpool", ">> D1: {:?} {:?}", header.hash(), header);
d1 = header.hash();
canon_watchers.push((w, header.hash()));
let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None };
d1 = header.hash();
block_on(pool.maintain(event));
assert_eq!(pool.status().ready, 2);
let event = ChainEvent::Finalized { hash: d1, tree_route: Arc::from(vec![]) };
@@ -560,9 +588,10 @@ fn fork_aware_finalization() {
let e1;
// block e1
// block E1
{
let header = pool.api().push_block(5, vec![from_dave, from_bob], true);
let header = pool.api().push_block_with_parent(d1, vec![from_dave, from_bob], true);
log::trace!(target:"txpool", ">> E1: {:?} {:?}", header.hash(), header);
e1 = header.hash();
let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None };
block_on(pool.maintain(event));
@@ -610,7 +639,7 @@ fn prune_and_retract_tx_at_same_time() {
// starting block A1 (last finalized.)
api.push_block(1, vec![], true);
let (pool, _background) = BasicPool::new_test(api.into());
let pool = create_basic_pool(api);
let from_alice = uxt(Alice, 1);
pool.api().increment_nonce(Alice.into());
@@ -676,7 +705,7 @@ fn resubmit_tx_of_fork_that_is_not_part_of_retracted() {
// starting block A1 (last finalized.)
api.push_block(1, vec![], true);
let (pool, _background) = BasicPool::new_test(api.into());
let pool = create_basic_pool(api);
let tx0 = uxt(Alice, 1);
let tx1 = uxt(Dave, 2);
@@ -721,7 +750,7 @@ fn resubmit_from_retracted_fork() {
// starting block A1 (last finalized.)
api.push_block(1, vec![], true);
let (pool, _background) = BasicPool::new_test(api.into());
let pool = create_basic_pool(api);
let tx0 = uxt(Alice, 1);
let tx1 = uxt(Dave, 2);
@@ -866,13 +895,14 @@ fn ready_set_should_eventually_resolve_when_block_update_arrives() {
#[test]
fn should_not_accept_old_signatures() {
let client = Arc::new(substrate_test_runtime_client::new());
let best_hash = client.info().best_hash;
let finalized_hash = client.info().finalized_hash;
let pool = Arc::new(
BasicPool::new_test(Arc::new(FullChainApi::new(
client,
None,
&sp_core::testing::TaskExecutor::new(),
)))
BasicPool::new_test(
Arc::new(FullChainApi::new(client, None, &sp_core::testing::TaskExecutor::new())),
best_hash,
finalized_hash,
)
.0,
);
@@ -908,12 +938,19 @@ fn should_not_accept_old_signatures() {
fn import_notification_to_pool_maintain_works() {
let mut client = Arc::new(substrate_test_runtime_client::new());
let best_hash = client.info().best_hash;
let finalized_hash = client.info().finalized_hash;
let pool = Arc::new(
BasicPool::new_test(Arc::new(FullChainApi::new(
client.clone(),
None,
&sp_core::testing::TaskExecutor::new(),
)))
BasicPool::new_test(
Arc::new(FullChainApi::new(
client.clone(),
None,
&sp_core::testing::TaskExecutor::new(),
)),
best_hash,
finalized_hash,
)
.0,
);
@@ -998,3 +1035,540 @@ fn stale_transactions_are_pruned() {
assert_eq!(pool.status().future, 0);
assert_eq!(pool.status().ready, 0);
}
#[test]
fn finalized_only_handled_correctly() {
	// Scenario: a transaction's block is announced *only* via a `Finalized`
	// event (no `NewBestBlock` first, block not marked best). The pool must
	// still prune the tx and the watcher must see Ready -> InBlock ->
	// Finalized, then terminate.
	sp_tracing::try_init_simple();
	let xt = uxt(Alice, 209);
	let (pool, api, _guard) = maintained_pool();
	let watcher = block_on(pool.submit_and_watch(&BlockId::number(0), SOURCE, xt.clone()))
		.expect("1. Imported");
	assert_eq!(pool.status().ready, 1);
	let header = api.push_block(1, vec![xt], false);
	// `Header::hash` takes `&self`, so no clone of the header is needed here.
	let event = ChainEvent::Finalized { hash: header.hash(), tree_route: Arc::from(vec![]) };
	block_on(pool.maintain(event));
	assert_eq!(pool.status().ready, 0);
	{
		let mut stream = futures::executor::block_on_stream(watcher);
		assert_eq!(stream.next(), Some(TransactionStatus::Ready));
		assert_eq!(stream.next(), Some(TransactionStatus::InBlock(header.hash())));
		assert_eq!(stream.next(), Some(TransactionStatus::Finalized(header.hash())));
		assert_eq!(stream.next(), None);
	}
}
#[test]
fn best_block_after_finalized_handled_correctly() {
	// Scenario: a `Finalized` event for a block arrives *before* the
	// `NewBestBlock` event for the same block. The late best-block event must
	// not retract or duplicate anything: the watcher sees exactly
	// Ready -> InBlock -> Finalized.
	sp_tracing::try_init_simple();
	let xt = uxt(Alice, 209);
	let (pool, api, _guard) = maintained_pool();
	let watcher = block_on(pool.submit_and_watch(&BlockId::number(0), SOURCE, xt.clone()))
		.expect("1. Imported");
	assert_eq!(pool.status().ready, 1);
	let header = api.push_block(1, vec![xt], true);
	// `Header::hash` takes `&self`, so no clone is needed to get the hash.
	let event = ChainEvent::Finalized { hash: header.hash(), tree_route: Arc::from(vec![]) };
	block_on(pool.maintain(event));
	// `block_event` consumes the header, so this clone is genuinely required.
	block_on(pool.maintain(block_event(header.clone())));
	assert_eq!(pool.status().ready, 0);
	{
		let mut stream = futures::executor::block_on_stream(watcher);
		assert_eq!(stream.next(), Some(TransactionStatus::Ready));
		assert_eq!(stream.next(), Some(TransactionStatus::InBlock(header.hash())));
		assert_eq!(stream.next(), Some(TransactionStatus::Finalized(header.hash())));
		assert_eq!(stream.next(), None);
	}
}
#[test]
fn switching_fork_with_finalized_works() {
	// Scenario: two forks B1 and B2 both build on A1. B1 becomes best, then
	// B2 (the *other* fork) is finalized directly. Alice's tx (in both
	// blocks) must be retracted from B1 and re-included in B2; Bob's tx
	// (only in B2) must never see a retraction.
	sp_tracing::try_init_simple();
	let api = TestApi::empty();
	// starting block A1 (last finalized.)
	let a_header = api.push_block(1, vec![], true);
	let pool = create_basic_pool(api);
	let from_alice = uxt(Alice, 1);
	let from_bob = uxt(Bob, 2);
	pool.api().increment_nonce(Alice.into());
	pool.api().increment_nonce(Bob.into());
	let from_alice_watcher;
	let from_bob_watcher;
	let b1_header;
	let b2_header;
	// block B1
	{
		from_alice_watcher =
			block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone()))
				.expect("1. Imported");
		let header =
			pool.api()
				.push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true);
		assert_eq!(pool.status().ready, 1);
		log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header);
		b1_header = header;
	}
	// block B2
	{
		from_bob_watcher =
			block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone()))
				.expect("1. Imported");
		let header = pool.api().push_block_with_parent(
			a_header.hash(),
			vec![from_alice.clone(), from_bob.clone()],
			true,
		);
		assert_eq!(pool.status().ready, 2);
		log::trace!(target:"txpool", ">> B2: {:?} {:?}", header.hash(), header);
		b2_header = header;
	}
	{
		// B1 becomes the best block: Alice's tx is pruned, Bob's stays ready.
		let event = ChainEvent::NewBestBlock { hash: b1_header.hash(), tree_route: None };
		block_on(pool.maintain(event));
		assert_eq!(pool.status().ready, 1);
	}
	{
		// B2 is finalized directly, switching the chain away from B1.
		let event = ChainEvent::Finalized { hash: b2_header.hash(), tree_route: Arc::from(vec![]) };
		block_on(pool.maintain(event));
	}
	{
		let mut stream = futures::executor::block_on_stream(from_alice_watcher);
		assert_eq!(stream.next(), Some(TransactionStatus::Ready));
		assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash())));
		assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b1_header.hash())));
		assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash())));
		assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2_header.hash())));
		assert_eq!(stream.next(), None);
	}
	{
		let mut stream = futures::executor::block_on_stream(from_bob_watcher);
		assert_eq!(stream.next(), Some(TransactionStatus::Ready));
		assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash())));
		assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2_header.hash())));
		assert_eq!(stream.next(), None);
	}
}
#[test]
fn switching_fork_multiple_times_works() {
	// Scenario: the best chain flips between forks B1 and B2 several times
	// (phase-0..2) before B2 is finally finalized (phase-3). Each switch must
	// emit a matching Retracted/InBlock pair on the watchers, and finalization
	// must settle both transactions in B2.
	sp_tracing::try_init_simple();
	let api = TestApi::empty();
	// starting block A1 (last finalized.)
	let a_header = api.push_block(1, vec![], true);
	let pool = create_basic_pool(api);
	let from_alice = uxt(Alice, 1);
	let from_bob = uxt(Bob, 2);
	pool.api().increment_nonce(Alice.into());
	pool.api().increment_nonce(Bob.into());
	let from_alice_watcher;
	let from_bob_watcher;
	let b1_header;
	let b2_header;
	// block B1
	{
		from_alice_watcher =
			block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone()))
				.expect("1. Imported");
		let header =
			pool.api()
				.push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true);
		assert_eq!(pool.status().ready, 1);
		log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header);
		b1_header = header;
	}
	// block B2
	{
		from_bob_watcher =
			block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone()))
				.expect("1. Imported");
		let header = pool.api().push_block_with_parent(
			a_header.hash(),
			vec![from_alice.clone(), from_bob.clone()],
			true,
		);
		assert_eq!(pool.status().ready, 2);
		log::trace!(target:"txpool", ">> B2: {:?} {:?}", header.hash(), header);
		b2_header = header;
	}
	{
		// phase-0
		let event = ChainEvent::NewBestBlock { hash: b1_header.hash(), tree_route: None };
		block_on(pool.maintain(event));
		assert_eq!(pool.status().ready, 1);
	}
	{
		// phase-1
		let event = block_event_with_retracted(b2_header.clone(), b1_header.hash(), pool.api());
		block_on(pool.maintain(event));
		assert_eq!(pool.status().ready, 0);
	}
	{
		// phase-2
		let event = block_event_with_retracted(b1_header.clone(), b2_header.hash(), pool.api());
		block_on(pool.maintain(event));
		assert_eq!(pool.status().ready, 1);
	}
	{
		// phase-3
		let event = ChainEvent::Finalized { hash: b2_header.hash(), tree_route: Arc::from(vec![]) };
		block_on(pool.maintain(event));
	}
	{
		let mut stream = futures::executor::block_on_stream(from_alice_watcher);
		//phase-0
		assert_eq!(stream.next(), Some(TransactionStatus::Ready));
		assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash())));
		//phase-1
		assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b1_header.hash())));
		assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash())));
		//phase-2
		assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b2_header.hash())));
		assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash())));
		//phase-3
		assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b1_header.hash())));
		assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash())));
		assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2_header.hash())));
		assert_eq!(stream.next(), None);
	}
	{
		let mut stream = futures::executor::block_on_stream(from_bob_watcher);
		//phase-1
		assert_eq!(stream.next(), Some(TransactionStatus::Ready));
		assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash())));
		//phase-2
		assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b2_header.hash())));
		assert_eq!(stream.next(), Some(TransactionStatus::Ready));
		//phase-3
		assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash())));
		assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2_header.hash())));
		assert_eq!(stream.next(), None);
	}
}
#[test]
fn two_blocks_delayed_finalization_works() {
	// Scenario: a straight chain A1 <- B1 <- C1 <- D1 where finalization lags
	// behind the best block and then catches up two blocks at once (C1 is
	// finalized with B1 enacted in its tree route). No transaction may be
	// retracted; each watcher must end with a single Finalized event for its
	// own block.
	sp_tracing::try_init_simple();
	let api = TestApi::empty();
	// starting block A1 (last finalized.)
	let a_header = api.push_block(1, vec![], true);
	let pool = create_basic_pool(api);
	let from_alice = uxt(Alice, 1);
	let from_bob = uxt(Bob, 2);
	let from_charlie = uxt(Charlie, 3);
	pool.api().increment_nonce(Alice.into());
	pool.api().increment_nonce(Bob.into());
	pool.api().increment_nonce(Charlie.into());
	let from_alice_watcher;
	let from_bob_watcher;
	let from_charlie_watcher;
	let b1_header;
	let c1_header;
	let d1_header;
	// block B1
	{
		from_alice_watcher =
			block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone()))
				.expect("1. Imported");
		let header =
			pool.api()
				.push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true);
		assert_eq!(pool.status().ready, 1);
		log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header);
		b1_header = header;
	}
	// block C1
	{
		from_bob_watcher =
			block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone()))
				.expect("1. Imported");
		let header =
			pool.api()
				.push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true);
		assert_eq!(pool.status().ready, 2);
		log::trace!(target:"txpool", ">> C1: {:?} {:?}", header.hash(), header);
		c1_header = header;
	}
	// block D1
	{
		from_charlie_watcher =
			block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_charlie.clone()))
				.expect("1. Imported");
		let header =
			pool.api()
				.push_block_with_parent(c1_header.hash(), vec![from_charlie.clone()], true);
		assert_eq!(pool.status().ready, 3);
		log::trace!(target:"txpool", ">> D1: {:?} {:?}", header.hash(), header);
		d1_header = header;
	}
	{
		// Finalizing the already-known base A1 must not touch the ready set.
		let event = ChainEvent::Finalized { hash: a_header.hash(), tree_route: Arc::from(vec![]) };
		block_on(pool.maintain(event));
		assert_eq!(pool.status().ready, 3);
	}
	{
		// Best block jumps straight to D1, pruning all three transactions.
		let event = ChainEvent::NewBestBlock { hash: d1_header.hash(), tree_route: None };
		block_on(pool.maintain(event));
		assert_eq!(pool.status().ready, 0);
	}
	{
		// Finalize C1 in one step, with B1 carried in the tree route.
		let event = ChainEvent::Finalized {
			hash: c1_header.hash(),
			tree_route: Arc::from(vec![b1_header.hash()]),
		};
		block_on(pool.maintain(event));
	}
	// this is to collect events from_charlie_watcher and make sure nothing was retracted
	{
		let event = ChainEvent::Finalized { hash: d1_header.hash(), tree_route: Arc::from(vec![]) };
		block_on(pool.maintain(event));
	}
	{
		let mut stream = futures::executor::block_on_stream(from_alice_watcher);
		assert_eq!(stream.next(), Some(TransactionStatus::Ready));
		assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash())));
		assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b1_header.hash())));
		assert_eq!(stream.next(), None);
	}
	{
		let mut stream = futures::executor::block_on_stream(from_bob_watcher);
		assert_eq!(stream.next(), Some(TransactionStatus::Ready));
		assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c1_header.hash())));
		assert_eq!(stream.next(), Some(TransactionStatus::Finalized(c1_header.hash())));
		assert_eq!(stream.next(), None);
	}
	{
		let mut stream = futures::executor::block_on_stream(from_charlie_watcher);
		assert_eq!(stream.next(), Some(TransactionStatus::Ready));
		assert_eq!(stream.next(), Some(TransactionStatus::InBlock(d1_header.hash())));
		assert_eq!(stream.next(), Some(TransactionStatus::Finalized(d1_header.hash())));
		assert_eq!(stream.next(), None);
	}
}
#[test]
fn delayed_finalization_does_not_retract() {
	// Scenario: a straight chain A1 <- B1 <- C1 where `NewBestBlock` arrives
	// for B1 and C1 first (phase-0/1) and the matching `Finalized` events come
	// later, one block at a time (phase-2/3). Finalizing already-best blocks
	// must not retract anything; watchers only gain Finalized events.
	sp_tracing::try_init_simple();
	let api = TestApi::empty();
	// starting block A1 (last finalized.)
	let a_header = api.push_block(1, vec![], true);
	let pool = create_basic_pool(api);
	let from_alice = uxt(Alice, 1);
	let from_bob = uxt(Bob, 2);
	pool.api().increment_nonce(Alice.into());
	pool.api().increment_nonce(Bob.into());
	let from_alice_watcher;
	let from_bob_watcher;
	let b1_header;
	let c1_header;
	// block B1
	{
		from_alice_watcher =
			block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone()))
				.expect("1. Imported");
		let header =
			pool.api()
				.push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true);
		assert_eq!(pool.status().ready, 1);
		log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header);
		b1_header = header;
	}
	// block C1
	{
		from_bob_watcher =
			block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone()))
				.expect("1. Imported");
		let header =
			pool.api()
				.push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true);
		assert_eq!(pool.status().ready, 2);
		log::trace!(target:"txpool", ">> C1: {:?} {:?}", header.hash(), header);
		c1_header = header;
	}
	{
		// phase-0
		let event = ChainEvent::NewBestBlock { hash: b1_header.hash(), tree_route: None };
		block_on(pool.maintain(event));
		assert_eq!(pool.status().ready, 1);
	}
	{
		// phase-1
		let event = ChainEvent::NewBestBlock { hash: c1_header.hash(), tree_route: None };
		block_on(pool.maintain(event));
		assert_eq!(pool.status().ready, 0);
	}
	{
		// phase-2
		let event = ChainEvent::Finalized { hash: b1_header.hash(), tree_route: Arc::from(vec![]) };
		block_on(pool.maintain(event));
	}
	{
		// phase-3
		let event = ChainEvent::Finalized { hash: c1_header.hash(), tree_route: Arc::from(vec![]) };
		block_on(pool.maintain(event));
	}
	{
		let mut stream = futures::executor::block_on_stream(from_alice_watcher);
		//phase-0
		assert_eq!(stream.next(), Some(TransactionStatus::Ready));
		assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash())));
		//phase-2
		assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b1_header.hash())));
		assert_eq!(stream.next(), None);
	}
	{
		let mut stream = futures::executor::block_on_stream(from_bob_watcher);
		//phase-0
		assert_eq!(stream.next(), Some(TransactionStatus::Ready));
		//phase-1
		assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c1_header.hash())));
		//phase-3
		assert_eq!(stream.next(), Some(TransactionStatus::Finalized(c1_header.hash())));
		assert_eq!(stream.next(), None);
	}
}
#[test]
fn best_block_after_finalization_does_not_retract() {
	// Scenario: C1 is finalized (with A1 and B1 in its tree route) and only
	// afterwards a stale `NewBestBlock` for the ancestor B1 arrives. The stale
	// best-block event must be ignored: no retraction, watchers see a clean
	// Ready -> InBlock -> Finalized sequence.
	sp_tracing::try_init_simple();
	let api = TestApi::empty();
	// starting block A1 (last finalized.)
	let a_header = api.push_block(1, vec![], true);
	let pool = create_basic_pool(api);
	let from_alice = uxt(Alice, 1);
	let from_bob = uxt(Bob, 2);
	pool.api().increment_nonce(Alice.into());
	pool.api().increment_nonce(Bob.into());
	let from_alice_watcher;
	let from_bob_watcher;
	let b1_header;
	let c1_header;
	// block B1
	{
		from_alice_watcher =
			block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone()))
				.expect("1. Imported");
		let header =
			pool.api()
				.push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true);
		assert_eq!(pool.status().ready, 1);
		log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header);
		b1_header = header;
	}
	// block C1
	{
		from_bob_watcher =
			block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone()))
				.expect("1. Imported");
		let header =
			pool.api()
				.push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true);
		assert_eq!(pool.status().ready, 2);
		log::trace!(target:"txpool", ">> C1: {:?} {:?}", header.hash(), header);
		c1_header = header;
	}
	{
		let event = ChainEvent::Finalized { hash: a_header.hash(), tree_route: Arc::from(vec![]) };
		block_on(pool.maintain(event));
	}
	{
		// Finalize C1 with its ancestors enacted via the tree route.
		let event = ChainEvent::Finalized {
			hash: c1_header.hash(),
			tree_route: Arc::from(vec![a_header.hash(), b1_header.hash()]),
		};
		block_on(pool.maintain(event));
		assert_eq!(pool.status().ready, 0);
	}
	{
		// Stale best-block notification for an already-finalized ancestor.
		let event = ChainEvent::NewBestBlock { hash: b1_header.hash(), tree_route: None };
		block_on(pool.maintain(event));
	}
	{
		let mut stream = futures::executor::block_on_stream(from_alice_watcher);
		assert_eq!(stream.next(), Some(TransactionStatus::Ready));
		assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash())));
		assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b1_header.hash())));
		assert_eq!(stream.next(), None);
	}
	{
		let mut stream = futures::executor::block_on_stream(from_bob_watcher);
		assert_eq!(stream.next(), Some(TransactionStatus::Ready));
		assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c1_header.hash())));
		assert_eq!(stream.next(), Some(TransactionStatus::Finalized(c1_header.hash())));
		assert_eq!(stream.next(), None);
	}
}
@@ -176,6 +176,13 @@ pub struct TreeRoute<Block: BlockT> {
}
impl<Block: BlockT> TreeRoute<Block> {
	/// Creates a new `TreeRoute`.
	///
	/// `pivot` is the index into `route` separating the retracted part
	/// (`route[..pivot]`) from the common-ancestor-and-enacted part.
	/// It is required that `pivot <= route.len()`, otherwise accessors that
	/// slice `route` by `pivot` may panic.
	pub fn new(route: Vec<HashAndNumber<Block>>, pivot: usize) -> Self {
		TreeRoute { route, pivot }
	}
/// Get a slice of all retracted blocks in reverse order (towards common ancestor).
pub fn retracted(&self) -> &[HashAndNumber<Block>] {
&self.route[..self.pivot]
@@ -22,7 +22,7 @@
use codec::Encode;
use futures::future::ready;
use parking_lot::RwLock;
use sp_blockchain::CachedHeaderMetadata;
use sp_blockchain::{CachedHeaderMetadata, TreeRoute};
use sp_runtime::{
generic::{self, BlockId},
traits::{
@@ -335,6 +335,14 @@ impl sc_transaction_pool::ChainApi for TestApi {
self.chain.read().block_by_hash.get(hash).map(|b| b.header().clone()),
})
}
	// Compute the real route between two blocks using the generic
	// `sp_blockchain::tree_route` over this `TestApi`'s `HeaderMetadata` impl,
	// converting the blockchain error into the pool's error type.
	fn tree_route(
		&self,
		from: <Self::Block as BlockT>::Hash,
		to: <Self::Block as BlockT>::Hash,
	) -> Result<TreeRoute<Self::Block>, Self::Error> {
		sp_blockchain::tree_route::<Block, TestApi>(self, from, to).map_err(Into::into)
	}
}
impl sp_blockchain::HeaderMetadata<Block> for TestApi {