Check block CLI command (#4240)

* Check block operation

* Update client/cli/src/lib.rs

* Update client/cli/src/params.rs
This commit is contained in:
Arkadiy Paronyan
2019-11-28 12:24:28 +01:00
committed by Gavin Wood
parent f78b83e363
commit 504e2f8bd5
22 changed files with 543 additions and 456 deletions
+2
View File
@@ -44,6 +44,8 @@ pub fn run<I, T, E>(args: I, exit: E, version: VersionInfo) -> error::Result<()>
Ok(new_full_start!(config).0), load_spec, exit),
ParseAndPrepare::ImportBlocks(cmd) => cmd.run_with_builder(|config: Config<_>|
Ok(new_full_start!(config).0), load_spec, exit),
ParseAndPrepare::CheckBlock(cmd) => cmd.run_with_builder(|config: Config<_>|
Ok(new_full_start!(config).0), load_spec, exit),
ParseAndPrepare::PurgeChain(cmd) => cmd.run(load_spec),
ParseAndPrepare::RevertChain(cmd) => cmd.run_with_builder(|config: Config<_>|
Ok(new_full_start!(config).0), load_spec),
+2
View File
@@ -136,6 +136,8 @@ pub fn run<I, T, E>(args: I, exit: E, version: substrate_cli::VersionInfo) -> er
Ok(new_full_start!(config).0), load_spec, exit),
ParseAndPrepare::ImportBlocks(cmd) => cmd.run_with_builder(|config: Config<_, _>|
Ok(new_full_start!(config).0), load_spec, exit),
ParseAndPrepare::CheckBlock(cmd) => cmd.run_with_builder(|config: Config<_, _>|
Ok(new_full_start!(config).0), load_spec, exit),
ParseAndPrepare::PurgeChain(cmd) => cmd.run(load_spec),
ParseAndPrepare::RevertChain(cmd) => cmd.run_with_builder(|config: Config<_, _>|
Ok(new_full_start!(config).0), load_spec),
+1
View File
@@ -568,6 +568,7 @@ mod tests {
auxiliary: Vec::new(),
fork_choice: ForkChoiceStrategy::LongestChain,
allow_missing_state: false,
import_existing: false,
};
block_import.import_block(params, Default::default())
+125 -74
View File
@@ -29,7 +29,7 @@ pub mod informant;
use client_api::execution_extensions::ExecutionStrategies;
use service::{
config::{Configuration, DatabaseConfig},
ServiceBuilderExport, ServiceBuilderImport, ServiceBuilderRevert,
ServiceBuilderCommand,
RuntimeGenesis, ChainSpecExtension, PruningMode, ChainSpec,
};
use network::{
@@ -54,7 +54,7 @@ pub use structopt::clap::App;
use params::{
RunCmd, PurgeChainCmd, RevertCmd, ImportBlocksCmd, ExportBlocksCmd, BuildSpecCmd,
NetworkConfigurationParams, MergeParameters, TransactionPoolParams,
NodeKeyParams, NodeKeyType, Cors,
NodeKeyParams, NodeKeyType, Cors, CheckBlockCmd,
};
pub use params::{NoCustom, CoreParams, SharedParams, ExecutionStrategy as ExecutionStrategyParam};
pub use traits::{GetLogFilter, AugmentClap};
@@ -64,6 +64,8 @@ use lazy_static::lazy_static;
use futures::{Future, FutureExt, TryFutureExt};
use futures01::{Async, Future as _};
use substrate_telemetry::TelemetryEndpoints;
use sr_primitives::generic::BlockId;
use sr_primitives::traits::Block as BlockT;
/// default sub directory to store network config
const DEFAULT_NETWORK_CONFIG_PATH : &'static str = "network";
@@ -231,6 +233,9 @@ where
params::CoreParams::ImportBlocks(params) => ParseAndPrepare::ImportBlocks(
ParseAndPrepareImport { params, version }
),
params::CoreParams::CheckBlock(params) => ParseAndPrepare::CheckBlock(
CheckBlock { params, version }
),
params::CoreParams::PurgeChain(params) => ParseAndPrepare::PurgeChain(
ParseAndPreparePurge { params, version }
),
@@ -263,6 +268,8 @@ pub enum ParseAndPrepare<'a, CC, RP> {
ExportBlocks(ParseAndPrepareExport<'a>),
/// Command ready to import the chain.
ImportBlocks(ParseAndPrepareImport<'a>),
/// Command to check a block.
CheckBlock(CheckBlock<'a>),
/// Command ready to purge the chain.
PurgeChain(ParseAndPreparePurge<'a>),
/// Command ready to revert the chain.
@@ -366,7 +373,7 @@ impl<'a> ParseAndPrepareExport<'a> {
) -> error::Result<()>
where S: FnOnce(&str) -> Result<Option<ChainSpec<G, E>>, String>,
F: FnOnce(Configuration<C, G, E>) -> Result<B, error::Error>,
B: ServiceBuilderExport,
B: ServiceBuilderCommand,
C: Default,
G: RuntimeGenesis,
E: ChainSpecExtension,
@@ -427,19 +434,13 @@ impl<'a> ParseAndPrepareImport<'a> {
) -> error::Result<()>
where S: FnOnce(&str) -> Result<Option<ChainSpec<G, E>>, String>,
F: FnOnce(Configuration<C, G, E>) -> Result<B, error::Error>,
B: ServiceBuilderImport,
B: ServiceBuilderCommand,
C: Default,
G: RuntimeGenesis,
E: ChainSpecExtension,
Exit: IntoExit
{
let mut config = create_config_with_db_path(spec_factory, &self.params.shared_params, self.version)?;
config.wasm_method = self.params.wasm_method.into();
config.execution_strategies = ExecutionStrategies {
importing: self.params.execution.into(),
other: self.params.execution.into(),
..Default::default()
};
let config = create_config_with_db_path(spec_factory, &self.params.shared_params, self.version)?;
let file: Box<dyn ReadPlusSeek + Send> = match self.params.input {
Some(filename) => Box::new(File::open(filename)?),
@@ -461,7 +462,7 @@ impl<'a> ParseAndPrepareImport<'a> {
let _ = exit_send.send(());
});
let mut import_fut = builder(config)?.import_blocks(file);
let mut import_fut = builder(config)?.import_blocks(file, false);
let fut = futures01::future::poll_fn(|| {
if exit_recv.try_recv().is_ok() {
return Ok(Async::Ready(()));
@@ -475,6 +476,49 @@ impl<'a> ParseAndPrepareImport<'a> {
}
}
/// Command to check a block.
pub struct CheckBlock<'a> {
/// Parsed `check-block` CLI arguments (block hash or number, plus shared params).
params: CheckBlockCmd,
/// Binary version information, used when building the node configuration.
version: &'a VersionInfo,
}
impl<'a> CheckBlock<'a> {
	/// Runs the command: resolves the requested block (by hash or number) and
	/// re-validates it via `ServiceBuilderCommand::check_block`, printing how
	/// long the check took.
	///
	/// * `builder` — turns the parsed node `Configuration` into the service
	///   components implementing `ServiceBuilderCommand`.
	/// * `spec_factory` — resolves the chain spec named in the shared params.
	/// * `_exit` — unused; the check runs to completion on a local runtime.
	pub fn run_with_builder<C, G, E, F, B, S, Exit>(
		self,
		builder: F,
		spec_factory: S,
		_exit: Exit,
	) -> error::Result<()>
	where S: FnOnce(&str) -> Result<Option<ChainSpec<G, E>>, String>,
		F: FnOnce(Configuration<C, G, E>) -> Result<B, error::Error>,
		B: ServiceBuilderCommand,
		<<B as ServiceBuilderCommand>::Block as BlockT>::Hash: FromStr,
		C: Default,
		G: RuntimeGenesis,
		E: ChainSpecExtension,
		Exit: IntoExit
	{
		let config = create_config_with_db_path(spec_factory, &self.params.shared_params, self.version)?;

		// Accept a block hash with or without the `0x` prefix.
		let input = if self.params.input.starts_with("0x") { &self.params.input[2..] } else { &self.params.input[..] };
		// Interpret the input as a hash first, then as a block number. The
		// number is deliberately parsed from the *original* input, so a
		// `0x`-prefixed value is only ever treated as a hash.
		let block_id = match FromStr::from_str(input) {
			Ok(hash) => BlockId::hash(hash),
			// `n` is already `u32`; convert straight into the block number type.
			Err(_) => match self.params.input.parse::<u32>() {
				Ok(n) => BlockId::number(n.into()),
				Err(_) => return Err(error::Error::Input("Invalid hash or number specified".into())),
			}
		};

		let start = std::time::Instant::now();
		let check = builder(config)?.check_block(block_id);
		// Drive the check future to completion on a dedicated single-threaded
		// runtime; creating it can only fail on resource exhaustion.
		let mut runtime = tokio::runtime::current_thread::Runtime::new()
			.expect("failed to create tokio runtime for check-block");
		runtime.block_on(check)?;
		println!("Completed in {} ms.", start.elapsed().as_millis());
		Ok(())
	}
}
/// Command ready to purge the chain.
pub struct ParseAndPreparePurge<'a> {
params: PurgeChainCmd,
@@ -548,7 +592,7 @@ impl<'a> ParseAndPrepareRevert<'a> {
) -> error::Result<()> where
S: FnOnce(&str) -> Result<Option<ChainSpec<G, E>>, String>,
F: FnOnce(Configuration<C, G, E>) -> Result<B, error::Error>,
B: ServiceBuilderRevert,
B: ServiceBuilderCommand,
C: Default,
G: RuntimeGenesis,
E: ChainSpecExtension,
@@ -694,6 +738,55 @@ fn fill_config_keystore_password<C, G, E>(
Ok(())
}
fn fill_shared_config<C, G, E>(config: &mut Configuration<C, G, E>, cli: &SharedParams, role: service::Roles)
-> error::Result<()>
where
C: Default,
G: RuntimeGenesis,
E: ChainSpecExtension,
{
config.database = DatabaseConfig::Path {
path: config.in_chain_config_dir(DEFAULT_DB_CONFIG_PATH).expect("We provided a base_path."),
cache_size: Some(cli.database_cache_size),
};
config.state_cache_size = cli.state_cache_size;
// by default we disable pruning if the node is an authority (i.e.
// `ArchiveAll`), otherwise we keep state for the last 256 blocks. if the
// node is an authority and pruning is enabled explicitly, then we error
// unless `unsafe_pruning` is set.
config.pruning = match &cli.pruning {
Some(ref s) if s == "archive" => PruningMode::ArchiveAll,
None if role == service::Roles::AUTHORITY => PruningMode::ArchiveAll,
None => PruningMode::default(),
Some(s) => {
if role == service::Roles::AUTHORITY && !cli.unsafe_pruning {
return Err(error::Error::Input(
"Validators should run with state pruning disabled (i.e. archive). \
You can ignore this check with `--unsafe-pruning`.".to_string()
));
}
PruningMode::keep_blocks(s.parse()
.map_err(|_| error::Error::Input("Invalid pruning mode specified".to_string()))?
)
},
};
config.wasm_method = cli.wasm_method.into();
let exec = &cli.execution_strategies;
let exec_all_or = |strat: params::ExecutionStrategy| exec.execution.unwrap_or(strat).into();
config.execution_strategies = ExecutionStrategies {
syncing: exec_all_or(exec.execution_syncing),
importing: exec_all_or(exec.execution_import_block),
block_construction: exec_all_or(exec.execution_block_construction),
offchain_worker: exec_all_or(exec.execution_offchain_worker),
other: exec_all_or(exec.execution_other),
};
Ok(())
}
fn create_run_node_config<C, G, E, S>(
cli: RunCmd, spec_factory: S, impl_name: &'static str, version: &VersionInfo,
) -> error::Result<Configuration<C, G, E>>
@@ -709,6 +802,19 @@ where
fill_config_keystore_password(&mut config, &cli)?;
let is_dev = cli.shared_params.dev;
let is_authority = cli.validator || cli.sentry || is_dev || cli.keyring.account.is_some();
let role =
if cli.light {
service::Roles::LIGHT
} else if is_authority {
service::Roles::AUTHORITY
} else {
service::Roles::FULL
};
fill_shared_config(&mut config, &cli.shared_params, role)?;
config.impl_name = impl_name;
config.impl_commit = version.commit;
config.impl_version = version.version;
@@ -731,61 +837,9 @@ where
config.keystore_path = cli.keystore_path.or_else(|| config.in_chain_config_dir(DEFAULT_KEYSTORE_CONFIG_PATH));
config.database = DatabaseConfig::Path {
path: config.in_chain_config_dir(DEFAULT_DB_CONFIG_PATH).expect("We provided a base_path."),
cache_size: Some(cli.database_cache_size),
};
config.state_cache_size = cli.state_cache_size;
let is_dev = cli.shared_params.dev;
let is_authority = cli.validator || cli.sentry || is_dev || cli.keyring.account.is_some();
let role =
if cli.light {
service::Roles::LIGHT
} else if is_authority {
service::Roles::AUTHORITY
} else {
service::Roles::FULL
};
// set sentry mode (i.e. act as an authority but **never** actively participate)
config.sentry_mode = cli.sentry;
// by default we disable pruning if the node is an authority (i.e.
// `ArchiveAll`), otherwise we keep state for the last 256 blocks. if the
// node is an authority and pruning is enabled explicitly, then we error
// unless `unsafe_pruning` is set.
config.pruning = match cli.pruning {
Some(ref s) if s == "archive" => PruningMode::ArchiveAll,
None if role == service::Roles::AUTHORITY => PruningMode::ArchiveAll,
None => PruningMode::default(),
Some(s) => {
if role == service::Roles::AUTHORITY && !cli.unsafe_pruning {
return Err(error::Error::Input(
"Validators should run with state pruning disabled (i.e. archive). \
You can ignore this check with `--unsafe-pruning`.".to_string()
));
}
PruningMode::keep_blocks(s.parse()
.map_err(|_| error::Error::Input("Invalid pruning mode specified".to_string()))?
)
},
};
config.wasm_method = cli.wasm_method.into();
let exec = cli.execution_strategies;
let exec_all_or = |strat: params::ExecutionStrategy| exec.execution.unwrap_or(strat).into();
config.execution_strategies = ExecutionStrategies {
syncing: exec_all_or(exec.execution_syncing),
importing: exec_all_or(exec.execution_import_block),
block_construction: exec_all_or(exec.execution_block_construction),
offchain_worker: exec_all_or(exec.execution_offchain_worker),
other: exec_all_or(exec.execution_other),
};
config.offchain_worker = match (cli.offchain_worker, role) {
(params::OffchainWorkerEnabled::WhenValidating, service::Roles::AUTHORITY) => true,
(params::OffchainWorkerEnabled::Always, _) => true,
@@ -871,11 +925,7 @@ where
let base_path = base_path(cli, version);
let mut config = service::Configuration::default_with_spec_and_base_path(spec.clone(), Some(base_path));
config.database = DatabaseConfig::Path {
path: config.in_chain_config_dir(DEFAULT_DB_CONFIG_PATH).expect("We provided a base_path."),
cache_size: None,
};
fill_shared_config(&mut config, &cli, service::Roles::FULL)?;
Ok(config)
}
@@ -943,14 +993,15 @@ fn init_logger(pattern: &str) {
)
};
if !enable_color {
output = kill_color(output.as_ref());
}
if !isatty && record.level() <= log::Level::Info && atty::is(atty::Stream::Stdout) {
// duplicate INFO/WARN output to console
println!("{}", output);
}
if !enable_color {
output = kill_color(output.as_ref());
}
writeln!(buf, "{}", output)
});
+69 -59
View File
@@ -46,7 +46,7 @@ impl Into<client_api::ExecutionStrategy> for ExecutionStrategy {
arg_enum! {
/// How to execute Wasm runtime code
#[allow(missing_docs)]
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Copy)]
pub enum WasmExecutionMethod {
// Uses an interpreter.
Interpreted,
@@ -109,6 +109,44 @@ pub struct SharedParams {
/// Sets a custom logging filter.
#[structopt(short = "l", long = "log", value_name = "LOG_PATTERN")]
pub log: Option<String>,
/// Specify the state pruning mode, a number of blocks to keep or 'archive'.
///
/// Default is to keep all block states if the node is running as a
/// validator (i.e. 'archive'), otherwise state is only kept for the last
/// 256 blocks.
#[structopt(long = "pruning", value_name = "PRUNING_MODE")]
pub pruning: Option<String>,
/// Force start with unsafe pruning settings.
///
/// When running as a validator it is highly recommended to disable state
/// pruning (i.e. 'archive') which is the default. The node will refuse to
/// start as a validator if pruning is enabled unless this option is set.
#[structopt(long = "unsafe-pruning")]
pub unsafe_pruning: bool,
/// Method for executing Wasm runtime code.
#[structopt(
long = "wasm-execution",
value_name = "METHOD",
possible_values = &WasmExecutionMethod::enabled_variants(),
case_insensitive = true,
default_value = "Interpreted"
)]
pub wasm_method: WasmExecutionMethod,
#[allow(missing_docs)]
#[structopt(flatten)]
pub execution_strategies: ExecutionStrategies,
/// Limit the memory the database cache can use.
#[structopt(long = "db-cache", value_name = "MiB", default_value = "1024")]
pub database_cache_size: u32,
/// Specify the state cache size.
#[structopt(long = "state-cache-size", value_name = "Bytes", default_value = "67108864")]
pub state_cache_size: usize,
}
impl GetLogFilter for SharedParams {
@@ -386,14 +424,6 @@ pub struct RunCmd {
#[structopt(long = "light")]
pub light: bool,
/// Limit the memory the database cache can use.
#[structopt(long = "db-cache", value_name = "MiB", default_value = "1024")]
pub database_cache_size: u32,
/// Specify the state cache size.
#[structopt(long = "state-cache-size", value_name = "Bytes", default_value = "67108864")]
pub state_cache_size: usize,
/// Listen to all RPC interfaces.
///
/// Default is local.
@@ -438,22 +468,6 @@ pub struct RunCmd {
#[structopt(long = "grafana-port", value_name = "PORT")]
pub grafana_port: Option<u16>,
/// Specify the state pruning mode, a number of blocks to keep or 'archive'.
///
/// Default is to keep all block states if the node is running as a
/// validator (i.e. 'archive'), otherwise state is only kept for the last
/// 256 blocks.
#[structopt(long = "pruning", value_name = "PRUNING_MODE")]
pub pruning: Option<String>,
/// Force start with unsafe pruning settings.
///
/// When running as a validator it is highly recommended to disable state
/// pruning (i.e. 'archive') which is the default. The node will refuse to
/// start as a validator if pruning is enabled unless this option is set.
#[structopt(long = "unsafe-pruning")]
pub unsafe_pruning: bool,
/// The human-readable name for this node.
///
/// The node name will be reported to the telemetry server, if enabled.
@@ -487,20 +501,6 @@ pub struct RunCmd {
)]
pub offchain_worker: OffchainWorkerEnabled,
/// Method for executing Wasm runtime code.
#[structopt(
long = "wasm-execution",
value_name = "METHOD",
possible_values = &WasmExecutionMethod::enabled_variants(),
case_insensitive = true,
default_value = "Interpreted"
)]
pub wasm_method: WasmExecutionMethod,
#[allow(missing_docs)]
#[structopt(flatten)]
pub execution_strategies: ExecutionStrategies,
#[allow(missing_docs)]
#[structopt(flatten)]
pub shared_params: SharedParams,
@@ -764,30 +764,30 @@ pub struct ImportBlocksCmd {
#[allow(missing_docs)]
#[structopt(flatten)]
pub shared_params: SharedParams,
/// Method for executing Wasm runtime code.
#[structopt(
long = "wasm-execution",
value_name = "METHOD",
possible_values = &WasmExecutionMethod::variants(),
case_insensitive = true,
default_value = "Interpreted"
)]
pub wasm_method: WasmExecutionMethod,
/// The means of execution used when calling into the runtime while importing blocks.
#[structopt(
long = "execution",
value_name = "STRATEGY",
possible_values = &ExecutionStrategy::variants(),
case_insensitive = true,
default_value = "NativeElseWasm"
)]
pub execution: ExecutionStrategy,
}
impl_get_log_filter!(ImportBlocksCmd);
/// The `check-block` command used to validate blocks.
// NOTE: the `///` doc comments on the fields below double as the CLI help
// text rendered by structopt — keep them user-facing when editing.
#[derive(Debug, StructOpt, Clone)]
pub struct CheckBlockCmd {
/// Block hash or number
#[structopt(value_name = "HASH or NUMBER")]
pub input: String,
/// The default number of 64KB pages to ever allocate for Wasm execution.
///
/// Don't alter this unless you know what you're doing.
#[structopt(long = "default-heap-pages", value_name = "COUNT")]
pub default_heap_pages: Option<u32>,
#[allow(missing_docs)]
#[structopt(flatten)]
pub shared_params: SharedParams,
}
// Provides the `GetLogFilter` impl via the project macro — presumably it
// forwards to `shared_params`; confirm against the macro definition.
impl_get_log_filter!(CheckBlockCmd);
/// The `revert` command used revert the chain to a previous state.
#[derive(Debug, StructOpt, Clone)]
pub struct RevertCmd {
@@ -835,6 +835,9 @@ pub enum CoreParams<CC, RP> {
/// Import blocks from file.
ImportBlocks(ImportBlocksCmd),
/// Validate a single block.
CheckBlock(CheckBlockCmd),
/// Revert chain to the previous state.
Revert(RevertCmd),
@@ -868,6 +871,10 @@ impl<CC, RP> StructOpt for CoreParams<CC, RP> where
ImportBlocksCmd::augment_clap(SubCommand::with_name("import-blocks"))
.about("Import blocks from file.")
)
.subcommand(
CheckBlockCmd::augment_clap(SubCommand::with_name("check-block"))
.about("Re-validate a known block.")
)
.subcommand(
RevertCmd::augment_clap(SubCommand::with_name("revert"))
.about("Revert chain to the previous state.")
@@ -886,6 +893,8 @@ impl<CC, RP> StructOpt for CoreParams<CC, RP> where
CoreParams::ExportBlocks(ExportBlocksCmd::from_clap(matches)),
("import-blocks", Some(matches)) =>
CoreParams::ImportBlocks(ImportBlocksCmd::from_clap(matches)),
("check-block", Some(matches)) =>
CoreParams::CheckBlock(CheckBlockCmd::from_clap(matches)),
("revert", Some(matches)) => CoreParams::Revert(RevertCmd::from_clap(matches)),
("purge-chain", Some(matches)) =>
CoreParams::PurgeChain(PurgeChainCmd::from_clap(matches)),
@@ -902,6 +911,7 @@ impl<CC, RP> GetLogFilter for CoreParams<CC, RP> where CC: GetLogFilter {
CoreParams::BuildSpec(c) => c.get_log_filter(),
CoreParams::ExportBlocks(c) => c.get_log_filter(),
CoreParams::ImportBlocks(c) => c.get_log_filter(),
CoreParams::CheckBlock(c) => c.get_log_filter(),
CoreParams::PurgeChain(c) => c.get_log_filter(),
CoreParams::Revert(c) => c.get_log_filter(),
CoreParams::Custom(c) => c.get_log_filter(),
@@ -281,6 +281,7 @@ impl<H, B, C, E, I, P, Error, SO> slots::SimpleSlotWorker<B> for AuraWorker<C, E
auxiliary: Vec::new(),
fork_choice: ForkChoiceStrategy::LongestChain,
allow_missing_state: false,
import_existing: false,
}
})
}
@@ -590,6 +591,7 @@ impl<B: BlockT, C, P, T> Verifier<B> for AuraVerifier<C, P, T> where
auxiliary: Vec::new(),
fork_choice: ForkChoiceStrategy::LongestChain,
allow_missing_state: false,
import_existing: false,
};
Ok((block_import_params, maybe_keys))
@@ -436,6 +436,7 @@ impl<B, C, E, I, Error, SO> slots::SimpleSlotWorker<B> for BabeWorker<B, C, E, I
// https://github.com/paritytech/substrate/issues/3623
fork_choice: ForkChoiceStrategy::LongestChain,
allow_missing_state: false,
import_existing: false,
}
})
}
@@ -775,6 +776,7 @@ impl<B, E, Block, RA, PRA> Verifier<Block> for BabeVerifier<B, E, Block, RA, PRA
// https://github.com/paritytech/substrate/issues/3623
fork_choice: ForkChoiceStrategy::LongestChain,
allow_missing_state: false,
import_existing: false,
};
Ok((block_import_params, Default::default()))
@@ -579,6 +579,7 @@ fn propose_and_import_block(
auxiliary: Vec::new(),
fork_choice: ForkChoiceStrategy::LongestChain,
allow_missing_state: false,
import_existing: false,
},
Default::default(),
).unwrap();
@@ -304,6 +304,7 @@ impl<B: BlockT<Hash=H256>, C, S, Algorithm> Verifier<B> for PowVerifier<B, C, S,
auxiliary: vec![(key, Some(aux.encode()))],
fork_choice: ForkChoiceStrategy::Custom(aux.total_difficulty > best_aux.total_difficulty),
allow_missing_state: false,
import_existing: false,
};
Ok((import_block, None))
@@ -532,6 +533,7 @@ fn mine_loop<B: BlockT<Hash=H256>, C, Algorithm, E, SO, S>(
auxiliary: vec![(key, Some(aux.encode()))],
fork_choice: ForkChoiceStrategy::Custom(true),
allow_missing_state: false,
import_existing: false,
};
block_import.import_block(import_block, HashMap::default())
@@ -659,6 +659,7 @@ pub mod tests {
auxiliary: Vec::new(),
fork_choice: ForkChoiceStrategy::LongestChain,
allow_missing_state: true,
import_existing: false,
};
do_import_block::<_, _, _, TestJustification>(
&client,
@@ -975,6 +975,7 @@ fn allows_reimporting_change_blocks() {
auxiliary: Vec::new(),
fork_choice: ForkChoiceStrategy::LongestChain,
allow_missing_state: false,
import_existing: false,
}
};
@@ -1028,6 +1029,7 @@ fn test_bad_justification() {
auxiliary: Vec::new(),
fork_choice: ForkChoiceStrategy::LongestChain,
allow_missing_state: false,
import_existing: false,
}
};
@@ -1738,6 +1740,7 @@ fn imports_justification_for_regular_blocks_on_import() {
auxiliary: Vec::new(),
fork_choice: ForkChoiceStrategy::LongestChain,
allow_missing_state: false,
import_existing: false,
};
assert_eq!(
@@ -662,6 +662,7 @@ impl<B: BlockT> ChainSync<B> {
justification: block_data.block.justification,
origin: block_data.origin,
allow_missing_state: false,
import_existing: false,
}
}).collect()
}
@@ -675,6 +676,7 @@ impl<B: BlockT> ChainSync<B> {
justification: b.justification,
origin: Some(who.clone()),
allow_missing_state: true,
import_existing: false,
}
}).collect()
}
@@ -41,6 +41,7 @@ fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock<Block>)
justification,
origin: Some(peer_id.clone()),
allow_missing_state: false,
import_existing: false,
})
}
+1
View File
@@ -99,6 +99,7 @@ impl<B: BlockT> Verifier<B> for PassThroughVerifier {
auxiliary: Vec::new(),
fork_choice: ForkChoiceStrategy::LongestChain,
allow_missing_state: false,
import_existing: false,
}, maybe_keys))
}
}
+13 -86
View File
@@ -25,7 +25,6 @@ use client_api::{
};
use client::Client;
use chain_spec::{RuntimeGenesis, Extension};
use codec::{Decode, Encode, IoReader};
use consensus_common::import_queue::ImportQueue;
use futures::{prelude::*, sync::mpsc};
use futures03::{
@@ -44,8 +43,7 @@ use rpc;
use sr_api::ConstructRuntimeApi;
use sr_primitives::generic::BlockId;
use sr_primitives::traits::{
Block as BlockT, ProvideRuntimeApi, NumberFor, One,
Zero, Header, SaturatedConversion,
Block as BlockT, ProvideRuntimeApi, NumberFor, Header, SaturatedConversion,
};
use substrate_executor::{NativeExecutor, NativeExecutionDispatch};
use std::{
@@ -80,12 +78,12 @@ pub struct ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCSExt, TCl, TFchr, TSc, TImp
TNetP, TExPool, TRpc, Backend>
{
config: Configuration<TCfg, TGen, TCSExt>,
client: Arc<TCl>,
pub (crate) client: Arc<TCl>,
backend: Arc<Backend>,
keystore: Arc<RwLock<Keystore>>,
fetcher: Option<TFchr>,
select_chain: Option<TSc>,
import_queue: TImpQu,
pub (crate) import_queue: TImpQu,
finality_proof_request_builder: Option<TFprb>,
finality_proof_provider: Option<TFpp>,
network_protocol: TNetP,
@@ -660,21 +658,17 @@ impl<TBl, TRtApi, TCfg, TGen, TCSExt, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, TNet
}
}
/// Implemented on `ServiceBuilder`. Allows importing blocks once you have given all the required
/// Implemented on `ServiceBuilder`. Allows running block commands, such as import/export/validate
/// components to the builder.
pub trait ServiceBuilderImport {
pub trait ServiceBuilderCommand {
/// Block type this API operates on.
type Block: BlockT;
/// Starts the process of importing blocks.
fn import_blocks(
self,
input: impl Read + Seek + Send + 'static,
force: bool,
) -> Box<dyn Future<Item = (), Error = Error> + Send>;
}
/// Implemented on `ServiceBuilder`. Allows exporting blocks once you have given all the required
/// components to the builder.
pub trait ServiceBuilderExport {
/// Type of block of the builder.
type Block: BlockT;
/// Performs the blocks export.
fn export_blocks(
@@ -684,85 +678,18 @@ pub trait ServiceBuilderExport {
to: Option<NumberFor<Self::Block>>,
json: bool
) -> Box<dyn Future<Item = (), Error = Error>>;
}
/// Implemented on `ServiceBuilder`. Allows reverting the chain once you have given all the
/// required components to the builder.
pub trait ServiceBuilderRevert {
/// Type of block of the builder.
type Block: BlockT;
/// Performs a revert of `blocks` bocks.
/// Performs a revert of `blocks` blocks.
fn revert_chain(
&self,
blocks: NumberFor<Self::Block>
) -> Result<(), Error>;
}
impl<
TBl, TRtApi, TCfg, TGen, TCSExt, TBackend,
TExec, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP,
TExPool, TRpc, Backend
> ServiceBuilderImport for ServiceBuilder<
TBl, TRtApi, TCfg, TGen, TCSExt, Client<TBackend, TExec, TBl, TRtApi>,
TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc, Backend
> where
TBl: BlockT<Hash = <Blake2Hasher as Hasher>::Out>,
TBackend: 'static + client_api::backend::Backend<TBl, Blake2Hasher> + Send,
TExec: 'static + client::CallExecutor<TBl, Blake2Hasher> + Send + Sync + Clone,
TImpQu: 'static + ImportQueue<TBl>,
TRtApi: 'static + Send + Sync,
{
fn import_blocks(
/// Re-validate known block.
fn check_block(
self,
input: impl Read + Seek + Send + 'static,
) -> Box<dyn Future<Item = (), Error = Error> + Send> {
let client = self.client;
let mut queue = self.import_queue;
Box::new(import_blocks!(TBl, client, queue, input).compat())
}
}
impl<TBl, TRtApi, TCfg, TGen, TCSExt, TBackend, TExec, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc>
ServiceBuilderExport for ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCSExt, Client<TBackend, TExec, TBl, TRtApi>,
TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc, TBackend>
where
TBl: BlockT<Hash = <Blake2Hasher as Hasher>::Out>,
TBackend: 'static + client_api::backend::Backend<TBl, Blake2Hasher> + Send,
TExec: 'static + client::CallExecutor<TBl, Blake2Hasher> + Send + Sync + Clone,
TRtApi: 'static + Send + Sync,
{
type Block = TBl;
fn export_blocks(
self,
mut output: impl Write + 'static,
from: NumberFor<TBl>,
to: Option<NumberFor<TBl>>,
json: bool
) -> Box<dyn Future<Item = (), Error = Error>> {
let client = self.client;
Box::new(export_blocks!(client, output, from, to, json).compat())
}
}
impl<TBl, TRtApi, TCfg, TGen, TCSExt, TBackend, TExec, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc>
ServiceBuilderRevert for ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCSExt, Client<TBackend, TExec, TBl, TRtApi>,
TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc, TBackend>
where
TBl: BlockT<Hash = <Blake2Hasher as Hasher>::Out>,
TBackend: 'static + client_api::backend::Backend<TBl, Blake2Hasher> + Send,
TExec: 'static + client::CallExecutor<TBl, Blake2Hasher> + Send + Sync + Clone
{
type Block = TBl;
fn revert_chain(
&self,
blocks: NumberFor<TBl>
) -> Result<(), Error> {
let client = &self.client;
revert_chain!(client, blocks)
}
block: BlockId<Self::Block>
) -> Box<dyn Future<Item = (), Error = Error> + Send>;
}
impl<TBl, TRtApi, TCfg, TGen, TCSExt, TBackend, TExec, TSc, TImpQu, TNetP, TExPool, TRpc>
+283 -225
View File
@@ -17,234 +17,29 @@
//! Chain utilities.
use crate::error;
use crate::builder::{ServiceBuilderCommand, ServiceBuilder};
use crate::error::Error;
use chain_spec::{ChainSpec, RuntimeGenesis, Extension};
use log::{warn, info};
use futures::{future, prelude::*};
use futures03::{
TryFutureExt as _,
};
use primitives::{Blake2Hasher, Hasher};
use sr_primitives::traits::{
Block as BlockT, NumberFor, One, Zero, Header, SaturatedConversion
};
use sr_primitives::generic::{BlockId, SignedBlock};
use codec::{Decode, Encode, IoReader};
use client::Client;
use consensus_common::import_queue::{IncomingBlock, Link, BlockImportError, BlockImportResult, ImportQueue};
use consensus_common::BlockOrigin;
/// Defines the logic for an operation exporting blocks within a range.
#[macro_export]
/// Export blocks
macro_rules! export_blocks {
($client:ident, $output:ident, $from:ident, $to:ident, $json:ident) => {{
let mut block = $from;
use std::{
io::{Read, Write, Seek},
};
let last = match $to {
Some(v) if v.is_zero() => One::one(),
Some(v) => v,
None => $client.info().chain.best_number,
};
let mut wrote_header = false;
// Exporting blocks is implemented as a future, because we want the operation to be
// interruptible.
//
// Every time we write a block to the output, the `Future` re-schedules itself and returns
// `Poll::Pending`.
// This makes it possible either to interleave other operations in-between the block exports,
// or to stop the operation completely.
futures03::future::poll_fn(move |cx| {
if last < block {
return std::task::Poll::Ready(Err("Invalid block range specified".into()));
}
if !wrote_header {
info!("Exporting blocks from #{} to #{}", block, last);
if !$json {
let last_: u64 = last.saturated_into::<u64>();
let block_: u64 = block.saturated_into::<u64>();
let len: u64 = last_ - block_ + 1;
$output.write_all(&len.encode())?;
}
wrote_header = true;
}
match $client.block(&BlockId::number(block))? {
Some(block) => {
if $json {
serde_json::to_writer(&mut $output, &block)
.map_err(|e| format!("Error writing JSON: {}", e))?;
} else {
$output.write_all(&block.encode())?;
}
},
// Reached end of the chain.
None => return std::task::Poll::Ready(Ok(())),
}
if (block % 10000.into()).is_zero() {
info!("#{}", block);
}
if block == last {
return std::task::Poll::Ready(Ok(()));
}
block += One::one();
// Re-schedule the task in order to continue the operation.
cx.waker().wake_by_ref();
std::task::Poll::Pending
})
}}
}
/// Defines the logic for an operation importing blocks from some known import.
///
/// Expands to a `Future` that reads a `u64` block count followed by that many
/// SCALE-encoded `SignedBlock`s from `$input`, pushing each into `$queue` for
/// verification and import into `$client`. Implemented as a future so that the
/// operation is interruptible and can be interleaved with other work.
#[macro_export]
macro_rules! import_blocks {
	($block:ty, $client:ident, $queue:ident, $input:ident) => {{
		use consensus_common::import_queue::{IncomingBlock, Link, BlockImportError, BlockImportResult};
		use consensus_common::BlockOrigin;
		use network::message;
		use sr_primitives::generic::SignedBlock;
		use sr_primitives::traits::Block;

		// Receives progress notifications from the import queue: counts imported
		// blocks and records whether any import failed.
		struct WaitLink {
			imported_blocks: u64,
			has_error: bool,
		}

		impl WaitLink {
			fn new() -> WaitLink {
				WaitLink {
					imported_blocks: 0,
					has_error: false,
				}
			}
		}

		impl<B: Block> Link<B> for WaitLink {
			fn blocks_processed(
				&mut self,
				imported: usize,
				_count: usize,
				results: Vec<(Result<BlockImportResult<NumberFor<B>>, BlockImportError>, B::Hash)>
			) {
				self.imported_blocks += imported as u64;

				for result in results {
					if let (Err(err), hash) = result {
						warn!("There was an error importing block with hash {:?}: {:?}", hash, err);
						// Stop the whole operation on the first failed block.
						self.has_error = true;
						break;
					}
				}
			}
		}

		let mut io_reader_input = IoReader($input);
		let mut count = None::<u64>;
		let mut read_block_count = 0;
		let mut link = WaitLink::new();

		// Importing blocks is implemented as a future, because we want the operation to be
		// interruptible.
		//
		// Every time we read a block from the input or import a bunch of blocks from the import
		// queue, the `Future` re-schedules itself and returns `Poll::Pending`.
		// This makes it possible either to interleave other operations in-between the block imports,
		// or to stop the operation completely.
		futures03::future::poll_fn(move |cx| {
			// Start by reading the number of blocks if not done so already.
			let count = match count {
				Some(c) => c,
				None => {
					let c: u64 = match Decode::decode(&mut io_reader_input) {
						Ok(c) => c,
						Err(err) => {
							let err = format!("Error reading file: {}", err);
							return std::task::Poll::Ready(Err(From::from(err)));
						},
					};
					info!("Importing {} blocks", c);
					// Assigns the captured variable so the length is only decoded once.
					count = Some(c);
					c
				}
			};

			// Read blocks from the input.
			if read_block_count < count {
				match SignedBlock::<$block>::decode(&mut io_reader_input) {
					Ok(signed) => {
						let (header, extrinsics) = signed.block.deconstruct();
						let hash = header.hash();
						let block = message::BlockData::<$block> {
							hash,
							justification: signed.justification,
							header: Some(header),
							body: Some(extrinsics),
							receipt: None,
							message_queue: None
						};
						// import queue handles verification and importing it into the client
						$queue.import_blocks(BlockOrigin::File, vec![
							IncomingBlock::<$block> {
								hash: block.hash,
								header: block.header,
								body: block.body,
								justification: block.justification,
								origin: None,
								allow_missing_state: false,
								// `IncomingBlock` now carries this field (see its
								// definition); omitting it here no longer compiles.
								// Plain imports never re-import known blocks.
								import_existing: false,
							}
						]);
					}
					Err(e) => {
						warn!("Error reading block data at {}: {}", read_block_count, e);
						return std::task::Poll::Ready(Ok(()));
					}
				}

				read_block_count += 1;
				if read_block_count % 1000 == 0 {
					info!("#{} blocks were added to the queue", read_block_count);
				}

				// Re-schedule so the queue can make progress between reads.
				cx.waker().wake_by_ref();
				return std::task::Poll::Pending;
			}

			let blocks_before = link.imported_blocks;
			$queue.poll_actions(cx, &mut link);

			if link.has_error {
				info!(
					"Stopping after #{} blocks because of an error",
					link.imported_blocks,
				);
				return std::task::Poll::Ready(Ok(()));
			}
			if link.imported_blocks / 1000 != blocks_before / 1000 {
				info!(
					"#{} blocks were imported (#{} left)",
					link.imported_blocks,
					count - link.imported_blocks
				);
			}
			if link.imported_blocks >= count {
				info!("Imported {} blocks. Best: #{}", read_block_count, $client.info().chain.best_number);
				return std::task::Poll::Ready(Ok(()));
			} else {
				// Polling the import queue will re-schedule the task when ready.
				return std::task::Poll::Pending;
			}
		})
	}}
}
/// Revert the chain some number of blocks.
#[macro_export]
macro_rules! revert_chain {
	($client:ident, $blocks:ident) => {{
		// Ask the client to roll back up to `$blocks` non-finalized blocks,
		// then report the new chain head (or that nothing could be reverted).
		let num_reverted = $client.revert($blocks)?;
		let chain_info = $client.info().chain;
		if !num_reverted.is_zero() {
			info!("Reverted {} blocks. Best: #{} ({})", num_reverted, chain_info.best_number, chain_info.best_hash);
		} else {
			info!("There aren't any non-finalized blocks to revert.");
		}
		Ok(())
	}}
}
use network::message;
/// Build a chain spec json
pub fn build_spec<G, E>(spec: ChainSpec<G, E>, raw: bool) -> error::Result<String> where
@@ -253,3 +48,266 @@ pub fn build_spec<G, E>(spec: ChainSpec<G, E>, raw: bool) -> error::Result<Strin
{
Ok(spec.to_json(raw)?)
}
// Implements the unified CLI block commands (import / export / revert / check)
// for any fully-assembled `ServiceBuilder` whose client, backend and import
// queue satisfy the bounds below. The long-running operations are expressed as
// `std`-futures bridged to 0.1 futures via `.compat()` so callers can interrupt
// them.
impl<
	TBl, TRtApi, TCfg, TGen, TCSExt, TBackend,
	TExec, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP,
	TExPool, TRpc, Backend
> ServiceBuilderCommand for ServiceBuilder<
	TBl, TRtApi, TCfg, TGen, TCSExt, Client<TBackend, TExec, TBl, TRtApi>,
	TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc, Backend
> where
	TBl: BlockT<Hash = <Blake2Hasher as Hasher>::Out>,
	TBackend: 'static + client_api::backend::Backend<TBl, Blake2Hasher> + Send,
	TExec: 'static + client::CallExecutor<TBl, Blake2Hasher> + Send + Sync + Clone,
	TImpQu: 'static + ImportQueue<TBl>,
	TRtApi: 'static + Send + Sync,
{
	type Block = TBl;

	// Reads a `u64` block count then that many SCALE-encoded `SignedBlock`s from
	// `input`, feeding each into the import queue. `force` is forwarded as
	// `import_existing`, allowing blocks already in the chain to be re-verified.
	fn import_blocks(
		self,
		input: impl Read + Seek + Send + 'static,
		force: bool,
	) -> Box<dyn Future<Item = (), Error = Error> + Send> {
		// Receives progress notifications from the import queue: counts imported
		// blocks and records whether any import failed.
		struct WaitLink {
			imported_blocks: u64,
			has_error: bool,
		}

		impl WaitLink {
			fn new() -> WaitLink {
				WaitLink {
					imported_blocks: 0,
					has_error: false,
				}
			}
		}

		impl<B: BlockT> Link<B> for WaitLink {
			fn blocks_processed(
				&mut self,
				imported: usize,
				_count: usize,
				results: Vec<(Result<BlockImportResult<NumberFor<B>>, BlockImportError>, B::Hash)>
			) {
				self.imported_blocks += imported as u64;

				for result in results {
					if let (Err(err), hash) = result {
						warn!("There was an error importing block with hash {:?}: {:?}", hash, err);
						// First failure aborts the whole operation.
						self.has_error = true;
						break;
					}
				}
			}
		}

		let client = self.client;
		let mut queue = self.import_queue;
		let mut io_reader_input = IoReader(input);
		let mut count = None::<u64>;
		let mut read_block_count = 0;
		let mut link = WaitLink::new();

		// Importing blocks is implemented as a future, because we want the operation to be
		// interruptible.
		//
		// Every time we read a block from the input or import a bunch of blocks from the import
		// queue, the `Future` re-schedules itself and returns `Poll::Pending`.
		// This makes it possible either to interleave other operations in-between the block imports,
		// or to stop the operation completely.
		let import = futures03::future::poll_fn(move |cx| {
			// Start by reading the number of blocks if not done so already.
			let count = match count {
				Some(c) => c,
				None => {
					let c: u64 = match Decode::decode(&mut io_reader_input) {
						Ok(c) => c,
						Err(err) => {
							let err = format!("Error reading file: {}", err);
							return std::task::Poll::Ready(Err(From::from(err)));
						},
					};
					info!("Importing {} blocks", c);
					// Assigns the captured `count` so the length is decoded only once;
					// the `let`-bound `count` shadows it for the rest of this poll.
					count = Some(c);
					c
				}
			};

			// Read blocks from the input.
			if read_block_count < count {
				match SignedBlock::<Self::Block>::decode(&mut io_reader_input) {
					Ok(signed) => {
						let (header, extrinsics) = signed.block.deconstruct();
						let hash = header.hash();
						let block = message::BlockData::<Self::Block> {
							hash,
							justification: signed.justification,
							header: Some(header),
							body: Some(extrinsics),
							receipt: None,
							message_queue: None
						};
						// import queue handles verification and importing it into the client
						queue.import_blocks(BlockOrigin::File, vec![
							IncomingBlock::<Self::Block> {
								hash: block.hash,
								header: block.header,
								body: block.body,
								justification: block.justification,
								origin: None,
								allow_missing_state: false,
								// `force` re-imports (re-validates) blocks that are
								// already part of the chain.
								import_existing: force,
							}
						]);
					}
					Err(e) => {
						warn!("Error reading block data at {}: {}", read_block_count, e);
						return std::task::Poll::Ready(Ok(()));
					}
				}

				read_block_count += 1;
				if read_block_count % 1000 == 0 {
					info!("#{} blocks were added to the queue", read_block_count);
				}

				// Re-schedule so the queue can make progress between reads.
				cx.waker().wake_by_ref();
				return std::task::Poll::Pending;
			}

			let blocks_before = link.imported_blocks;
			queue.poll_actions(cx, &mut link);

			if link.has_error {
				info!(
					"Stopping after #{} blocks because of an error",
					link.imported_blocks,
				);
				return std::task::Poll::Ready(Ok(()));
			}
			// Log roughly every 1000 imported blocks.
			if link.imported_blocks / 1000 != blocks_before / 1000 {
				info!(
					"#{} blocks were imported (#{} left)",
					link.imported_blocks,
					count - link.imported_blocks
				);
			}
			if link.imported_blocks >= count {
				info!("Imported {} blocks. Best: #{}", read_block_count, client.info().chain.best_number);
				return std::task::Poll::Ready(Ok(()));
			} else {
				// Polling the import queue will re-schedule the task when ready.
				return std::task::Poll::Pending;
			}
		});
		// Bridge the std-future into the 0.1 `Future` this trait returns.
		Box::new(import.compat())
	}

	// Writes blocks `from..=to` to `output`, either as JSON or as SCALE
	// (length-prefixed with the block count). `to = None` means the current best
	// block; a `to` of zero is treated as block #1.
	fn export_blocks(
		self,
		mut output: impl Write + 'static,
		from: NumberFor<TBl>,
		to: Option<NumberFor<TBl>>,
		json: bool
	) -> Box<dyn Future<Item = (), Error = Error>> {
		let client = self.client;
		let mut block = from;

		let last = match to {
			Some(v) if v.is_zero() => One::one(),
			Some(v) => v,
			None => client.info().chain.best_number,
		};

		let mut wrote_header = false;

		// Exporting blocks is implemented as a future, because we want the operation to be
		// interruptible.
		//
		// Every time we write a block to the output, the `Future` re-schedules itself and returns
		// `Poll::Pending`.
		// This makes it possible either to interleave other operations in-between the block exports,
		// or to stop the operation completely.
		let export = futures03::future::poll_fn(move |cx| {
			if last < block {
				return std::task::Poll::Ready(Err("Invalid block range specified".into()));
			}

			if !wrote_header {
				info!("Exporting blocks from #{} to #{}", block, last);
				// The binary format starts with the number of blocks that follow,
				// matching what `import_blocks` expects to decode first.
				if !json {
					let last_: u64 = last.saturated_into::<u64>();
					let block_: u64 = block.saturated_into::<u64>();
					let len: u64 = last_ - block_ + 1;
					output.write_all(&len.encode())?;
				}
				wrote_header = true;
			}

			match client.block(&BlockId::number(block))? {
				Some(block) => {
					if json {
						serde_json::to_writer(&mut output, &block)
							.map_err(|e| format!("Error writing JSON: {}", e))?;
					} else {
						output.write_all(&block.encode())?;
					}
				},
				// Reached end of the chain.
				None => return std::task::Poll::Ready(Ok(())),
			}
			if (block % 10000.into()).is_zero() {
				info!("#{}", block);
			}
			if block == last {
				return std::task::Poll::Ready(Ok(()));
			}
			block += One::one();

			// Re-schedule the task in order to continue the operation.
			cx.waker().wake_by_ref();
			std::task::Poll::Pending
		});

		Box::new(export.compat())
	}

	// Reverts up to `blocks` non-finalized blocks and logs the new chain head.
	fn revert_chain(
		&self,
		blocks: NumberFor<TBl>
	) -> Result<(), Error> {
		let reverted = self.client.revert(blocks)?;
		let info = self.client.info().chain;

		if reverted.is_zero() {
			info!("There aren't any non-finalized blocks to revert.");
		} else {
			info!("Reverted {} blocks. Best: #{} ({})", reverted, info.best_number, info.best_hash);
		}
		Ok(())
	}

	// Re-validates a single block already in the database: encodes it as a
	// one-element export stream (count `1u64` followed by the SCALE-encoded
	// block) and runs it back through `import_blocks` with `force = true` so the
	// existing block is re-verified rather than skipped.
	fn check_block(
		self,
		block_id: BlockId<TBl>
	) -> Box<dyn Future<Item = (), Error = Error> + Send> {
		match self.client.block(&block_id) {
			Ok(Some(block)) => {
				let mut buf = Vec::new();
				1u64.encode_to(&mut buf);
				block.encode_to(&mut buf);
				let reader = std::io::Cursor::new(buf);
				self.import_blocks(reader, true)
			}
			Ok(None) => Box::new(future::err("Unknown block".into())),
			Err(e) => Box::new(future::err(format!("Error reading block: {:?}", e).into())),
		}
	}
}
+1 -3
View File
@@ -53,9 +53,7 @@ use sr_primitives::generic::BlockId;
use sr_primitives::traits::{NumberFor, Block as BlockT};
pub use self::error::Error;
pub use self::builder::{
ServiceBuilder, ServiceBuilderExport, ServiceBuilderImport, ServiceBuilderRevert,
};
pub use self::builder::{ServiceBuilder, ServiceBuilderCommand};
pub use config::{Configuration, Roles, PruningMode};
pub use chain_spec::{ChainSpec, Properties, RuntimeGenesis, Extension as ChainSpecExtension};
pub use txpool_api::{TransactionPool, TransactionPoolMaintainer, InPoolTransaction, IntoPoolError};
+20 -9
View File
@@ -756,6 +756,7 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
auxiliary,
fork_choice,
allow_missing_state,
import_existing,
} = import_block;
assert!(justification.is_some() && finalized || justification.is_none());
@@ -800,6 +801,7 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
auxiliary,
fork_choice,
enact_state,
import_existing,
);
if let Ok(ImportResult::Imported(ref aux)) = result {
@@ -828,13 +830,17 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
aux: Vec<(Vec<u8>, Option<Vec<u8>>)>,
fork_choice: ForkChoiceStrategy,
enact_state: bool,
import_existing: bool,
) -> sp_blockchain::Result<ImportResult> where
E: CallExecutor<Block, Blake2Hasher> + Send + Sync + Clone,
{
let parent_hash = import_headers.post().parent_hash().clone();
match self.backend.blockchain().status(BlockId::Hash(hash))? {
blockchain::BlockStatus::InChain => return Ok(ImportResult::AlreadyInChain),
blockchain::BlockStatus::Unknown => {},
let status = self.backend.blockchain().status(BlockId::Hash(hash))?;
match (import_existing, status) {
(false, blockchain::BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain),
(false, blockchain::BlockStatus::Unknown) => {},
(true, blockchain::BlockStatus::InChain) => {},
(true, blockchain::BlockStatus::Unknown) => return Err(Error::UnknownBlock(format!("{:?}", hash))),
}
let info = self.backend.blockchain().info();
@@ -1454,7 +1460,7 @@ impl<'a, B, E, Block, RA> consensus::BlockImport<Block> for &'a Client<B, E, Blo
&mut self,
block: BlockCheckParams<Block>,
) -> Result<ImportResult, Self::Error> {
let BlockCheckParams { hash, number, parent_hash, allow_missing_state } = block;
let BlockCheckParams { hash, number, parent_hash, allow_missing_state, import_existing } = block;
if let Some(h) = self.fork_blocks.as_ref().and_then(|x| x.get(&number)) {
if &hash != h {
@@ -1473,7 +1479,8 @@ impl<'a, B, E, Block, RA> consensus::BlockImport<Block> for &'a Client<B, E, Blo
match self.block_status(&BlockId::Hash(hash))
.map_err(|e| ConsensusError::ClientImport(e.to_string()))?
{
BlockStatus::InChainWithState | BlockStatus::Queued => return Ok(ImportResult::AlreadyInChain),
BlockStatus::InChainWithState | BlockStatus::Queued if !import_existing => return Ok(ImportResult::AlreadyInChain),
BlockStatus::InChainWithState | BlockStatus::Queued => {},
BlockStatus::InChainPruned => return Ok(ImportResult::AlreadyInChain),
BlockStatus::Unknown => {},
BlockStatus::KnownBad => return Ok(ImportResult::KnownBad),
@@ -2773,7 +2780,8 @@ pub(crate) mod tests {
hash: a1.hash().clone(),
number: 0,
parent_hash: a1.header().parent_hash().clone(),
allow_missing_state: false
allow_missing_state: false,
import_existing: false,
};
assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::imported(false));
@@ -2792,7 +2800,8 @@ pub(crate) mod tests {
hash: a2.hash().clone(),
number: 1,
parent_hash: a1.header().parent_hash().clone(),
allow_missing_state: false
allow_missing_state: false,
import_existing: false,
};
assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::AlreadyInChain);
@@ -2808,7 +2817,8 @@ pub(crate) mod tests {
hash: a3.hash().clone(),
number: 2,
parent_hash: a2.header().parent_hash().clone(),
allow_missing_state: false
allow_missing_state: false,
import_existing: false,
};
// a1 and a2 are both pruned at this point
@@ -2823,7 +2833,8 @@ pub(crate) mod tests {
hash: b1.hash().clone(),
number: 0,
parent_hash: b1.header().parent_hash().clone(),
allow_missing_state: false
allow_missing_state: false,
import_existing: false,
};
assert_eq!(client.check_block(check_block_b1.clone()).unwrap(), ImportResult::MissingState);
check_block_b1.allow_missing_state = true;
@@ -105,6 +105,8 @@ pub struct BlockCheckParams<Block: BlockT> {
pub parent_hash: Block::Hash,
/// Allow importing the block skipping state verification if parent state is missing.
pub allow_missing_state: bool,
/// Re-validate existing block.
pub import_existing: bool,
}
/// Data required to import a Block.
@@ -142,6 +144,8 @@ pub struct BlockImportParams<Block: BlockT> {
pub fork_choice: ForkChoiceStrategy,
/// Allow importing the block skipping state verification if parent state is missing.
pub allow_missing_state: bool,
/// Re-validate existing block.
pub import_existing: bool,
}
impl<Block: BlockT> BlockImportParams<Block> {
@@ -65,6 +65,8 @@ pub struct IncomingBlock<B: BlockT> {
pub origin: Option<Origin>,
/// Allow importing the block skipping state verification if parent state is missing.
pub allow_missing_state: bool,
/// Re-validate existing block.
pub import_existing: bool,
}
/// Type of keys in the blockchain cache that consensus module could use for its needs.
@@ -230,6 +232,7 @@ pub fn import_single_block<B: BlockT, V: Verifier<B>>(
number,
parent_hash,
allow_missing_state: block.allow_missing_state,
import_existing: block.import_existing,
}))? {
BlockImportResult::ImportedUnknown { .. } => (),
r => return Ok(r), // Any other successful result means that the block is already imported.
@@ -83,6 +83,7 @@ impl<B, E, RA, Block> ClientExt<Block> for Client<B, E, Block, RA>
auxiliary: Vec::new(),
fork_choice: ForkChoiceStrategy::LongestChain,
allow_missing_state: false,
import_existing: false,
};
BlockImport::import_block(&mut (&*self), import, HashMap::new()).map(|_| ())
@@ -102,6 +103,7 @@ impl<B, E, RA, Block> ClientExt<Block> for Client<B, E, Block, RA>
auxiliary: Vec::new(),
fork_choice: ForkChoiceStrategy::Custom(true),
allow_missing_state: false,
import_existing: false,
};
BlockImport::import_block(&mut (&*self), import, HashMap::new()).map(|_| ())
@@ -121,6 +123,7 @@ impl<B, E, RA, Block> ClientExt<Block> for Client<B, E, Block, RA>
auxiliary: Vec::new(),
fork_choice: ForkChoiceStrategy::Custom(true),
allow_missing_state: false,
import_existing: false,
};
BlockImport::import_block(&mut (&*self), import, HashMap::new()).map(|_| ())
@@ -143,6 +146,7 @@ impl<B, E, RA, Block> ClientExt<Block> for Client<B, E, Block, RA>
auxiliary: Vec::new(),
fork_choice: ForkChoiceStrategy::LongestChain,
allow_missing_state: false,
import_existing: false,
};
BlockImport::import_block(&mut (&*self), import, HashMap::new()).map(|_| ())
@@ -199,6 +199,7 @@ fn import_block<Backend, Exec, Block, RtApi>(
auxiliary: Vec::new(),
fork_choice: ForkChoiceStrategy::LongestChain,
allow_missing_state: false,
import_existing: false,
};
(&**client).import_block(import, HashMap::new()).expect("Failed to import block");
}