Check block CLI command (#4240)

* Check block operation

* Update client/cli/src/lib.rs

* Update client/cli/src/params.rs
This commit is contained in:
Arkadiy Paronyan
2019-11-28 12:24:28 +01:00
committed by Gavin Wood
parent f78b83e363
commit 504e2f8bd5
22 changed files with 543 additions and 456 deletions
+13 -86
View File
@@ -25,7 +25,6 @@ use client_api::{
};
use client::Client;
use chain_spec::{RuntimeGenesis, Extension};
use codec::{Decode, Encode, IoReader};
use consensus_common::import_queue::ImportQueue;
use futures::{prelude::*, sync::mpsc};
use futures03::{
@@ -44,8 +43,7 @@ use rpc;
use sr_api::ConstructRuntimeApi;
use sr_primitives::generic::BlockId;
use sr_primitives::traits::{
Block as BlockT, ProvideRuntimeApi, NumberFor, One,
Zero, Header, SaturatedConversion,
Block as BlockT, ProvideRuntimeApi, NumberFor, Header, SaturatedConversion,
};
use substrate_executor::{NativeExecutor, NativeExecutionDispatch};
use std::{
@@ -80,12 +78,12 @@ pub struct ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCSExt, TCl, TFchr, TSc, TImp
TNetP, TExPool, TRpc, Backend>
{
config: Configuration<TCfg, TGen, TCSExt>,
client: Arc<TCl>,
pub (crate) client: Arc<TCl>,
backend: Arc<Backend>,
keystore: Arc<RwLock<Keystore>>,
fetcher: Option<TFchr>,
select_chain: Option<TSc>,
import_queue: TImpQu,
pub (crate) import_queue: TImpQu,
finality_proof_request_builder: Option<TFprb>,
finality_proof_provider: Option<TFpp>,
network_protocol: TNetP,
@@ -660,21 +658,17 @@ impl<TBl, TRtApi, TCfg, TGen, TCSExt, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, TNet
}
}
/// Implemented on `ServiceBuilder`. Allows importing blocks once you have given all the required
/// Implemented on `ServiceBuilder`. Allows running block commands, such as import/export/validate
/// components to the builder.
pub trait ServiceBuilderImport {
pub trait ServiceBuilderCommand {
/// Block type this API operates on.
type Block: BlockT;
/// Starts the process of importing blocks.
fn import_blocks(
self,
input: impl Read + Seek + Send + 'static,
force: bool,
) -> Box<dyn Future<Item = (), Error = Error> + Send>;
}
/// Implemented on `ServiceBuilder`. Allows exporting blocks once you have given all the required
/// components to the builder.
pub trait ServiceBuilderExport {
/// Type of block of the builder.
type Block: BlockT;
/// Performs the blocks export.
fn export_blocks(
@@ -684,85 +678,18 @@ pub trait ServiceBuilderExport {
to: Option<NumberFor<Self::Block>>,
json: bool
) -> Box<dyn Future<Item = (), Error = Error>>;
}
/// Implemented on `ServiceBuilder`. Allows reverting the chain once you have given all the
/// required components to the builder.
pub trait ServiceBuilderRevert {
/// Type of block of the builder.
type Block: BlockT;
/// Performs a revert of `blocks` bocks.
/// Performs a revert of `blocks` blocks.
fn revert_chain(
&self,
blocks: NumberFor<Self::Block>
) -> Result<(), Error>;
}
impl<
TBl, TRtApi, TCfg, TGen, TCSExt, TBackend,
TExec, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP,
TExPool, TRpc, Backend
> ServiceBuilderImport for ServiceBuilder<
TBl, TRtApi, TCfg, TGen, TCSExt, Client<TBackend, TExec, TBl, TRtApi>,
TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc, Backend
> where
TBl: BlockT<Hash = <Blake2Hasher as Hasher>::Out>,
TBackend: 'static + client_api::backend::Backend<TBl, Blake2Hasher> + Send,
TExec: 'static + client::CallExecutor<TBl, Blake2Hasher> + Send + Sync + Clone,
TImpQu: 'static + ImportQueue<TBl>,
TRtApi: 'static + Send + Sync,
{
fn import_blocks(
/// Re-validate known block.
fn check_block(
self,
input: impl Read + Seek + Send + 'static,
) -> Box<dyn Future<Item = (), Error = Error> + Send> {
let client = self.client;
let mut queue = self.import_queue;
Box::new(import_blocks!(TBl, client, queue, input).compat())
}
}
impl<TBl, TRtApi, TCfg, TGen, TCSExt, TBackend, TExec, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc>
ServiceBuilderExport for ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCSExt, Client<TBackend, TExec, TBl, TRtApi>,
TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc, TBackend>
where
TBl: BlockT<Hash = <Blake2Hasher as Hasher>::Out>,
TBackend: 'static + client_api::backend::Backend<TBl, Blake2Hasher> + Send,
TExec: 'static + client::CallExecutor<TBl, Blake2Hasher> + Send + Sync + Clone,
TRtApi: 'static + Send + Sync,
{
type Block = TBl;
fn export_blocks(
self,
mut output: impl Write + 'static,
from: NumberFor<TBl>,
to: Option<NumberFor<TBl>>,
json: bool
) -> Box<dyn Future<Item = (), Error = Error>> {
let client = self.client;
Box::new(export_blocks!(client, output, from, to, json).compat())
}
}
impl<TBl, TRtApi, TCfg, TGen, TCSExt, TBackend, TExec, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc>
ServiceBuilderRevert for ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCSExt, Client<TBackend, TExec, TBl, TRtApi>,
TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc, TBackend>
where
TBl: BlockT<Hash = <Blake2Hasher as Hasher>::Out>,
TBackend: 'static + client_api::backend::Backend<TBl, Blake2Hasher> + Send,
TExec: 'static + client::CallExecutor<TBl, Blake2Hasher> + Send + Sync + Clone
{
type Block = TBl;
fn revert_chain(
&self,
blocks: NumberFor<TBl>
) -> Result<(), Error> {
let client = &self.client;
revert_chain!(client, blocks)
}
block: BlockId<Self::Block>
) -> Box<dyn Future<Item = (), Error = Error> + Send>;
}
impl<TBl, TRtApi, TCfg, TGen, TCSExt, TBackend, TExec, TSc, TImpQu, TNetP, TExPool, TRpc>
+283 -225
View File
@@ -17,234 +17,29 @@
//! Chain utilities.
use crate::error;
use crate::builder::{ServiceBuilderCommand, ServiceBuilder};
use crate::error::Error;
use chain_spec::{ChainSpec, RuntimeGenesis, Extension};
use log::{warn, info};
use futures::{future, prelude::*};
use futures03::{
TryFutureExt as _,
};
use primitives::{Blake2Hasher, Hasher};
use sr_primitives::traits::{
Block as BlockT, NumberFor, One, Zero, Header, SaturatedConversion
};
use sr_primitives::generic::{BlockId, SignedBlock};
use codec::{Decode, Encode, IoReader};
use client::Client;
use consensus_common::import_queue::{IncomingBlock, Link, BlockImportError, BlockImportResult, ImportQueue};
use consensus_common::BlockOrigin;
/// Defines the logic for an operation exporting blocks within a range.
#[macro_export]
/// Export blocks
macro_rules! export_blocks {
// Expands to a `futures03` future that streams blocks `$from ..= $to` from `$client`
// into `$output`, either as SCALE-encoded bytes or as JSON when `$json` is set.
// NOTE(review): this is a macro (not a function) presumably so it can be instantiated
// against different client types — confirm against call sites.
($client:ident, $output:ident, $from:ident, $to:ident, $json:ident) => {{
let mut block = $from;
use std::{
io::{Read, Write, Seek},
};
// Resolve the inclusive upper bound: an explicit 0 is clamped to 1 (block 0 is
// the genesis block), and no bound means "up to the current best block".
let last = match $to {
Some(v) if v.is_zero() => One::one(),
Some(v) => v,
None => $client.info().chain.best_number,
};
let mut wrote_header = false;
// Exporting blocks is implemented as a future, because we want the operation to be
// interruptible.
//
// Every time we write a block to the output, the `Future` re-schedules itself and returns
// `Poll::Pending`.
// This makes it possible either to interleave other operations in-between the block exports,
// or to stop the operation completely.
futures03::future::poll_fn(move |cx| {
if last < block {
return std::task::Poll::Ready(Err("Invalid block range specified".into()));
}
// On the first poll, write the number of blocks being exported as a SCALE-encoded
// u64 prefix (binary mode only — JSON output carries no length header).
if !wrote_header {
info!("Exporting blocks from #{} to #{}", block, last);
if !$json {
let last_: u64 = last.saturated_into::<u64>();
let block_: u64 = block.saturated_into::<u64>();
let len: u64 = last_ - block_ + 1;
$output.write_all(&len.encode())?;
}
wrote_header = true;
}
// Write exactly one block per poll.
match $client.block(&BlockId::number(block))? {
Some(block) => {
if $json {
serde_json::to_writer(&mut $output, &block)
.map_err(|e| format!("Error writing JSON: {}", e))?;
} else {
$output.write_all(&block.encode())?;
}
},
// Reached end of the chain.
None => return std::task::Poll::Ready(Ok(())),
}
// Progress log every 10000 blocks.
if (block % 10000.into()).is_zero() {
info!("#{}", block);
}
if block == last {
return std::task::Poll::Ready(Ok(()));
}
block += One::one();
// Re-schedule the task in order to continue the operation.
cx.waker().wake_by_ref();
std::task::Poll::Pending
})
}}
}
/// Defines the logic for an operation importing blocks from some known import.
#[macro_export]
/// Import blocks
macro_rules! import_blocks {
// Expands to a `futures03` future that reads a stream of SCALE-encoded
// `SignedBlock<$block>`s (prefixed by a u64 block count) from `$input` and feeds them
// through the import queue `$queue` into `$client`.
($block:ty, $client:ident, $queue:ident, $input:ident) => {{
use consensus_common::import_queue::{IncomingBlock, Link, BlockImportError, BlockImportResult};
use consensus_common::BlockOrigin;
use network::message;
use sr_primitives::generic::SignedBlock;
use sr_primitives::traits::Block;
// `Link` implementation that just counts successfully imported blocks and
// records whether any import failed, so the polling loop below knows when to stop.
struct WaitLink {
imported_blocks: u64,
has_error: bool,
}
impl WaitLink {
fn new() -> WaitLink {
WaitLink {
imported_blocks: 0,
has_error: false,
}
}
}
impl<B: Block> Link<B> for WaitLink {
fn blocks_processed(
&mut self,
imported: usize,
_count: usize,
results: Vec<(Result<BlockImportResult<NumberFor<B>>, BlockImportError>, B::Hash)>
) {
self.imported_blocks += imported as u64;
// Stop counting at the first error; the poll loop checks `has_error`.
for result in results {
if let (Err(err), hash) = result {
warn!("There was an error importing block with hash {:?}: {:?}", hash, err);
self.has_error = true;
break;
}
}
}
}
// Adapt the `Read` input to the codec's input trait.
let mut io_reader_input = IoReader($input);
// Total number of blocks announced by the stream header; `None` until decoded.
let mut count = None::<u64>;
let mut read_block_count = 0;
let mut link = WaitLink::new();
// Importing blocks is implemented as a future, because we want the operation to be
// interruptible.
//
// Every time we read a block from the input or import a bunch of blocks from the import
// queue, the `Future` re-schedules itself and returns `Poll::Pending`.
// This makes it possible either to interleave other operations in-between the block imports,
// or to stop the operation completely.
futures03::future::poll_fn(move |cx| {
// Start by reading the number of blocks if not done so already.
let count = match count {
Some(c) => c,
None => {
let c: u64 = match Decode::decode(&mut io_reader_input) {
Ok(c) => c,
Err(err) => {
let err = format!("Error reading file: {}", err);
return std::task::Poll::Ready(Err(From::from(err)));
},
};
info!("Importing {} blocks", c);
// Persist the decoded header count into the captured outer variable
// so later polls take the `Some(c)` fast path.
count = Some(c);
c
}
};
// Read blocks from the input.
if read_block_count < count {
match SignedBlock::<$block>::decode(&mut io_reader_input) {
Ok(signed) => {
let (header, extrinsics) = signed.block.deconstruct();
let hash = header.hash();
let block = message::BlockData::<$block> {
hash,
justification: signed.justification,
header: Some(header),
body: Some(extrinsics),
receipt: None,
message_queue: None
};
// import queue handles verification and importing it into the client
$queue.import_blocks(BlockOrigin::File, vec![
IncomingBlock::<$block> {
hash: block.hash,
header: block.header,
body: block.body,
justification: block.justification,
origin: None,
allow_missing_state: false,
}
]);
}
// A decode failure mid-stream ends the import without failing the
// future — blocks already queued are still processed below.
Err(e) => {
warn!("Error reading block data at {}: {}", read_block_count, e);
return std::task::Poll::Ready(Ok(()));
}
}
read_block_count += 1;
if read_block_count % 1000 == 0 {
info!("#{} blocks were added to the queue", read_block_count);
}
// One block queued per poll; re-schedule to keep the operation interruptible.
cx.waker().wake_by_ref();
return std::task::Poll::Pending;
}
// All blocks have been read; now drive the import queue until everything landed.
let blocks_before = link.imported_blocks;
$queue.poll_actions(cx, &mut link);
if link.has_error {
info!(
"Stopping after #{} blocks because of an error",
link.imported_blocks,
);
return std::task::Poll::Ready(Ok(()));
}
// Progress log each time another 1000-block boundary is crossed.
if link.imported_blocks / 1000 != blocks_before / 1000 {
info!(
"#{} blocks were imported (#{} left)",
link.imported_blocks,
count - link.imported_blocks
);
}
if link.imported_blocks >= count {
info!("Imported {} blocks. Best: #{}", read_block_count, $client.info().chain.best_number);
return std::task::Poll::Ready(Ok(()));
} else {
// Polling the import queue will re-schedule the task when ready.
return std::task::Poll::Pending;
}
})
}}
}
/// Revert the chain some number of blocks.
#[macro_export]
macro_rules! revert_chain {
// Expands to a `Result<(), _>` expression that asks `$client` to revert up to
// `$blocks` non-finalized blocks and logs the outcome.
($client:ident, $blocks:ident) => {{
// `revert` returns the number of blocks actually reverted, which may be fewer
// than requested (finalized blocks cannot be reverted).
let reverted = $client.revert($blocks)?;
let info = $client.info().chain;
if reverted.is_zero() {
info!("There aren't any non-finalized blocks to revert.");
} else {
info!("Reverted {} blocks. Best: #{} ({})", reverted, info.best_number, info.best_hash);
}
Ok(())
}}
}
use network::message;
/// Build a chain spec json
pub fn build_spec<G, E>(spec: ChainSpec<G, E>, raw: bool) -> error::Result<String> where
@@ -253,3 +48,266 @@ pub fn build_spec<G, E>(spec: ChainSpec<G, E>, raw: bool) -> error::Result<Strin
{
Ok(spec.to_json(raw)?)
}
// Implements the unified block-command trait (import / export / revert / check) for any
// fully-assembled `ServiceBuilder` whose client/backend/executor/import-queue satisfy the
// bounds below. This replaces the former `ServiceBuilderImport` / `ServiceBuilderExport` /
// `ServiceBuilderRevert` split with a single trait.
impl<
TBl, TRtApi, TCfg, TGen, TCSExt, TBackend,
TExec, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP,
TExPool, TRpc, Backend
> ServiceBuilderCommand for ServiceBuilder<
TBl, TRtApi, TCfg, TGen, TCSExt, Client<TBackend, TExec, TBl, TRtApi>,
TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc, Backend
> where
TBl: BlockT<Hash = <Blake2Hasher as Hasher>::Out>,
TBackend: 'static + client_api::backend::Backend<TBl, Blake2Hasher> + Send,
TExec: 'static + client::CallExecutor<TBl, Blake2Hasher> + Send + Sync + Clone,
TImpQu: 'static + ImportQueue<TBl>,
TRtApi: 'static + Send + Sync,
{
type Block = TBl;
// Reads a u64 block count followed by SCALE-encoded `SignedBlock`s from `input` and
// drives them through the import queue. `force` is forwarded as `import_existing`,
// allowing blocks already known to the client to be re-imported (used by `check_block`).
fn import_blocks(
self,
input: impl Read + Seek + Send + 'static,
force: bool,
) -> Box<dyn Future<Item = (), Error = Error> + Send> {
// `Link` implementation that counts imported blocks and records the first error,
// so the polling loop below knows when to stop.
struct WaitLink {
imported_blocks: u64,
has_error: bool,
}
impl WaitLink {
fn new() -> WaitLink {
WaitLink {
imported_blocks: 0,
has_error: false,
}
}
}
impl<B: BlockT> Link<B> for WaitLink {
fn blocks_processed(
&mut self,
imported: usize,
_count: usize,
results: Vec<(Result<BlockImportResult<NumberFor<B>>, BlockImportError>, B::Hash)>
) {
self.imported_blocks += imported as u64;
for result in results {
if let (Err(err), hash) = result {
warn!("There was an error importing block with hash {:?}: {:?}", hash, err);
self.has_error = true;
break;
}
}
}
}
let client = self.client;
let mut queue = self.import_queue;
// Adapt the `Read` input to the codec's input trait.
let mut io_reader_input = IoReader(input);
// Total block count announced by the stream header; `None` until decoded.
let mut count = None::<u64>;
let mut read_block_count = 0;
let mut link = WaitLink::new();
// Importing blocks is implemented as a future, because we want the operation to be
// interruptible.
//
// Every time we read a block from the input or import a bunch of blocks from the import
// queue, the `Future` re-schedules itself and returns `Poll::Pending`.
// This makes it possible either to interleave other operations in-between the block imports,
// or to stop the operation completely.
let import = futures03::future::poll_fn(move |cx| {
// Start by reading the number of blocks if not done so already.
let count = match count {
Some(c) => c,
None => {
let c: u64 = match Decode::decode(&mut io_reader_input) {
Ok(c) => c,
Err(err) => {
let err = format!("Error reading file: {}", err);
return std::task::Poll::Ready(Err(From::from(err)));
},
};
info!("Importing {} blocks", c);
// Persist into the captured outer variable so later polls
// take the `Some(c)` fast path.
count = Some(c);
c
}
};
// Read blocks from the input.
if read_block_count < count {
match SignedBlock::<Self::Block>::decode(&mut io_reader_input) {
Ok(signed) => {
let (header, extrinsics) = signed.block.deconstruct();
let hash = header.hash();
let block = message::BlockData::<Self::Block> {
hash,
justification: signed.justification,
header: Some(header),
body: Some(extrinsics),
receipt: None,
message_queue: None
};
// import queue handles verification and importing it into the client
queue.import_blocks(BlockOrigin::File, vec![
IncomingBlock::<Self::Block> {
hash: block.hash,
header: block.header,
body: block.body,
justification: block.justification,
origin: None,
allow_missing_state: false,
// `force` allows re-importing blocks the client already has.
import_existing: force,
}
]);
}
// A decode failure mid-stream ends the import without failing the
// future — blocks already queued are still processed below.
Err(e) => {
warn!("Error reading block data at {}: {}", read_block_count, e);
return std::task::Poll::Ready(Ok(()));
}
}
read_block_count += 1;
if read_block_count % 1000 == 0 {
info!("#{} blocks were added to the queue", read_block_count);
}
// One block queued per poll; re-schedule to stay interruptible.
cx.waker().wake_by_ref();
return std::task::Poll::Pending;
}
// All blocks read; drive the import queue until everything has landed.
let blocks_before = link.imported_blocks;
queue.poll_actions(cx, &mut link);
if link.has_error {
info!(
"Stopping after #{} blocks because of an error",
link.imported_blocks,
);
return std::task::Poll::Ready(Ok(()));
}
// Progress log each time another 1000-block boundary is crossed.
if link.imported_blocks / 1000 != blocks_before / 1000 {
info!(
"#{} blocks were imported (#{} left)",
link.imported_blocks,
count - link.imported_blocks
);
}
if link.imported_blocks >= count {
info!("Imported {} blocks. Best: #{}", read_block_count, client.info().chain.best_number);
return std::task::Poll::Ready(Ok(()));
} else {
// Polling the import queue will re-schedule the task when ready.
return std::task::Poll::Pending;
}
});
// Bridge the futures-0.3 future back to the futures-0.1 interface of the trait.
Box::new(import.compat())
}
// Streams blocks `from ..= to` to `output`, SCALE-encoded (with a u64 count prefix)
// or as JSON when `json` is set. `to = None` means "up to the current best block".
fn export_blocks(
self,
mut output: impl Write + 'static,
from: NumberFor<TBl>,
to: Option<NumberFor<TBl>>,
json: bool
) -> Box<dyn Future<Item = (), Error = Error>> {
let client = self.client;
let mut block = from;
// An explicit upper bound of 0 is clamped to 1 (block 0 is the genesis block).
let last = match to {
Some(v) if v.is_zero() => One::one(),
Some(v) => v,
None => client.info().chain.best_number,
};
let mut wrote_header = false;
// Exporting blocks is implemented as a future, because we want the operation to be
// interruptible.
//
// Every time we write a block to the output, the `Future` re-schedules itself and returns
// `Poll::Pending`.
// This makes it possible either to interleave other operations in-between the block exports,
// or to stop the operation completely.
let export = futures03::future::poll_fn(move |cx| {
if last < block {
return std::task::Poll::Ready(Err("Invalid block range specified".into()));
}
// First poll: emit the u64 count prefix (binary mode only).
if !wrote_header {
info!("Exporting blocks from #{} to #{}", block, last);
if !json {
let last_: u64 = last.saturated_into::<u64>();
let block_: u64 = block.saturated_into::<u64>();
let len: u64 = last_ - block_ + 1;
output.write_all(&len.encode())?;
}
wrote_header = true;
}
// Write exactly one block per poll.
match client.block(&BlockId::number(block))? {
Some(block) => {
if json {
serde_json::to_writer(&mut output, &block)
.map_err(|e| format!("Error writing JSON: {}", e))?;
} else {
output.write_all(&block.encode())?;
}
},
// Reached end of the chain.
None => return std::task::Poll::Ready(Ok(())),
}
if (block % 10000.into()).is_zero() {
info!("#{}", block);
}
if block == last {
return std::task::Poll::Ready(Ok(()));
}
block += One::one();
// Re-schedule the task in order to continue the operation.
cx.waker().wake_by_ref();
std::task::Poll::Pending
});
Box::new(export.compat())
}
// Reverts up to `blocks` non-finalized blocks; `revert` returns how many were
// actually reverted (finalized blocks cannot be reverted).
fn revert_chain(
&self,
blocks: NumberFor<TBl>
) -> Result<(), Error> {
let reverted = self.client.revert(blocks)?;
let info = self.client.info().chain;
if reverted.is_zero() {
info!("There aren't any non-finalized blocks to revert.");
} else {
info!("Reverted {} blocks. Best: #{} ({})", reverted, info.best_number, info.best_hash);
}
Ok(())
}
// Re-validates a block already known to the client: fetches it, re-encodes it in the
// import-stream format (count prefix of 1 + the signed block) and runs it back through
// `import_blocks` with `force = true` so the existing block is re-imported.
fn check_block(
self,
block_id: BlockId<TBl>
) -> Box<dyn Future<Item = (), Error = Error> + Send> {
match self.client.block(&block_id) {
Ok(Some(block)) => {
let mut buf = Vec::new();
// Synthesize a one-block import stream: header says "1 block follows".
1u64.encode_to(&mut buf);
block.encode_to(&mut buf);
let reader = std::io::Cursor::new(buf);
self.import_blocks(reader, true)
}
Ok(None) => Box::new(future::err("Unknown block".into())),
Err(e) => Box::new(future::err(format!("Error reading block: {:?}", e).into())),
}
}
}
+1 -3
View File
@@ -53,9 +53,7 @@ use sr_primitives::generic::BlockId;
use sr_primitives::traits::{NumberFor, Block as BlockT};
pub use self::error::Error;
pub use self::builder::{
ServiceBuilder, ServiceBuilderExport, ServiceBuilderImport, ServiceBuilderRevert,
};
pub use self::builder::{ServiceBuilder, ServiceBuilderCommand};
pub use config::{Configuration, Roles, PruningMode};
pub use chain_spec::{ChainSpec, Properties, RuntimeGenesis, Extension as ChainSpecExtension};
pub use txpool_api::{TransactionPool, TransactionPoolMaintainer, InPoolTransaction, IntoPoolError};