Kill the light client, CHTs and change tries. (#10080)

* Remove light client, change tries and CHTs

* Update tests

* fmt

* Restore changes_root

* Fixed benches

* Cargo fmt

* fmt

* fmt
This commit is contained in:
Arkadiy Paronyan
2021-11-12 14:15:01 +01:00
committed by GitHub
parent 112b7dac47
commit 4cbbf0cf43
141 changed files with 532 additions and 17807 deletions
@@ -1,114 +0,0 @@
// This file is part of Substrate.
// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Blockchain API backend for light nodes.
use futures::{future::ready, FutureExt, TryFutureExt};
use jsonrpc_pubsub::manager::SubscriptionManager;
use std::sync::Arc;
use sc_client_api::light::{Fetcher, RemoteBlockchain, RemoteBodyRequest};
use sp_runtime::{
generic::{BlockId, SignedBlock},
traits::Block as BlockT,
};
use super::{client_err, error::FutureResult, ChainBackend};
use sc_client_api::BlockchainEvents;
use sp_blockchain::HeaderBackend;
/// Blockchain API backend for light nodes. Reads all the data from local
/// database, if available, or fetches it from remote node otherwise.
///
/// `F` is the fetcher implementation used for remote requests; `Client` is the
/// local (light) client used for header access and chain-event subscriptions.
pub struct LightChain<Block: BlockT, Client, F> {
	/// Substrate client.
	client: Arc<Client>,
	/// Current subscriptions.
	subscriptions: SubscriptionManager,
	/// Remote blockchain reference (local light database with remote fallback).
	remote_blockchain: Arc<dyn RemoteBlockchain<Block>>,
	/// Remote fetcher reference, used to request data not available locally.
	fetcher: Arc<F>,
}
impl<Block: BlockT, Client, F: Fetcher<Block>> LightChain<Block, Client, F> {
/// Create new Chain API RPC handler.
pub fn new(
client: Arc<Client>,
subscriptions: SubscriptionManager,
remote_blockchain: Arc<dyn RemoteBlockchain<Block>>,
fetcher: Arc<F>,
) -> Self {
Self { client, subscriptions, remote_blockchain, fetcher }
}
}
impl<Block, Client, F> ChainBackend<Client, Block> for LightChain<Block, Client, F>
where
	Block: BlockT + 'static,
	Block::Header: Unpin,
	Client: BlockchainEvents<Block> + HeaderBackend<Block> + Send + Sync + 'static,
	F: Fetcher<Block> + Send + Sync + 'static,
{
	// Local client handle used by the shared `ChainBackend` plumbing.
	fn client(&self) -> &Arc<Client> {
		&self.client
	}

	// Subscription manager for the pubsub endpoints.
	fn subscriptions(&self) -> &SubscriptionManager {
		&self.subscriptions
	}

	/// Resolve a header by hash (or the best block's header when `hash` is
	/// `None`), reading from the local light database when possible and
	/// falling back to a remote fetch otherwise.
	fn header(&self, hash: Option<Block::Hash>) -> FutureResult<Option<Block::Header>> {
		// `unwrap_or_best` presumably substitutes the current best hash when
		// none was supplied — provided by the `ChainBackend` trait (not
		// visible here).
		let hash = self.unwrap_or_best(hash);
		let fetcher = self.fetcher.clone();
		let maybe_header = sc_client_api::light::future_header(
			&*self.remote_blockchain,
			&*fetcher,
			BlockId::Hash(hash),
		);

		// Map any fetch/blockchain error into the RPC client error type.
		maybe_header.then(move |result| ready(result.map_err(client_err))).boxed()
	}

	/// Resolve a full signed block: first the header (see `header` above),
	/// then the body via a remote request — light nodes do not keep bodies
	/// locally.
	fn block(&self, hash: Option<Block::Hash>) -> FutureResult<Option<SignedBlock<Block>>> {
		let fetcher = self.fetcher.clone();
		self.header(hash)
			.and_then(move |header| async move {
				match header {
					Some(header) => {
						let body = fetcher
							.remote_body(RemoteBodyRequest {
								header: header.clone(),
								// Use the fetcher's default retry behaviour.
								retry_count: Default::default(),
							})
							.await;

						// NOTE(review): justifications are never fetched for
						// light blocks, so they are always reported as `None`.
						body.map(|body| {
							Some(SignedBlock {
								block: Block::new(header, body),
								justifications: None,
							})
						})
						.map_err(client_err)
					},
					// Unknown header => unknown block.
					None => Ok(None),
				}
			})
			.boxed()
	}
}
+1 -28
View File
@@ -19,7 +19,6 @@
//! Substrate blockchain API.
mod chain_full;
mod chain_light;
#[cfg(test)]
mod tests;
@@ -33,10 +32,7 @@ use rpc::{
use std::sync::Arc;
use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId};
use sc_client_api::{
light::{Fetcher, RemoteBlockchain},
BlockchainEvents,
};
use sc_client_api::BlockchainEvents;
use sp_rpc::{list::ListOrValue, number::NumberOrHex};
use sp_runtime::{
generic::{BlockId, SignedBlock},
@@ -204,29 +200,6 @@ where
Chain { backend: Box::new(self::chain_full::FullChain::new(client, subscriptions)) }
}
/// Create new chain API that works on light node.
///
/// (The original comment said "state API", which was a copy/paste slip from
/// the sibling state module — this constructor builds the chain RPC handler.)
///
/// Reads locally-available data through `client` and falls back to
/// `remote_blockchain`/`fetcher` for data a light node does not store.
pub fn new_light<Block: BlockT, Client, F: Fetcher<Block>>(
	client: Arc<Client>,
	subscriptions: SubscriptionManager,
	remote_blockchain: Arc<dyn RemoteBlockchain<Block>>,
	fetcher: Arc<F>,
) -> Chain<Block, Client>
where
	Block: BlockT + 'static,
	Block::Header: Unpin,
	Client: BlockBackend<Block> + HeaderBackend<Block> + BlockchainEvents<Block> + 'static,
	F: Send + Sync + 'static,
{
	Chain {
		// Type-erased backend so `Chain` is agnostic of full vs. light mode.
		backend: Box::new(self::chain_light::LightChain::new(
			client,
			subscriptions,
			remote_blockchain,
			fetcher,
		)),
	}
}
/// Chain API with subscriptions support.
pub struct Chain<Block: BlockT, Client> {
backend: Box<dyn ChainBackend<Client, Block>>,
+8 -117
View File
@@ -26,11 +26,7 @@ use futures::{
use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId};
use log::warn;
use rpc::Result as RpcResult;
use std::{
collections::{BTreeMap, HashMap},
ops::Range,
sync::Arc,
};
use std::{collections::HashMap, sync::Arc};
use sc_rpc_api::state::ReadProof;
use sp_blockchain::{
@@ -43,10 +39,7 @@ use sp_core::{
},
Bytes,
};
use sp_runtime::{
generic::BlockId,
traits::{Block as BlockT, CheckedSub, NumberFor, SaturatedConversion},
};
use sp_runtime::{generic::BlockId, traits::Block as BlockT};
use sp_version::RuntimeVersion;
use sp_api::{CallApiAt, Metadata, ProvideRuntimeApi};
@@ -66,14 +59,6 @@ use std::marker::PhantomData;
struct QueryStorageRange<Block: BlockT> {
/// Hashes of all the blocks in the range.
pub hashes: Vec<Block::Hash>,
/// Number of the first block in the range.
pub first_number: NumberFor<Block>,
/// Blocks subrange ([begin; end) indices within `hashes`) where we should read keys at
/// each state to get changes.
pub unfiltered_range: Range<usize>,
/// Blocks subrange ([begin; end) indices within `hashes`) where we could pre-filter
/// blocks-with-changes by using changes tries.
pub filtered_range: Option<Range<usize>>,
}
/// State API backend for full nodes.
@@ -107,10 +92,8 @@ where
Ok(hash.unwrap_or_else(|| self.client.info().best_hash))
}
/// Splits the `query_storage` block range into 'filtered' and 'unfiltered' subranges.
/// Blocks that contain changes within filtered subrange could be filtered using changes tries.
/// Blocks that contain changes within unfiltered subrange must be filtered manually.
fn split_query_storage_range(
/// Validates block range.
fn query_storage_range(
&self,
from: Block::Hash,
to: Option<Block::Hash>,
@@ -156,23 +139,7 @@ where
hashes
};
// check if we can filter blocks-with-changes from some (sub)range using changes tries
let changes_trie_range = self
.client
.max_key_changes_range(from_number, BlockId::Hash(to_meta.hash))
.map_err(client_err)?;
let filtered_range_begin = changes_trie_range.and_then(|(begin, _)| {
// avoids a corner case where begin < from_number (happens when querying genesis)
begin.checked_sub(&from_number).map(|x| x.saturated_into::<usize>())
});
let (unfiltered_range, filtered_range) = split_range(hashes.len(), filtered_range_begin);
Ok(QueryStorageRange {
hashes,
first_number: from_number,
unfiltered_range,
filtered_range,
})
Ok(QueryStorageRange { hashes })
}
/// Iterates through range.unfiltered_range and check each block for changes of keys' values.
@@ -183,8 +150,8 @@ where
last_values: &mut HashMap<StorageKey, Option<StorageData>>,
changes: &mut Vec<StorageChangeSet<Block::Hash>>,
) -> Result<()> {
for block in range.unfiltered_range.start..range.unfiltered_range.end {
let block_hash = range.hashes[block].clone();
for block_hash in &range.hashes {
let block_hash = block_hash.clone();
let mut block_changes =
StorageChangeSet { block: block_hash.clone(), changes: Vec::new() };
let id = BlockId::hash(block_hash);
@@ -207,57 +174,6 @@ where
}
Ok(())
}
/// Iterates through all blocks that are changing keys within range.filtered_range and collects
/// these changes.
///
/// Uses the changes-trie index (`client.key_changes`) to visit only blocks
/// where one of `keys` actually changed, instead of scanning every block.
/// `last_values` holds each key's value as of the block just before the
/// filtered subrange, so unchanged values can be skipped. Collected change
/// sets are appended to `changes` in ascending block order.
fn query_storage_filtered(
	&self,
	range: &QueryStorageRange<Block>,
	keys: &[StorageKey],
	last_values: &HashMap<StorageKey, Option<StorageData>>,
	changes: &mut Vec<StorageChangeSet<Block::Hash>>,
) -> Result<()> {
	// Translate the index subrange into (first block number, last block hash)
	// for the changes-trie query; nothing to do when no filtered part exists.
	let (begin, end) = match range.filtered_range {
		Some(ref filtered_range) => (
			range.first_number + filtered_range.start.saturated_into(),
			BlockId::Hash(range.hashes[filtered_range.end - 1].clone()),
		),
		None => return Ok(()),
	};
	// BTreeMap keyed by block number so the final extend() emits change sets
	// sorted by block.
	let mut changes_map: BTreeMap<NumberFor<Block>, StorageChangeSet<Block::Hash>> =
		BTreeMap::new();
	for key in keys {
		let mut last_block = None;
		let mut last_value = last_values.get(key).cloned().unwrap_or_default();
		let key_changes = self.client.key_changes(begin, end, None, key).map_err(client_err)?;
		// `key_changes` is iterated in reverse => ascending block order here.
		for (block, _) in key_changes.into_iter().rev() {
			// The index may report the same block more than once; visit once.
			if last_block == Some(block) {
				continue
			}
			let block_hash =
				range.hashes[(block - range.first_number).saturated_into::<usize>()].clone();
			let id = BlockId::Hash(block_hash);
			let value_at_block = self.client.storage(&id, key).map_err(client_err)?;
			// Skip spurious index hits where the value did not actually change.
			if last_value == value_at_block {
				continue
			}
			changes_map
				.entry(block)
				.or_insert_with(|| StorageChangeSet { block: block_hash, changes: Vec::new() })
				.changes
				.push((key.clone(), value_at_block.clone()));
			last_block = Some(block);
			last_value = value_at_block;
		}
	}
	// Reserve only the extra capacity needed before appending.
	if let Some(additional_capacity) = changes_map.len().checked_sub(changes.len()) {
		changes.reserve(additional_capacity);
	}
	changes.extend(changes_map.into_iter().map(|(_, cs)| cs));
	Ok(())
}
}
impl<BE, Block, Client> StateBackend<Block, Client> for FullState<BE, Block, Client>
@@ -430,11 +346,10 @@ where
keys: Vec<StorageKey>,
) -> FutureResult<Vec<StorageChangeSet<Block::Hash>>> {
let call_fn = move || {
let range = self.split_query_storage_range(from, to)?;
let range = self.query_storage_range(from, to)?;
let mut changes = Vec::new();
let mut last_values = HashMap::new();
self.query_storage_unfiltered(&range, &keys, &mut last_values, &mut changes)?;
self.query_storage_filtered(&range, &keys, &last_values, &mut changes)?;
Ok(changes)
};
@@ -768,30 +683,6 @@ where
}
}
/// Splits passed range into two subranges where:
/// - first range has at least one element in it;
/// - second range (optionally) starts at given `middle` element.
pub(crate) fn split_range(
	size: usize,
	middle: Option<usize>,
) -> (Range<usize>, Option<Range<usize>>) {
	// Index at which the second ("filtered") subrange starts, if it exists.
	let second_begin = match middle {
		// Some of the required changes tries are pruned => the filtered part
		// can only start at the first block whose trie is still available.
		Some(m) if m != 0 => Some(m),
		// All tries are available, but the values at the very first block are
		// still wanted => read block 0 unfiltered and filter the rest.
		Some(_) if size > 1 => Some(1),
		// Single-element range, or changes tries not available at all =>
		// the whole range is read unfiltered.
		_ => None,
	};
	match second_begin {
		Some(begin) => (0..begin, Some(begin..size)),
		None => (0..size, None),
	}
}
fn invalid_block_range<B: BlockT>(
from: &CachedHeaderMetadata<B>,
to: &CachedHeaderMetadata<B>,
+5 -29
View File
@@ -17,16 +17,15 @@
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use self::error::Error;
use super::{state_full::split_range, *};
use super::*;
use crate::testing::TaskExecutor;
use assert_matches::assert_matches;
use futures::{executor, StreamExt};
use sc_block_builder::BlockBuilderProvider;
use sc_rpc_api::DenyUnsafe;
use sp_consensus::BlockOrigin;
use sp_core::{hash::H256, storage::ChildInfo, ChangesTrieConfiguration};
use sp_core::{hash::H256, storage::ChildInfo};
use sp_io::hashing::blake2_256;
use sp_runtime::generic::BlockId;
use std::sync::Arc;
use substrate_test_runtime_client::{prelude::*, runtime};
@@ -336,7 +335,7 @@ fn should_send_initial_storage_changes_and_notifications() {
#[test]
fn should_query_storage() {
fn run_tests(mut client: Arc<TestClient>, has_changes_trie_config: bool) {
fn run_tests(mut client: Arc<TestClient>) {
let (api, _child) = new_full(
client.clone(),
SubscriptionManager::new(Arc::new(TaskExecutor)),
@@ -369,13 +368,6 @@ fn should_query_storage() {
let block2_hash = add_block(1);
let genesis_hash = client.genesis_hash();
if has_changes_trie_config {
assert_eq!(
client.max_key_changes_range(1, BlockId::Hash(block1_hash)).unwrap(),
Some((0, BlockId::Hash(block1_hash))),
);
}
let mut expected = vec![
StorageChangeSet {
block: genesis_hash,
@@ -519,24 +511,8 @@ fn should_query_storage() {
);
}
run_tests(Arc::new(substrate_test_runtime_client::new()), false);
run_tests(
Arc::new(
TestClientBuilder::new()
.changes_trie_config(Some(ChangesTrieConfiguration::new(4, 2)))
.build(),
),
true,
);
}
#[test]
fn should_split_ranges() {
assert_eq!(split_range(1, None), (0..1, None));
assert_eq!(split_range(100, None), (0..100, None));
assert_eq!(split_range(1, Some(0)), (0..1, None));
assert_eq!(split_range(100, Some(50)), (0..50, Some(50..100)));
assert_eq!(split_range(100, Some(99)), (0..99, Some(99..100)));
run_tests(Arc::new(substrate_test_runtime_client::new()));
run_tests(Arc::new(TestClientBuilder::new().build()));
}
#[test]