Check for disposed blocks when creating a state. (#1636)

* Check for disposed blocks

* Fixed changes_tries_with_digest_are_pruned_on_finalization

* Indent

Co-Authored-By: arkpar <arkady.paronyan@gmail.com>
This commit is contained in:
Arkadiy Paronyan
2019-01-31 18:06:10 +01:00
committed by Robert Habermeier
parent da029c87b7
commit 4bcc8eda41
6 changed files with 106 additions and 40 deletions
+1
View File
@@ -3490,6 +3490,7 @@ dependencies = [
name = "substrate-client-db"
version = "0.1.0"
dependencies = [
"env_logger 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"hash-db 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"kvdb 0.1.0 (git+https://github.com/paritytech/parity-common?rev=b0317f649ab2c665b7987b8475878fc4d2e1f81d)",
"kvdb-memorydb 0.1.0 (git+https://github.com/paritytech/parity-common?rev=b0317f649ab2c665b7987b8475878fc4d2e1f81d)",
+1
View File
@@ -26,3 +26,4 @@ trie = { package = "substrate-trie", path = "../../trie" }
kvdb-memorydb = { git = "https://github.com/paritytech/parity-common", rev="b0317f649ab2c665b7987b8475878fc4d2e1f81d" }
substrate-keyring = { path = "../../keyring" }
test-client = { package = "substrate-test-client", path = "../../test-client" }
env_logger = { version = "0.6" }
+59 -16
View File
@@ -612,7 +612,7 @@ impl<Block: BlockT<Hash=H256>> Backend<Block> {
if number_u64 > self.canonicalization_delay {
let new_canonical = number_u64 - self.canonicalization_delay;
if new_canonical <= self.storage.state_db.best_canonical() {
if new_canonical <= self.storage.state_db.best_canonical().unwrap_or(0) {
return Ok(())
}
@@ -751,7 +751,8 @@ impl<Block: BlockT<Hash=H256>> Backend<Block> {
.map_err(|e: state_db::Error<io::Error>| client::error::Error::from(format!("State database error: {:?}", e)))?;
apply_state_commit(&mut transaction, commit);
let finalized = match pending_block.leaf_state {
// Check if need to finalize. Genesis is always finalized instantly.
let finalized = number_u64 == 0 || match pending_block.leaf_state {
NewBlockState::Final => true,
_ => false,
};
@@ -759,7 +760,6 @@ impl<Block: BlockT<Hash=H256>> Backend<Block> {
let header = &pending_block.header;
let is_best = pending_block.leaf_state.is_best();
let changes_trie_updates = operation.changes_trie_updates;
self.changes_tries_storage.commit(&mut transaction, changes_trie_updates);
@@ -834,7 +834,7 @@ impl<Block: BlockT<Hash=H256>> Backend<Block> {
{
let f_num = f_header.number().clone();
if f_num.as_() > self.storage.state_db.best_canonical() {
if self.storage.state_db.best_canonical().map(|c| f_num.as_() > c).unwrap_or(true) {
let parent_hash = f_header.parent_hash().clone();
let lookup_key = utils::number_and_hash_to_lookup_key(f_num, f_hash.clone());
@@ -1022,13 +1022,18 @@ impl<Block> client::backend::Backend<Block, Blake2Hasher> for Backend<Block> whe
}
match self.blockchain.header(block) {
Ok(Some(ref hdr)) if !self.storage.state_db.is_pruned(hdr.number().as_()) => {
let root = H256::from_slice(hdr.state_root().as_ref());
let state = DbState::new(self.storage.clone(), root);
Ok(CachingState::new(state, self.shared_cache.clone(), Some(hdr.hash())))
Ok(Some(ref hdr)) => {
let hash = hdr.hash();
if !self.storage.state_db.is_pruned(&hash, hdr.number().as_()) {
let root = H256::from_slice(hdr.state_root().as_ref());
let state = DbState::new(self.storage.clone(), root);
Ok(CachingState::new(state, self.shared_cache.clone(), Some(hash)))
} else {
Err(client::error::ErrorKind::UnknownBlock(format!("State already discarded for {:?}", block)).into())
}
},
Ok(None) => Err(client::error::ErrorKind::UnknownBlock(format!("{:?}", block)).into()),
Err(e) => Err(e),
_ => Err(client::error::ErrorKind::UnknownBlock(format!("{:?}", block)).into()),
}
}
@@ -1051,7 +1056,6 @@ mod tests {
use crate::columns;
use client::backend::Backend as BTrait;
use client::backend::BlockImportOperation as Op;
use client::blockchain::HeaderBackend as BlockchainHeaderBackend;
use runtime_primitives::testing::{Header, Block as RawBlock, ExtrinsicWrapper};
use runtime_primitives::traits::{Hash, BlakeTwo256};
use state_machine::{TrieMut, TrieDBMut, ChangesTrieRootsStorage, ChangesTrieStorage};
@@ -1247,8 +1251,9 @@ mod tests {
#[test]
fn delete_only_when_negative_rc() {
let _ = ::env_logger::try_init();
let key;
let backend = Backend::<Block>::new_test(0, 0);
let backend = Backend::<Block>::new_test(1, 0);
let hash = {
let mut op = backend.begin_operation().unwrap();
@@ -1321,7 +1326,7 @@ mod tests {
hash
};
{
let hash = {
let mut op = backend.begin_operation().unwrap();
backend.begin_state_operation(&mut op, BlockId::Number(1)).unwrap();
let mut header = Header {
@@ -1339,6 +1344,7 @@ mod tests {
.cloned()
.map(|(x, y)| (x, Some(y)))
).0.into();
let hash = header.hash();
op.db_updates.remove(&key);
op.set_block_data(
@@ -1350,11 +1356,44 @@ mod tests {
backend.commit_operation(op).unwrap();
assert!(backend.storage.db.get(columns::STATE, key.as_bytes()).unwrap().is_some());
hash
};
{
let mut op = backend.begin_operation().unwrap();
backend.begin_state_operation(&mut op, BlockId::Number(2)).unwrap();
let mut header = Header {
number: 3,
parent_hash: hash,
state_root: Default::default(),
digest: Default::default(),
extrinsics_root: Default::default(),
};
let storage: Vec<(_, _)> = vec![];
header.state_root = op.old_state.storage_root(storage
.iter()
.cloned()
.map(|(x, y)| (x, Some(y)))
).0.into();
op.set_block_data(
header,
Some(vec![]),
None,
NewBlockState::Best,
).unwrap();
backend.commit_operation(op).unwrap();
assert!(backend.storage.db.get(columns::STATE, key.as_bytes()).unwrap().is_none());
}
backend.finalize_block(BlockId::Number(1), None).unwrap();
backend.finalize_block(BlockId::Number(2), None).unwrap();
backend.finalize_block(BlockId::Number(3), None).unwrap();
assert!(backend.storage.db.get(columns::STATE, key.as_bytes()).unwrap().is_none());
}
@@ -1366,7 +1405,10 @@ mod tests {
let check_changes = |backend: &Backend<Block>, block: u64, changes: Vec<(Vec<u8>, Vec<u8>)>| {
let (changes_root, mut changes_trie_update) = prepare_changes(changes);
let anchor = state_machine::ChangesTrieAnchorBlockId { hash: Default::default(), number: block };
let anchor = state_machine::ChangesTrieAnchorBlockId {
hash: backend.blockchain().header(BlockId::Number(block)).unwrap().unwrap().hash(),
number: block
};
assert_eq!(backend.changes_tries_storage.root(&anchor, block), Ok(Some(changes_root)));
for (key, (val, _)) in changes_trie_update.drain() {
@@ -1450,7 +1492,6 @@ mod tests {
#[test]
fn changes_tries_with_digest_are_pruned_on_finalization() {
let mut backend = Backend::<Block>::new_test(1000, 100);
backend.changes_tries_storage.meta.write().finalized_number = 1000;
backend.changes_tries_storage.min_blocks_to_keep = Some(8);
let config = ChangesTrieConfiguration {
digest_interval: 2,
@@ -1470,10 +1511,12 @@ mod tests {
let block9 = insert_header(&backend, 9, block8, vec![(b"key_at_9".to_vec(), b"val_at_9".to_vec())], Default::default());
let block10 = insert_header(&backend, 10, block9, vec![(b"key_at_10".to_vec(), b"val_at_10".to_vec())], Default::default());
let block11 = insert_header(&backend, 11, block10, vec![(b"key_at_11".to_vec(), b"val_at_11".to_vec())], Default::default());
let _ = insert_header(&backend, 12, block11, vec![(b"key_at_12".to_vec(), b"val_at_12".to_vec())], Default::default());
let block12 = insert_header(&backend, 12, block11, vec![(b"key_at_12".to_vec(), b"val_at_12".to_vec())], Default::default());
let block13 = insert_header(&backend, 13, block12, vec![(b"key_at_13".to_vec(), b"val_at_13".to_vec())], Default::default());
backend.changes_tries_storage.meta.write().finalized_number = 13;
// check that roots of all tries are in the columns::CHANGES_TRIE
let anchor = state_machine::ChangesTrieAnchorBlockId { hash: Default::default(), number: 100 };
let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block13, number: 13 };
fn read_changes_trie_root(backend: &Backend<Block>, num: u64) -> H256 {
backend.blockchain().header(BlockId::Number(num)).unwrap().unwrap().digest().logs().iter()
.find(|i| i.as_changes_trie_root().is_some()).unwrap().as_changes_trie_root().unwrap().clone()
+18 -17
View File
@@ -190,12 +190,6 @@ impl<BlockHash: Hash, Key: Hash> StateDbSync<BlockHash, Key> {
}
pub fn insert_block<E: fmt::Debug>(&mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, mut changeset: ChangeSet<Key>) -> Result<CommitSet<Key>, Error<E>> {
if number == 0 {
return Ok(CommitSet {
data: changeset,
meta: Default::default(),
})
}
match self.mode {
PruningMode::ArchiveAll => {
changeset.deleted.clear();
@@ -232,12 +226,16 @@ impl<BlockHash: Hash, Key: Hash> StateDbSync<BlockHash, Key> {
Ok(commit)
}
pub fn best_canonical(&self) -> u64 {
pub fn best_canonical(&self) -> Option<u64> {
return self.non_canonical.last_canonicalized_block_number()
}
pub fn is_pruned(&self, number: u64) -> bool {
self.pruning.as_ref().map_or(false, |pruning| number < pruning.pending())
pub fn is_pruned(&self, hash: &BlockHash, number: u64) -> bool {
if self.best_canonical().map(|c| number > c).unwrap_or(true) {
!self.non_canonical.have_block(hash)
} else {
self.pruning.as_ref().map_or(false, |pruning| number < pruning.pending() || !pruning.have_block(hash))
}
}
fn prune(&mut self, commit: &mut CommitSet<Key>) {
@@ -351,13 +349,13 @@ impl<BlockHash: Hash, Key: Hash> StateDb<BlockHash, Key> {
}
/// Returns last finalized block number.
pub fn best_canonical(&self) -> u64 {
pub fn best_canonical(&self) -> Option<u64> {
return self.db.read().best_canonical()
}
/// Check if block is pruned away.
pub fn is_pruned(&self, number: u64) -> bool {
return self.db.read().is_pruned(number)
pub fn is_pruned(&self, hash: &BlockHash, number: u64) -> bool {
return self.db.read().is_pruned(hash, number)
}
/// Apply all pending changes
@@ -471,9 +469,10 @@ mod tests {
max_blocks: Some(1),
max_mem: None,
}));
assert!(sdb.is_pruned(0));
assert!(sdb.is_pruned(1));
assert!(!sdb.is_pruned(2));
assert!(sdb.is_pruned(&H256::from_low_u64_be(0), 0));
assert!(sdb.is_pruned(&H256::from_low_u64_be(1), 1));
assert!(sdb.is_pruned(&H256::from_low_u64_be(21), 2));
assert!(sdb.is_pruned(&H256::from_low_u64_be(22), 2));
assert!(db.data_eq(&make_db(&[21, 3, 922, 93, 94])));
}
@@ -483,8 +482,10 @@ mod tests {
max_blocks: Some(2),
max_mem: None,
}));
assert!(sdb.is_pruned(0));
assert!(!sdb.is_pruned(1));
assert!(sdb.is_pruned(&H256::from_low_u64_be(0), 0));
assert!(sdb.is_pruned(&H256::from_low_u64_be(1), 1));
assert!(!sdb.is_pruned(&H256::from_low_u64_be(21), 2));
assert!(sdb.is_pruned(&H256::from_low_u64_be(22), 2));
assert!(db.data_eq(&make_db(&[1, 21, 3, 921, 922, 93, 94])));
}
}
+18 -7
View File
@@ -119,10 +119,7 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
pub fn insert<E: fmt::Debug>(&mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet<Key>) -> Result<CommitSet<Key>, Error<E>> {
let mut commit = CommitSet::default();
let front_block_number = self.pending_front_block_number();
if self.levels.is_empty() && self.last_canonicalized.is_none() {
if number < 1 {
return Err(Error::InvalidBlockNumber);
}
if self.levels.is_empty() && self.last_canonicalized.is_none() && number > 0 {
// assume that parent was canonicalized
let last_canonicalized = (parent_hash.clone(), number - 1);
commit.meta.inserted.push((to_meta_key(LAST_CANONICAL, &()), last_canonicalized.encode()));
@@ -224,8 +221,12 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
.unwrap_or(0)
}
pub fn last_canonicalized_block_number(&self) -> u64 {
self.last_canonicalized.as_ref().map(|&(_, n)| n).unwrap_or(0)
pub fn last_canonicalized_block_number(&self) -> Option<u64> {
match self.last_canonicalized.as_ref().map(|&(_, n)| n) {
Some(n) => Some(n + self.pending_canonicalizations.len() as u64),
None if !self.pending_canonicalizations.is_empty() => Some(self.pending_canonicalizations.len() as u64),
_ => None,
}
}
/// Select a top-level root and canonicalized it. Discards all sibling subtrees and the root.
@@ -278,7 +279,7 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
}
}
if let Some(hash) = last {
let last_canonicalized = (hash, self.last_canonicalized_block_number() + count);
let last_canonicalized = (hash, self.last_canonicalized.as_ref().map(|(_, n)| n + count).unwrap_or(count - 1));
self.last_canonicalized = Some(last_canonicalized);
}
}
@@ -295,6 +296,12 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
None
}
/// Check if the block is in the canonicalization queue.
pub fn have_block(&self, hash: &BlockHash) -> bool {
(self.parents.contains_key(hash) || self.pending_insertions.contains(hash))
&& !self.pending_canonicalizations.contains(hash)
}
/// Revert a single level. Returns commit set that deletes the journal or `None` if not possible.
pub fn revert_one(&mut self) -> Option<CommitSet<Key>> {
self.levels.pop_back().map(|level| {
@@ -589,6 +596,10 @@ mod tests {
assert!(contains(&overlay, 121));
assert!(contains(&overlay, 122));
assert!(contains(&overlay, 123));
assert!(overlay.have_block(&h_1_2_1));
assert!(!overlay.have_block(&h_1_2));
assert!(!overlay.have_block(&h_1_1));
assert!(!overlay.have_block(&h_1_1_1));
// canonicalize 1_2_2
db.commit(&overlay.canonicalize::<io::Error>(&h_1_2_2).unwrap());
+9
View File
@@ -130,6 +130,11 @@ impl<BlockHash: Hash, Key: Hash> RefWindow<BlockHash, Key> {
self.pending_number + self.pending_prunings as u64
}
pub fn have_block(&self, hash: &BlockHash) -> bool {
self.death_rows.iter().skip(self.pending_prunings).any(|r| r.hash == *hash) ||
self.pending_records.iter().any(|(_, record)| record.hash == *hash)
}
/// Prune next block. Expects at least one block in the window. Adds changes to `commit`.
pub fn prune_one(&mut self, commit: &mut CommitSet<Key>) {
if let Some(pruned) = self.death_rows.get(self.pending_prunings) {
@@ -236,7 +241,9 @@ mod tests {
let h = H256::random();
pruning.note_canonical(&h, &mut commit);
db.commit(&commit);
assert!(pruning.have_block(&h));
pruning.apply_pending();
assert!(pruning.have_block(&h));
assert!(commit.data.deleted.is_empty());
assert_eq!(pruning.death_rows.len(), 1);
assert_eq!(pruning.death_index.len(), 2);
@@ -245,8 +252,10 @@ mod tests {
let mut commit = CommitSet::default();
pruning.prune_one(&mut commit);
assert!(!pruning.have_block(&h));
db.commit(&commit);
pruning.apply_pending();
assert!(!pruning.have_block(&h));
assert!(db.data_eq(&make_db(&[2, 4, 5])));
assert!(pruning.death_rows.is_empty());
assert!(pruning.death_index.is_empty());