Limit transaction pool size (#1676)

* Avoid excessive hashing. Store extrinsic len.

* Implement pool limits.

* Fix issues.

* Make sure we return an error in case it doesn't make it into the pool.

* Pass parameters from CLI.

* Remove redundant todo.

* Fix tests.
This commit is contained in:
Tomasz Drwięga
2019-02-06 19:03:05 +01:00
committed by Gav Wood
parent 461cd384fc
commit 4e3eace15f
14 changed files with 398 additions and 38 deletions
@@ -86,6 +86,8 @@ pub struct PruneStatus<Hash, Ex> {
pub struct Transaction<Hash, Extrinsic> {
/// Raw extrinsic representing that transaction.
pub data: Extrinsic,
/// Number of bytes the encoding of the transaction requires.
pub bytes: usize,
/// Transaction hash (unique)
pub hash: Hash,
/// Transaction priority (higher = better)
@@ -136,7 +138,7 @@ impl<Hash: hash::Hash + Member + Serialize, Ex: ::std::fmt::Debug> BasePool<Hash
tx: Transaction<Hash, Ex>,
) -> error::Result<Imported<Hash, Ex>> {
if self.future.contains(&tx.hash) || self.ready.contains(&tx.hash) {
bail!(error::ErrorKind::AlreadyImported)
bail!(error::ErrorKind::AlreadyImported(Box::new(tx.hash.clone())))
}
let tx = WaitingTransaction::new(tx, self.ready.provided_tags());
@@ -243,6 +245,58 @@ impl<Hash: hash::Hash + Member + Serialize, Ex: ::std::fmt::Debug> BasePool<Hash
.collect()
}
/// Makes sure that the transactions in the queues stay within provided limits.
///
/// Removes and returns worst transactions from the queues and all transactions that depend on them.
/// Technically the worst transaction should be evaluated by computing the entire pending set.
/// We use a simplified approach to remove the transaction that occupies the pool for the longest time.
pub fn enforce_limits(&mut self, ready: &Limit, future: &Limit) -> Vec<Arc<Transaction<Hash, Ex>>> {
let mut removed = vec![];
// Shrink the ready queue until it satisfies its limit.
while ready.is_exceeded(self.ready.len(), self.ready.bytes()) {
// find the worst transaction
// "Worst" here = oldest in the pool, i.e. smallest `insertion_id`.
let minimal = self.ready
.fold(|minimal, current| {
let transaction = &current.transaction;
match minimal {
None => Some(transaction.clone()),
Some(ref tx) if tx.insertion_id > transaction.insertion_id => {
Some(transaction.clone())
},
other => other,
}
});
if let Some(minimal) = minimal {
// `remove_invalid` also drops transactions depending on this one,
// so every iteration removes at least one entry and the loop makes progress.
removed.append(&mut self.remove_invalid(&[minimal.transaction.hash.clone()]))
} else {
// Queue is empty (fold produced nothing) yet the limit reports exceeded;
// bail out instead of spinning forever.
break;
}
}
// Shrink the future queue the same way; "worst" = earliest `imported_at`.
while future.is_exceeded(self.future.len(), self.future.bytes()) {
// find the worst transaction
let minimal = self.future
.fold(|minimal, current| {
match minimal {
None => Some(current.clone()),
Some(ref tx) if tx.imported_at > current.imported_at => {
Some(current.clone())
},
other => other,
}
});
if let Some(minimal) = minimal {
removed.append(&mut self.remove_invalid(&[minimal.transaction.hash.clone()]))
} else {
break;
}
}
// All transactions evicted from either queue (including dependents).
removed
}
/// Removes all transactions represented by the hashes and all other transactions
/// that depend on them.
///
@@ -298,7 +352,9 @@ impl<Hash: hash::Hash + Member + Serialize, Ex: ::std::fmt::Debug> BasePool<Hash
/// Returns a snapshot of the pool status: element counts and total
/// encoded byte sizes of the ready and future queues.
pub fn status(&self) -> Status {
Status {
ready: self.ready.len(),
ready_bytes: self.ready.bytes(),
future: self.future.len(),
future_bytes: self.future.bytes(),
}
}
}
@@ -307,8 +363,12 @@ impl<Hash: hash::Hash + Member + Serialize, Ex: ::std::fmt::Debug> BasePool<Hash
pub struct Status {
/// Number of transactions in the ready queue.
pub ready: usize,
/// Sum of bytes of ready transaction encodings.
pub ready_bytes: usize,
/// Number of transactions in the future queue.
pub future: usize,
/// Sum of bytes of future transaction encodings.
pub future_bytes: usize,
}
impl Status {
@@ -318,6 +378,22 @@ impl Status {
}
}
/// Queue limits
#[derive(Debug, Clone)]
pub struct Limit {
/// Maximal number of transactions in the queue.
pub count: usize,
/// Maximal size of encodings of all transactions in the queue.
pub total_bytes: usize,
}

impl Limit {
/// Returns true if any of the provided values exceeds the limit.
pub fn is_exceeded(&self, count: usize, bytes: usize) -> bool {
// A queue exactly at the limit is still within bounds;
// only strictly greater values count as exceeded.
let too_many = count > self.count;
let too_large = bytes > self.total_bytes;
too_many || too_large
}
}
#[cfg(test)]
mod tests {
use super::*;
@@ -336,6 +412,7 @@ mod tests {
// when
pool.import(Transaction {
data: vec![1u8],
bytes: 1,
hash: 1u64,
priority: 5u64,
valid_till: 64u64,
@@ -356,6 +433,7 @@ mod tests {
// when
pool.import(Transaction {
data: vec![1u8],
bytes: 1,
hash: 1,
priority: 5u64,
valid_till: 64u64,
@@ -364,6 +442,7 @@ mod tests {
}).unwrap();
pool.import(Transaction {
data: vec![1u8],
bytes: 1,
hash: 1,
priority: 5u64,
valid_till: 64u64,
@@ -385,6 +464,7 @@ mod tests {
// when
pool.import(Transaction {
data: vec![1u8],
bytes: 1,
hash: 1,
priority: 5u64,
valid_till: 64u64,
@@ -395,6 +475,7 @@ mod tests {
assert_eq!(pool.ready.len(), 0);
pool.import(Transaction {
data: vec![2u8],
bytes: 1,
hash: 2,
priority: 5u64,
valid_till: 64u64,
@@ -415,6 +496,7 @@ mod tests {
// when
pool.import(Transaction {
data: vec![1u8],
bytes: 1,
hash: 1,
priority: 5u64,
valid_till: 64u64,
@@ -423,6 +505,7 @@ mod tests {
}).unwrap();
pool.import(Transaction {
data: vec![3u8],
bytes: 1,
hash: 3,
priority: 5u64,
valid_till: 64u64,
@@ -431,6 +514,7 @@ mod tests {
}).unwrap();
pool.import(Transaction {
data: vec![2u8],
bytes: 1,
hash: 2,
priority: 5u64,
valid_till: 64u64,
@@ -439,6 +523,7 @@ mod tests {
}).unwrap();
pool.import(Transaction {
data: vec![4u8],
bytes: 1,
hash: 4,
priority: 1_000u64,
valid_till: 64u64,
@@ -450,6 +535,7 @@ mod tests {
let res = pool.import(Transaction {
data: vec![5u8],
bytes: 1,
hash: 5,
priority: 5u64,
valid_till: 64u64,
@@ -480,6 +566,7 @@ mod tests {
let mut pool = pool();
pool.import(Transaction {
data: vec![1u8],
bytes: 1,
hash: 1,
priority: 5u64,
valid_till: 64u64,
@@ -488,6 +575,7 @@ mod tests {
}).unwrap();
pool.import(Transaction {
data: vec![3u8],
bytes: 1,
hash: 3,
priority: 5u64,
valid_till: 64u64,
@@ -500,6 +588,7 @@ mod tests {
// when
pool.import(Transaction {
data: vec![2u8],
bytes: 1,
hash: 2,
priority: 5u64,
valid_till: 64u64,
@@ -518,6 +607,7 @@ mod tests {
// let's close the cycle with one additional transaction
let res = pool.import(Transaction {
data: vec![4u8],
bytes: 1,
hash: 4,
priority: 50u64,
valid_till: 64u64,
@@ -545,6 +635,7 @@ mod tests {
let mut pool = pool();
pool.import(Transaction {
data: vec![1u8],
bytes: 1,
hash: 1,
priority: 5u64,
valid_till: 64u64,
@@ -553,6 +644,7 @@ mod tests {
}).unwrap();
pool.import(Transaction {
data: vec![3u8],
bytes: 1,
hash: 3,
priority: 5u64,
valid_till: 64u64,
@@ -565,6 +657,7 @@ mod tests {
// when
pool.import(Transaction {
data: vec![2u8],
bytes: 1,
hash: 2,
priority: 5u64,
valid_till: 64u64,
@@ -583,6 +676,7 @@ mod tests {
// let's close the cycle with one additional transaction
let err = pool.import(Transaction {
data: vec![4u8],
bytes: 1,
hash: 4,
priority: 1u64, // lower priority than Tx(2)
valid_till: 64u64,
@@ -605,6 +699,7 @@ mod tests {
let mut pool = pool();
pool.import(Transaction {
data: vec![5u8],
bytes: 1,
hash: 5,
priority: 5u64,
valid_till: 64u64,
@@ -613,6 +708,7 @@ mod tests {
}).unwrap();
pool.import(Transaction {
data: vec![1u8],
bytes: 1,
hash: 1,
priority: 5u64,
valid_till: 64u64,
@@ -621,6 +717,7 @@ mod tests {
}).unwrap();
pool.import(Transaction {
data: vec![3u8],
bytes: 1,
hash: 3,
priority: 5u64,
valid_till: 64u64,
@@ -629,6 +726,7 @@ mod tests {
}).unwrap();
pool.import(Transaction {
data: vec![2u8],
bytes: 1,
hash: 2,
priority: 5u64,
valid_till: 64u64,
@@ -637,6 +735,7 @@ mod tests {
}).unwrap();
pool.import(Transaction {
data: vec![4u8],
bytes: 1,
hash: 4,
priority: 1_000u64,
valid_till: 64u64,
@@ -646,6 +745,7 @@ mod tests {
// future
pool.import(Transaction {
data: vec![6u8],
bytes: 1,
hash: 6,
priority: 1_000u64,
valid_till: 64u64,
@@ -671,6 +771,7 @@ mod tests {
// future (waiting for 0)
pool.import(Transaction {
data: vec![5u8],
bytes: 1,
hash: 5,
priority: 5u64,
valid_till: 64u64,
@@ -680,6 +781,7 @@ mod tests {
// ready
pool.import(Transaction {
data: vec![1u8],
bytes: 1,
hash: 1,
priority: 5u64,
valid_till: 64u64,
@@ -688,6 +790,7 @@ mod tests {
}).unwrap();
pool.import(Transaction {
data: vec![2u8],
bytes: 1,
hash: 2,
priority: 5u64,
valid_till: 64u64,
@@ -696,6 +799,7 @@ mod tests {
}).unwrap();
pool.import(Transaction {
data: vec![3u8],
bytes: 1,
hash: 3,
priority: 5u64,
valid_till: 64u64,
@@ -704,6 +808,7 @@ mod tests {
}).unwrap();
pool.import(Transaction {
data: vec![4u8],
bytes: 1,
hash: 4,
priority: 1_000u64,
valid_till: 64u64,
@@ -39,9 +39,9 @@ error_chain! {
display("Temporarily Banned"),
}
/// The transaction is already in the pool.
AlreadyImported {
description("Transaction is already in the pool."),
display("Already imported"),
AlreadyImported(hash: Box<::std::any::Any + Send>) {
description("Transaction is already in the pool"),
display("[{:?}] Already imported", hash),
}
/// The transaction cannot be imported cause it's a replacement and has too low priority.
TooLowPriority(old: Priority, new: Priority) {
@@ -53,6 +53,11 @@ error_chain! {
description("Transaction was not imported because of detected cycle."),
display("Cycle Detected"),
}
/// Transaction was dropped immediately after it got inserted.
ImmediatelyDropped {
description("Transaction couldn't enter the pool because of the limit."),
display("Immediately Dropped"),
}
}
}
@@ -18,6 +18,7 @@ use std::{
collections::{HashMap, HashSet},
hash,
sync::Arc,
time,
};
use sr_primitives::transaction_validity::{
@@ -33,6 +34,18 @@ pub struct WaitingTransaction<Hash, Ex> {
pub transaction: Arc<Transaction<Hash, Ex>>,
/// Tags that are required and have not been satisfied yet by other transactions in the pool.
pub missing_tags: HashSet<Tag>,
/// Time of import to the Future Queue.
pub imported_at: time::Instant,
}
// Manual `Clone` impl — presumably to avoid the `Hash: Clone` / `Ex: Clone`
// bounds that `#[derive(Clone)]` would add; the transaction itself is behind
// an `Arc`, so cloning only bumps the reference count.
impl<Hash, Ex> Clone for WaitingTransaction<Hash, Ex> {
fn clone(&self) -> Self {
WaitingTransaction {
transaction: self.transaction.clone(),
missing_tags: self.missing_tags.clone(),
// `Instant` is `Copy`; calling `clone()` on it is redundant.
imported_at: self.imported_at,
}
}
}
impl<Hash, Ex> WaitingTransaction<Hash, Ex> {
@@ -50,6 +63,7 @@ impl<Hash, Ex> WaitingTransaction<Hash, Ex> {
WaitingTransaction {
transaction: Arc::new(transaction),
missing_tags,
imported_at: time::Instant::now(),
}
}
@@ -174,6 +188,13 @@ impl<Hash: hash::Hash + Eq + Clone, Ex> FutureTransactions<Hash, Ex> {
removed
}
/// Fold a list of future transactions to compute a single value.
///
/// The accumulator starts as `None` and `f` is invoked once per waiting
/// transaction; it may keep or replace the running value.
///
/// Takes `&self` (the previous `&mut self` was unnecessarily restrictive:
/// this method only reads the map); callers holding `&mut self` still work.
pub fn fold<R, F: FnMut(Option<R>, &WaitingTransaction<Hash, Ex>) -> Option<R>>(&self, f: F) -> Option<R> {
self.waiting
.values()
.fold(None, f)
}
/// Returns iterator over all future transactions
pub fn all(&self) -> impl Iterator<Item=&Transaction<Hash, Ex>> {
self.waiting.values().map(|waiting| &*waiting.transaction)
@@ -183,4 +204,9 @@ impl<Hash: hash::Hash + Eq + Clone, Ex> FutureTransactions<Hash, Ex> {
pub fn len(&self) -> usize {
self.waiting.len()
}
/// Returns sum of encoding lengths of all transactions in this queue.
pub fn bytes(&self) -> usize {
// `map` + `sum` states the intent more directly than a manual fold.
self.waiting.values().map(|tx| tx.transaction.bytes).sum()
}
}
+178 -18
View File
@@ -15,7 +15,7 @@
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
use std::{
collections::HashMap,
collections::{HashSet, HashMap},
hash,
sync::Arc,
time,
@@ -38,6 +38,8 @@ use sr_primitives::{
transaction_validity::{TransactionValidity, TransactionTag as Tag},
};
pub use crate::base_pool::Limit;
/// Modification notification event stream type;
pub type EventStream = mpsc::UnboundedReceiver<()>;
@@ -70,17 +72,38 @@ pub trait ChainApi: Send + Sync {
/// Returns a block hash given the block id.
fn block_id_to_hash(&self, at: &BlockId<Self::Block>) -> Result<Option<BlockHash<Self>>, Self::Error>;
/// Hash the extrinsic.
fn hash(&self, uxt: &ExtrinsicFor<Self>) -> Self::Hash;
/// Returns hash and encoding length of the extrinsic.
fn hash_and_length(&self, uxt: &ExtrinsicFor<Self>) -> (Self::Hash, usize);
}
/// Pool configuration options.
#[derive(Debug, Clone, Default)]
pub struct Options;
#[derive(Debug, Clone)]
pub struct Options {
/// Ready queue limits.
pub ready: Limit,
/// Future queue limits.
pub future: Limit,
}

impl Default for Options {
/// Default limits: 512 ready transactions (10 MiB total) and
/// 128 future transactions (1 MiB total).
fn default() -> Self {
Options {
ready: Limit {
count: 512,
total_bytes: 10 * 1024 * 1024,
},
future: Limit {
count: 128,
// 1 MiB (dropped the redundant `1 *` multiplier).
total_bytes: 1024 * 1024,
},
}
}
}
/// Extrinsics pool.
pub struct Pool<B: ChainApi> {
api: B,
options: Options,
listener: RwLock<Listener<ExHash<B>, BlockHash<B>>>,
pool: RwLock<base::BasePool<
ExHash<B>,
@@ -91,7 +114,6 @@ pub struct Pool<B: ChainApi> {
}
impl<B: ChainApi> Pool<B> {
/// Imports a bunch of unverified extrinsics to the pool
pub fn submit_at<T>(&self, at: &BlockId<B::Block>, xts: T) -> Result<Vec<Result<ExHash<B>, B::Error>>, B::Error> where
T: IntoIterator<Item=ExtrinsicFor<B>>
@@ -99,10 +121,10 @@ impl<B: ChainApi> Pool<B> {
let block_number = self.api.block_id_to_number(at)?
.ok_or_else(|| error::ErrorKind::Msg(format!("Invalid block id: {:?}", at)).into())?;
Ok(xts
let results = xts
.into_iter()
.map(|xt| -> Result<_, B::Error> {
let hash = self.api.hash(&xt);
let (hash, bytes) = self.api.hash_and_length(&xt);
if self.rotator.is_banned(&hash) {
bail!(error::Error::from(error::ErrorKind::TemporarilyBanned))
}
@@ -111,6 +133,7 @@ impl<B: ChainApi> Pool<B> {
TransactionValidity::Valid { priority, requires, provides, longevity } => {
Ok(base::Transaction {
data: xt,
bytes,
hash,
priority,
requires,
@@ -138,7 +161,42 @@ impl<B: ChainApi> Pool<B> {
fire_events(&mut *listener, &imported);
Ok(imported.hash().clone())
})
.collect())
.collect::<Vec<_>>();
let removed = self.enforce_limits();
Ok(results.into_iter().map(|res| match res {
Ok(ref hash) if removed.contains(hash) => Err(error::Error::from(error::ErrorKind::ImmediatelyDropped).into()),
other => other,
}).collect())
}
/// Enforces the configured ready/future limits on the pool.
///
/// Returns the set of hashes dropped to satisfy the limits. Dropped
/// transactions are also banned in the rotator and their listeners are
/// notified with a `dropped` event.
fn enforce_limits(&self) -> HashSet<ExHash<B>> {
// NOTE(review): status is read under a read lock and the cleanup happens
// under a later write lock, so the pool may change in between; the inner
// `enforce_limits` re-checks the limits in its own loops, so a stale
// status here only costs a redundant check — confirm this is intended.
let status = self.pool.read().status();
let ready_limit = &self.options.ready;
let future_limit = &self.options.future;

if ready_limit.is_exceeded(status.ready, status.ready_bytes)
|| future_limit.is_exceeded(status.future, status.future_bytes) {
// clean up the pool
let removed = {
let mut pool = self.pool.write();
let removed = pool.enforce_limits(ready_limit, future_limit)
.into_iter().map(|x| x.hash.clone()).collect::<HashSet<_>>();
// ban all removed transactions
self.rotator.ban(&std::time::Instant::now(), removed.iter().map(|x| x.clone()));
removed
// write lock released here, before notifying listeners
};
// run notifications
let mut listener = self.listener.write();
for h in &removed {
listener.dropped(h, None);
}
removed
} else {
// Within limits: nothing removed (empty set).
Default::default()
}
}
/// Imports one unverified extrinsic to the pool
@@ -148,7 +206,7 @@ impl<B: ChainApi> Pool<B> {
/// Import a single extrinsic and starts to watch their progress in the pool.
pub fn submit_and_watch(&self, at: &BlockId<B::Block>, xt: ExtrinsicFor<B>) -> Result<Watcher<ExHash<B>, BlockHash<B>>, B::Error> {
let hash = self.api.hash(&xt);
let hash = self.api.hash_and_length(&xt).0;
let watcher = self.listener.write().create_watcher(hash);
self.submit_one(at, xt)?;
Ok(watcher)
@@ -163,7 +221,7 @@ impl<B: ChainApi> Pool<B> {
pub fn prune(&self, at: &BlockId<B::Block>, parent: &BlockId<B::Block>, extrinsics: &[ExtrinsicFor<B>]) -> Result<(), B::Error> {
let mut tags = Vec::with_capacity(extrinsics.len());
// Get details of all extrinsics that are already in the pool
let hashes = extrinsics.iter().map(|extrinsic| self.api.hash(extrinsic)).collect::<Vec<_>>();
let hashes = extrinsics.iter().map(|extrinsic| self.api.hash_and_length(extrinsic).0).collect::<Vec<_>>();
let in_pool = self.pool.read().by_hash(&hashes);
{
// Zip the ones from the pool with the full list (we get pairs `(Extrinsic, Option<TransactionDetails>)`)
@@ -303,13 +361,12 @@ impl<B: ChainApi> Pool<B> {
Ok(())
}
}
impl<B: ChainApi> Pool<B> {
/// Create a new transaction pool.
pub fn new(_options: Options, api: B) -> Self {
pub fn new(options: Options, api: B) -> Self {
Pool {
api,
options,
listener: Default::default(),
pool: Default::default(),
import_notification_sinks: Default::default(),
@@ -359,8 +416,9 @@ impl<B: ChainApi> Pool<B> {
}
/// Returns transaction hash
pub fn hash_of(&self, xt: &ExtrinsicFor<B>) -> ExHash<B> {
self.api.hash(xt)
#[cfg(test)]
fn hash_of(&self, xt: &ExtrinsicFor<B>) -> ExHash<B> {
self.api.hash_and_length(xt).0
}
}
@@ -394,6 +452,7 @@ fn fire_events<H, H2, Ex>(
mod tests {
use super::*;
use futures::Stream;
use parity_codec::Encode;
use test_runtime::{Block, Extrinsic, Transfer, H256};
use assert_matches::assert_matches;
use crate::watcher;
@@ -440,8 +499,12 @@ mod tests {
}
/// Hash the extrinsic.
fn hash(&self, uxt: &ExtrinsicFor<Self>) -> Self::Hash {
(uxt.transfer().from.to_low_u64_be() << 5) + uxt.transfer().nonce
fn hash_and_length(&self, uxt: &ExtrinsicFor<Self>) -> (Self::Hash, usize) {
let len = uxt.encode().len();
(
(uxt.transfer().from.to_low_u64_be() << 5) + uxt.transfer().nonce,
len
)
}
}
@@ -586,6 +649,66 @@ mod tests {
assert!(pool.rotator.is_banned(&hash1));
}
#[test]
// When the future queue exceeds its byte limit, the oldest future
// transaction is evicted (and banned) to make room for the new one.
fn should_limit_futures() {
// given
// NOTE(review): count 100 is never hit here; the eviction is presumably
// triggered by `total_bytes: 200` (two encoded transfers exceed it) — confirm.
let limit = Limit {
count: 100,
total_bytes: 200,
};
let pool = Pool::new(Options {
ready: limit.clone(),
future: limit.clone(),
}, TestApi::default());
// nonce 1 with no nonce-0 predecessor lands in the future queue.
let hash1 = pool.submit_one(&BlockId::Number(0), uxt(Transfer {
from: H256::from_low_u64_be(1),
to: H256::from_low_u64_be(2),
amount: 5,
nonce: 1,
})).unwrap();
assert_eq!(pool.status().future, 1);
// when
let hash2 = pool.submit_one(&BlockId::Number(0), uxt(Transfer {
from: H256::from_low_u64_be(2),
to: H256::from_low_u64_be(2),
amount: 5,
nonce: 10,
})).unwrap();
// then
// Still one future transaction: the older one (hash1) was dropped and banned.
assert_eq!(pool.status().future, 1);
assert!(pool.rotator.is_banned(&hash1));
assert!(!pool.rotator.is_banned(&hash2));
}
#[test]
// A transaction that cannot fit the pool at all must be rejected with an
// error (ImmediatelyDropped) rather than silently accepted and evicted.
fn should_error_if_reject_immediately() {
// given
// 10 bytes is smaller than any encoded transfer, so nothing can ever fit.
let limit = Limit {
count: 100,
total_bytes: 10,
};
let pool = Pool::new(Options {
ready: limit.clone(),
future: limit.clone(),
}, TestApi::default());

// when
pool.submit_one(&BlockId::Number(0), uxt(Transfer {
from: H256::from_low_u64_be(1),
to: H256::from_low_u64_be(2),
amount: 5,
nonce: 1,
})).unwrap_err();

// then
// The pool stays empty: the submission failed instead of being queued.
assert_eq!(pool.status().ready, 0);
assert_eq!(pool.status().future, 0);
}
mod listener {
use super::*;
@@ -716,5 +839,42 @@ mod tests {
assert_eq!(stream.next(), Some(Ok(watcher::Status::Ready)));
assert_eq!(stream.next(), Some(Ok(watcher::Status::Broadcast(peers))));
}
#[test]
// A watched transaction evicted by the pool limits must notify its
// watcher with a `Dropped` status event.
fn should_trigger_dropped() {
// given
// Only one ready transaction allowed at a time.
let limit = Limit {
count: 1,
total_bytes: 1000,
};
let pool = Pool::new(Options {
ready: limit.clone(),
future: limit.clone(),
}, TestApi::default());

let xt = uxt(Transfer {
from: H256::from_low_u64_be(1),
to: H256::from_low_u64_be(2),
amount: 5,
nonce: 0,
});
let watcher = pool.submit_and_watch(&BlockId::Number(0), xt).unwrap();
assert_eq!(pool.status().ready, 1);

// when
// A second ready transaction pushes the queue over `count: 1`,
// evicting the watched (older) one.
let xt = uxt(Transfer {
from: H256::from_low_u64_be(2),
to: H256::from_low_u64_be(1),
amount: 4,
nonce: 1,
});
pool.submit_one(&BlockId::Number(1), xt).unwrap();
assert_eq!(pool.status().ready, 1);

// then
// The watcher first saw Ready on import, then Dropped on eviction.
let mut stream = watcher.into_stream().wait();
assert_eq!(stream.next(), Some(Ok(watcher::Status::Ready)));
assert_eq!(stream.next(), Some(Ok(watcher::Status::Dropped)));
}
}
}
@@ -34,9 +34,14 @@ use crate::error;
use crate::future::WaitingTransaction;
use crate::base_pool::Transaction;
/// An in-pool transaction reference.
///
/// Should be cheap to clone.
#[derive(Debug)]
struct TransactionRef<Hash, Ex> {
pub struct TransactionRef<Hash, Ex> {
/// The actual transaction data.
pub transaction: Arc<Transaction<Hash, Ex>>,
/// Unique id when transaction was inserted into the pool.
pub insertion_id: u64,
}
@@ -71,7 +76,7 @@ impl<Hash, Ex> PartialEq for TransactionRef<Hash, Ex> {
impl<Hash, Ex> Eq for TransactionRef<Hash, Ex> {}
#[derive(Debug)]
struct ReadyTx<Hash, Ex> {
pub struct ReadyTx<Hash, Ex> {
/// A reference to a transaction
pub transaction: TransactionRef<Hash, Ex>,
/// A list of transactions that get unlocked by this one
@@ -152,6 +157,7 @@ impl<Hash: hash::Hash + Member + Serialize, Ex> ReadyTransactions<Hash, Ex> {
///
/// The transaction needs to have all tags satisfied (be ready) by transactions
/// that are in this queue.
/// Returns transactions that were replaced by the one imported.
pub fn import(
&mut self,
tx: WaitingTransaction<Hash, Ex>,
@@ -204,6 +210,14 @@ impl<Hash: hash::Hash + Member + Serialize, Ex> ReadyTransactions<Hash, Ex> {
Ok(replaced)
}
/// Fold a list of ready transactions to compute a single value.
///
/// The accumulator starts as `None` and `f` is invoked once per ready
/// transaction; it may keep or replace the running value.
///
/// Takes `&self` (the previous `&mut self` was unnecessarily restrictive:
/// only a read lock on the map is acquired); callers holding `&mut self`
/// still work.
pub fn fold<R, F: FnMut(Option<R>, &ReadyTx<Hash, Ex>) -> Option<R>>(&self, f: F) -> Option<R> {
self.ready
.read()
.values()
.fold(None, f)
}
/// Returns true if given hash is part of the queue.
pub fn contains(&self, hash: &Hash) -> bool {
self.ready.read().contains_key(hash)
@@ -401,6 +415,10 @@ impl<Hash: hash::Hash + Member + Serialize, Ex> ReadyTransactions<Hash, Ex> {
self.ready.read().len()
}
/// Returns sum of encoding lengths of all transactions in this queue.
pub fn bytes(&self) -> usize {
// `map` + `sum` states the intent more directly than a manual fold;
// double `.transaction` hop: ReadyTx -> TransactionRef -> Transaction.
self.ready.read().values().map(|tx| tx.transaction.transaction.bytes).sum()
}
}
pub struct BestIterator<Hash, Ex> {
@@ -476,6 +494,7 @@ mod tests {
fn tx(id: u8) -> Transaction<u64, Vec<u8>> {
Transaction {
data: vec![id],
bytes: 1,
hash: id as u64,
priority: 1,
valid_till: 2,
@@ -534,6 +553,7 @@ mod tests {
tx4.provides = vec![];
let tx5 = Transaction {
data: vec![5],
bytes: 1,
hash: 5,
priority: 1,
valid_till: u64::max_value(), // use the max_value() here for testing.
@@ -114,6 +114,7 @@ mod tests {
let hash = 5u64;
let tx = Transaction {
data: (),
bytes: 1,
hash: hash.clone(),
priority: 5,
valid_till: 1,
@@ -178,6 +179,7 @@ mod tests {
let hash = i;
Transaction {
data: (),
bytes: 2,
hash,
priority: 5,
valid_till,