diff --git a/substrate/bin/node/bench/src/construct.rs b/substrate/bin/node/bench/src/construct.rs
index 8469ec6289..6524662317 100644
--- a/substrate/bin/node/bench/src/construct.rs
+++ b/substrate/bin/node/bench/src/construct.rs
@@ -171,6 +171,7 @@ impl core::Benchmark for ConstructionBenchmark {
inherent_data_providers.create_inherent_data().expect("Create inherent data failed"),
Default::default(),
std::time::Duration::from_secs(20),
+ None,
),
).map(|r| r.block).expect("Proposing failed");
diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs
index ce0ffb2cec..5fa7aa00df 100644
--- a/substrate/bin/node/cli/src/service.rs
+++ b/substrate/bin/node/cli/src/service.rs
@@ -679,6 +679,7 @@ mod tests {
inherent_data,
digest,
std::time::Duration::from_secs(1),
+ None,
).await
}).expect("Error making test block").block;
diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs
index 910abfad5a..c8277d3b5d 100644
--- a/substrate/client/basic-authorship/src/basic_authorship.rs
+++ b/substrate/client/basic-authorship/src/basic_authorship.rs
@@ -22,7 +22,7 @@
use std::{pin::Pin, time, sync::Arc};
use sc_client_api::backend;
-use codec::Decode;
+use codec::{Decode, Encode};
use sp_consensus::{evaluation, Proposal, ProofRecording, DisableProofRecording, EnableProofRecording};
use sp_core::traits::SpawnNamed;
use sp_inherents::InherentData;
@@ -42,14 +42,14 @@ use std::marker::PhantomData;
use prometheus_endpoint::Registry as PrometheusRegistry;
use sc_proposer_metrics::MetricsLink as PrometheusMetrics;
-/// Default maximum block size in bytes used by [`Proposer`].
+/// Default block size limit in bytes used by [`Proposer`].
///
-/// Can be overwritten by [`ProposerFactory::set_maximum_block_size`].
+/// Can be overwritten by [`ProposerFactory::set_block_size_limit`].
///
/// Be aware that there is also an upper packet size on what the networking code
/// will accept. If the block doesn't fit in such a package, it can not be
/// transferred to other nodes.
-pub const DEFAULT_MAX_BLOCK_SIZE: usize = 4 * 1024 * 1024 + 512;
+pub const DEFAULT_BLOCK_SIZE_LIMIT: usize = 4 * 1024 * 1024 + 512;
/// Proposer factory.
pub struct ProposerFactory {
@@ -60,8 +60,14 @@ pub struct ProposerFactory {
transaction_pool: Arc,
/// Prometheus Link,
metrics: PrometheusMetrics,
- max_block_size: usize,
+ /// The default block size limit.
+ ///
+ /// If no `block_size_limit` is passed to [`Proposer::propose`], this block size limit will be
+ /// used.
+ default_block_size_limit: usize,
telemetry: Option,
+ /// When estimating the block size, should the proof be included?
+ include_proof_in_block_size_estimation: bool,
/// phantom member to pin the `Backend`/`ProofRecording` type.
_phantom: PhantomData<(B, PR)>,
}
@@ -81,9 +87,10 @@ impl ProposerFactory {
spawn_handle: Box::new(spawn_handle),
transaction_pool,
metrics: PrometheusMetrics::new(prometheus),
- max_block_size: DEFAULT_MAX_BLOCK_SIZE,
+ default_block_size_limit: DEFAULT_BLOCK_SIZE_LIMIT,
telemetry,
client,
+ include_proof_in_block_size_estimation: false,
_phantom: PhantomData,
}
}
@@ -93,6 +100,9 @@ impl ProposerFactory {
/// Create a new proposer factory with proof recording enabled.
///
/// Each proposer created by this instance will record a proof while building a block.
+ ///
+ /// This will also include the proof into the estimation of the block size. This can be disabled
+ /// by calling [`ProposerFactory::disable_proof_in_block_size_estimation`].
pub fn with_proof_recording(
spawn_handle: impl SpawnNamed + 'static,
client: Arc,
@@ -101,24 +111,32 @@ impl ProposerFactory {
telemetry: Option,
) -> Self {
ProposerFactory {
- spawn_handle: Box::new(spawn_handle),
client,
+ spawn_handle: Box::new(spawn_handle),
transaction_pool,
metrics: PrometheusMetrics::new(prometheus),
- max_block_size: DEFAULT_MAX_BLOCK_SIZE,
+ default_block_size_limit: DEFAULT_BLOCK_SIZE_LIMIT,
telemetry,
+ include_proof_in_block_size_estimation: true,
_phantom: PhantomData,
}
}
+
+ /// Disable the proof inclusion when estimating the block size.
+ pub fn disable_proof_in_block_size_estimation(&mut self) {
+ self.include_proof_in_block_size_estimation = false;
+ }
}
impl ProposerFactory {
- /// Set the maximum block size in bytes.
+ /// Set the default block size limit in bytes.
///
- /// The default value for the maximum block size is:
- /// [`DEFAULT_MAX_BLOCK_SIZE`].
- pub fn set_maximum_block_size(&mut self, size: usize) {
- self.max_block_size = size;
+ /// The default value for the block size limit is:
+ /// [`DEFAULT_BLOCK_SIZE_LIMIT`].
+ ///
+ /// If there is no block size limit passed to [`Proposer::propose`], this value will be used.
+ pub fn set_default_block_size_limit(&mut self, limit: usize) {
+ self.default_block_size_limit = limit;
}
}
@@ -152,9 +170,10 @@ impl ProposerFactory
transaction_pool: self.transaction_pool.clone(),
now,
metrics: self.metrics.clone(),
- max_block_size: self.max_block_size,
+ default_block_size_limit: self.default_block_size_limit,
telemetry: self.telemetry.clone(),
_phantom: PhantomData,
+ include_proof_in_block_size_estimation: self.include_proof_in_block_size_estimation,
};
proposer
@@ -195,7 +214,8 @@ pub struct Proposer {
transaction_pool: Arc,
now: Box time::Instant + Send + Sync>,
metrics: PrometheusMetrics,
- max_block_size: usize,
+ default_block_size_limit: usize,
+ include_proof_in_block_size_estimation: bool,
telemetry: Option,
_phantom: PhantomData<(B, PR)>,
}
@@ -225,6 +245,7 @@ impl sp_consensus::Proposer for
inherent_data: InherentData,
inherent_digests: DigestFor,
max_duration: time::Duration,
+ block_size_limit: Option<usize>,
) -> Self::Proposal {
let (tx, rx) = oneshot::channel();
let spawn_handle = self.spawn_handle.clone();
@@ -236,6 +257,7 @@ impl sp_consensus::Proposer for
inherent_data,
inherent_digests,
deadline,
+ block_size_limit,
).await;
if tx.send(res).is_err() {
trace!("Could not send block production result to proposer!");
@@ -264,6 +286,7 @@ impl Proposer
inherent_data: InherentData,
inherent_digests: DigestFor,
deadline: time::Instant,
+ block_size_limit: Option<usize>,
) -> Result, PR::Proof>, sp_blockchain::Error> {
/// If the block is full we will attempt to push at most
/// this number of transactions before quitting for real.
@@ -297,7 +320,9 @@ impl Proposer
let mut unqueue_invalid = Vec::new();
let mut t1 = self.transaction_pool.ready_at(self.parent_number).fuse();
- let mut t2 = futures_timer::Delay::new(deadline.saturating_duration_since((self.now)()) / 8).fuse();
+ let mut t2 = futures_timer::Delay::new(
+ deadline.saturating_duration_since((self.now)()) / 8,
+ ).fuse();
let pending_iterator = select! {
res = t1 => res,
@@ -311,8 +336,13 @@ impl Proposer
},
};
+ let block_size_limit = block_size_limit.unwrap_or(self.default_block_size_limit);
+
debug!("Attempting to push transactions from the pool.");
debug!("Pool status: {:?}", self.transaction_pool.status());
+ let mut transaction_pushed = false;
+ let mut hit_block_size_limit = false;
+
for pending_tx in pending_iterator {
if (self.now)() > deadline {
debug!(
@@ -324,9 +354,30 @@ impl Proposer
let pending_tx_data = pending_tx.data().clone();
let pending_tx_hash = pending_tx.hash().clone();
+
+ let block_size = block_builder.estimate_block_size(
+ self.include_proof_in_block_size_estimation,
+ );
+ if block_size + pending_tx_data.encoded_size() > block_size_limit {
+ if skipped < MAX_SKIPPED_TRANSACTIONS {
+ skipped += 1;
+ debug!(
+ "Transaction would overflow the block size limit, \
+ but will try {} more transactions before quitting.",
+ MAX_SKIPPED_TRANSACTIONS - skipped,
+ );
+ continue;
+ } else {
+ debug!("Reached block size limit, proceeding with proposing.");
+ hit_block_size_limit = true;
+ break;
+ }
+ }
+
trace!("[{:?}] Pushing to the block.", pending_tx_hash);
match sc_block_builder::BlockBuilder::push(&mut block_builder, pending_tx_data) {
Ok(()) => {
+ transaction_pushed = true;
debug!("[{:?}] Pushed to the block.", pending_tx_hash);
}
Err(ApplyExtrinsicFailed(Validity(e)))
@@ -356,6 +407,13 @@ impl Proposer
}
}
+ if hit_block_size_limit && !transaction_pushed {
+ warn!(
+ "Hit block size limit of `{}` without including any transaction!",
+ block_size_limit,
+ );
+ }
+
self.transaction_pool.remove_invalid(&unqueue_invalid);
let (block, storage_changes, proof) = block_builder.build()?.into_inner();
@@ -367,7 +425,8 @@ impl Proposer
}
);
- info!("🎁 Prepared block for proposing at {} [hash: {:?}; parent_hash: {}; extrinsics ({}): [{}]]",
+ info!(
+ "🎁 Prepared block for proposing at {} [hash: {:?}; parent_hash: {}; extrinsics ({}): [{}]]",
block.header().number(),
::Hash::from(block.header().hash()),
block.header().parent_hash(),
@@ -394,7 +453,6 @@ impl Proposer
&block,
&self.parent_hash,
self.parent_number,
- self.max_block_size,
) {
error!("Failed to evaluate authored block: {:?}", err);
}
@@ -421,6 +479,7 @@ mod tests {
use sp_runtime::traits::NumberFor;
use sc_client_api::Backend;
use futures::executor::block_on;
+ use sp_consensus::Environment;
const SOURCE: TransactionSource = TransactionSource::External;
@@ -494,7 +553,7 @@ mod tests {
// when
let deadline = time::Duration::from_secs(3);
let block = block_on(
- proposer.propose(Default::default(), Default::default(), deadline)
+ proposer.propose(Default::default(), Default::default(), deadline, None)
).map(|r| r.block).unwrap();
// then
@@ -540,7 +599,7 @@ mod tests {
let deadline = time::Duration::from_secs(1);
block_on(
- proposer.propose(Default::default(), Default::default(), deadline)
+ proposer.propose(Default::default(), Default::default(), deadline, None)
).map(|r| r.block).unwrap();
}
@@ -587,7 +646,7 @@ mod tests {
let deadline = time::Duration::from_secs(9);
let proposal = block_on(
- proposer.propose(Default::default(), Default::default(), deadline),
+ proposer.propose(Default::default(), Default::default(), deadline, None),
).unwrap();
assert_eq!(proposal.block.extrinsics().len(), 1);
@@ -669,7 +728,7 @@ mod tests {
// when
let deadline = time::Duration::from_secs(9);
let block = block_on(
- proposer.propose(Default::default(), Default::default(), deadline)
+ proposer.propose(Default::default(), Default::default(), deadline, None)
).map(|r| r.block).unwrap();
// then
@@ -704,4 +763,82 @@ mod tests {
let block = propose_block(&client, 1, 2, 5);
block_on(client.import(BlockOrigin::Own, block)).unwrap();
}
+
+ #[test]
+ fn should_cease_building_block_when_block_limit_is_reached() {
+ let client = Arc::new(substrate_test_runtime_client::new());
+ let spawner = sp_core::testing::TaskExecutor::new();
+ let txpool = BasicPool::new_full(
+ Default::default(),
+ true.into(),
+ None,
+ spawner.clone(),
+ client.clone(),
+ );
+ let genesis_header = client.header(&BlockId::Number(0u64))
+ .expect("header get error")
+ .expect("there should be header");
+
+ let extrinsics_num = 4;
+ let extrinsics = (0..extrinsics_num)
+ .map(|v| Extrinsic::IncludeData(vec![v as u8; 10]))
+ .collect::<Vec<_>>();
+
+ let block_limit = genesis_header.encoded_size()
+ + extrinsics.iter().take(extrinsics_num - 1).map(Encode::encoded_size).sum::<usize>()
+ + Vec::::new().encoded_size();
+
+ block_on(
+ txpool.submit_at(&BlockId::number(0), SOURCE, extrinsics)
+ ).unwrap();
+
+ block_on(txpool.maintain(chain_event(genesis_header.clone())));
+
+ let mut proposer_factory = ProposerFactory::new(
+ spawner.clone(),
+ client.clone(),
+ txpool.clone(),
+ None,
+ None,
+ );
+
+ let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();
+
+ // Give it enough time
+ let deadline = time::Duration::from_secs(300);
+ let block = block_on(
+ proposer.propose(Default::default(), Default::default(), deadline, Some(block_limit))
+ ).map(|r| r.block).unwrap();
+
+ // Based on the block limit, one transaction shouldn't be included.
+ assert_eq!(block.extrinsics().len(), extrinsics_num - 1);
+
+ let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();
+
+ let block = block_on(
+ proposer.propose(Default::default(), Default::default(), deadline, None,
+ )).map(|r| r.block).unwrap();
+
+ // Without a block limit we should include all of them
+ assert_eq!(block.extrinsics().len(), extrinsics_num);
+
+ let mut proposer_factory = ProposerFactory::with_proof_recording(
+ spawner.clone(),
+ client.clone(),
+ txpool.clone(),
+ None,
+ None,
+ );
+
+ let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();
+
+ // Give it enough time
+ let block = block_on(
+ proposer.propose(Default::default(), Default::default(), deadline, Some(block_limit))
+ ).map(|r| r.block).unwrap();
+
+ // The block limit didn't change, but we now include the proof in the estimation of the
+ // block size and thus, one less transaction should fit into the limit.
+ assert_eq!(block.extrinsics().len(), extrinsics_num - 2);
+ }
}
diff --git a/substrate/client/basic-authorship/src/lib.rs b/substrate/client/basic-authorship/src/lib.rs
index acaf85db76..133b833cdd 100644
--- a/substrate/client/basic-authorship/src/lib.rs
+++ b/substrate/client/basic-authorship/src/lib.rs
@@ -62,6 +62,7 @@
//! Default::default(),
//! Default::default(),
//! Duration::from_secs(2),
+//! None,
//! );
//!
//! // We wait until the proposition is performed.
@@ -72,4 +73,4 @@
mod basic_authorship;
-pub use crate::basic_authorship::{ProposerFactory, Proposer, DEFAULT_MAX_BLOCK_SIZE};
+pub use crate::basic_authorship::{ProposerFactory, Proposer, DEFAULT_BLOCK_SIZE_LIMIT};
diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs
index 4893072a71..7d391f8fb8 100644
--- a/substrate/client/block-builder/src/lib.rs
+++ b/substrate/client/block-builder/src/lib.rs
@@ -135,6 +135,8 @@ pub struct BlockBuilder<'a, Block: BlockT, A: ProvideRuntimeApi, B> {
block_id: BlockId,
parent_hash: Block::Hash,
backend: &'a B,
+ /// The estimated size of the block header.
+ estimated_header_size: usize,
}
impl<'a, Block, A, B> BlockBuilder<'a, Block, A, B>
@@ -165,6 +167,8 @@ where
inherent_digests,
);
+ let estimated_header_size = header.encoded_size();
+
let mut api = api.runtime_api();
if record_proof.yes() {
@@ -183,6 +187,7 @@ where
api,
block_id,
backend,
+ estimated_header_size,
})
}
@@ -270,6 +275,20 @@ where
))
}).map_err(|e| Error::Application(Box::new(e)))
}
+
+ /// Estimate the size of the block in the current state.
+ ///
+ /// If `include_proof` is `true`, the estimated size of the storage proof will be added
+ /// to the estimation.
+ pub fn estimate_block_size(&self, include_proof: bool) -> usize {
+ let size = self.estimated_header_size + self.extrinsics.encoded_size();
+
+ if include_proof {
+ size + self.api.proof_recorder().map(|pr| pr.estimate_encoded_size()).unwrap_or(0)
+ } else {
+ size
+ }
+ }
}
#[cfg(test)]
diff --git a/substrate/client/consensus/aura/src/lib.rs b/substrate/client/consensus/aura/src/lib.rs
index 77dac0f754..3c72f359f8 100644
--- a/substrate/client/consensus/aura/src/lib.rs
+++ b/substrate/client/consensus/aura/src/lib.rs
@@ -629,6 +629,7 @@ mod tests {
_: InherentData,
digests: DigestFor,
_: Duration,
_: Option<usize>,
) -> Self::Proposal {
let r = self.1.new_block(digests).unwrap().build().map_err(|e| e.into());
@@ -887,6 +888,7 @@ mod tests {
ends_at: Instant::now() + Duration::from_secs(100),
inherent_data: InherentData::new(),
duration: Duration::from_millis(1000),
+ block_size_limit: None,
},
)).unwrap();
diff --git a/substrate/client/consensus/babe/src/tests.rs b/substrate/client/consensus/babe/src/tests.rs
index 839d38b94a..9949da61da 100644
--- a/substrate/client/consensus/babe/src/tests.rs
+++ b/substrate/client/consensus/babe/src/tests.rs
@@ -182,6 +182,7 @@ impl Proposer for DummyProposer {
_: InherentData,
pre_digests: DigestFor,
_: Duration,
_: Option<usize>,
) -> Self::Proposal {
self.propose_with(pre_digests)
}
diff --git a/substrate/client/consensus/manual-seal/src/seal_block.rs b/substrate/client/consensus/manual-seal/src/seal_block.rs
index 23a560cebd..b21630f037 100644
--- a/substrate/client/consensus/manual-seal/src/seal_block.rs
+++ b/substrate/client/consensus/manual-seal/src/seal_block.rs
@@ -127,6 +127,7 @@ pub async fn seal_block(
id.clone(),
digest,
Duration::from_secs(MAX_PROPOSAL_DURATION),
+ None,
).map_err(|err| Error::StringError(format!("{:?}", err))).await?;
if proposal.block.extrinsics().len() == inherents_len && !create_empty {
diff --git a/substrate/client/consensus/pow/src/lib.rs b/substrate/client/consensus/pow/src/lib.rs
index ea2e30afdc..bcbc200932 100644
--- a/substrate/client/consensus/pow/src/lib.rs
+++ b/substrate/client/consensus/pow/src/lib.rs
@@ -669,6 +669,7 @@ pub fn start_mining_worker(
inherent_data,
inherent_digest,
build_time.clone(),
+ None,
).await {
Ok(x) => x,
Err(err) => {
diff --git a/substrate/client/consensus/slots/src/lib.rs b/substrate/client/consensus/slots/src/lib.rs
index c1f13fea1f..5157f381e6 100644
--- a/substrate/client/consensus/slots/src/lib.rs
+++ b/substrate/client/consensus/slots/src/lib.rs
@@ -313,6 +313,7 @@ pub trait SimpleSlotWorker {
logs,
},
proposing_remaining_duration.mul_f32(0.98),
+ None,
).map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e)));
let proposal = match futures::future::select(proposing, proposing_remaining).await {
@@ -535,7 +536,7 @@ pub enum Error where T: Debug {
SlotDurationInvalid(SlotDuration),
}
-/// A slot duration. Create with `get_or_compute`.
+/// A slot duration. Create with [`get_or_compute`](Self::get_or_compute).
// The internal member should stay private here to maintain invariants of
// `get_or_compute`.
#[derive(Clone, Copy, Debug, Encode, Decode, Hash, PartialOrd, Ord, PartialEq, Eq)]
@@ -793,6 +794,7 @@ mod test {
timestamp: Default::default(),
inherent_data: Default::default(),
ends_at: Instant::now(),
+ block_size_limit: None,
}
}
diff --git a/substrate/client/consensus/slots/src/slots.rs b/substrate/client/consensus/slots/src/slots.rs
index d7ed1eda64..4057a6d0d1 100644
--- a/substrate/client/consensus/slots/src/slots.rs
+++ b/substrate/client/consensus/slots/src/slots.rs
@@ -58,6 +58,10 @@ pub struct SlotInfo {
pub inherent_data: InherentData,
/// Slot duration.
pub duration: Duration,
+ /// Some potential block size limit for the block to be authored at this slot.
+ ///
+ /// For more information see [`Proposer::propose`](sp_consensus::Proposer::propose).
+ pub block_size_limit: Option<usize>,
}
impl SlotInfo {
@@ -69,12 +73,14 @@ impl SlotInfo {
timestamp: sp_timestamp::Timestamp,
inherent_data: InherentData,
duration: Duration,
+ block_size_limit: Option<usize>,
) -> Self {
Self {
slot,
timestamp,
inherent_data,
duration,
+ block_size_limit,
ends_at: Instant::now() + time_until_next(timestamp.as_duration(), duration),
}
}
@@ -147,6 +153,7 @@ impl Slots {
timestamp,
inherent_data,
self.slot_duration,
+ None,
))
}
}
diff --git a/substrate/client/db/src/bench.rs b/substrate/client/db/src/bench.rs
index 2704676207..a2501891b3 100644
--- a/substrate/client/db/src/bench.rs
+++ b/substrate/client/db/src/bench.rs
@@ -23,7 +23,7 @@ use std::cell::{Cell, RefCell};
use std::collections::HashMap;
use hash_db::{Prefix, Hasher};
-use sp_trie::{MemoryDB, prefixed_key, StorageProof};
+use sp_trie::{MemoryDB, prefixed_key};
use sp_core::{
storage::{ChildInfo, TrackedStorageKey},
hexdisplay::HexDisplay
@@ -34,7 +34,6 @@ use sp_state_machine::{
DBValue, backend::Backend as StateBackend, StorageCollection, ChildStorageCollection, ProofRecorder,
};
use kvdb::{KeyValueDB, DBTransaction};
-use codec::Encode;
use crate::storage_cache::{CachingState, SharedCache, new_shared_cache};
type DbState = sp_state_machine::TrieBackend<
@@ -45,7 +44,7 @@ type State = CachingState, B>;
struct StorageDb {
db: Arc,
- proof_recorder: Option>>,
+ proof_recorder: Option>,
_block: std::marker::PhantomData,
}
@@ -53,12 +52,12 @@ impl sp_state_machine::Storage> for StorageDb Result