diff --git a/Cargo.lock b/Cargo.lock
index 1fc6175ad3a0..ed59fa2039af 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6983,7 +6983,6 @@ dependencies = [
"polkadot-node-subsystem-util",
"polkadot-primitives",
"polkadot-primitives-test-helpers",
- "rand 0.8.5",
"sp-application-crypto",
"sp-keystore",
"thiserror",
diff --git a/node/core/provisioner/Cargo.toml b/node/core/provisioner/Cargo.toml
index c6d78582cfc9..7c07118f1f3f 100644
--- a/node/core/provisioner/Cargo.toml
+++ b/node/core/provisioner/Cargo.toml
@@ -13,7 +13,6 @@ polkadot-primitives = { path = "../../../primitives" }
polkadot-node-primitives = { path = "../../primitives" }
polkadot-node-subsystem = { path = "../../subsystem" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
-rand = "0.8.5"
futures-timer = "3.0.2"
fatality = "0.0.6"
diff --git a/node/core/provisioner/src/disputes/mod.rs b/node/core/provisioner/src/disputes/mod.rs
index 4fcfa5b330cb..fab70a054698 100644
--- a/node/core/provisioner/src/disputes/mod.rs
+++ b/node/core/provisioner/src/disputes/mod.rs
@@ -14,12 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-//! The disputes module is responsible for selecting dispute votes to be sent with the inherent data. It contains two
-//! different implementations, extracted in two separate modules - `random_selection` and `prioritized_selection`. Which
-//! implementation will be executed depends on the version of the runtime. Runtime v2 supports `random_selection`. Runtime
-//! `v3` and above - `prioritized_selection`. The entrypoint to these implementations is the `select_disputes` function.
-//! `prioritized_selection` is considered superior and will be the default one in the future. Refer to the documentation of
-//! the modules for more details about each implementation.
+//! The disputes module is responsible for selecting dispute votes to be sent with the inherent data.
use crate::LOG_TARGET;
use futures::channel::oneshot;
@@ -49,5 +44,3 @@ async fn request_votes(
}
pub(crate) mod prioritized_selection;
-
-pub(crate) mod random_selection;
diff --git a/node/core/provisioner/src/disputes/random_selection/mod.rs b/node/core/provisioner/src/disputes/random_selection/mod.rs
deleted file mode 100644
index 06d4ef34b665..000000000000
--- a/node/core/provisioner/src/disputes/random_selection/mod.rs
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright (C) Parity Technologies (UK) Ltd.
-// This file is part of Polkadot.
-
-// Polkadot is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Polkadot is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-
-//! This module selects all RECENT disputes, fetches the votes for them from dispute-coordinator and
-//! returns them as `MultiDisputeStatementSet`. If the RECENT disputes are more than
-//! `MAX_DISPUTES_FORWARDED_TO_RUNTIME` constant - the ACTIVE disputes plus a random selection of
-//! RECENT disputes (up to `MAX_DISPUTES_FORWARDED_TO_RUNTIME`) are returned instead.
-//! If the ACTIVE disputes are also above `MAX_DISPUTES_FORWARDED_TO_RUNTIME` limit - a random selection
-//! of them is generated.
-
-use crate::{metrics, LOG_TARGET};
-use futures::channel::oneshot;
-use polkadot_node_subsystem::{messages::DisputeCoordinatorMessage, overseer};
-use polkadot_primitives::{
- CandidateHash, DisputeStatement, DisputeStatementSet, MultiDisputeStatementSet, SessionIndex,
-};
-use std::collections::HashSet;
-
-/// The maximum number of disputes Provisioner will include in the inherent data.
-/// Serves as a protection not to flood the Runtime with excessive data.
-const MAX_DISPUTES_FORWARDED_TO_RUNTIME: usize = 1_000;
-
-#[derive(Debug)]
-enum RequestType {
- /// Query recent disputes, could be an excessive amount.
- Recent,
- /// Query the currently active and very recently concluded disputes.
- Active,
-}
-
-/// Request open disputes identified by `CandidateHash` and the `SessionIndex`.
-/// Returns only confirmed/concluded disputes. The rest are filtered out.
-async fn request_confirmed_disputes(
- sender: &mut impl overseer::ProvisionerSenderTrait,
- active_or_recent: RequestType,
-) -> Vec<(SessionIndex, CandidateHash)> {
- let (tx, rx) = oneshot::channel();
- let msg = match active_or_recent {
- RequestType::Recent => DisputeCoordinatorMessage::RecentDisputes(tx),
- RequestType::Active => DisputeCoordinatorMessage::ActiveDisputes(tx),
- };
-
- sender.send_unbounded_message(msg);
- let disputes = match rx.await {
- Ok(r) => r,
- Err(oneshot::Canceled) => {
- gum::warn!(
- target: LOG_TARGET,
- "Channel closed: unable to gather {:?} disputes",
- active_or_recent
- );
- Vec::new()
- },
- };
-
- disputes
- .into_iter()
- .filter(|d| d.2.is_confirmed_concluded())
- .map(|d| (d.0, d.1))
- .collect()
-}
-
-/// Extend `acc` by `n` random, picks of not-yet-present in `acc` items of `recent` without repetition and additions of recent.
-fn extend_by_random_subset_without_repetition(
- acc: &mut Vec<(SessionIndex, CandidateHash)>,
- extension: Vec<(SessionIndex, CandidateHash)>,
- n: usize,
-) {
- use rand::Rng;
-
- let lut = acc.iter().cloned().collect::<HashSet<(SessionIndex, CandidateHash)>>();
-
- let mut unique_new =
- extension.into_iter().filter(|recent| !lut.contains(recent)).collect::<Vec<_>>();
-
- // we can simply add all
- if unique_new.len() <= n {
- acc.extend(unique_new)
- } else {
- acc.reserve(n);
- let mut rng = rand::thread_rng();
- for _ in 0..n {
- let idx = rng.gen_range(0..unique_new.len());
- acc.push(unique_new.swap_remove(idx));
- }
- }
- // assure sorting stays candid according to session index
- acc.sort_unstable_by(|a, b| a.0.cmp(&b.0));
-}
-
-pub async fn select_disputes<Sender>(
- sender: &mut Sender,
- metrics: &metrics::Metrics,
-) -> MultiDisputeStatementSet
-where
- Sender: overseer::ProvisionerSenderTrait,
-{
- gum::trace!(target: LOG_TARGET, "Selecting disputes for inherent data using random selection");
-
- // We use `RecentDisputes` instead of `ActiveDisputes` because redundancy is fine.
- // It's heavier than `ActiveDisputes` but ensures that everything from the dispute
- // window gets on-chain, unlike `ActiveDisputes`.
- // In case of an overload condition, we limit ourselves to active disputes, and fill up to the
- // upper bound of disputes to pass to wasm `fn create_inherent_data`.
- // If the active ones are already exceeding the bounds, randomly select a subset.
- let recent = request_confirmed_disputes(sender, RequestType::Recent).await;
- let disputes = if recent.len() > MAX_DISPUTES_FORWARDED_TO_RUNTIME {
- gum::warn!(
- target: LOG_TARGET,
- "Recent disputes are excessive ({} > {}), reduce to active ones, and selected",
- recent.len(),
- MAX_DISPUTES_FORWARDED_TO_RUNTIME
- );
- let mut active = request_confirmed_disputes(sender, RequestType::Active).await;
- let n_active = active.len();
- let active = if active.len() > MAX_DISPUTES_FORWARDED_TO_RUNTIME {
- let mut picked = Vec::with_capacity(MAX_DISPUTES_FORWARDED_TO_RUNTIME);
- extend_by_random_subset_without_repetition(
- &mut picked,
- active,
- MAX_DISPUTES_FORWARDED_TO_RUNTIME,
- );
- picked
- } else {
- extend_by_random_subset_without_repetition(
- &mut active,
- recent,
- MAX_DISPUTES_FORWARDED_TO_RUNTIME.saturating_sub(n_active),
- );
- active
- };
- active
- } else {
- recent
- };
-
- // Load all votes for all disputes from the coordinator.
- let dispute_candidate_votes = super::request_votes(sender, disputes).await;
-
- // Transform all `CandidateVotes` into `MultiDisputeStatementSet`.
- dispute_candidate_votes
- .into_iter()
- .map(|(session_index, candidate_hash, votes)| {
- let valid_statements = votes
- .valid
- .into_iter()
- .map(|(i, (s, sig))| (DisputeStatement::Valid(s), i, sig));
-
- let invalid_statements = votes
- .invalid
- .into_iter()
- .map(|(i, (s, sig))| (DisputeStatement::Invalid(s), i, sig));
-
- metrics.inc_valid_statements_by(valid_statements.len());
- metrics.inc_invalid_statements_by(invalid_statements.len());
- metrics.inc_dispute_statement_sets_by(1);
-
- DisputeStatementSet {
- candidate_hash,
- session: session_index,
- statements: valid_statements.chain(invalid_statements).collect(),
- }
- })
- .collect()
-}
diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs
index 50254ce9bef9..3ae297fee736 100644
--- a/node/core/provisioner/src/lib.rs
+++ b/node/core/provisioner/src/lib.rs
@@ -393,16 +393,17 @@ async fn send_inherent_data(
"Selecting disputes"
);
- let disputes = match has_required_runtime(
- from_job,
- leaf.hash,
- PRIORITIZED_SELECTION_RUNTIME_VERSION_REQUIREMENT,
- )
- .await
- {
- true => disputes::prioritized_selection::select_disputes(from_job, metrics, leaf).await,
- false => disputes::random_selection::select_disputes(from_job, metrics).await,
- };
+ debug_assert!(
+ has_required_runtime(
+ from_job,
+ leaf.hash,
+ PRIORITIZED_SELECTION_RUNTIME_VERSION_REQUIREMENT,
+ )
+ .await,
+ "randomized selection no longer supported, please upgrade your runtime!"
+ );
+
+ let disputes = disputes::prioritized_selection::select_disputes(from_job, metrics, leaf).await;
gum::trace!(
target: LOG_TARGET,
diff --git a/primitives/src/v5/mod.rs b/primitives/src/v5/mod.rs
index 6c6258b2b805..3498c0762d4c 100644
--- a/primitives/src/v5/mod.rs
+++ b/primitives/src/v5/mod.rs
@@ -1372,7 +1372,7 @@ impl AsRef for DisputeStatementSet {
pub type MultiDisputeStatementSet = Vec<DisputeStatementSet>;
/// A _checked_ set of dispute statements.
-#[derive(Clone, PartialEq, RuntimeDebug)]
+#[derive(Clone, PartialEq, RuntimeDebug, Encode)]
pub struct CheckedDisputeStatementSet(DisputeStatementSet);
impl AsRef<DisputeStatementSet> for CheckedDisputeStatementSet {
diff --git a/runtime/kusama/src/weights/runtime_parachains_paras_inherent.rs b/runtime/kusama/src/weights/runtime_parachains_paras_inherent.rs
index 639164af522b..9a9a3a3dffb5 100644
--- a/runtime/kusama/src/weights/runtime_parachains_paras_inherent.rs
+++ b/runtime/kusama/src/weights/runtime_parachains_paras_inherent.rs
@@ -17,27 +17,26 @@
//! Autogenerated weights for `runtime_parachains::paras_inherent`
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-06-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2023-06-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-e8ezs4ez-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 1024
// Executed Command:
-// ./target/production/polkadot
+// target/production/polkadot
// benchmark
// pallet
-// --chain=kusama-dev
// --steps=50
// --repeat=20
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --pallet=runtime_parachains::paras_inherent
// --extrinsic=*
// --execution=wasm
// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json
+// --pallet=runtime_parachains::paras_inherent
+// --chain=kusama-dev
// --header=./file_header.txt
-// --output=./runtime/kusama/src/weights/runtime_parachains_paras_inherent.rs
+// --output=./runtime/kusama/src/weights/
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
@@ -115,11 +114,11 @@ impl runtime_parachains::paras_inherent::WeightInfo for
// Proof Size summary in bytes:
// Measured: `50671`
// Estimated: `56611 + v * (23 ±0)`
- // Minimum execution time: 982_516_000 picoseconds.
- Weight::from_parts(453_473_787, 0)
+ // Minimum execution time: 1_008_586_000 picoseconds.
+ Weight::from_parts(471_892_709, 0)
.saturating_add(Weight::from_parts(0, 56611))
- // Standard Error: 21_034
- .saturating_add(Weight::from_parts(57_212_691, 0).saturating_mul(v.into()))
+ // Standard Error: 15_634
+ .saturating_add(Weight::from_parts(56_433_120, 0).saturating_mul(v.into()))
.saturating_add(T::DbWeight::get().reads(27))
.saturating_add(T::DbWeight::get().writes(15))
.saturating_add(Weight::from_parts(0, 23).saturating_mul(v.into()))
@@ -186,8 +185,8 @@ impl runtime_parachains::paras_inherent::WeightInfo for
// Proof Size summary in bytes:
// Measured: `42504`
// Estimated: `48444`
- // Minimum execution time: 465_674_000 picoseconds.
- Weight::from_parts(483_678_000, 0)
+ // Minimum execution time: 469_409_000 picoseconds.
+ Weight::from_parts(487_865_000, 0)
.saturating_add(Weight::from_parts(0, 48444))
.saturating_add(T::DbWeight::get().reads(25))
.saturating_add(T::DbWeight::get().writes(16))
@@ -259,11 +258,11 @@ impl runtime_parachains::paras_inherent::WeightInfo for
// Proof Size summary in bytes:
// Measured: `42540`
// Estimated: `48480`
- // Minimum execution time: 6_886_272_000 picoseconds.
- Weight::from_parts(1_235_371_688, 0)
+ // Minimum execution time: 6_874_816_000 picoseconds.
+ Weight::from_parts(1_229_912_739, 0)
.saturating_add(Weight::from_parts(0, 48480))
- // Standard Error: 28_012
- .saturating_add(Weight::from_parts(56_395_511, 0).saturating_mul(v.into()))
+ // Standard Error: 27_352
+ .saturating_add(Weight::from_parts(56_137_302, 0).saturating_mul(v.into()))
.saturating_add(T::DbWeight::get().reads(28))
.saturating_add(T::DbWeight::get().writes(15))
}
@@ -337,8 +336,8 @@ impl runtime_parachains::paras_inherent::WeightInfo for
// Proof Size summary in bytes:
// Measured: `42567`
// Estimated: `48507`
- // Minimum execution time: 42_215_280_000 picoseconds.
- Weight::from_parts(43_255_598_000, 0)
+ // Minimum execution time: 41_075_073_000 picoseconds.
+ Weight::from_parts(43_753_587_000, 0)
.saturating_add(Weight::from_parts(0, 48507))
.saturating_add(T::DbWeight::get().reads(30))
.saturating_add(T::DbWeight::get().writes(15))
diff --git a/runtime/parachains/src/mock.rs b/runtime/parachains/src/mock.rs
index 396407d5c3d3..bab896c419f6 100644
--- a/runtime/parachains/src/mock.rs
+++ b/runtime/parachains/src/mock.rs
@@ -32,6 +32,7 @@ use frame_support::{
weights::{Weight, WeightMeter},
};
use frame_support_test::TestRandomness;
+use frame_system::limits;
use parity_scale_codec::Decode;
use primitives::{
AuthorityDiscoveryId, Balance, BlockNumber, CandidateHash, Moment, SessionIndex, UpwardMessage,
@@ -42,7 +43,7 @@ use sp_io::TestExternalities;
use sp_runtime::{
traits::{AccountIdConversion, BlakeTwo256, IdentityLookup},
transaction_validity::TransactionPriority,
- BuildStorage, Permill,
+ BuildStorage, Perbill, Permill,
};
use std::{cell::RefCell, collections::HashMap};
@@ -81,10 +82,11 @@ where
parameter_types! {
pub const BlockHashCount: u32 = 250;
- pub BlockWeights: frame_system::limits::BlockWeights =
+ pub static BlockWeights: frame_system::limits::BlockWeights =
frame_system::limits::BlockWeights::simple_max(
Weight::from_parts(4 * 1024 * 1024, u64::MAX),
);
+ pub static BlockLength: limits::BlockLength = limits::BlockLength::max_with_normal_ratio(u32::MAX, Perbill::from_percent(75));
}
pub type AccountId = u64;
@@ -92,7 +94,7 @@ pub type AccountId = u64;
impl frame_system::Config for Test {
type BaseCallFilter = frame_support::traits::Everything;
type BlockWeights = BlockWeights;
- type BlockLength = ();
+ type BlockLength = BlockLength;
type DbWeight = ();
type RuntimeOrigin = RuntimeOrigin;
type RuntimeCall = RuntimeCall;
diff --git a/runtime/parachains/src/paras_inherent/mod.rs b/runtime/parachains/src/paras_inherent/mod.rs
index 0afab5e58d95..61be0d4adae8 100644
--- a/runtime/parachains/src/paras_inherent/mod.rs
+++ b/runtime/parachains/src/paras_inherent/mod.rs
@@ -53,7 +53,6 @@ use rand::{seq::SliceRandom, SeedableRng};
use scale_info::TypeInfo;
use sp_runtime::traits::{Header as HeaderT, One};
use sp_std::{
- cmp::Ordering,
collections::{btree_map::BTreeMap, btree_set::BTreeSet},
prelude::*,
vec::Vec,
@@ -62,12 +61,13 @@ use sp_std::{
mod misc;
mod weights;
+use self::weights::checked_multi_dispute_statement_sets_weight;
pub use self::{
misc::{IndexedRetain, IsSortedBy},
weights::{
backed_candidate_weight, backed_candidates_weight, dispute_statement_set_weight,
- multi_dispute_statement_sets_weight, paras_inherent_total_weight, signed_bitfields_weight,
- TestWeightInfo, WeightInfo,
+ multi_dispute_statement_sets_weight, paras_inherent_total_weight, signed_bitfield_weight,
+ signed_bitfields_weight, TestWeightInfo, WeightInfo,
},
};
@@ -264,8 +264,8 @@ pub mod pallet {
#[pallet::weight((
paras_inherent_total_weight::<T>(
data.backed_candidates.as_slice(),
- data.bitfields.as_slice(),
- data.disputes.as_slice(),
+ &data.bitfields,
+ &data.disputes,
),
DispatchClass::Mandatory,
))]
@@ -356,17 +356,47 @@ impl Pallet {
let now = >::block_number();
let candidates_weight = backed_candidates_weight::<T>(&backed_candidates);
- let bitfields_weight = signed_bitfields_weight::<T>(bitfields.len());
- let disputes_weight = multi_dispute_statement_sets_weight::<T, _, _>(&disputes);
- let max_block_weight = <T as frame_system::Config>::BlockWeights::get().max_block;
+ let bitfields_weight = signed_bitfields_weight::<T>(&bitfields);
+ let disputes_weight = multi_dispute_statement_sets_weight::<T>(&disputes);
- METRICS
- .on_before_filter((candidates_weight + bitfields_weight + disputes_weight).ref_time());
+ let all_weight_before = candidates_weight + bitfields_weight + disputes_weight;
+
+ METRICS.on_before_filter(all_weight_before.ref_time());
+ log::debug!(target: LOG_TARGET, "Size before filter: {}, candidates + bitfields: {}, disputes: {}", all_weight_before.proof_size(), candidates_weight.proof_size() + bitfields_weight.proof_size(), disputes_weight.proof_size());
+ log::debug!(target: LOG_TARGET, "Time weight before filter: {}, candidates + bitfields: {}, disputes: {}", all_weight_before.ref_time(), candidates_weight.ref_time() + bitfields_weight.ref_time(), disputes_weight.ref_time());
let current_session = >::session_index();
let expected_bits = >::availability_cores().len();
let validator_public = shared::Pallet::::active_validator_keys();
+ // We are assuming (incorrectly) to have all the weight (for the mandatory class or even
+ // full block) available to us. This can lead to slightly overweight blocks, which still
+ // works as the dispatch class for `enter` is `Mandatory`. By using the `Mandatory`
+ // dispatch class, the upper layers impose no limit on the weight of this inherent, instead
+ // we limit ourselves and make sure to stay within reasonable bounds. It might make sense
+ // to subtract BlockWeights::base_block to reduce chances of becoming overweight.
+ let max_block_weight = {
+ let dispatch_class = DispatchClass::Mandatory;
+ let max_block_weight_full = <T as frame_system::Config>::BlockWeights::get();
+ log::debug!(target: LOG_TARGET, "Max block weight: {}", max_block_weight_full.max_block);
+ // Get max block weight for the mandatory class if defined, otherwise total max weight of
+ // the block.
+ let max_weight = max_block_weight_full
+ .per_class
+ .get(dispatch_class)
+ .max_total
+ .unwrap_or(max_block_weight_full.max_block);
+ log::debug!(target: LOG_TARGET, "Used max block time weight: {}", max_weight);
+
+ let max_block_size_full = <T as frame_system::Config>::BlockLength::get();
+ let max_block_size = max_block_size_full.max.get(dispatch_class);
+ log::debug!(target: LOG_TARGET, "Used max block size: {}", max_block_size);
+
+ // Adjust proof size to max block size as we are tracking tx size.
+ max_weight.set_proof_size(*max_block_size as u64)
+ };
+ log::debug!(target: LOG_TARGET, "Used max block weight: {}", max_block_weight);
+
let entropy = compute_entropy::(parent_hash);
let mut rng = rand_chacha::ChaChaRng::from_seed(entropy.into());
@@ -388,10 +418,9 @@ impl Pallet {
disputes,
dispute_statement_set_valid,
max_block_weight,
- &mut rng,
);
- let full_weight = if context == ProcessInherentDataContext::ProvideInherent {
+ let all_weight_after = if context == ProcessInherentDataContext::ProvideInherent {
// Assure the maximum block weight is adhered, by limiting bitfields and backed
// candidates. Dispute statement sets were already limited before.
let non_disputes_weight = apply_weight_limit::(
@@ -401,36 +430,38 @@ impl Pallet {
&mut rng,
);
- let full_weight =
+ let all_weight_after =
non_disputes_weight.saturating_add(checked_disputes_sets_consumed_weight);
- METRICS.on_after_filter(full_weight.ref_time());
+ METRICS.on_after_filter(all_weight_after.ref_time());
+ log::debug!(
+ target: LOG_TARGET,
+ "[process_inherent_data] after filter: bitfields.len(): {}, backed_candidates.len(): {}, checked_disputes_sets.len() {}",
+ bitfields.len(),
+ backed_candidates.len(),
+ checked_disputes_sets.len()
+ );
+ log::debug!(target: LOG_TARGET, "Size after filter: {}, candidates + bitfields: {}, disputes: {}", all_weight_after.proof_size(), non_disputes_weight.proof_size(), checked_disputes_sets_consumed_weight.proof_size());
+ log::debug!(target: LOG_TARGET, "Time weight after filter: {}, candidates + bitfields: {}, disputes: {}", all_weight_after.ref_time(), non_disputes_weight.ref_time(), checked_disputes_sets_consumed_weight.ref_time());
- if full_weight.any_gt(max_block_weight) {
- log::warn!(target: LOG_TARGET, "Post weight limiting weight is still too large.");
+ if all_weight_after.any_gt(max_block_weight) {
+ log::warn!(target: LOG_TARGET, "Post weight limiting weight is still too large, time: {}, size: {}", all_weight_after.ref_time(), all_weight_after.proof_size());
}
-
- full_weight
+ all_weight_after
} else {
- // We compute the weight for the unfiltered disputes for a stronger check, since `create_inherent`
- // should already have filtered them out in block authorship.
- let full_weight = candidates_weight
- .saturating_add(bitfields_weight)
- .saturating_add(disputes_weight);
-
// This check is performed in the context of block execution. Ensures inherent weight invariants guaranteed
// by `create_inherent_data` for block authorship.
- if full_weight.any_gt(max_block_weight) {
+ if all_weight_before.any_gt(max_block_weight) {
log::error!(
"Overweight para inherent data reached the runtime {:?}: {} > {}",
parent_hash,
- full_weight,
+ all_weight_before,
max_block_weight
);
}
- ensure!(full_weight.all_lte(max_block_weight), Error::::InherentOverweight);
- full_weight
+ ensure!(all_weight_before.all_lte(max_block_weight), Error::::InherentOverweight);
+ all_weight_before
};
// Note that `process_checked_multi_dispute_data` will iterate and import each
@@ -597,7 +628,7 @@ impl Pallet {
let processed =
ParachainsInherentData { bitfields, backed_candidates, disputes, parent_header };
- Ok((processed, Some(full_weight).into()))
+ Ok((processed, Some(all_weight_after).into()))
}
}
@@ -702,7 +733,7 @@ fn apply_weight_limit(
) -> Weight {
let total_candidates_weight = backed_candidates_weight::(candidates.as_slice());
- let total_bitfields_weight = signed_bitfields_weight::<T>(bitfields.len());
+ let total_bitfields_weight = signed_bitfields_weight::<T>(&bitfields);
let total = total_bitfields_weight.saturating_add(total_candidates_weight);
@@ -734,6 +765,7 @@ fn apply_weight_limit(
|c| backed_candidate_weight::(c),
max_consumable_by_candidates,
);
+ log::debug!(target: LOG_TARGET, "Indices Candidates: {:?}, size: {}", indices, candidates.len());
candidates.indexed_retain(|idx, _backed_candidate| indices.binary_search(&idx).is_ok());
// pick all bitfields, and
// fill the remaining space with candidates
@@ -750,9 +782,10 @@ fn apply_weight_limit(
rng,
&bitfields,
vec![],
- |_| <::WeightInfo as WeightInfo>::enter_bitfields(),
+ |bitfield| signed_bitfield_weight::(&bitfield),
max_consumable_weight,
);
+ log::debug!(target: LOG_TARGET, "Indices Bitfields: {:?}, size: {}", indices, bitfields.len());
bitfields.indexed_retain(|idx, _bitfield| indices.binary_search(&idx).is_ok());
@@ -941,94 +974,41 @@ fn compute_entropy(parent_hash: T::Hash) -> [u8; 32] {
/// 2. If exceeded:
/// 1. Check validity of all dispute statements sequentially
/// 2. If not exceeded:
-/// 1. Sort the disputes based on locality and age, locality first.
-/// 1. Split the array
-/// 1. Prefer local ones over remote disputes
/// 1. If weight is exceeded by locals, pick the older ones (lower indices)
/// until the weight limit is reached.
-/// 1. If weight is exceeded by locals and remotes, pick remotes
-/// randomly and check validity one by one.
///
/// Returns the consumed weight amount, that is guaranteed to be less than the provided `max_consumable_weight`.
fn limit_and_sanitize_disputes<
T: Config,
CheckValidityFn: FnMut(DisputeStatementSet) -> Option,
>(
- mut disputes: MultiDisputeStatementSet,
+ disputes: MultiDisputeStatementSet,
mut dispute_statement_set_valid: CheckValidityFn,
max_consumable_weight: Weight,
- rng: &mut rand_chacha::ChaChaRng,
) -> (Vec, Weight) {
// The total weight if all disputes would be included
- let disputes_weight = multi_dispute_statement_sets_weight::(&disputes);
+ let disputes_weight = multi_dispute_statement_sets_weight::(&disputes);
if disputes_weight.any_gt(max_consumable_weight) {
+ log::debug!(target: LOG_TARGET, "Above max consumable weight: {}/{}", disputes_weight, max_consumable_weight);
let mut checked_acc = Vec::::with_capacity(disputes.len());
- // Since the disputes array is sorted, we may use binary search to find the beginning of
- // remote disputes
- let idx = disputes
- .binary_search_by(|probe| {
- if T::DisputesHandler::included_state(probe.session, probe.candidate_hash).is_some()
- {
- Ordering::Less
- } else {
- Ordering::Greater
- }
- })
- // The above predicate will never find an item and therefore we are guaranteed to obtain
- // an error, which we can safely unwrap. QED.
- .unwrap_err();
-
- // Due to the binary search predicate above, the index computed will constitute the beginning
- // of the remote disputes sub-array `[Local, Local, Local, ^Remote, Remote]`.
- let remote_disputes = disputes.split_off(idx);
-
// Accumualated weight of all disputes picked, that passed the checks.
let mut weight_acc = Weight::zero();
// Select disputes in-order until the remaining weight is attained
- disputes.iter().for_each(|dss| {
- let dispute_weight = <::WeightInfo as WeightInfo>::enter_variable_disputes(
- dss.statements.len() as u32,
- );
+ disputes.into_iter().for_each(|dss| {
+ let dispute_weight = dispute_statement_set_weight::(&dss);
let updated = weight_acc.saturating_add(dispute_weight);
if max_consumable_weight.all_gte(updated) {
- // only apply the weight if the validity check passes
- if let Some(checked) = dispute_statement_set_valid(dss.clone()) {
+ // Always apply the weight. Invalid data cost processing time too:
+ weight_acc = updated;
+ if let Some(checked) = dispute_statement_set_valid(dss) {
checked_acc.push(checked);
- weight_acc = updated;
}
}
});
- // Compute the statements length of all remote disputes
- let d = remote_disputes.iter().map(|d| d.statements.len() as u32).collect::>();
-
- // Select remote disputes at random until the block is full
- let (_acc_remote_disputes_weight, mut indices) = random_sel::(
- rng,
- &d,
- vec![],
- |v| <::WeightInfo as WeightInfo>::enter_variable_disputes(*v),
- max_consumable_weight.saturating_sub(weight_acc),
- );
-
- // Sort the indices, to retain the same sorting as the input.
- indices.sort();
-
- // Add the remote disputes after checking their validity.
- checked_acc.extend(indices.into_iter().filter_map(|idx| {
- dispute_statement_set_valid(remote_disputes[idx].clone()).map(|cdss| {
- let weight = <::WeightInfo as WeightInfo>::enter_variable_disputes(
- cdss.as_ref().statements.len() as u32,
- );
- weight_acc = weight_acc.saturating_add(weight);
- cdss
- })
- }));
-
- // Update the remaining weight
(checked_acc, weight_acc)
} else {
// Go through all of them, and just apply the filter, they would all fit
@@ -1037,7 +1017,7 @@ fn limit_and_sanitize_disputes<
.filter_map(|dss| dispute_statement_set_valid(dss))
.collect::>();
// some might have been filtered out, so re-calc the weight
- let checked_disputes_weight = multi_dispute_statement_sets_weight::(&checked);
+ let checked_disputes_weight = checked_multi_dispute_statement_sets_weight::(&checked);
(checked, checked_disputes_weight)
}
}
diff --git a/runtime/parachains/src/paras_inherent/tests.rs b/runtime/parachains/src/paras_inherent/tests.rs
index 0098814c8aca..c2e80e7525fb 100644
--- a/runtime/parachains/src/paras_inherent/tests.rs
+++ b/runtime/parachains/src/paras_inherent/tests.rs
@@ -21,13 +21,16 @@ use super::*;
// weights for limiting data will fail, so we don't run them when using the benchmark feature.
#[cfg(not(feature = "runtime-benchmarks"))]
mod enter {
+
use super::*;
use crate::{
builder::{Bench, BenchBuilder},
- mock::{new_test_ext, MockGenesisConfig, Test},
+ mock::{new_test_ext, BlockLength, BlockWeights, MockGenesisConfig, Test},
};
use assert_matches::assert_matches;
use frame_support::assert_ok;
+ use frame_system::limits;
+ use sp_runtime::Perbill;
use sp_std::collections::btree_map::BTreeMap;
struct TestConfig {
@@ -300,6 +303,7 @@ mod enter {
// Ensure that when dispute data establishes an over weight block that we adequately
// filter out disputes according to our prioritization rule
fn limit_dispute_data() {
+ sp_tracing::try_init_simple();
new_test_ext(MockGenesisConfig::default()).execute_with(|| {
// Create the inherent data for this block
let dispute_statements = BTreeMap::new();
@@ -486,7 +490,8 @@ mod enter {
assert_ne!(limit_inherent_data, expected_para_inherent_data);
assert!(inherent_data_weight(&limit_inherent_data)
.all_lte(inherent_data_weight(&expected_para_inherent_data)));
- assert!(inherent_data_weight(&limit_inherent_data).all_lte(max_block_weight()));
+ assert!(inherent_data_weight(&limit_inherent_data)
+ .all_lte(max_block_weight_proof_size_adjusted()));
// Three disputes is over weight (see previous test), so we expect to only see 2 disputes
assert_eq!(limit_inherent_data.disputes.len(), 2);
@@ -565,17 +570,18 @@ mod enter {
});
}
- fn max_block_weight() -> Weight {
- ::BlockWeights::get().max_block
+ fn max_block_weight_proof_size_adjusted() -> Weight {
+ let raw_weight = ::BlockWeights::get().max_block;
+ let block_length = ::BlockLength::get();
+ raw_weight.set_proof_size(*block_length.max.get(DispatchClass::Mandatory) as u64)
}
fn inherent_data_weight(inherent_data: &ParachainsInherentData) -> Weight {
use thousands::Separable;
let multi_dispute_statement_sets_weight =
- multi_dispute_statement_sets_weight::(&inherent_data.disputes);
- let signed_bitfields_weight =
- signed_bitfields_weight::(inherent_data.bitfields.len());
+ multi_dispute_statement_sets_weight::(&inherent_data.disputes);
+ let signed_bitfields_weight = signed_bitfields_weight::(&inherent_data.bitfields);
let backed_candidates_weight =
backed_candidates_weight::(&inherent_data.backed_candidates);
@@ -622,7 +628,8 @@ mod enter {
});
let expected_para_inherent_data = scenario.data.clone();
- assert!(max_block_weight().any_lt(inherent_data_weight(&expected_para_inherent_data)));
+ assert!(max_block_weight_proof_size_adjusted()
+ .any_lt(inherent_data_weight(&expected_para_inherent_data)));
// Check the para inherent data is as expected:
// * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes => 5*5 = 25)
@@ -641,9 +648,10 @@ mod enter {
// Expect that inherent data is filtered to include only 1 backed candidate and 2 disputes
assert!(limit_inherent_data != expected_para_inherent_data);
assert!(
- max_block_weight().all_gte(inherent_data_weight(&limit_inherent_data)),
+ max_block_weight_proof_size_adjusted()
+ .all_gte(inherent_data_weight(&limit_inherent_data)),
"Post limiting exceeded block weight: max={} vs. inherent={}",
- max_block_weight(),
+ max_block_weight_proof_size_adjusted(),
inherent_data_weight(&limit_inherent_data)
);
@@ -675,8 +683,199 @@ mod enter {
}
#[test]
+ fn disputes_are_size_limited() {
+ BlockLength::set(limits::BlockLength::max_with_normal_ratio(
+ 600,
+ Perbill::from_percent(75),
+ ));
+ // Virtually no time based limit:
+ BlockWeights::set(frame_system::limits::BlockWeights::simple_max(Weight::from_parts(
+ u64::MAX,
+ u64::MAX,
+ )));
+ new_test_ext(MockGenesisConfig::default()).execute_with(|| {
+ // Create the inherent data for this block
+ let mut dispute_statements = BTreeMap::new();
+ dispute_statements.insert(2, 7);
+ dispute_statements.insert(3, 7);
+ dispute_statements.insert(4, 7);
+
+ let backed_and_concluding = BTreeMap::new();
+
+ let scenario = make_inherent_data(TestConfig {
+ dispute_statements,
+ dispute_sessions: vec![2, 2, 1], // 3 cores with disputes
+ backed_and_concluding,
+ num_validators_per_core: 5,
+ code_upgrade: None,
+ });
+
+ let expected_para_inherent_data = scenario.data.clone();
+ assert!(max_block_weight_proof_size_adjusted()
+ .any_lt(inherent_data_weight(&expected_para_inherent_data)));
+
+ // Check the para inherent data is as expected:
+ // * 1 bitfield per validator (5 validators per core, 3 disputes => 3*5 = 15)
+ assert_eq!(expected_para_inherent_data.bitfields.len(), 15);
+ // * 0 backed candidates (none were scheduled)
+ assert_eq!(expected_para_inherent_data.backed_candidates.len(), 0);
+ // * 3 disputes.
+ assert_eq!(expected_para_inherent_data.disputes.len(), 3);
+ let mut inherent_data = InherentData::new();
+ inherent_data
+ .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data)
+ .unwrap();
+ let limit_inherent_data =
+ Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap();
+ // Expect that inherent data is size-filtered down to fit within the block length limit
+ assert!(limit_inherent_data != expected_para_inherent_data);
+ assert!(
+ max_block_weight_proof_size_adjusted()
+ .all_gte(inherent_data_weight(&limit_inherent_data)),
+ "Post limiting exceeded block weight: max={} vs. inherent={}",
+ max_block_weight_proof_size_adjusted(),
+ inherent_data_weight(&limit_inherent_data)
+ );
+
+ // * Bitfields have been filtered out entirely (no space left for them)
+ assert_eq!(limit_inherent_data.bitfields.len(), 0);
+ // * Backed candidates - still none.
+ assert_eq!(limit_inherent_data.backed_candidates.len(), 0);
+ // * 3 disputes - filtered.
+ assert_eq!(limit_inherent_data.disputes.len(), 1);
+ });
+ }
+
+ #[test]
+ fn bitfields_are_size_limited() {
+ BlockLength::set(limits::BlockLength::max_with_normal_ratio(
+ 600,
+ Perbill::from_percent(75),
+ ));
+ // Virtually no time based limit:
+ BlockWeights::set(frame_system::limits::BlockWeights::simple_max(Weight::from_parts(
+ u64::MAX,
+ u64::MAX,
+ )));
+ new_test_ext(MockGenesisConfig::default()).execute_with(|| {
+ // Create the inherent data for this block
+ let dispute_statements = BTreeMap::new();
+
+ let mut backed_and_concluding = BTreeMap::new();
+ // 2 backed candidates shall be scheduled
+ backed_and_concluding.insert(0, 2);
+ backed_and_concluding.insert(1, 2);
+
+ let scenario = make_inherent_data(TestConfig {
+ dispute_statements,
+ dispute_sessions: Vec::new(),
+ backed_and_concluding,
+ num_validators_per_core: 5,
+ code_upgrade: None,
+ });
+
+ let expected_para_inherent_data = scenario.data.clone();
+ assert!(max_block_weight_proof_size_adjusted()
+ .any_lt(inherent_data_weight(&expected_para_inherent_data)));
+
+ // Check the para inherent data is as expected:
+ // * 1 bitfield per validator (5 validators per core, 2 backed candidates => 2*5 = 10)
+ assert_eq!(expected_para_inherent_data.bitfields.len(), 10);
+ // * 2 backed candidates
+ assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2);
+ // * 0 disputes.
+ assert_eq!(expected_para_inherent_data.disputes.len(), 0);
+ let mut inherent_data = InherentData::new();
+ inherent_data
+ .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data)
+ .unwrap();
+
+ let limit_inherent_data =
+ Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap();
+ // Expect that inherent data is size-filtered: bitfields trimmed and candidates dropped
+ assert!(limit_inherent_data != expected_para_inherent_data);
+ assert!(
+ max_block_weight_proof_size_adjusted()
+ .all_gte(inherent_data_weight(&limit_inherent_data)),
+ "Post limiting exceeded block weight: max={} vs. inherent={}",
+ max_block_weight_proof_size_adjusted(),
+ inherent_data_weight(&limit_inherent_data)
+ );
+
+ // * Bitfields have been filtered down to what fits (10 -> 8)
+ assert_eq!(limit_inherent_data.bitfields.len(), 8);
+ // * 2 backed candidates have been filtered as well (not even space for bitfields)
+ assert_eq!(limit_inherent_data.backed_candidates.len(), 0);
+ // * 0 disputes. Still none.
+ assert_eq!(limit_inherent_data.disputes.len(), 0);
+ });
+ }
+
+ #[test]
+ fn candidates_are_size_limited() {
+ BlockLength::set(limits::BlockLength::max_with_normal_ratio(
+ 1_300,
+ Perbill::from_percent(75),
+ ));
+ // Virtually no time based limit:
+ BlockWeights::set(frame_system::limits::BlockWeights::simple_max(Weight::from_parts(
+ u64::MAX,
+ u64::MAX,
+ )));
+ new_test_ext(MockGenesisConfig::default()).execute_with(|| {
+ let mut backed_and_concluding = BTreeMap::new();
+ // 2 backed candidates shall be scheduled
+ backed_and_concluding.insert(0, 2);
+ backed_and_concluding.insert(1, 2);
+
+ let scenario = make_inherent_data(TestConfig {
+ dispute_statements: BTreeMap::new(),
+ dispute_sessions: Vec::new(),
+ backed_and_concluding,
+ num_validators_per_core: 5,
+ code_upgrade: None,
+ });
+
+ let expected_para_inherent_data = scenario.data.clone();
+ assert!(max_block_weight_proof_size_adjusted()
+ .any_lt(inherent_data_weight(&expected_para_inherent_data)));
+
+ // Check the para inherent data is as expected:
+ // * 1 bitfield per validator (5 validators per core, 2 backed candidates, 0 disputes => 2*5 = 10)
+ assert_eq!(expected_para_inherent_data.bitfields.len(), 10);
+ // * 2 backed candidates
+ assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2);
+ // * 0 disputes.
+ assert_eq!(expected_para_inherent_data.disputes.len(), 0);
+ let mut inherent_data = InherentData::new();
+ inherent_data
+ .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data)
+ .unwrap();
+
+ let limit_inherent_data =
+ Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap();
+ // Expect that inherent data is size-filtered to include only 1 of the 2 backed candidates
+ assert!(limit_inherent_data != expected_para_inherent_data);
+ assert!(
+ max_block_weight_proof_size_adjusted()
+ .all_gte(inherent_data_weight(&limit_inherent_data)),
+ "Post limiting exceeded block weight: max={} vs. inherent={}",
+ max_block_weight_proof_size_adjusted(),
+ inherent_data_weight(&limit_inherent_data)
+ );
+
+ // * 10 bitfields - no filtering here
+ assert_eq!(limit_inherent_data.bitfields.len(), 10);
+ // * 2 backed candidates
+ assert_eq!(limit_inherent_data.backed_candidates.len(), 1);
+ // * 0 disputes.
+ assert_eq!(limit_inherent_data.disputes.len(), 0);
+ });
+ }
+
// Ensure that overweight parachain inherents are always rejected by the runtime.
// Runtime should panic and return `InherentOverweight` error.
+ #[test]
fn inherent_create_weight_invariant() {
new_test_ext(MockGenesisConfig::default()).execute_with(|| {
// Create an overweight inherent and oversized block
@@ -700,7 +899,8 @@ mod enter {
});
let expected_para_inherent_data = scenario.data.clone();
- assert!(max_block_weight().any_lt(inherent_data_weight(&expected_para_inherent_data)));
+ assert!(max_block_weight_proof_size_adjusted()
+ .any_lt(inherent_data_weight(&expected_para_inherent_data)));
// Check the para inherent data is as expected:
// * 1 bitfield per validator (5 validators per core, 30 backed candidates, 3 disputes => 5*33 = 165)
@@ -713,7 +913,6 @@ mod enter {
inherent_data
.put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data)
.unwrap();
-
let dispatch_error = Pallet::::enter(
frame_system::RawOrigin::None.into(),
expected_para_inherent_data,
@@ -724,8 +923,6 @@ mod enter {
assert_eq!(dispatch_error, Error::::InherentOverweight.into());
});
}
-
- // TODO: Test process inherent invariant
}
fn default_header() -> primitives::Header {
diff --git a/runtime/parachains/src/paras_inherent/weights.rs b/runtime/parachains/src/paras_inherent/weights.rs
index f6e1262f5eb9..05cc53fae046 100644
--- a/runtime/parachains/src/paras_inherent/weights.rs
+++ b/runtime/parachains/src/paras_inherent/weights.rs
@@ -13,10 +13,20 @@
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see .
-use super::{
- BackedCandidate, Config, DisputeStatementSet, UncheckedSignedAvailabilityBitfield, Weight,
+
+//! We use benchmarks to get time weights, for proof_size we manually use the size of the input
+//! data, which will be part of the block. This is because we don't care about the storage proof on
+//! the relay chain, but we do care about the size of the block, by putting the tx in the
+//! proof_size we can use the already existing weight limiting code to limit the used size as well.
+
+use parity_scale_codec::{Encode, WrapperTypeEncode};
+use primitives::{
+ CheckedMultiDisputeStatementSet, MultiDisputeStatementSet, UncheckedSignedAvailabilityBitfield,
+ UncheckedSignedAvailabilityBitfields,
};
+use super::{BackedCandidate, Config, DisputeStatementSet, Weight};
+
pub trait WeightInfo {
/// Variant over `v`, the count of dispute statements in a dispute statement set. This gives the
/// weight of a single dispute statement set.
@@ -72,51 +82,82 @@ impl WeightInfo for TestWeightInfo {
pub fn paras_inherent_total_weight(
backed_candidates: &[BackedCandidate<::Hash>],
- bitfields: &[UncheckedSignedAvailabilityBitfield],
- disputes: &[DisputeStatementSet],
+ bitfields: &UncheckedSignedAvailabilityBitfields,
+ disputes: &MultiDisputeStatementSet,
) -> Weight {
backed_candidates_weight::(backed_candidates)
- .saturating_add(signed_bitfields_weight::(bitfields.len()))
- .saturating_add(multi_dispute_statement_sets_weight::(disputes))
+ .saturating_add(signed_bitfields_weight::(bitfields))
+ .saturating_add(multi_dispute_statement_sets_weight::(disputes))
}
-pub fn dispute_statement_set_weight>(
- statement_set: S,
+pub fn multi_dispute_statement_sets_weight(
+ disputes: &MultiDisputeStatementSet,
) -> Weight {
- <::WeightInfo as WeightInfo>::enter_variable_disputes(
- statement_set.as_ref().statements.len() as u32,
+ set_proof_size_to_tx_size(
+ disputes
+ .iter()
+ .map(|d| dispute_statement_set_weight::(d))
+ .fold(Weight::zero(), |acc_weight, weight| acc_weight.saturating_add(weight)),
+ disputes,
)
}
-pub fn multi_dispute_statement_sets_weight<
+pub fn checked_multi_dispute_statement_sets_weight(
+ disputes: &CheckedMultiDisputeStatementSet,
+) -> Weight {
+ set_proof_size_to_tx_size(
+ disputes
+ .iter()
+ .map(|d| dispute_statement_set_weight::(d))
+ .fold(Weight::zero(), |acc_weight, weight| acc_weight.saturating_add(weight)),
+ disputes,
+ )
+}
+
+/// Get time weights from benchmarks and set proof size to tx size.
+pub fn dispute_statement_set_weight(statement_set: D) -> Weight
+where
T: Config,
- D: AsRef<[S]>,
- S: AsRef,
->(
- disputes: D,
+ D: AsRef + WrapperTypeEncode + Sized + Encode,
+{
+ set_proof_size_to_tx_size(
+ <::WeightInfo as WeightInfo>::enter_variable_disputes(
+ statement_set.as_ref().statements.len() as u32,
+ ),
+ statement_set,
+ )
+}
+
+pub fn signed_bitfields_weight(
+ bitfields: &UncheckedSignedAvailabilityBitfields,
) -> Weight {
- disputes
- .as_ref()
- .iter()
- .map(|d| dispute_statement_set_weight::(d))
- .fold(Weight::zero(), |acc_weight, weight| acc_weight.saturating_add(weight))
+ set_proof_size_to_tx_size(
+ <::WeightInfo as WeightInfo>::enter_bitfields()
+ .saturating_mul(bitfields.len() as u64),
+ bitfields,
+ )
}
-pub fn signed_bitfields_weight(bitfields_len: usize) -> Weight {
- <::WeightInfo as WeightInfo>::enter_bitfields()
- .saturating_mul(bitfields_len as u64)
+pub fn signed_bitfield_weight(bitfield: &UncheckedSignedAvailabilityBitfield) -> Weight {
+ set_proof_size_to_tx_size(
+ <::WeightInfo as WeightInfo>::enter_bitfields(),
+ bitfield,
+ )
}
pub fn backed_candidate_weight(
candidate: &BackedCandidate,
) -> Weight {
- if candidate.candidate.commitments.new_validation_code.is_some() {
- <::WeightInfo as WeightInfo>::enter_backed_candidate_code_upgrade()
- } else {
- <::WeightInfo as WeightInfo>::enter_backed_candidates_variable(
- candidate.validity_votes.len() as u32,
- )
- }
+ set_proof_size_to_tx_size(
+ if candidate.candidate.commitments.new_validation_code.is_some() {
+ <::WeightInfo as WeightInfo>::enter_backed_candidate_code_upgrade()
+ } else {
+ <::WeightInfo as WeightInfo>::enter_backed_candidates_variable(
+ candidate.validity_votes.len() as u32,
+ )
+ },
+ candidate,
+ )
}
pub fn backed_candidates_weight(
@@ -127,3 +168,8 @@ pub fn backed_candidates_weight(
.map(|c| backed_candidate_weight::(c))
.fold(Weight::zero(), |acc, x| acc.saturating_add(x))
}
+
+/// Set proof_size component of `Weight` to tx size.
+fn set_proof_size_to_tx_size(weight: Weight, arg: Arg) -> Weight {
+ weight.set_proof_size(arg.encoded_size() as u64)
+}
diff --git a/runtime/polkadot/src/weights/runtime_parachains_paras_inherent.rs b/runtime/polkadot/src/weights/runtime_parachains_paras_inherent.rs
index ae1c502ae921..70eb764305e4 100644
--- a/runtime/polkadot/src/weights/runtime_parachains_paras_inherent.rs
+++ b/runtime/polkadot/src/weights/runtime_parachains_paras_inherent.rs
@@ -17,27 +17,26 @@
//! Autogenerated weights for `runtime_parachains::paras_inherent`
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-06-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2023-06-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-e8ezs4ez-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 1024
// Executed Command:
-// ./target/production/polkadot
+// target/production/polkadot
// benchmark
// pallet
-// --chain=polkadot-dev
// --steps=50
// --repeat=20
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --pallet=runtime_parachains::paras_inherent
// --extrinsic=*
// --execution=wasm
// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json
+// --pallet=runtime_parachains::paras_inherent
+// --chain=polkadot-dev
// --header=./file_header.txt
-// --output=./runtime/polkadot/src/weights/runtime_parachains_paras_inherent.rs
+// --output=./runtime/polkadot/src/weights/
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
@@ -117,11 +116,11 @@ impl runtime_parachains::paras_inherent::WeightInfo for
// Proof Size summary in bytes:
// Measured: `50915`
// Estimated: `56855 + v * (23 ±0)`
- // Minimum execution time: 999_775_000 picoseconds.
- Weight::from_parts(461_856_558, 0)
+ // Minimum execution time: 999_704_000 picoseconds.
+ Weight::from_parts(455_751_887, 0)
.saturating_add(Weight::from_parts(0, 56855))
- // Standard Error: 15_669
- .saturating_add(Weight::from_parts(56_847_986, 0).saturating_mul(v.into()))
+ // Standard Error: 14_301
+ .saturating_add(Weight::from_parts(57_084_663, 0).saturating_mul(v.into()))
.saturating_add(T::DbWeight::get().reads(28))
.saturating_add(T::DbWeight::get().writes(15))
.saturating_add(Weight::from_parts(0, 23).saturating_mul(v.into()))
@@ -190,8 +189,8 @@ impl runtime_parachains::paras_inherent::WeightInfo for
// Proof Size summary in bytes:
// Measured: `42748`
// Estimated: `48688`
- // Minimum execution time: 457_800_000 picoseconds.
- Weight::from_parts(482_446_000, 0)
+ // Minimum execution time: 485_153_000 picoseconds.
+ Weight::from_parts(504_774_000, 0)
.saturating_add(Weight::from_parts(0, 48688))
.saturating_add(T::DbWeight::get().reads(26))
.saturating_add(T::DbWeight::get().writes(16))
@@ -265,11 +264,11 @@ impl runtime_parachains::paras_inherent::WeightInfo for
// Proof Size summary in bytes:
// Measured: `42784`
// Estimated: `48724`
- // Minimum execution time: 6_889_257_000 picoseconds.
- Weight::from_parts(1_240_166_857, 0)
+ // Minimum execution time: 6_906_795_000 picoseconds.
+ Weight::from_parts(1_315_944_667, 0)
.saturating_add(Weight::from_parts(0, 48724))
- // Standard Error: 23_642
- .saturating_add(Weight::from_parts(56_311_928, 0).saturating_mul(v.into()))
+ // Standard Error: 31_132
+ .saturating_add(Weight::from_parts(55_792_755, 0).saturating_mul(v.into()))
.saturating_add(T::DbWeight::get().reads(29))
.saturating_add(T::DbWeight::get().writes(15))
}
@@ -345,8 +344,8 @@ impl runtime_parachains::paras_inherent::WeightInfo for
// Proof Size summary in bytes:
// Measured: `42811`
// Estimated: `48751`
- // Minimum execution time: 41_983_250_000 picoseconds.
- Weight::from_parts(43_216_188_000, 0)
+ // Minimum execution time: 44_487_810_000 picoseconds.
+ Weight::from_parts(46_317_208_000, 0)
.saturating_add(Weight::from_parts(0, 48751))
.saturating_add(T::DbWeight::get().reads(31))
.saturating_add(T::DbWeight::get().writes(15))
diff --git a/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs b/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs
index 72f70f8c4205..0dd64f054d00 100644
--- a/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs
+++ b/runtime/westend/src/weights/runtime_parachains_paras_inherent.rs
@@ -17,27 +17,26 @@
//! Autogenerated weights for `runtime_parachains::paras_inherent`
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-06-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2023-06-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner--ss9ysm1-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024
// Executed Command:
-// ./target/production/polkadot
+// target/production/polkadot
// benchmark
// pallet
-// --chain=westend-dev
// --steps=50
// --repeat=20
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --pallet=runtime_parachains::paras_inherent
// --extrinsic=*
// --execution=wasm
// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json
+// --pallet=runtime_parachains::paras_inherent
+// --chain=westend-dev
// --header=./file_header.txt
-// --output=./runtime/westend/src/weights/runtime_parachains_paras_inherent.rs
+// --output=./runtime/westend/src/weights/
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
@@ -115,11 +114,11 @@ impl runtime_parachains::paras_inherent::WeightInfo for
// Proof Size summary in bytes:
// Measured: `50518`
// Estimated: `56458 + v * (23 ±0)`
- // Minimum execution time: 992_257_000 picoseconds.
- Weight::from_parts(473_224_562, 0)
+ // Minimum execution time: 998_338_000 picoseconds.
+ Weight::from_parts(468_412_001, 0)
.saturating_add(Weight::from_parts(0, 56458))
- // Standard Error: 50_055
- .saturating_add(Weight::from_parts(57_274_046, 0).saturating_mul(v.into()))
+ // Standard Error: 20_559
+ .saturating_add(Weight::from_parts(56_965_025, 0).saturating_mul(v.into()))
.saturating_add(T::DbWeight::get().reads(27))
.saturating_add(T::DbWeight::get().writes(15))
.saturating_add(Weight::from_parts(0, 23).saturating_mul(v.into()))
@@ -186,8 +185,8 @@ impl runtime_parachains::paras_inherent::WeightInfo for
// Proof Size summary in bytes:
// Measured: `42352`
// Estimated: `48292`
- // Minimum execution time: 445_965_000 picoseconds.
- Weight::from_parts(476_329_000, 0)
+ // Minimum execution time: 457_404_000 picoseconds.
+ Weight::from_parts(485_416_000, 0)
.saturating_add(Weight::from_parts(0, 48292))
.saturating_add(T::DbWeight::get().reads(25))
.saturating_add(T::DbWeight::get().writes(16))
@@ -259,11 +258,11 @@ impl runtime_parachains::paras_inherent::WeightInfo for
// Proof Size summary in bytes:
// Measured: `42387`
// Estimated: `48327`
- // Minimum execution time: 6_877_099_000 picoseconds.
- Weight::from_parts(1_267_644_471, 0)
+ // Minimum execution time: 6_864_029_000 picoseconds.
+ Weight::from_parts(1_237_704_892, 0)
.saturating_add(Weight::from_parts(0, 48327))
- // Standard Error: 96_443
- .saturating_add(Weight::from_parts(56_535_707, 0).saturating_mul(v.into()))
+ // Standard Error: 33_413
+ .saturating_add(Weight::from_parts(56_199_819, 0).saturating_mul(v.into()))
.saturating_add(T::DbWeight::get().reads(28))
.saturating_add(T::DbWeight::get().writes(15))
}
@@ -337,8 +336,8 @@ impl runtime_parachains::paras_inherent::WeightInfo for
// Proof Size summary in bytes:
// Measured: `42414`
// Estimated: `48354`
- // Minimum execution time: 40_882_969_000 picoseconds.
- Weight::from_parts(45_409_238_000, 0)
+ // Minimum execution time: 43_320_529_000 picoseconds.
+ Weight::from_parts(45_622_613_000, 0)
.saturating_add(Weight::from_parts(0, 48354))
.saturating_add(T::DbWeight::get().reads(30))
.saturating_add(T::DbWeight::get().writes(15))