Skip to content

Commit

Permalink
Merge branch 'master' into bkchr-benchmarking-instance
Browse files Browse the repository at this point in the history
  • Loading branch information
bkchr authored Nov 11, 2024
2 parents a172feb + ace62f1 commit 03f762c
Show file tree
Hide file tree
Showing 26 changed files with 688 additions and 430 deletions.
4 changes: 1 addition & 3 deletions cumulus/client/relay-chain-minimal-node/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -224,7 +224,7 @@ async fn new_minimal_relay_chain<Block: BlockT, Network: NetworkBackend<RelayBlo
.chain_get_header(None)
.await?
.ok_or_else(|| RelayChainError::RpcCallError("Unable to fetch best header".to_string()))?;
let (network, network_starter, sync_service) = build_collator_network::<Network>(
let (network, sync_service) = build_collator_network::<Network>(
&config,
net_config,
task_manager.spawn_handle(),
Expand Down Expand Up @@ -262,8 +262,6 @@ async fn new_minimal_relay_chain<Block: BlockT, Network: NetworkBackend<RelayBlo
let overseer_handle =
collator_overseer::spawn_overseer(overseer_args, &task_manager, relay_chain_rpc_client)?;

network_starter.start_network();

Ok(NewMinimalNode { task_manager, overseer_handle })
}

Expand Down
26 changes: 4 additions & 22 deletions cumulus/client/relay-chain-minimal-node/src/network.rs
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ use sc_network::{

use sc_network::{config::FullNetworkConfiguration, NetworkBackend, NotificationService};
use sc_network_common::{role::Roles, sync::message::BlockAnnouncesHandshake};
use sc_service::{error::Error, Configuration, NetworkStarter, SpawnTaskHandle};
use sc_service::{error::Error, Configuration, SpawnTaskHandle};

use std::{iter, sync::Arc};

Expand All @@ -41,10 +41,7 @@ pub(crate) fn build_collator_network<Network: NetworkBackend<Block, Hash>>(
genesis_hash: Hash,
best_header: Header,
notification_metrics: NotificationMetrics,
) -> Result<
(Arc<dyn NetworkService>, NetworkStarter, Arc<dyn sp_consensus::SyncOracle + Send + Sync>),
Error,
> {
) -> Result<(Arc<dyn NetworkService>, Arc<dyn sp_consensus::SyncOracle + Send + Sync>), Error> {
let protocol_id = config.protocol_id();
let (block_announce_config, _notification_service) = get_block_announce_proto_config::<Network>(
protocol_id.clone(),
Expand Down Expand Up @@ -85,31 +82,16 @@ pub(crate) fn build_collator_network<Network: NetworkBackend<Block, Hash>>(
let network_worker = Network::new(network_params)?;
let network_service = network_worker.network_service();

let (network_start_tx, network_start_rx) = futures::channel::oneshot::channel();

// The network worker is responsible for gathering all network messages and processing
// them. This is quite a heavy task, and at the time of the writing of this comment it
// frequently happens that this future takes several seconds or in some situations
// even more than a minute until it has processed its entire queue. This is clearly an
// issue, and ideally we would like to fix the network future to take as little time as
// possible, but we also take the extra harm-prevention measure to execute the networking
// future using `spawn_blocking`.
spawn_handle.spawn_blocking("network-worker", Some("networking"), async move {
if network_start_rx.await.is_err() {
tracing::warn!(
"The NetworkStart returned as part of `build_network` has been silently dropped"
);
// This `return` might seem unnecessary, but we don't want to make it look like
// everything is working as normal even though the user is clearly misusing the API.
return
}

network_worker.run().await;
});

let network_starter = NetworkStarter::new(network_start_tx);
spawn_handle.spawn_blocking("network-worker", Some("networking"), network_worker.run());

Ok((network_service, network_starter, Arc::new(SyncOracle {})))
Ok((network_service, Arc::new(SyncOracle {})))
}

fn adjust_network_config_light_in_peers(config: &mut NetworkConfiguration) {
Expand Down
3 changes: 1 addition & 2 deletions cumulus/client/service/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ use sc_consensus::{
use sc_network::{config::SyncMode, service::traits::NetworkService, NetworkBackend};
use sc_network_sync::SyncingService;
use sc_network_transactions::TransactionsHandlerController;
use sc_service::{Configuration, NetworkStarter, SpawnTaskHandle, TaskManager, WarpSyncConfig};
use sc_service::{Configuration, SpawnTaskHandle, TaskManager, WarpSyncConfig};
use sc_telemetry::{log, TelemetryWorkerHandle};
use sc_utils::mpsc::TracingUnboundedSender;
use sp_api::ProvideRuntimeApi;
Expand Down Expand Up @@ -439,7 +439,6 @@ pub async fn build_network<'a, Block, Client, RCInterface, IQ, Network>(
Arc<dyn NetworkService>,
TracingUnboundedSender<sc_rpc::system::Request<Block>>,
TransactionsHandlerController<Block::Hash>,
NetworkStarter,
Arc<SyncingService<Block>>,
)>
where
Expand Down
4 changes: 1 addition & 3 deletions cumulus/polkadot-omni-node/lib/src/common/spec.rs
Original file line number Diff line number Diff line change
Expand Up @@ -239,7 +239,7 @@ pub(crate) trait NodeSpec: BaseNodeSpec {
prometheus_registry.clone(),
);

let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) =
let (network, system_rpc_tx, tx_handler_controller, sync_service) =
build_network(BuildNetworkParams {
parachain_config: &parachain_config,
net_config,
Expand Down Expand Up @@ -346,8 +346,6 @@ pub(crate) trait NodeSpec: BaseNodeSpec {
)?;
}

start_network.start_network();

Ok(task_manager)
}
.instrument(sc_tracing::tracing::info_span!(
Expand Down
3 changes: 1 addition & 2 deletions cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ impl<NodeSpec: NodeSpecT> ManualSealNode<NodeSpec> {
config.prometheus_config.as_ref().map(|cfg| &cfg.registry),
);

let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) =
let (network, system_rpc_tx, tx_handler_controller, sync_service) =
sc_service::build_network(sc_service::BuildNetworkParams {
config: &config,
client: client.clone(),
Expand Down Expand Up @@ -219,7 +219,6 @@ impl<NodeSpec: NodeSpecT> ManualSealNode<NodeSpec> {
telemetry: telemetry.as_mut(),
})?;

start_network.start_network();
Ok(task_manager)
}
}
4 changes: 1 addition & 3 deletions cumulus/test/service/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -367,7 +367,7 @@ where
prometheus_registry.clone(),
);

let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) =
let (network, system_rpc_tx, tx_handler_controller, sync_service) =
build_network(BuildNetworkParams {
parachain_config: &parachain_config,
net_config,
Expand Down Expand Up @@ -542,8 +542,6 @@ where
}
}

start_network.start_network();

Ok((task_manager, client, network, rpc_handlers, transaction_pool, backend))
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -630,7 +630,7 @@ impl BackedChain {
) -> impl Iterator<Item = FragmentNode> + 'a {
let mut found_index = None;
for index in 0..self.chain.len() {
let node = &self.chain[0];
let node = &self.chain[index];

if found_index.is_some() {
self.by_parent_head.remove(&node.parent_head_data_hash);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1165,8 +1165,9 @@ fn test_populate_and_check_potential() {
Err(Error::CandidateAlreadyKnown)
);

// Simulate a best chain reorg by backing a2.
// Simulate some best chain reorgs.
{
// Back A2. The reversion should happen right at the root.
let mut chain = chain.clone();
chain.candidate_backed(&candidate_a2_hash);
assert_eq!(chain.best_chain_vec(), vec![candidate_a2_hash, candidate_b2_hash]);
Expand All @@ -1185,6 +1186,66 @@ fn test_populate_and_check_potential() {
chain.can_add_candidate_as_potential(&candidate_a_entry),
Err(Error::ForkChoiceRule(_))
);

// Simulate a more complex chain reorg.
// A2 points to B2, which is backed.
// A2 has underneath a subtree A2 -> B2 -> C3 and A2 -> B2 -> C4. B2 and C3 are backed. C4
// is kept because it has a lower candidate hash than C3. Backing C4 will cause a chain
// reorg.

// Candidate C3.
let (pvd_c3, candidate_c3) = make_committed_candidate(
para_id,
relay_parent_y_info.hash,
relay_parent_y_info.number,
vec![0xb4].into(),
vec![0xc2].into(),
relay_parent_y_info.number,
);
let candidate_c3_hash = candidate_c3.hash();
let candidate_c3_entry =
CandidateEntry::new(candidate_c3_hash, candidate_c3, pvd_c3, CandidateState::Seconded)
.unwrap();

// Candidate C4.
let (pvd_c4, candidate_c4) = make_committed_candidate(
para_id,
relay_parent_y_info.hash,
relay_parent_y_info.number,
vec![0xb4].into(),
vec![0xc3].into(),
relay_parent_y_info.number,
);
let candidate_c4_hash = candidate_c4.hash();
// C4 should have a lower candidate hash than C3.
assert_eq!(fork_selection_rule(&candidate_c4_hash, &candidate_c3_hash), Ordering::Less);
let candidate_c4_entry =
CandidateEntry::new(candidate_c4_hash, candidate_c4, pvd_c4, CandidateState::Seconded)
.unwrap();

let mut storage = storage.clone();
storage.add_candidate_entry(candidate_c3_entry).unwrap();
storage.add_candidate_entry(candidate_c4_entry).unwrap();
let mut chain = populate_chain_from_previous_storage(&scope, &storage);
chain.candidate_backed(&candidate_a2_hash);
chain.candidate_backed(&candidate_c3_hash);

assert_eq!(
chain.best_chain_vec(),
vec![candidate_a2_hash, candidate_b2_hash, candidate_c3_hash]
);

// Backing C4 will cause a reorg.
chain.candidate_backed(&candidate_c4_hash);
assert_eq!(
chain.best_chain_vec(),
vec![candidate_a2_hash, candidate_b2_hash, candidate_c4_hash]
);

assert_eq!(
chain.unconnected().map(|c| c.candidate_hash).collect::<HashSet<_>>(),
[candidate_f_hash].into_iter().collect()
);
}

// Candidate F has an invalid hrmp watermark. however, it was not checked beforehand as we don't
Expand Down
4 changes: 1 addition & 3 deletions polkadot/node/service/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1003,7 +1003,7 @@ pub fn new_full<
})
};

let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) =
let (network, system_rpc_tx, tx_handler_controller, sync_service) =
sc_service::build_network(sc_service::BuildNetworkParams {
config: &config,
net_config,
Expand Down Expand Up @@ -1383,8 +1383,6 @@ pub fn new_full<
);
}

network_starter.start_network();

Ok(NewFull {
task_manager,
client,
Expand Down
10 changes: 10 additions & 0 deletions prdoc/pr_6262.prdoc
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
title: "Size limits implemented for fork aware transaction pool"

doc:
- audience: Node Dev
description: |
    Size limits are now obeyed in the fork-aware transaction pool.

crates:
- name: sc-transaction-pool
bump: minor
41 changes: 41 additions & 0 deletions prdoc/pr_6400.prdoc
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
title: Remove network starter that is no longer needed
doc:
- audience: Node Dev
description: |-
# Description

This seems to be an old artifact of the long closed https://github.com/paritytech/substrate/issues/6827 that I noticed when working on related code earlier.

## Integration

`NetworkStarter` was removed, simply remove its usage:
```diff
-let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) =
+let (network, system_rpc_tx, tx_handler_controller, sync_service) =
build_network(BuildNetworkParams {
...
-start_network.start_network();
```

## Review Notes

Changes are trivial. The only reason for this not to be accepted is if it is desired to not start the network automatically for some reason, in which case the description of the network starter needs to change.

# Checklist

* [x] My PR includes a detailed description as outlined in the "Description" and its two subsections above.
* [ ] My PR follows the [labeling requirements](
https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md#Process
) of this project (at minimum one label for `T` required)
* External contributors: ask maintainers to put the right label on your PR.
crates:
- name: cumulus-relay-chain-minimal-node
bump: major
- name: cumulus-client-service
bump: major
- name: polkadot-omni-node-lib
bump: major
- name: polkadot-service
bump: major
- name: sc-service
bump: major
9 changes: 9 additions & 0 deletions prdoc/pr_6417.prdoc
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
title: fix prospective-parachains best backable chain reversion bug
doc:
- audience: Node Dev
description: |
Fixes a bug in the prospective-parachains subsystem that prevented proper best backable chain reorg.

crates:
- name: polkadot-node-core-prospective-parachains
bump: patch
3 changes: 1 addition & 2 deletions substrate/bin/node/cli/src/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -513,7 +513,7 @@ pub fn new_full_base<N: NetworkBackend<Block, <Block as BlockT>::Hash>>(
Vec::default(),
));

let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) =
let (network, system_rpc_tx, tx_handler_controller, sync_service) =
sc_service::build_network(sc_service::BuildNetworkParams {
config: &config,
net_config,
Expand Down Expand Up @@ -801,7 +801,6 @@ pub fn new_full_base<N: NetworkBackend<Block, <Block as BlockT>::Hash>>(
);
}

network_starter.start_network();
Ok(NewFullBase {
task_manager,
client,
Expand Down
Loading

0 comments on commit 03f762c

Please sign in to comment.