From 9cadada23be2767ee6dbfaccfc28eacc46307254 Mon Sep 17 00:00:00 2001 From: Ekaterina Ryazantseva Date: Fri, 30 Aug 2024 18:34:39 +0200 Subject: [PATCH 1/5] merge PeerDAS metrics --- eip_7594/src/lib.rs | 2 + fork_choice_control/src/controller.rs | 1 + fork_choice_store/src/store.rs | 22 ++++++++ p2p/src/network.rs | 5 ++ prometheus_metrics/src/metrics.rs | 77 ++++++++++++++++++++++++++- 5 files changed, 105 insertions(+), 2 deletions(-) diff --git a/eip_7594/src/lib.rs b/eip_7594/src/lib.rs index 36037c57..e43ae07a 100644 --- a/eip_7594/src/lib.rs +++ b/eip_7594/src/lib.rs @@ -198,6 +198,7 @@ fn get_data_columns_for_subnet(subnet_id: SubnetId) -> impl Iterator) -> Result> { let kzg_settings = settings(); @@ -222,6 +223,7 @@ pub fn compute_matrix(blobs: Vec) -> Result> { * * This helper demonstrates how to apply ``recover_cells_and_kzg_proofs``. */ +// TODO: reconstructed_columns and columns_reconstruction_time metrics can be implemented with this function call pub fn recover_matrix( partial_matrix: Vec, blob_count: usize, diff --git a/fork_choice_control/src/controller.rs b/fork_choice_control/src/controller.rs index 80b4c0af..bdfaff24 100644 --- a/fork_choice_control/src/controller.rs +++ b/fork_choice_control/src/controller.rs @@ -111,6 +111,7 @@ where anchor_block, anchor_state, finished_initial_forward_sync, + metrics.clone(), ); store.apply_tick(tick)?; diff --git a/fork_choice_store/src/store.rs b/fork_choice_store/src/store.rs index 1062ceef..0aa431d9 100644 --- a/fork_choice_store/src/store.rs +++ b/fork_choice_store/src/store.rs @@ -217,6 +217,7 @@ pub struct Store { rejected_block_roots: HashSet, finished_initial_forward_sync: bool, custody_columns: HashSet, + metrics: Option>, } impl Store
{ @@ -228,6 +229,7 @@ impl Store
{ anchor_block: Arc>, anchor_state: Arc>, finished_initial_forward_sync: bool, + metrics: Option>, ) -> Self { let block_root = anchor_block.message().hash_tree_root(); let state_root = anchor_state.hash_tree_root(); @@ -288,6 +290,7 @@ impl Store
{ rejected_block_roots: HashSet::default(), finished_initial_forward_sync, custody_columns: HashSet::default(), + metrics, } } @@ -1799,6 +1802,10 @@ impl Store
{ mut verifier: impl Verifier + Send, metrics: &Option>, ) -> Result> { + if let Some(metrics) = self.metrics.as_ref() { + metrics.data_column_sidecars_submitted_for_processing.inc(); + } + let _timer = metrics .as_ref() .map(|metrics| metrics.data_column_sidecar_verification_times.start_timer()); @@ -1859,20 +1866,35 @@ impl Store
{ )?; // [REJECT] The sidecar's kzg_commitments field inclusion proof is valid as verified by verify_data_column_sidecar_inclusion_proof(sidecar). + // data_column_sidecar_inclusion_proof_verification metric should be somewhere here + // or maybe inside the verify_sidecar_inclusion_proof() function + let _sidecar_inclusion_proof_timer = metrics + .as_ref() + .map(|metrics| metrics.data_column_sidecar_inclusion_proof_verification.start_timer()); + ensure!( verify_sidecar_inclusion_proof(&data_column_sidecar), Error::DataColumnSidecarInvalidInclusionProof { data_column_sidecar } ); + // stop _sidecar_inclusion_proof_timer ? // [REJECT] The sidecar's column data is valid as verified by verify_data_column_sidecar_kzg_proofs(sidecar). + // data_column_sidecar_kzg_verification_batch metric should be somewhere here + // or maybe inside the verify_kzg_proofs() function + // where to put data_column_sidecar_kzg_verification_single? + let _sidecar_inclusion_proof_timer = metrics + .as_ref() + .map(|metrics| metrics.data_column_sidecar_kzg_verification_batch.start_timer()); + verify_kzg_proofs(&data_column_sidecar).map_err(|error| { Error::DataColumnSidecarInvalid { data_column_sidecar: data_column_sidecar.clone_arc(), error, } })?; + // stop data_column_sidecar_kzg_verification_batch ? // [REJECT] The sidecar's block's parent (defined by block_header.parent_root) passes validation. // Part 1/2: diff --git a/p2p/src/network.rs b/p2p/src/network.rs index 265dc6a3..4e1bbd09 100644 --- a/p2p/src/network.rs +++ b/p2p/src/network.rs @@ -2275,6 +2275,7 @@ impl Network
{ let request = DataColumnsByRootRequest::new( data_column_identifiers + .clone() .try_into() .expect("length is under maximum"), ); @@ -2287,6 +2288,10 @@ impl Network
{ ); self.request(peer_id, request_id, Request::DataColumnsByRoot(request)); + let custody_columns_count = data_column_identifiers.len(); + if let Some(metrics) = self.metrics.as_ref() { + metrics.set_custody_columns("by_root", custody_columns_count); + } } fn subscribe_to_core_topics(&mut self) { diff --git a/prometheus_metrics/src/metrics.rs b/prometheus_metrics/src/metrics.rs index 7e540efb..9d137203 100644 --- a/prometheus_metrics/src/metrics.rs +++ b/prometheus_metrics/src/metrics.rs @@ -49,10 +49,18 @@ pub struct Metrics { pub received_sync_contribution_subsets: IntCounter, pub received_aggregated_attestation_subsets: IntCounter, - // Custody Subnets / Data Column Verification times + // Custody Subnets / PeerDAS column_subnet_peers: IntGaugeVec, + pub data_column_sidecars_submitted_for_processing: IntCounter, pub verified_gossip_data_column_sidecar: IntCounter, pub data_column_sidecar_verification_times: Histogram, + pub reconstructed_columns: IntCounter, + pub columns_reconstruction_time: Histogram, + pub data_column_sidecar_computation: Histogram, + pub data_column_sidecar_inclusion_proof_verification: Histogram, + pub data_column_sidecar_kzg_verification_single: Histogram, + pub data_column_sidecar_kzg_verification_batch: Histogram, + pub custody_columns_count: IntGauge, // Extra Network stats gossip_block_slot_start_delay_time: Histogram, @@ -266,11 +274,17 @@ impl Metrics { "Number of received aggregated attestations that are subsets of already known aggregates" )?, + // Custody Subnets / PeerDAS column_subnet_peers: IntGaugeVec::new( opts!("PEERS_PER_COLUMN_SUBNET", "Number of connected peers per column subnet"), &["subnet_id"], )?, + data_column_sidecars_submitted_for_processing: IntCounter::new( + "DATA_COLUMN_SIDECARS_SUBMITTED_FOR_PROCESSING", + "Number of data column sidecars submitted for processing" + )?, + verified_gossip_data_column_sidecar: IntCounter::new( "VERIFIED_GOSSIP_DATA_COLUMN_SIDECAR", "Number of gossip data column sidecar verified for propagation" @@ -278,9 +292,44 @@ impl Metrics { data_column_sidecar_verification_times: Histogram::with_opts(histogram_opts!( "DATA_COLUMN_SIDECAR_VERIFICATION_TIMES", - "Time takes to verify a data column sidecar" + "Time taken to verify a data column sidecar" + ))?, + + reconstructed_columns: IntCounter::new( + "RECONSTRUCTED_COLUMNS", + "Total count of reconstructed columns" + )?, + + columns_reconstruction_time: Histogram::with_opts(histogram_opts!( + "COLUMNS_RECONSTRUCTION_TIME", + "Time taken to reconstruct columns" + ))?, + + data_column_sidecar_computation: Histogram::with_opts(histogram_opts!( + "DATA_COLUMN_SIDECAR_COMPUTATION", + "Time taken to compute data column sidecar, including cells, proofs and inclusion proof" + ))?, + + data_column_sidecar_inclusion_proof_verification: Histogram::with_opts(histogram_opts!( + "DATA_COLUMN_SIDECAR_INCLUSION_PROOF_VERIFICATION", + "Time taken to verify data column sidecar inclusion proof" + ))?, + + data_column_sidecar_kzg_verification_single: Histogram::with_opts(histogram_opts!( + "DATA_COLUMN_SIDECAR_KZG_VERIFICATION_SINGLE", + "Runtime of single data column kzg verification" + ))?, + + data_column_sidecar_kzg_verification_batch: Histogram::with_opts(histogram_opts!( + "DATA_COLUMN_SIDECAR_KZG_VERIFICATION_BATCH", + "Runtime of batched data column kzg verification" ))?, + custody_columns_count: IntGauge::new( + "CUSTODY_COLUMNS_COUNT", + "Total count of columns in custody", + )?, + // Extra Network stats gossip_block_slot_start_delay_time: 
Histogram::with_opts(histogram_opts!( "beacon_block_gossip_slot_start_delay_time", @@ -766,10 +815,28 @@ impl Metrics { self.received_aggregated_attestation_subsets.clone(), ))?; default_registry.register(Box::new(self.column_subnet_peers.clone()))?; + default_registry.register(Box::new(self.data_column_sidecars_submitted_for_processing.clone()))?; default_registry.register(Box::new(self.verified_gossip_data_column_sidecar.clone()))?; default_registry.register(Box::new( self.data_column_sidecar_verification_times.clone(), ))?; + default_registry.register(Box::new(self.reconstructed_columns.clone(),))?; + default_registry.register(Box::new( + self.columns_reconstruction_time.clone(), + ))?; + default_registry.register(Box::new( + self.data_column_sidecar_computation.clone(), + ))?; + default_registry.register(Box::new( + self.data_column_sidecar_inclusion_proof_verification.clone(), + ))?; + default_registry.register(Box::new( + self.data_column_sidecar_kzg_verification_single.clone(), + ))?; + default_registry.register(Box::new( + self.data_column_sidecar_kzg_verification_batch.clone(), + ))?; + default_registry.register(Box::new(self.custody_columns_count.clone()))?; default_registry.register(Box::new(self.gossip_block_slot_start_delay_time.clone()))?; default_registry.register(Box::new(self.mutator_attestations.clone()))?; default_registry.register(Box::new(self.mutator_aggregate_and_proofs.clone()))?; @@ -997,6 +1064,7 @@ impl Metrics { } } + // Custody Subnets / PeerDAS pub fn set_column_subnet_peers(&self, subnet_id: &str, num_peers: usize) { match self .column_subnet_peers @@ -1012,6 +1080,11 @@ impl Metrics { } } + pub fn set_custody_columns(&self, label: &str, custody_columns_count: usize) { + self.custody_columns_count + .set(custody_columns_count as i64) + } + // Extra Network stats pub fn observe_block_duration_to_slot(&self, block_slot_timestamp: UnixSeconds) { match helpers::duration_from_now_to(block_slot_timestamp) { From ce1c591450b6d0db77091c49d5948d5af3b8ed74 Mon Sep 17 00:00:00 2001 From: Ekaterina Ryazantseva Date: Fri, 20 Sep 2024 10:27:17 +0200 Subject: [PATCH 2/5] cherry-pick commits --- Cargo.lock | 1 + eip_7594/Cargo.toml | 1 + eip_7594/src/lib.rs | 19 ++++++++++-- fork_choice_control/src/mutator.rs | 5 +--- fork_choice_store/src/store.rs | 39 +++++++++++++------------ p2p/src/network.rs | 6 +--- prometheus_metrics/src/metrics.rs | 47 ++++++++++++++---------------- 7 files changed, 62 insertions(+), 56 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0d8d0bb9..f85b2383 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2040,6 +2040,7 @@ dependencies = [ "itertools 0.12.1", "kzg", "num-traits", + "prometheus", "serde", "sha2 0.10.8", "spec_test_utils", diff --git a/eip_7594/Cargo.toml b/eip_7594/Cargo.toml index dfb3a487..aabb56f9 100644 --- a/eip_7594/Cargo.toml +++ b/eip_7594/Cargo.toml @@ -20,6 +20,7 @@ thiserror = { workspace = true } try_from_iterator = { workspace = true } typenum = { workspace = true } types = { workspace = true } +prometheus = { workspace = true } [dev-dependencies] duplicate = { workspace = true } diff --git a/eip_7594/src/lib.rs b/eip_7594/src/lib.rs index e43ae07a..691d282c 100644 --- a/eip_7594/src/lib.rs +++ b/eip_7594/src/lib.rs @@ -5,6 +5,7 @@ use helper_functions::predicates::is_valid_merkle_branch; use itertools::Itertools; use kzg as _; use num_traits::One as _; +use prometheus::Histogram; use sha2::{Digest as _, Sha256}; use ssz::{ByteVector, ContiguousList, ContiguousVector, SszHash, Uint256}; use thiserror::Error; @@ -56,7 
+57,14 @@ pub enum ExtendedSampleError { AllowedFailtureOutOfRange { allowed_failures: u64 }, } -pub fn verify_kzg_proofs(data_column_sidecar: &DataColumnSidecar
<P>
) -> Result { +pub fn verify_kzg_proofs( + data_column_sidecar: &DataColumnSidecar
<P>
, + data_column_sidecar_kzg_verification_single: Option<&Histogram>, +) -> Result { + if let Some(_metric) = data_column_sidecar_kzg_verification_single { + let _timer = _metric.start_timer(); + } + let DataColumnSidecar { index, column, @@ -118,7 +126,12 @@ pub fn verify_kzg_proofs(data_column_sidecar: &DataColumnSidecar
<P>
) pub fn verify_sidecar_inclusion_proof( data_column_sidecar: &DataColumnSidecar
<P>
, + data_column_sidecar_inclusion_proof_verification: Option<&Histogram>, ) -> bool { + if let Some(_metric) = data_column_sidecar_inclusion_proof_verification { + let _timer = _metric.start_timer(); + } + let DataColumnSidecar { kzg_commitments, signed_block_header, @@ -198,7 +211,7 @@ fn get_data_columns_for_subnet(subnet_id: SubnetId) -> impl Iterator) -> Result> { let kzg_settings = settings(); @@ -223,7 +236,7 @@ pub fn compute_matrix(blobs: Vec) -> Result> { * * This helper demonstrates how to apply ``recover_cells_and_kzg_proofs``. */ -// TODO: reconstructed_columns and columns_reconstruction_time metrics can be implemented with this function call +// TODO: implement reconstructed_columns and columns_reconstruction_time metrics pub fn recover_matrix( partial_matrix: Vec, blob_count: usize, diff --git a/fork_choice_control/src/mutator.rs b/fork_choice_control/src/mutator.rs index 32eccbee..2a194489 100644 --- a/fork_choice_control/src/mutator.rs +++ b/fork_choice_control/src/mutator.rs @@ -1153,10 +1153,7 @@ where if let Some(gossip_id) = origin.gossip_id() { P2pMessage::Accept(gossip_id).send(&self.p2p_tx); } - - if let Some(metrics) = self.metrics.as_ref() { - metrics.verified_gossip_data_column_sidecar.inc(); - } + self.accept_data_column_sidecar(&wait_group, data_column_sidecar); } Ok(DataColumnSidecarAction::Ignore) => { diff --git a/fork_choice_store/src/store.rs b/fork_choice_store/src/store.rs index 0aa431d9..a3027b5f 100644 --- a/fork_choice_store/src/store.rs +++ b/fork_choice_store/src/store.rs @@ -1806,7 +1806,7 @@ impl Store
{ metrics.data_column_sidecars_submitted_for_processing.inc(); } - let _timer = metrics + let _data_column_sidecar_verification_timer = metrics .as_ref() .map(|metrics| metrics.data_column_sidecar_verification_times.start_timer()); @@ -1866,35 +1866,32 @@ impl Store
{ )?; // [REJECT] The sidecar's kzg_commitments field inclusion proof is valid as verified by verify_data_column_sidecar_inclusion_proof(sidecar). - // data_column_sidecar_inclusion_proof_verification metric should be somewhere here - // or maybe inside the verify_sidecar_inclusion_proof() function - let _sidecar_inclusion_proof_timer = metrics + let _data_column_sidecar_inclusion_proof_verification = metrics .as_ref() - .map(|metrics| metrics.data_column_sidecar_inclusion_proof_verification.start_timer()); + .map(|metrics| &metrics.data_column_sidecar_inclusion_proof_verification); ensure!( - verify_sidecar_inclusion_proof(&data_column_sidecar), + verify_sidecar_inclusion_proof(&data_column_sidecar, _data_column_sidecar_inclusion_proof_verification), Error::DataColumnSidecarInvalidInclusionProof { data_column_sidecar } ); - // stop _sidecar_inclusion_proof_timer ? // [REJECT] The sidecar's column data is valid as verified by verify_data_column_sidecar_kzg_proofs(sidecar). - // data_column_sidecar_kzg_verification_batch metric should be somewhere here - // or maybe inside the verify_kzg_proofs() function - // where to put data_column_sidecar_kzg_verification_single? - let _sidecar_inclusion_proof_timer = metrics + let _data_column_sidecar_kzg_verification_single = metrics .as_ref() - .map(|metrics| metrics.data_column_sidecar_kzg_verification_batch.start_timer()); - - verify_kzg_proofs(&data_column_sidecar).map_err(|error| { - Error::DataColumnSidecarInvalid { - data_column_sidecar: data_column_sidecar.clone_arc(), - error, + .map(|metrics| &metrics.data_column_sidecar_kzg_verification_single); + + verify_kzg_proofs( + &data_column_sidecar, + _data_column_sidecar_kzg_verification_single) + .map_err(|error| { + Error::DataColumnSidecarInvalid { + data_column_sidecar: data_column_sidecar.clone_arc(), + error, + } } - })?; - // stop data_column_sidecar_kzg_verification_batch ? + )?; // [REJECT] The sidecar's block's parent (defined by block_header.parent_root) passes validation. // Part 1/2: @@ -1985,6 +1982,10 @@ impl Store
{ } ); + if let Some(metrics) = self.metrics.as_ref() { + metrics.verified_gossip_data_column_sidecar.inc(); + } + Ok(DataColumnSidecarAction::Accept(data_column_sidecar)) } diff --git a/p2p/src/network.rs b/p2p/src/network.rs index 4e1bbd09..2156b9ca 100644 --- a/p2p/src/network.rs +++ b/p2p/src/network.rs @@ -2195,7 +2195,7 @@ impl Network
{ count: u64, ) { let epoch = misc::compute_epoch_at_slot::
<P>
(start_slot); - let custody_columns = self.network_globals.custody_columns(); + let custody_columns = self.network_globals.custody_columns(epoch); // prevent node from sending excessive requests, since custody peers is not available. if self.check_good_peers_on_column_subnets(epoch) { @@ -2288,10 +2288,6 @@ impl Network
{ ); self.request(peer_id, request_id, Request::DataColumnsByRoot(request)); - let custody_columns_count = data_column_identifiers.len(); - if let Some(metrics) = self.metrics.as_ref() { - metrics.set_custody_columns("by_root", custody_columns_count); - } } fn subscribe_to_core_topics(&mut self) { diff --git a/prometheus_metrics/src/metrics.rs b/prometheus_metrics/src/metrics.rs index 9d137203..2f1b83eb 100644 --- a/prometheus_metrics/src/metrics.rs +++ b/prometheus_metrics/src/metrics.rs @@ -54,14 +54,14 @@ pub struct Metrics { pub data_column_sidecars_submitted_for_processing: IntCounter, pub verified_gossip_data_column_sidecar: IntCounter, pub data_column_sidecar_verification_times: Histogram, - pub reconstructed_columns: IntCounter, - pub columns_reconstruction_time: Histogram, + pub reconstructed_columns: IntCounter, // TODO + pub columns_reconstruction_time: Histogram, // TODO pub data_column_sidecar_computation: Histogram, pub data_column_sidecar_inclusion_proof_verification: Histogram, - pub data_column_sidecar_kzg_verification_single: Histogram, + pub data_column_sidecar_kzg_verification_single: Histogram, // TODO? pub data_column_sidecar_kzg_verification_batch: Histogram, - pub custody_columns_count: IntGauge, - + pub beacon_custody_columns_count_total: IntCounter, // TODO + // Extra Network stats gossip_block_slot_start_delay_time: Histogram, @@ -281,53 +281,53 @@ impl Metrics { )?, data_column_sidecars_submitted_for_processing: IntCounter::new( - "DATA_COLUMN_SIDECARS_SUBMITTED_FOR_PROCESSING", + "beacon_data_column_sidecar_processing_requests_total", "Number of data column sidecars submitted for processing" )?, verified_gossip_data_column_sidecar: IntCounter::new( - "VERIFIED_GOSSIP_DATA_COLUMN_SIDECAR", - "Number of gossip data column sidecar verified for propagation" + "beacon_data_column_sidecar_processing_successes_total", + "Number of data column sidecars verified for gossip" )?, data_column_sidecar_verification_times: Histogram::with_opts(histogram_opts!( - "DATA_COLUMN_SIDECAR_VERIFICATION_TIMES", - "Time taken to verify a data column sidecar" + "beacon_data_column_sidecar_gossip_verification_seconds", + "Full runtime of data column sidecars gossip verification" ))?, reconstructed_columns: IntCounter::new( - "RECONSTRUCTED_COLUMNS", + "beacon_data_availability_reconstructed_columns_total", "Total count of reconstructed columns" )?, columns_reconstruction_time: Histogram::with_opts(histogram_opts!( - "COLUMNS_RECONSTRUCTION_TIME", + "beacon_data_availability_reconstruction_time_seconds", "Time taken to reconstruct columns" ))?, data_column_sidecar_computation: Histogram::with_opts(histogram_opts!( - "DATA_COLUMN_SIDECAR_COMPUTATION", + "beacon_data_column_sidecar_computation_seconds", "Time taken to compute data column sidecar, including cells, proofs and inclusion proof" ))?, data_column_sidecar_inclusion_proof_verification: Histogram::with_opts(histogram_opts!( - "DATA_COLUMN_SIDECAR_INCLUSION_PROOF_VERIFICATION", + "beacon_data_column_sidecar_inclusion_proof_verification_seconds", "Time taken to verify data column sidecar inclusion proof" ))?, data_column_sidecar_kzg_verification_single: Histogram::with_opts(histogram_opts!( - "DATA_COLUMN_SIDECAR_KZG_VERIFICATION_SINGLE", + "beacon_kzg_verification_data_column_single_seconds", "Runtime of single data column kzg verification" ))?, data_column_sidecar_kzg_verification_batch: Histogram::with_opts(histogram_opts!( - "DATA_COLUMN_SIDECAR_KZG_VERIFICATION_BATCH", + "beacon_kzg_verification_data_column_batch_seconds", 
"Runtime of batched data column kzg verification" ))?, - custody_columns_count: IntGauge::new( - "CUSTODY_COLUMNS_COUNT", - "Total count of columns in custody", + beacon_custody_columns_count_total: IntCounter::new( + "beacon_custody_columns_count_total", + "Total count of columns in custody within the data availability boundary" )?, // Extra Network stats @@ -836,7 +836,9 @@ impl Metrics { default_registry.register(Box::new( self.data_column_sidecar_kzg_verification_batch.clone(), ))?; - default_registry.register(Box::new(self.custody_columns_count.clone()))?; + default_registry.register(Box::new( + self.beacon_custody_columns_count_total.clone(), + ))?; default_registry.register(Box::new(self.gossip_block_slot_start_delay_time.clone()))?; default_registry.register(Box::new(self.mutator_attestations.clone()))?; default_registry.register(Box::new(self.mutator_aggregate_and_proofs.clone()))?; @@ -1079,11 +1081,6 @@ impl Metrics { } } } - - pub fn set_custody_columns(&self, label: &str, custody_columns_count: usize) { - self.custody_columns_count - .set(custody_columns_count as i64) - } // Extra Network stats pub fn observe_block_duration_to_slot(&self, block_slot_timestamp: UnixSeconds) { From b794eae299acfb9fd1fb85d015a823f444c09289 Mon Sep 17 00:00:00 2001 From: Ekaterina Ryazantseva Date: Fri, 20 Sep 2024 15:45:13 +0200 Subject: [PATCH 3/5] fix: add fixes and reconstruction metrics --- Cargo.lock | 2 +- eip_7594/Cargo.toml | 2 +- eip_7594/src/lib.rs | 32 +++++++++++++++++++++---------- fork_choice_store/src/store.rs | 13 +++---------- p2p/src/network.rs | 2 +- prometheus_metrics/src/metrics.rs | 2 +- 6 files changed, 29 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f85b2383..9e88d6b2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2040,7 +2040,7 @@ dependencies = [ "itertools 0.12.1", "kzg", "num-traits", - "prometheus", + "prometheus_metrics", "serde", "sha2 0.10.8", "spec_test_utils", diff --git a/eip_7594/Cargo.toml b/eip_7594/Cargo.toml index aabb56f9..f3008061 100644 --- a/eip_7594/Cargo.toml +++ b/eip_7594/Cargo.toml @@ -20,7 +20,7 @@ thiserror = { workspace = true } try_from_iterator = { workspace = true } typenum = { workspace = true } types = { workspace = true } -prometheus = { workspace = true } +prometheus_metrics = { workspace = true } [dev-dependencies] duplicate = { workspace = true } diff --git a/eip_7594/src/lib.rs b/eip_7594/src/lib.rs index 691d282c..1149c0ff 100644 --- a/eip_7594/src/lib.rs +++ b/eip_7594/src/lib.rs @@ -5,9 +5,10 @@ use helper_functions::predicates::is_valid_merkle_branch; use itertools::Itertools; use kzg as _; use num_traits::One as _; -use prometheus::Histogram; +use prometheus_metrics::Metrics; use sha2::{Digest as _, Sha256}; use ssz::{ByteVector, ContiguousList, ContiguousVector, SszHash, Uint256}; +use std::sync::Arc; use thiserror::Error; use try_from_iterator::TryFromIterator as _; use typenum::Unsigned; @@ -59,10 +60,10 @@ pub enum ExtendedSampleError { pub fn verify_kzg_proofs( data_column_sidecar: &DataColumnSidecar
<P>
, - data_column_sidecar_kzg_verification_single: Option<&Histogram>, + metrics: &Option>, ) -> Result { - if let Some(_metric) = data_column_sidecar_kzg_verification_single { - let _timer = _metric.start_timer(); + if let Some(metrics) = metrics.as_ref() { + let _timer = metrics.data_column_sidecar_verification_times.start_timer(); } let DataColumnSidecar { @@ -126,10 +127,10 @@ pub fn verify_kzg_proofs( pub fn verify_sidecar_inclusion_proof( data_column_sidecar: &DataColumnSidecar
<P>
, - data_column_sidecar_inclusion_proof_verification: Option<&Histogram>, + metrics: &Option> ) -> bool { - if let Some(_metric) = data_column_sidecar_inclusion_proof_verification { - let _timer = _metric.start_timer(); + if let Some(metrics) = metrics.as_ref() { + let _timer = metrics.data_column_sidecar_inclusion_proof_verification.start_timer(); } let DataColumnSidecar { @@ -211,8 +212,14 @@ fn get_data_columns_for_subnet(subnet_id: SubnetId) -> impl Iterator) -> Result> { +pub fn compute_matrix( + blobs: Vec, + metrics: &Option>, + ) -> Result> { + if let Some(metrics) = metrics.as_ref() { + let _timer = metrics.data_column_sidecar_computation.start_timer(); + } + let kzg_settings = settings(); let mut matrix = vec![]; @@ -236,11 +243,16 @@ pub fn compute_matrix(blobs: Vec) -> Result> { * * This helper demonstrates how to apply ``recover_cells_and_kzg_proofs``. */ -// TODO: implement reconstructed_columns and columns_reconstruction_time metrics +// TODO: implement reconstructed_columns metric pub fn recover_matrix( partial_matrix: Vec, blob_count: usize, + metrics: &Option> ) -> Result> { + if let Some(metrics) = metrics.as_ref() { + let _timer = metrics.columns_reconstruction_time.start_timer(); + } + let kzg_settings = settings(); let mut matrix = vec![]; diff --git a/fork_choice_store/src/store.rs b/fork_choice_store/src/store.rs index a3027b5f..ad5f4fe8 100644 --- a/fork_choice_store/src/store.rs +++ b/fork_choice_store/src/store.rs @@ -1866,25 +1866,18 @@ impl Store
{ )?; // [REJECT] The sidecar's kzg_commitments field inclusion proof is valid as verified by verify_data_column_sidecar_inclusion_proof(sidecar). - let _data_column_sidecar_inclusion_proof_verification = metrics - .as_ref() - .map(|metrics| &metrics.data_column_sidecar_inclusion_proof_verification); - ensure!( - verify_sidecar_inclusion_proof(&data_column_sidecar, _data_column_sidecar_inclusion_proof_verification), + verify_sidecar_inclusion_proof(&data_column_sidecar, metrics), Error::DataColumnSidecarInvalidInclusionProof { data_column_sidecar } ); // [REJECT] The sidecar's column data is valid as verified by verify_data_column_sidecar_kzg_proofs(sidecar). - let _data_column_sidecar_kzg_verification_single = metrics - .as_ref() - .map(|metrics| &metrics.data_column_sidecar_kzg_verification_single); - verify_kzg_proofs( &data_column_sidecar, - _data_column_sidecar_kzg_verification_single) + metrics + ) .map_err(|error| { Error::DataColumnSidecarInvalid { data_column_sidecar: data_column_sidecar.clone_arc(), diff --git a/p2p/src/network.rs b/p2p/src/network.rs index 2156b9ca..1dca0d6b 100644 --- a/p2p/src/network.rs +++ b/p2p/src/network.rs @@ -2195,7 +2195,7 @@ impl Network
{ count: u64, ) { let epoch = misc::compute_epoch_at_slot::
<P>
(start_slot); - let custody_columns = self.network_globals.custody_columns(epoch); + let custody_columns = self.network_globals.custody_columns(); // prevent node from sending excessive requests, since custody peers is not available. if self.check_good_peers_on_column_subnets(epoch) { diff --git a/prometheus_metrics/src/metrics.rs b/prometheus_metrics/src/metrics.rs index 2f1b83eb..6722cdd8 100644 --- a/prometheus_metrics/src/metrics.rs +++ b/prometheus_metrics/src/metrics.rs @@ -55,7 +55,7 @@ pub struct Metrics { pub verified_gossip_data_column_sidecar: IntCounter, pub data_column_sidecar_verification_times: Histogram, pub reconstructed_columns: IntCounter, // TODO - pub columns_reconstruction_time: Histogram, // TODO + pub columns_reconstruction_time: Histogram, pub data_column_sidecar_computation: Histogram, pub data_column_sidecar_inclusion_proof_verification: Histogram, pub data_column_sidecar_kzg_verification_single: Histogram, // TODO? From ee87ae42c70a90665386375c35be54477244847f Mon Sep 17 00:00:00 2001 From: Ekaterina Ryazantseva Date: Fri, 20 Sep 2024 18:01:15 +0200 Subject: [PATCH 4/5] fix: use param metrics --- fork_choice_store/src/store.rs | 4 ++-- p2p/src/network.rs | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/fork_choice_store/src/store.rs b/fork_choice_store/src/store.rs index ad5f4fe8..54618357 100644 --- a/fork_choice_store/src/store.rs +++ b/fork_choice_store/src/store.rs @@ -1802,7 +1802,7 @@ impl Store
{ mut verifier: impl Verifier + Send, metrics: &Option>, ) -> Result> { - if let Some(metrics) = self.metrics.as_ref() { + if let Some(metrics) = metrics.as_ref() { metrics.data_column_sidecars_submitted_for_processing.inc(); } @@ -1975,7 +1975,7 @@ impl Store
{ } ); - if let Some(metrics) = self.metrics.as_ref() { + if let Some(metrics) = metrics.as_ref() { metrics.verified_gossip_data_column_sidecar.inc(); } diff --git a/p2p/src/network.rs b/p2p/src/network.rs index 1dca0d6b..265dc6a3 100644 --- a/p2p/src/network.rs +++ b/p2p/src/network.rs @@ -2275,7 +2275,6 @@ impl Network
{ let request = DataColumnsByRootRequest::new( data_column_identifiers - .clone() .try_into() .expect("length is under maximum"), ); From f44c44ad373af8989c45816c0125dfd3ae3f2e8a Mon Sep 17 00:00:00 2001 From: Ekaterina Ryazantseva Date: Fri, 20 Sep 2024 18:21:54 +0200 Subject: [PATCH 5/5] delete store metrics field --- fork_choice_control/src/controller.rs | 1 - fork_choice_store/src/store.rs | 3 --- 2 files changed, 4 deletions(-) diff --git a/fork_choice_control/src/controller.rs b/fork_choice_control/src/controller.rs index bdfaff24..80b4c0af 100644 --- a/fork_choice_control/src/controller.rs +++ b/fork_choice_control/src/controller.rs @@ -111,7 +111,6 @@ where anchor_block, anchor_state, finished_initial_forward_sync, - metrics.clone(), ); store.apply_tick(tick)?; diff --git a/fork_choice_store/src/store.rs b/fork_choice_store/src/store.rs index 54618357..a4b2f481 100644 --- a/fork_choice_store/src/store.rs +++ b/fork_choice_store/src/store.rs @@ -217,7 +217,6 @@ pub struct Store { rejected_block_roots: HashSet, finished_initial_forward_sync: bool, custody_columns: HashSet, - metrics: Option>, } impl Store
{ @@ -229,7 +228,6 @@ impl Store
{ anchor_block: Arc>, anchor_state: Arc>, finished_initial_forward_sync: bool, - metrics: Option>, ) -> Self { let block_root = anchor_block.message().hash_tree_root(); let state_root = anchor_state.hash_tree_root(); @@ -290,7 +288,6 @@ impl Store
{ rejected_block_roots: HashSet::default(), finished_initial_forward_sync, custody_columns: HashSet::default(), - metrics, } }
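
The timers added in this series are prometheus drop guards: Histogram::start_timer() returns a HistogramTimer that observes the elapsed time into the histogram when it is dropped, so the guard has to stay alive for the whole region being measured. A guard bound inside an `if let Some(metrics) = ... { ... }` block is dropped at that block's closing brace and only times the block itself, which is the pitfall the "stop _sidecar_inclusion_proof_timer ?" comments in patch 1 are circling and the form the helpers in patch 3 (verify_kzg_proofs, verify_sidecar_inclusion_proof, compute_matrix, recover_matrix) currently use. The sketch below is illustrative only, not Grandine code: it assumes nothing beyond the prometheus crate's Histogram, IntCounter and Registry APIs, reuses two metric names from patch 2, and the function and field names are hypothetical.

// Illustrative sketch only (not Grandine code). Assumes the `prometheus` crate;
// metric names are the ones introduced in patch 2 of this series.
use std::sync::Arc;

use prometheus::{histogram_opts, Histogram, IntCounter, Registry};

struct Metrics {
    data_column_sidecars_submitted_for_processing: IntCounter,
    data_column_sidecar_verification_times: Histogram,
}

impl Metrics {
    fn new(registry: &Registry) -> prometheus::Result<Self> {
        let submitted = IntCounter::new(
            "beacon_data_column_sidecar_processing_requests_total",
            "Number of data column sidecars submitted for processing",
        )?;
        let verification_times = Histogram::with_opts(histogram_opts!(
            "beacon_data_column_sidecar_gossip_verification_seconds",
            "Full runtime of data column sidecars gossip verification"
        ))?;

        // Register once; the handles kept in the struct are cheap clones of the
        // same underlying collectors.
        registry.register(Box::new(submitted.clone()))?;
        registry.register(Box::new(verification_times.clone()))?;

        Ok(Self {
            data_column_sidecars_submitted_for_processing: submitted,
            data_column_sidecar_verification_times: verification_times,
        })
    }
}

// Hypothetical stand-in for the sidecar verification path in store.rs.
fn validate_data_column_sidecar(metrics: &Option<Arc<Metrics>>) {
    if let Some(metrics) = metrics.as_ref() {
        metrics.data_column_sidecars_submitted_for_processing.inc();
        // Pitfall: a timer started here would be dropped at this closing brace
        // and would only measure the counter increment above.
        // let _timer = metrics.data_column_sidecar_verification_times.start_timer();
    }

    // Correct: the guard is bound in the enclosing scope, so everything below
    // is recorded into the histogram when the guard is dropped at function end.
    let _timer = metrics
        .as_ref()
        .map(|metrics| metrics.data_column_sidecar_verification_times.start_timer());

    // ... inclusion proof and KZG verification would run here ...
}

fn main() -> prometheus::Result<()> {
    let registry = Registry::new();
    let metrics = Some(Arc::new(Metrics::new(&registry)?));
    validate_data_column_sidecar(&metrics);
    Ok(())
}

The same handle-threading tracks the drift across the series: patch 2 passes individual &Histogram references into the eip_7594 helpers, patch 3 replaces them with a single &Option<Arc<Metrics>> parameter, and patches 4-5 drop the Store-owned metrics field in favour of the metrics argument already supplied to the sidecar-verification function, keeping registration and ownership in one place.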