diff --git a/consensus/core/src/config/mod.rs b/consensus/core/src/config/mod.rs
index d62bf15a9..4e1083f2e 100644
--- a/consensus/core/src/config/mod.rs
+++ b/consensus/core/src/config/mod.rs
@@ -68,6 +68,9 @@ pub struct Config {
 
     /// A scale factor to apply to memory allocation bounds
     pub ram_scale: f64,
+
+    /// The number of extra days of data from pruning point to keep
+    pub keep_extra_days_data: u32,
 }
 
 impl Config {
@@ -95,6 +98,7 @@ impl Config {
             initial_utxo_set: Default::default(),
             disable_upnp: false,
             ram_scale: 1.0,
+            keep_extra_days_data: 0,
         }
     }
 
diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs
index 2de19c265..68fc40199 100644
--- a/consensus/src/pipeline/pruning_processor/processor.rs
+++ b/consensus/src/pipeline/pruning_processor/processor.rs
@@ -25,7 +25,7 @@ use crate::{
 use crossbeam_channel::Receiver as CrossbeamReceiver;
 use itertools::Itertools;
 use kaspa_consensus_core::{
-    blockhash::ORIGIN,
+    blockhash::{BlockHashExtensions, ORIGIN},
     blockstatus::BlockStatus::StatusHeaderOnly,
     config::Config,
     muhash::MuHashExtensions,
@@ -378,13 +378,16 @@
             drop(tips_write);
         }
 
+        // Adjust the pruning point back if needed
+        let adjusted_root = self.adjust_for_extra_days(new_pruning_point);
+
         // Now we traverse the anti-future of the new pruning point starting from origin and going up.
         // The most efficient way to traverse the entire DAG from the bottom-up is via the reachability tree
         let mut queue = VecDeque::<Hash>::from_iter(reachability_read.get_children(ORIGIN).unwrap().iter().copied());
         let (mut counter, mut traversed) = (0, 0);
         info!("Header and Block pruning: starting traversal from: {} (genesis: {})", queue.iter().reusable_format(", "), genesis);
         while let Some(current) = queue.pop_front() {
-            if reachability_read.is_dag_ancestor_of_result(new_pruning_point, current).unwrap() {
+            if reachability_read.is_dag_ancestor_of_result(adjusted_root, current).unwrap() {
                 continue;
             }
             traversed += 1;
@@ -517,12 +520,46 @@
             // Set the history root to the new pruning point only after we successfully pruned its past
             let mut pruning_point_write = self.pruning_point_store.write();
             let mut batch = WriteBatch::default();
-            pruning_point_write.set_history_root(&mut batch, new_pruning_point).unwrap();
+            pruning_point_write.set_history_root(&mut batch, adjusted_root).unwrap();
             self.db.write(batch).unwrap();
             drop(pruning_point_write);
         }
     }
 
+    /// Adjusts the passed hash backwards through the selected parent chain until there's enough
+    /// to accommodate the configured extra number of days of data
+    fn adjust_for_extra_days(&self, reference_hash: Hash) -> Hash {
+        // Short circuit if not keeping extra days to avoid doing store lookups
+        if self.config.keep_extra_days_data == 0 {
+            return reference_hash;
+        }
+
+        let pp_reference_timestamp = self.headers_store.get_compact_header_data(reference_hash).unwrap().timestamp;
+        // days * seconds/day * milliseconds/second
+        let extra_days_ms = self.config.keep_extra_days_data as u64 * 86400 * 1000;
+
+        let mut adjusted_hash = reference_hash;
+
+        while pp_reference_timestamp.saturating_sub(self.headers_store.get_compact_header_data(adjusted_hash).unwrap().timestamp)
+            < extra_days_ms
+        {
+            let selected_parent = if let Ok(selected_parent) = self.ghostdag_store.get_selected_parent(adjusted_hash) {
+                selected_parent
+            } else {
+                break;
+            };
+
+            if selected_parent.is_origin() || !self.headers_store.has(selected_parent).unwrap() {
+                // Can't go further back
+                break;
+            }
+
+            adjusted_hash = selected_parent;
+        }
+
+        adjusted_hash
+    }
+
     fn past_pruning_points(&self) -> BlockHashSet {
         (0..self.pruning_point_store.read().get().unwrap().index)
             .map(|index| self.past_pruning_points_store.get(index).unwrap())
diff --git a/kaspad/src/args.rs b/kaspad/src/args.rs
index 56dd7c1de..746f3e749 100644
--- a/kaspad/src/args.rs
+++ b/kaspad/src/args.rs
@@ -90,6 +90,7 @@ pub struct Args {
     #[serde(rename = "nogrpc")]
     pub disable_grpc: bool,
     pub ram_scale: f64,
+    pub keep_extra_days_data: u32,
 }
 
 impl Default for Args {
@@ -140,6 +141,7 @@
             disable_dns_seeding: false,
             disable_grpc: false,
             ram_scale: 1.0,
+            keep_extra_days_data: 0,
         }
     }
 }
@@ -159,6 +161,7 @@
         config.p2p_listen_address = self.listen.unwrap_or(ContextualNetAddress::unspecified());
         config.externalip = self.externalip.map(|v| v.normalize(config.default_p2p_port()));
         config.ram_scale = self.ram_scale;
+        config.keep_extra_days_data = self.keep_extra_days_data;
 
         #[cfg(feature = "devnet-prealloc")]
         if let Some(num_prealloc_utxos) = self.num_prealloc_utxos {
@@ -369,6 +372,13 @@ Setting to 0 prevents the preallocation and sets the maximum to {}, leading to 0
                 .help("Apply a scale factor to memory allocation bounds. Nodes with limited RAM (~4-8GB) should set this to ~0.3-0.5 respectively. Nodes with a large RAM (~64GB) can set this value to ~3.0-4.0 and gain superior performance especially for syncing peers faster"),
         )
+        .arg(
+            Arg::new("keep-extra-days-data")
+                .long("keep-extra-days-data")
+                .require_equals(true)
+                .value_parser(clap::value_parser!(u32))
+                .help("Keep an extra N number of days of data before the pruning point after each pruning period")
+        )
         ;
 
     #[cfg(feature = "devnet-prealloc")]
@@ -448,6 +458,7 @@ impl Args {
            disable_dns_seeding: arg_match_unwrap_or::<bool>(&m, "nodnsseed", defaults.disable_dns_seeding),
            disable_grpc: arg_match_unwrap_or::<bool>(&m, "nogrpc", defaults.disable_grpc),
            ram_scale: arg_match_unwrap_or::<f64>(&m, "ram-scale", defaults.ram_scale),
+           keep_extra_days_data: arg_match_unwrap_or::<u32>(&m, "keep-extra-days-data", defaults.keep_extra_days_data),
 
            #[cfg(feature = "devnet-prealloc")]
            num_prealloc_utxos: m.get_one::<u64>("num-prealloc-utxos").cloned(),
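
For reviewers who want to sanity-check the walk-back, here is a minimal, self-contained sketch of the logic that `adjust_for_extra_days` performs, using hypothetical in-memory maps (`timestamps`, `selected_parents`) in place of the real `headers_store`/`ghostdag_store` APIs; it is an illustration only, not part of the patch:

```rust
// Standalone sketch of the extra-days walk-back, under the assumption that we only
// need (hash -> timestamp) and (hash -> selected parent) lookups. The real code uses
// the headers store, the ghostdag store, and an origin/header-availability check.
use std::collections::HashMap;

type Hash = u64;

fn adjust_for_extra_days(
    reference: Hash,                         // new pruning point; must be present in `timestamps`
    keep_extra_days_data: u32,               // configured number of extra days
    timestamps: &HashMap<Hash, u64>,         // block hash -> header timestamp (ms)
    selected_parents: &HashMap<Hash, Hash>,  // block hash -> selected parent
) -> Hash {
    // Short circuit, mirroring the patch: no extra days means no store lookups.
    if keep_extra_days_data == 0 {
        return reference;
    }
    let reference_ts = timestamps[&reference];
    // days * seconds/day * milliseconds/second
    let extra_days_ms = keep_extra_days_data as u64 * 86_400 * 1_000;

    let mut adjusted = reference;
    // Walk back along selected parents until the timestamp gap covers the window,
    // or until there is no parent / no header left to walk to.
    while reference_ts.saturating_sub(timestamps[&adjusted]) < extra_days_ms {
        match selected_parents.get(&adjusted) {
            Some(&parent) if timestamps.contains_key(&parent) => adjusted = parent,
            _ => break, // stand-in for the origin / missing-header checks in the patch
        }
    }
    adjusted
}

fn main() {
    // Toy chain: each block is one hour (3_600_000 ms) after its selected parent.
    let mut timestamps = HashMap::new();
    let mut selected_parents = HashMap::new();
    for h in 1..=48u64 {
        timestamps.insert(h, h * 3_600_000);
        if h > 1 {
            selected_parents.insert(h, h - 1);
        }
    }
    // Keeping 1 extra day walks back 24 hourly blocks from block 48 to block 24.
    assert_eq!(adjust_for_extra_days(48, 1, &timestamps, &selected_parents), 24);
    println!("adjusted root covers one extra day of data");
}
```

The sketch mirrors the patch's choices: the day count is converted to milliseconds, `saturating_sub` guards against a parent timestamp later than the reference, and the walk stops once the gap covers the configured window or the chain can no longer be followed.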