From b94c95bd0777386e0de519bc0bdd1b678fe556b3 Mon Sep 17 00:00:00 2001 From: Nikolai Golub Date: Thu, 19 Oct 2023 17:12:40 +0200 Subject: [PATCH 1/5] Remove `NativeDB::set_value` in favour of `set_values` (#1076) * Remove `NativeDB::set_value` in favour of `set_values` As it is only used in test. Explicit use of `anyhow::Result` instead of importing it, I believe it improves readability. * Generalize set_values over iterator * Bring back `open_cf` with slight rename to better reflect difference --- full-node/db/sov-db/src/native_db.rs | 9 +-- full-node/db/sov-schema-db/src/lib.rs | 68 ++++++++++++------- module-system/sov-state/src/prover_storage.rs | 3 +- 3 files changed, 46 insertions(+), 34 deletions(-) diff --git a/full-node/db/sov-db/src/native_db.rs b/full-node/db/sov-db/src/native_db.rs index e293968d2..7cb3dceec 100644 --- a/full-node/db/sov-db/src/native_db.rs +++ b/full-node/db/sov-db/src/native_db.rs @@ -43,15 +43,10 @@ impl NativeDB { .map(Option::flatten) } - /// Sets a key-value pair in the [`NativeDB`]. - pub fn set_value(&self, key: Vec, value: Option>) -> anyhow::Result<()> { - self.set_values(vec![(key, value)]) - } - /// Sets a sequence of key-value pairs in the [`NativeDB`]. The write is atomic. 
pub fn set_values( &self, - key_value_pairs: Vec<(Vec, Option>)>, + key_value_pairs: impl IntoIterator, Option>)>, ) -> anyhow::Result<()> { let batch = SchemaBatch::default(); for (key, value) in key_value_pairs { @@ -160,7 +155,7 @@ mod tests { let db = NativeDB::with_path(tmpdir.path()).unwrap(); let key = b"deleted".to_vec(); - db.set_value(key.clone(), None).unwrap(); + db.set_values(vec![(key.clone(), None)]).unwrap(); assert_eq!(db.get_value_option(&key).unwrap(), None); } diff --git a/full-node/db/sov-schema-db/src/lib.rs b/full-node/db/sov-schema-db/src/lib.rs index 9837ff2df..3a39d64d3 100644 --- a/full-node/db/sov-schema-db/src/lib.rs +++ b/full-node/db/sov-schema-db/src/lib.rs @@ -22,7 +22,7 @@ use std::collections::HashMap; use std::path::Path; use std::sync::Mutex; -use anyhow::{format_err, Result}; +use anyhow::format_err; use iterator::ScanDirection; pub use iterator::{SchemaIterator, SeekKeyEncoder}; use metrics::{ @@ -30,8 +30,8 @@ use metrics::{ SCHEMADB_BATCH_PUT_LATENCY_SECONDS, SCHEMADB_DELETES, SCHEMADB_GET_BYTES, SCHEMADB_GET_LATENCY_SECONDS, SCHEMADB_PUT_BYTES, }; +use rocksdb::ReadOptions; pub use rocksdb::DEFAULT_COLUMN_FAMILY_NAME; -use rocksdb::{ColumnFamilyDescriptor, ReadOptions}; use thiserror::Error; use tracing::info; @@ -54,8 +54,8 @@ impl DB { name: &'static str, column_families: impl IntoIterator>, db_opts: &rocksdb::Options, - ) -> Result { - let db = DB::open_cf( + ) -> anyhow::Result { + let db = DB::open_with_cfds( db_opts, path, name, @@ -69,12 +69,13 @@ impl DB { } /// Open RocksDB with the provided column family descriptors. - pub fn open_cf( + /// This allows to configure options for each column family. 
+ pub fn open_with_cfds( db_opts: &rocksdb::Options, path: impl AsRef, name: &'static str, - cfds: impl IntoIterator, - ) -> Result { + cfds: impl IntoIterator, + ) -> anyhow::Result { let inner = rocksdb::DB::open_cf_descriptors(db_opts, path, cfds)?; Ok(Self::log_construct(name, inner)) } @@ -86,7 +87,7 @@ impl DB { path: impl AsRef, name: &'static str, cfs: Vec, - ) -> Result { + ) -> anyhow::Result { let error_if_log_file_exists = false; let inner = rocksdb::DB::open_cf_for_read_only(opts, path, cfs, error_if_log_file_exists)?; @@ -102,7 +103,7 @@ impl DB { secondary_path: P, name: &'static str, cfs: Vec, - ) -> Result { + ) -> anyhow::Result { let inner = rocksdb::DB::open_cf_as_secondary(opts, primary_path, secondary_path, cfs)?; Ok(Self::log_construct(name, inner)) } @@ -113,7 +114,10 @@ impl DB { } /// Reads single record by key. - pub fn get(&self, schema_key: &impl KeyCodec) -> Result> { + pub fn get( + &self, + schema_key: &impl KeyCodec, + ) -> anyhow::Result> { let _timer = SCHEMADB_GET_LATENCY_SECONDS .with_label_values(&[S::COLUMN_FAMILY_NAME]) .start_timer(); @@ -133,7 +137,11 @@ impl DB { } /// Writes single record. - pub fn put(&self, key: &impl KeyCodec, value: &impl ValueCodec) -> Result<()> { + pub fn put( + &self, + key: &impl KeyCodec, + value: &impl ValueCodec, + ) -> anyhow::Result<()> { // Not necessary to use a batch, but we'd like a central place to bump counters. // Used in tests only anyway. let batch = SchemaBatch::new(); @@ -145,7 +153,7 @@ impl DB { &self, opts: ReadOptions, direction: ScanDirection, - ) -> Result> { + ) -> anyhow::Result> { let cf_handle = self.get_cf_handle(S::COLUMN_FAMILY_NAME)?; Ok(SchemaIterator::new( self.inner.raw_iterator_cf_opt(cf_handle, opts), @@ -154,27 +162,33 @@ impl DB { } /// Returns a forward [`SchemaIterator`] on a certain schema with the default read options. 
- pub fn iter(&self) -> Result> { + pub fn iter(&self) -> anyhow::Result> { self.iter_with_direction::(Default::default(), ScanDirection::Forward) } /// Returns a forward [`SchemaIterator`] on a certain schema with the provided read options. - pub fn iter_with_opts(&self, opts: ReadOptions) -> Result> { + pub fn iter_with_opts( + &self, + opts: ReadOptions, + ) -> anyhow::Result> { self.iter_with_direction::(opts, ScanDirection::Forward) } /// Returns a backward [`SchemaIterator`] on a certain schema with the default read options. - pub fn rev_iter(&self) -> Result> { + pub fn rev_iter(&self) -> anyhow::Result> { self.iter_with_direction::(Default::default(), ScanDirection::Backward) } /// Returns a backward [`SchemaIterator`] on a certain schema with the provided read options. - pub fn rev_iter_with_opts(&self, opts: ReadOptions) -> Result> { + pub fn rev_iter_with_opts( + &self, + opts: ReadOptions, + ) -> anyhow::Result> { self.iter_with_direction::(opts, ScanDirection::Backward) } /// Writes a group of records wrapped in a [`SchemaBatch`]. - pub fn write_schemas(&self, batch: SchemaBatch) -> Result<()> { + pub fn write_schemas(&self, batch: SchemaBatch) -> anyhow::Result<()> { let _timer = SCHEMADB_BATCH_COMMIT_LATENCY_SECONDS .with_label_values(&[self.name]) .start_timer(); @@ -216,7 +230,7 @@ impl DB { Ok(()) } - fn get_cf_handle(&self, cf_name: &str) -> Result<&rocksdb::ColumnFamily> { + fn get_cf_handle(&self, cf_name: &str) -> anyhow::Result<&rocksdb::ColumnFamily> { self.inner.cf_handle(cf_name).ok_or_else(|| { format_err!( "DB::cf_handle not found for column family name: {}", @@ -225,15 +239,15 @@ impl DB { }) } - /// Flushes memtable data. This is only used for testing `get_approximate_sizes_cf` in unit - /// tests. - pub fn flush_cf(&self, cf_name: &str) -> Result<()> { + /// Flushes [MemTable](https://github.com/facebook/rocksdb/wiki/MemTable) data. + /// This is only used for testing `get_approximate_sizes_cf` in unit tests. 
+ pub fn flush_cf(&self, cf_name: &str) -> anyhow::Result<()> { Ok(self.inner.flush_cf(self.get_cf_handle(cf_name)?)?) } /// Returns the current RocksDB property value for the provided column family name /// and property name. - pub fn get_property(&self, cf_name: &str, property_name: &str) -> Result { + pub fn get_property(&self, cf_name: &str, property_name: &str) -> anyhow::Result { self.inner .property_int_value_cf(self.get_cf_handle(cf_name)?, property_name)? .ok_or_else(|| { @@ -246,7 +260,7 @@ impl DB { } /// Creates new physical DB checkpoint in directory specified by `path`. - pub fn create_checkpoint>(&self, path: P) -> Result<()> { + pub fn create_checkpoint>(&self, path: P) -> anyhow::Result<()> { rocksdb::checkpoint::Checkpoint::new(&self.inner)?.create_checkpoint(path)?; Ok(()) } @@ -273,7 +287,11 @@ impl SchemaBatch { } /// Adds an insert/update operation to the batch. - pub fn put(&self, key: &impl KeyCodec, value: &impl ValueCodec) -> Result<()> { + pub fn put( + &self, + key: &impl KeyCodec, + value: &impl ValueCodec, + ) -> anyhow::Result<()> { let _timer = SCHEMADB_BATCH_PUT_LATENCY_SECONDS .with_label_values(&["unknown"]) .start_timer(); @@ -290,7 +308,7 @@ impl SchemaBatch { } /// Adds a delete operation to the batch. 
- pub fn delete(&self, key: &impl KeyCodec) -> Result<()> { + pub fn delete(&self, key: &impl KeyCodec) -> anyhow::Result<()> { let key = key.encode_key()?; self.rows .lock() diff --git a/module-system/sov-state/src/prover_storage.rs b/module-system/sov-state/src/prover_storage.rs index 73088fdee..1b1cc6c98 100644 --- a/module-system/sov-state/src/prover_storage.rs +++ b/module-system/sov-state/src/prover_storage.rs @@ -167,8 +167,7 @@ impl Storage for ProverStorage { accessory_writes .ordered_writes .iter() - .map(|(k, v_opt)| (k.key.to_vec(), v_opt.as_ref().map(|v| v.value.to_vec()))) - .collect(), + .map(|(k, v_opt)| (k.key.to_vec(), v_opt.as_ref().map(|v| v.value.to_vec()))), ) .expect("native db write must succeed"); From 421e264a5c83bdd6f251305bba81e2470e901f72 Mon Sep 17 00:00:00 2001 From: Preston Evans <32944016+preston-evans98@users.noreply.github.com> Date: Thu, 19 Oct 2023 11:10:18 -0700 Subject: [PATCH 2/5] Give preferred sequencer the ability to process "deferred blobs" early (#1072) * WIP: soft confs * Add constants to manifest * fmt * fix nit * clippy * Add custom parsing for consts * Add runtime to tests * use runtime in all tests * Bug fix; re-run const resolution on change * make priority flow general * remove dead runtime machinery * fix two more tests * Fix last test * Remove dead backup * remove unneeded dev deps * fmt * clippy * Remove hardcoded manifest test * implement 'bonus blobs' * bug fix; add tests * Make tests declarative * remove dead code * Add assertion; fix clippy * fix typo * Handle deferrd_slots = 0 * fix nit --- .../sov-blob-storage/Cargo.toml | 2 +- .../sov-blob-storage/src/call.rs | 42 ++ .../sov-blob-storage/src/capabilities.rs | 152 ++++++-- .../sov-blob-storage/src/lib.rs | 43 ++- .../tests/capability_tests.rs | 358 +++++++++++++++++- .../sov-sequencer-registry/src/lib.rs | 15 + .../sov-modules-api/src/capabilities.rs | 10 + 7 files changed, 569 insertions(+), 53 deletions(-) create mode 100644 
module-system/module-implementations/sov-blob-storage/src/call.rs diff --git a/module-system/module-implementations/sov-blob-storage/Cargo.toml b/module-system/module-implementations/sov-blob-storage/Cargo.toml index c0fe6c362..fe4b59655 100644 --- a/module-system/module-implementations/sov-blob-storage/Cargo.toml +++ b/module-system/module-implementations/sov-blob-storage/Cargo.toml @@ -41,4 +41,4 @@ jmt = { workspace = true } [features] default = [] -native = ["jsonrpsee", "schemars", "serde", "serde_json", "sov-modules-api/native", "sov-state/native", "sov-sequencer-registry/native"] +native = ["jsonrpsee", "schemars", "serde", "serde_json", "sov-modules-api/native", "sov-state/native", "sov-sequencer-registry/native", "clap"] diff --git a/module-system/module-implementations/sov-blob-storage/src/call.rs b/module-system/module-implementations/sov-blob-storage/src/call.rs new file mode 100644 index 000000000..bc5219c57 --- /dev/null +++ b/module-system/module-implementations/sov-blob-storage/src/call.rs @@ -0,0 +1,42 @@ +//! Defines the CallMessages accepted by the blob storage module + +use sov_modules_api::{Context, DaSpec, WorkingSet}; + +use crate::BlobStorage; + +/// A call message for the blob storage module +#[cfg_attr( + feature = "native", + derive(serde::Serialize), + derive(serde::Deserialize), + derive(sov_modules_api::macros::CliWalletArg), + derive(schemars::JsonSchema) +)] +#[derive(borsh::BorshDeserialize, borsh::BorshSerialize, Debug, PartialEq, Clone)] +pub enum CallMessage { + /// Asks the blob selector to process up to the given number of deferred blobs early. + /// Only the preferred sequencer may send this message. 
+ ProcessDeferredBlobsEarly { + /// The number of blobs to process early + number: u16, + }, +} + +impl BlobStorage { + pub(crate) fn handle_process_blobs_early_msg( + &self, + context: &C, + number: u16, + working_set: &mut WorkingSet, + ) { + if let Some(preferred_sequencer) = self + .sequencer_registry + .get_preferred_sequencer_rollup_address(working_set) + { + if context.sender() == &preferred_sequencer { + self.deferred_blobs_requested_for_execution_next_slot + .set(&number, working_set); + } + } + } +} diff --git a/module-system/module-implementations/sov-blob-storage/src/capabilities.rs b/module-system/module-implementations/sov-blob-storage/src/capabilities.rs index 1cf727447..ce76dd393 100644 --- a/module-system/module-implementations/sov-blob-storage/src/capabilities.rs +++ b/module-system/module-implementations/sov-blob-storage/src/capabilities.rs @@ -3,11 +3,40 @@ use sov_modules_api::capabilities::{BlobRefOrOwned, BlobSelector}; use sov_modules_api::{BlobReaderTrait, Context, DaSpec, WorkingSet}; use tracing::info; -use crate::BlobStorage; +use crate::{BlobStorage, DEFERRED_SLOTS_COUNT}; + +impl BlobStorage { + fn filter_by_allowed_sender( + &self, + b: &Da::BlobTransaction, + working_set: &mut WorkingSet, + ) -> bool { + { + let is_allowed = self + .sequencer_registry + .is_sender_allowed(&b.sender(), working_set); + // This is the best effort approach for making sure, + // that blobs do not disappear silently + // TODO: Add issue for that + if !is_allowed { + info!( + "Blob hash=0x{} from sender {} is going to be discarded", + hex::encode(b.hash()), + b.sender() + ); + } + is_allowed + } + } +} impl BlobSelector for BlobStorage { type Context = C; + // This implementation returns three categories of blobs: + // 1. Any blobs sent by the preferred sequencer ("prority blobs") + // 2. Any non-priority blobs which were sent `DEFERRED_SLOTS_COUNT` slots ago ("expiring deferred blobs") + // 3. 
Some additional deferred blobs needed to fill the total requested by the sequencer, if applicable. ("bonus blobs") fn get_blobs_for_this_slot<'a, I>( &self, current_blobs: I, @@ -16,71 +45,122 @@ impl BlobSelector for BlobStorage { where I: IntoIterator, { + // If `DEFERRED_SLOTS_COUNT` is 0, we don't never to do any deferred blob processing and this + // function just sorts and filters the current blobs before returning + if DEFERRED_SLOTS_COUNT == 0 { + let mut blobs = current_blobs + .into_iter() + .filter(|b| self.filter_by_allowed_sender(b, working_set)) + .map(Into::into) + .collect::>(); + if let Some(sequencer) = self.get_preferred_sequencer(working_set) { + blobs.sort_by_key(|b: &BlobRefOrOwned| { + b.as_ref().sender() != sequencer + }); + } + return Ok(blobs.into_iter().map(Into::into).collect()); + } + + // Calculate any expiring deferred blobs first, since these have to be processed no matter what (Case 2 above). + // Note that we have to handle this case even if there is no preferred sequencer, since that sequencer might have + // exited while there were deferred blobs waiting to be processed let current_slot: TransitionHeight = self.get_current_slot_height(working_set); - let past_deferred: Vec = current_slot - .checked_sub(self.get_deferred_slots_count(working_set)) - .map(|pull_from_slot| self.take_blobs_for_slot_height(pull_from_slot, working_set)) - .unwrap_or_default(); - let preferred_sequencer = self.get_preferred_sequencer(working_set); + let slot_for_expiring_blobs = + current_slot.saturating_sub(self.get_deferred_slots_count(working_set)); + let expiring_deferred_blobs: Vec = + self.take_blobs_for_slot_height(slot_for_expiring_blobs, working_set); - let preferred_sequencer = if let Some(sequencer) = preferred_sequencer { + // If there is no preferred sequencer, that's all we need to do + let preferred_sequencer = if let Some(sequencer) = self.get_preferred_sequencer(working_set) + { sequencer } else { // TODO: 
https://github.com/Sovereign-Labs/sovereign-sdk/issues/654 // Prevent double number of blobs being executed - return Ok(past_deferred + return Ok(expiring_deferred_blobs .into_iter() .map(Into::into) .chain(current_blobs.into_iter().map(Into::into)) .collect()); }; + // If we reach this point, there is a preferred sequencer, so we need to handle cases 1 and 3. + + // First, compute any "bonus blobs" requested + // to be processed early. + let num_bonus_blobs_requested = self + .deferred_blobs_requested_for_execution_next_slot + .get(working_set) + .unwrap_or_default(); + self.deferred_blobs_requested_for_execution_next_slot + .set(&0, working_set); + + let mut remaining_blobs_requested = + (num_bonus_blobs_requested as usize).saturating_sub(expiring_deferred_blobs.len()); + let mut bonus_blobs: Vec> = + Vec::with_capacity(remaining_blobs_requested); + let mut next_slot_to_check = slot_for_expiring_blobs + 1; + // We only need to check slots up to the current slot, since deferred blobs from the current + // slot haven't been stored yet. We'll handle those later. 
+ while remaining_blobs_requested > 0 && next_slot_to_check < current_slot { + let mut blobs_from_next_slot = + self.take_blobs_for_slot_height(next_slot_to_check, working_set); + + // If the set of deferred blobs from the next slot in line contains more than the remainder needed to fill the request, + // we split that group and save the unused portion back into state + if blobs_from_next_slot.len() > remaining_blobs_requested { + let blobs_to_save: Vec<::BlobTransaction> = + blobs_from_next_slot.split_off(remaining_blobs_requested); + bonus_blobs.extend(blobs_from_next_slot.into_iter().map(Into::into)); + self.store_blobs( + next_slot_to_check, + &blobs_to_save.iter().collect::>(), + working_set, + )?; + remaining_blobs_requested = 0; + break; + } else { + remaining_blobs_requested -= blobs_from_next_slot.len(); + bonus_blobs.extend(blobs_from_next_slot.into_iter().map(Into::into)); + } + next_slot_to_check += 1; + } + + // Finally handle any new blobs which appeared on the DA layer in this slot let mut priority_blobs = Vec::new(); let mut to_defer: Vec<&mut Da::BlobTransaction> = Vec::new(); - for blob in current_blobs { + // Blobs from the preferred sequencer get priority if blob.sender() == preferred_sequencer { priority_blobs.push(blob); } else { - to_defer.push(blob); + // Other blobs get deferred unless the sequencer has requested otherwise + if remaining_blobs_requested > 0 { + remaining_blobs_requested -= 1; + bonus_blobs.push(blob.into()) + } else { + to_defer.push(blob); + } } } + // Save any blobs that need deferring if !to_defer.is_empty() { // TODO: https://github.com/Sovereign-Labs/sovereign-sdk/issues/655 // Gas metering suppose to prevent saving blobs from not allowed senders if they exit mid-slot let to_defer: Vec<&Da::BlobTransaction> = to_defer .iter() - .filter(|b| { - let is_allowed = self - .sequencer_registry - .is_sender_allowed(&b.sender(), working_set); - // This is the best effort approach for making sure, - // that blobs do not 
disappear silently - // TODO: Add issue for that - if !is_allowed { - info!( - "Blob hash=0x{} from sender {} is going to be discarded", - hex::encode(b.hash()), - b.sender() - ); - } - is_allowed - }) + .filter(|b| self.filter_by_allowed_sender(b, working_set)) .map(|b| &**b) .collect(); self.store_blobs(current_slot, &to_defer, working_set)? } - if !priority_blobs.is_empty() { - Ok(priority_blobs - .into_iter() - .map(Into::into) - .chain(past_deferred.into_iter().map(Into::into)) - .collect()) - } else { - // No blobs from preferred sequencer, nothing to save, older blobs have priority - Ok(past_deferred.into_iter().map(Into::into).collect()) - } + Ok(priority_blobs + .into_iter() + .map(Into::into) + .chain(expiring_deferred_blobs.into_iter().map(Into::into)) + .chain(bonus_blobs) + .collect()) } } diff --git a/module-system/module-implementations/sov-blob-storage/src/lib.rs b/module-system/module-implementations/sov-blob-storage/src/lib.rs index 278881989..0ca8d962e 100644 --- a/module-system/module-implementations/sov-blob-storage/src/lib.rs +++ b/module-system/module-implementations/sov-blob-storage/src/lib.rs @@ -1,6 +1,8 @@ #![deny(missing_docs)] #![doc = include_str!("../README.md")] +mod call; +pub use call::CallMessage; mod capabilities; #[cfg(feature = "native")] mod query; @@ -9,7 +11,7 @@ mod query; pub use query::*; use sov_chain_state::TransitionHeight; use sov_modules_api::macros::config_constant; -use sov_modules_api::{Module, ModuleInfo, StateMap, WorkingSet}; +use sov_modules_api::{Module, ModuleInfo, StateMap, StateValue, WorkingSet}; /// For how many slots deferred blobs are stored before being executed #[config_constant] @@ -28,7 +30,14 @@ pub struct BlobStorage /// DA block number => vector of blobs /// Caller controls the order of blobs in the vector #[state] - pub(crate) blobs: StateMap>>, + pub(crate) deferred_blobs: StateMap>>, + + /// The number of deferred blobs which the preferred sequencer has asked to have executed during the next 
slot. + /// This request will be honored unless: + /// 1. More blobs have reached the maximum deferral period than the sequencer requests. In that case, all of those blobs will still be executed + /// 2. The sequencer requests more blobs than are in the deferred queue. In that case, all of the blobs in the deferred queue will be executed. + #[state] + pub(crate) deferred_blobs_requested_for_execution_next_slot: StateValue, #[module] pub(crate) sequencer_registry: sov_sequencer_registry::SequencerRegistry, @@ -50,7 +59,8 @@ impl BlobStorage BlobStorage, ) -> Vec { - self.blobs + self.deferred_blobs .remove(&slot_height, working_set) .unwrap_or_default() .iter() @@ -69,7 +79,6 @@ impl BlobStorage, @@ -93,5 +102,27 @@ impl BlobStorage Module for BlobStorage { type Context = C; type Config = (); - type CallMessage = sov_modules_api::NonInstantiable; + type CallMessage = CallMessage; + + fn genesis( + &self, + _config: &Self::Config, + _working_set: &mut WorkingSet, + ) -> Result<(), sov_modules_api::Error> { + Ok(()) + } + + fn call( + &self, + message: Self::CallMessage, + context: &Self::Context, + working_set: &mut WorkingSet, + ) -> Result { + match message { + CallMessage::ProcessDeferredBlobsEarly { number } => { + self.handle_process_blobs_early_msg(context, number, working_set); + Ok(Default::default()) + } + } + } } diff --git a/module-system/module-implementations/sov-blob-storage/tests/capability_tests.rs b/module-system/module-implementations/sov-blob-storage/tests/capability_tests.rs index 82e59faed..2633a0c1e 100644 --- a/module-system/module-implementations/sov-blob-storage/tests/capability_tests.rs +++ b/module-system/module-implementations/sov-blob-storage/tests/capability_tests.rs @@ -79,24 +79,55 @@ fn make_blobs_by_slot( #[test] fn priority_sequencer_flow_general() { - let (current_storage, runtime, genesis_root) = TestRuntime::pre_initialized(true); - let mut working_set = WorkingSet::new(current_storage.clone()); - let is_from_preferred_by_slot = 
[ vec![false, false, true], vec![false, true, false], vec![false, false], ]; let blobs_by_slot: Vec<_> = make_blobs_by_slot(&is_from_preferred_by_slot); + do_deferred_blob_test(blobs_by_slot, vec![]) +} + +pub struct SlotTestInfo { + pub slot_number: u64, + /// Any "requests for early processing" to be sent during this slot + pub early_processing_request_with_sender: Option<(sov_blob_storage::CallMessage, Address)>, + /// The expected number of blobs to process, if known + pub expected_blobs_to_process: Option, +} + +// Tests of the "blob deferral" logic tend to have the same structure, which is encoded in this helper: +// 1. Initialize the rollup +// 2. Calculate the expected order of blobs to be processed +// 3. In a loop... +// (Optionally) Assert that the correct number of blobs has been processed that slot +// (Optionally) Request early processing of some blobs in the next slot +// Assert that blobs are pulled out of the queue in the expected order +// 4. Assert that all blobs have been processed +fn do_deferred_blob_test( + blobs_by_slot: Vec>>, + test_info: Vec, +) { + let num_slots = blobs_by_slot.len(); + // Initialize the rollup + let (current_storage, runtime, genesis_root) = TestRuntime::pre_initialized(true); + let mut working_set = WorkingSet::new(current_storage.clone()); + + // Compute the *expected* order of blob processing. 
let mut expected_blobs = blobs_by_slot.iter().flatten().cloned().collect::>(); - expected_blobs.sort_by_key(|b| b.should_get_processed_in()); + expected_blobs.sort_by_key(|b| b.priority()); let mut expected_blobs = expected_blobs.into_iter(); let mut slots_iterator = blobs_by_slot .into_iter() .map(|blobs| blobs.into_iter().map(|b| b.blob).collect()) .chain(std::iter::repeat(Vec::new())); - for slot_number in 0..DEFERRED_SLOTS_COUNT + 3 { + let mut test_info = test_info.into_iter().peekable(); + let mut has_processed_blobs_early = false; + + // Loop enough times that all provided slots are processed and all deferred blobs expire + for slot_number in 0..num_slots as u64 + DEFERRED_SLOTS_COUNT { + // Run the blob selector module let slot_number_u8 = slot_number as u8; let mut slot_data = MockBlock { header: MockBlockHeader { @@ -120,16 +151,313 @@ fn priority_sequencer_flow_general() { ) .unwrap(); + // Run any extra logic provided by the test for this slot + if let Some(next_slot_info) = test_info.peek() { + if next_slot_info.slot_number == slot_number { + let next_slot_info = test_info.next().unwrap(); + // If applicable, assert that the expected number of blobs was processed + if let Some(expected) = next_slot_info.expected_blobs_to_process { + assert_eq!(expected, blobs_to_execute.len()) + } + + // If applicable, send the requested callmessage to the blob_storage module + if let Some((msg, sender)) = next_slot_info.early_processing_request_with_sender { + runtime + .blob_storage + .call(msg, &DefaultContext::new(sender), &mut working_set) + .unwrap(); + has_processed_blobs_early = true; + } + } + } + + // Check that the computed list of blobs is the one we expected for blob in blobs_to_execute { - let expected = expected_blobs.next().unwrap(); - assert!(expected.should_get_processed_in() == slot_number); - assert_blobs_are_equal(expected.blob, blob, &format!("slot {:?}", slot_number)); + let expected: BlobWithAppearance = expected_blobs.next().unwrap(); + if 
!has_processed_blobs_early { + assert_eq!(expected.must_be_processed_by(), slot_number); + } + assert_blobs_are_equal(expected.blob, blob, &format!("Slot {}", slot_number)); } } // Ensure that all blobs have been processed assert!(expected_blobs.next().is_none()); } +#[test] +fn bonus_blobs_are_delivered_on_request() { + // If blobs are deferred for less than two slots, "early processing" is not possible + if DEFERRED_SLOTS_COUNT < 2 { + return; + } + + let is_from_preferred_by_slot = [ + vec![false, false, true, false, false], + vec![false, true, false], + vec![false, false], + ]; + let blobs_by_slot: Vec<_> = make_blobs_by_slot(&is_from_preferred_by_slot); + let test_info = vec![ + SlotTestInfo { + slot_number: 0, + expected_blobs_to_process: Some(1), // The first slot will process the one blob from the preferred sequencer + early_processing_request_with_sender: Some(( + sov_blob_storage::CallMessage::ProcessDeferredBlobsEarly { number: 4 }, + PREFERRED_SEQUENCER_ROLLUP, + )), + }, + SlotTestInfo { + slot_number: 1, + expected_blobs_to_process: Some(5), // The second slot will process four bonus blobs plus the one from the preferred sequencer + early_processing_request_with_sender: None, + }, + SlotTestInfo { + slot_number: 2, + expected_blobs_to_process: Some(0), // The third slot won't process any blobs + early_processing_request_with_sender: None, + }, + ]; + + do_deferred_blob_test(blobs_by_slot, test_info) +} + +#[test] +fn test_deferrabl_with_small_count() { + // If blobs are deferred for less than two slots ensure that "early" processing requests do not alter + // the order of blob processing + if DEFERRED_SLOTS_COUNT > 1 { + return; + } + + let is_from_preferred_by_slot = [ + vec![false, false, true, false, false], + vec![false, true, false], + vec![false, false], + ]; + let blobs_by_slot: Vec<_> = make_blobs_by_slot(&is_from_preferred_by_slot); + let test_info = if DEFERRED_SLOTS_COUNT == 1 { + vec![ + SlotTestInfo { + slot_number: 0, + 
expected_blobs_to_process: Some(1), // The first slot will process the one blob from the preferred sequencer + early_processing_request_with_sender: Some(( + sov_blob_storage::CallMessage::ProcessDeferredBlobsEarly { number: 8 }, + PREFERRED_SEQUENCER_ROLLUP, + )), + }, + SlotTestInfo { + slot_number: 1, + expected_blobs_to_process: Some(7), // The second slot will process seven bonus blobs plus the one from the preferred sequencer + early_processing_request_with_sender: None, + }, + SlotTestInfo { + slot_number: 2, + expected_blobs_to_process: Some(0), // The third slot won't process any blobs + early_processing_request_with_sender: None, + }, + ] + } else { + // If the deferred slots count is 0, all blobs are processed as soon as they become available. + vec![ + SlotTestInfo { + slot_number: 0, + expected_blobs_to_process: Some(5), + early_processing_request_with_sender: Some(( + sov_blob_storage::CallMessage::ProcessDeferredBlobsEarly { number: 4 }, + PREFERRED_SEQUENCER_ROLLUP, + )), + }, + SlotTestInfo { + slot_number: 1, + expected_blobs_to_process: Some(3), + early_processing_request_with_sender: None, + }, + SlotTestInfo { + slot_number: 2, + expected_blobs_to_process: Some(2), + early_processing_request_with_sender: None, + }, + ] + }; + + do_deferred_blob_test(blobs_by_slot, test_info) +} + +// cases to handle: +// 1. Happy flow (with some bonus blobs) +// 2. Preferred sequencer exits +// 3. Too many bonus blobs requested +// 4. Bonus blobs requested just once +// 5. 
Bonus blob requests ignored if not preferred seq +#[test] +fn sequencer_requests_more_bonus_blobs_than_possible() { + // If blobs are deferred for less than two slots, "early processing" is not possible + if DEFERRED_SLOTS_COUNT < 2 { + return; + } + + let is_from_preferred_by_slot = [ + vec![false, false, true, false, false], + vec![false, true, false], + vec![false, false], + ]; + let blobs_by_slot: Vec<_> = make_blobs_by_slot(&is_from_preferred_by_slot); + let test_info = vec![ + SlotTestInfo { + slot_number: 0, + expected_blobs_to_process: Some(1), // The first slot will process the one blob from the preferred sequencer + early_processing_request_with_sender: Some(( + sov_blob_storage::CallMessage::ProcessDeferredBlobsEarly { number: 1000 }, // Request a huge number of blobs + PREFERRED_SEQUENCER_ROLLUP, + )), + }, + SlotTestInfo { + slot_number: 1, + expected_blobs_to_process: Some(7), // The second slot will process all 7 available blobs and then halt + early_processing_request_with_sender: None, + }, + SlotTestInfo { + slot_number: 2, + expected_blobs_to_process: Some(0), // The third slot won't process any blobs, since none are from the preferred sequencer + early_processing_request_with_sender: None, + }, + ]; + + do_deferred_blob_test(blobs_by_slot, test_info) +} + +// This test ensure that blob storage behaves as expected when it only needs to process a subset of the +// deferred blobs from a slot. 
+#[test] +fn some_blobs_from_slot_processed_early() { + // If blobs are deferred for less than two slots, "early processing" is not possible + if DEFERRED_SLOTS_COUNT < 2 { + return; + } + + let is_from_preferred_by_slot = [ + vec![false, false, true, false, false], + vec![false, true, false], + vec![false, false], + ]; + let blobs_by_slot: Vec<_> = make_blobs_by_slot(&is_from_preferred_by_slot); + let test_info = vec![ + SlotTestInfo { + slot_number: 0, + // The first slot will process the one blob from the preferred sequencer + expected_blobs_to_process: Some(1), + early_processing_request_with_sender: Some(( + sov_blob_storage::CallMessage::ProcessDeferredBlobsEarly { number: 5 }, // Request 5 bonus blobs + PREFERRED_SEQUENCER_ROLLUP, + )), + }, + SlotTestInfo { + slot_number: 1, + expected_blobs_to_process: Some(6), // The second slot will process 5 bonus blobs plus the one from the preferred sequencer. One blob from slot two will be deferred again + early_processing_request_with_sender: None, + }, + SlotTestInfo { + slot_number: 2, + expected_blobs_to_process: Some(0), // The third slot won't process any blobs + early_processing_request_with_sender: None, + }, + SlotTestInfo { + slot_number: DEFERRED_SLOTS_COUNT + 1, + expected_blobs_to_process: Some(1), // We process that one re-deferred bob in slot `DEFERRED_SLOTS_COUNT + 1` + early_processing_request_with_sender: None, + }, + ]; + + do_deferred_blob_test(blobs_by_slot, test_info) +} + +#[test] +fn request_one_blob_early() { + // If blobs are deferred for less than two slots, "early processing" is not possible + if DEFERRED_SLOTS_COUNT < 2 { + return; + } + + let is_from_preferred_by_slot = [ + vec![false, false, true, false, false], + vec![false, true, false], + ]; + let blobs_by_slot: Vec<_> = make_blobs_by_slot(&is_from_preferred_by_slot); + let test_info = vec![ + SlotTestInfo { + slot_number: 0, + // The first slot will process the one blob from the preferred sequencer + expected_blobs_to_process: 
Some(1), + early_processing_request_with_sender: Some(( + sov_blob_storage::CallMessage::ProcessDeferredBlobsEarly { number: 1 }, // Request 1 bonus blob + PREFERRED_SEQUENCER_ROLLUP, + )), + }, + SlotTestInfo { + slot_number: 1, + expected_blobs_to_process: Some(2), // The second slot will process 1 bonus blob plus the one from the preferred sequencer. Three blobs from slot one will be deferred again + early_processing_request_with_sender: None, + }, + SlotTestInfo { + slot_number: DEFERRED_SLOTS_COUNT, + expected_blobs_to_process: Some(3), // We process the 3 re-deferred blobs from slot 0 in slot `DEFERRED_SLOTS_COUNT` + early_processing_request_with_sender: None, + }, + SlotTestInfo { + slot_number: DEFERRED_SLOTS_COUNT + 1, + expected_blobs_to_process: Some(2), // We process the two deferred blobs from slot 1 in slot `DEFERRED_SLOTS_COUNT + 1` + early_processing_request_with_sender: None, + }, + ]; + do_deferred_blob_test(blobs_by_slot, test_info) +} + +#[test] +fn bonus_blobs_request_ignored_if_not_from_preferred_seq() { + // If blobs are deferred for less than two slots, "early processing" is not possible + if DEFERRED_SLOTS_COUNT < 2 { + return; + } + let is_from_preferred_by_slot = [ + vec![false, false, true, false, false], + vec![false, true, false], + vec![false, false], + ]; + let blobs_by_slot: Vec<_> = make_blobs_by_slot(&is_from_preferred_by_slot); + let test_info = vec![ + SlotTestInfo { + slot_number: 0, + // The first slot will process the one blob from the preferred sequencer + expected_blobs_to_process: Some(1), + early_processing_request_with_sender: Some(( + sov_blob_storage::CallMessage::ProcessDeferredBlobsEarly { number: 1 }, // Request 1 bonus blob, but send the request from the *WRONG* address + REGULAR_SEQUENCER_ROLLUP, + )), + }, + SlotTestInfo { + slot_number: 1, + expected_blobs_to_process: Some(1), // The second slot will process one blob from the preferred sequencer but no bonus blobs + early_processing_request_with_sender: None, + }, + 
SlotTestInfo { + slot_number: DEFERRED_SLOTS_COUNT, + expected_blobs_to_process: Some(4), // We process the 4 deferred blobs from slot 0 in slot `DEFERRED_SLOTS_COUNT` + early_processing_request_with_sender: None, + }, + SlotTestInfo { + slot_number: DEFERRED_SLOTS_COUNT + 1, + expected_blobs_to_process: Some(2), // We process the two deferred blobs from slot 1 in slot `DEFERRED_SLOTS_COUNT + 1` + early_processing_request_with_sender: None, + }, + SlotTestInfo { + slot_number: DEFERRED_SLOTS_COUNT + 2, + expected_blobs_to_process: Some(2), // We process the two deferred blobs from slot 2 in slot `DEFERRED_SLOTS_COUNT + 2` + early_processing_request_with_sender: None, + }, + ]; + do_deferred_blob_test(blobs_by_slot, test_info); +} + #[test] fn test_blobs_from_non_registered_sequencers_are_not_saved() { let (current_storage, runtime, genesis_root) = TestRuntime::pre_initialized(true); @@ -276,13 +604,23 @@ struct BlobWithAppearance { } impl BlobWithAppearance { - pub fn should_get_processed_in(&self) -> u64 { + pub fn must_be_processed_by(&self) -> u64 { if self.is_from_preferred { self.appeared_in_slot } else { self.appeared_in_slot + DEFERRED_SLOTS_COUNT } } + + /// A helper for sorting blobs by expected order. Blobs are ordered first by the slot in which they must be processed + /// Then by whether they're from the preferred sequencer.
(Lower score means that an item is sorted first) + pub fn priority(&self) -> u64 { + if self.is_from_preferred { + self.appeared_in_slot * 10 + } else { + (self.appeared_in_slot + DEFERRED_SLOTS_COUNT) * 10 + 1 + } + } } #[test] @@ -301,7 +639,7 @@ fn test_blob_priority_sorting() { let mut blobs = vec![blob2, blob1]; assert!(!blobs[0].is_from_preferred); - blobs.sort_by_key(|b| b.should_get_processed_in()); + blobs.sort_by_key(|b| b.must_be_processed_by()); if DEFERRED_SLOTS_COUNT == 0 { assert!(blobs[1].is_from_preferred); } else { diff --git a/module-system/module-implementations/sov-sequencer-registry/src/lib.rs b/module-system/module-implementations/sov-sequencer-registry/src/lib.rs index 4ca6c8f1b..39faecf52 100644 --- a/module-system/module-implementations/sov-sequencer-registry/src/lib.rs +++ b/module-system/module-implementations/sov-sequencer-registry/src/lib.rs @@ -136,6 +136,21 @@ impl SequencerRegistry self.preferred_sequencer.get(working_set) } + /// Returns the rollup address of the preferred sequencer, or [`None`] if it wasn't set. + /// + /// Read about [`SequencerConfig::is_preferred_sequencer`] to learn about + /// preferred sequencers. + pub fn get_preferred_sequencer_rollup_address( + &self, + working_set: &mut WorkingSet, + ) -> Option { + self.preferred_sequencer.get(working_set).map(|da_addr| { + self.allowed_sequencers + .get(&da_addr, working_set) + .expect("Preferred Sequencer must have known address on rollup") + }) + } + + /// Checks whether `sender` is a registered sequencer.
pub fn is_sender_allowed(&self, sender: &Da::Address, working_set: &mut WorkingSet) -> bool { self.allowed_sequencers.get(sender, working_set).is_some() diff --git a/module-system/sov-modules-api/src/capabilities.rs b/module-system/sov-modules-api/src/capabilities.rs index 9002accef..54c98db52 100644 --- a/module-system/sov-modules-api/src/capabilities.rs +++ b/module-system/sov-modules-api/src/capabilities.rs @@ -12,6 +12,7 @@ use sov_rollup_interface::da::{BlobReaderTrait, DaSpec}; use crate::{Context, WorkingSet}; /// Container type for mixing borrowed and owned blobs. +#[derive(Debug)] pub enum BlobRefOrOwned<'a, B: BlobReaderTrait> { /// Mutable reference Ref(&'a mut B), @@ -19,6 +20,15 @@ pub enum BlobRefOrOwned<'a, B: BlobReaderTrait> { Owned(B), } +impl<'a, B: BlobReaderTrait> AsRef for BlobRefOrOwned<'a, B> { + fn as_ref(&self) -> &B { + match self { + BlobRefOrOwned::Ref(r) => r, + BlobRefOrOwned::Owned(blob) => blob, + } + } + } + impl<'a, B: BlobReaderTrait> BlobRefOrOwned<'a, B> { /// Convenience method to get mutable reference to the blob pub fn as_mut_ref(&mut self) -> &mut B { From b28fae0a1fc18ef9d51838bb21c6c93cf001443c Mon Sep 17 00:00:00 2001 From: Blazej Kolad Date: Thu, 19 Oct 2023 21:17:07 +0200 Subject: [PATCH 3/5] Add `RollupTemplate` trait and reimplement `demo-rollup`.
(#1055) * Add RollupSpec * Add RollupTemplate * cleanup * Rename DefaultContext to NativeContext * Fix lint --- Cargo.lock | 17 ++ Cargo.toml | 1 + examples/demo-rollup/Cargo.toml | 26 +-- .../demo-rollup/benches/node/rollup_bench.rs | 18 +- .../benches/node/rollup_coarse_measure.rs | 19 +- .../benches/prover/prover_bench.rs | 23 +- .../provers/risc0/guest-celestia/Cargo.lock | 3 + .../provers/risc0/guest-celestia/Cargo.toml | 5 +- .../risc0/guest-celestia/src/bin/rollup.rs | 11 +- .../provers/risc0/guest-mock/Cargo.lock | 3 + .../provers/risc0/guest-mock/Cargo.toml | 3 + .../risc0/guest-mock/src/bin/mock_da.rs | 11 +- examples/demo-rollup/src/celestia_rollup.rs | 110 +++++++++ examples/demo-rollup/src/common.rs | 124 +++++++++++ examples/demo-rollup/src/lib.rs | 18 +- examples/demo-rollup/src/main.rs | 68 ++++-- examples/demo-rollup/src/mock_rollup.rs | 99 +++++++++ examples/demo-rollup/stf/src/lib.rs | 73 +----- examples/demo-rollup/stf/src/runtime.rs | 1 + examples/demo-rollup/stf/src/tests/mod.rs | 3 +- examples/demo-rollup/tests/bank/mod.rs | 15 +- examples/demo-rollup/tests/evm/mod.rs | 7 +- examples/demo-rollup/tests/test_helpers.rs | 21 +- examples/demo-simple-stf/src/lib.rs | 10 +- examples/demo-simple-stf/tests/stf_test.rs | 2 +- full-node/sov-stf-runner/src/lib.rs | 14 +- full-node/sov-stf-runner/src/runner.rs | 15 +- full-node/sov-stf-runner/src/verifier.rs | 2 +- .../src/chain_state/helpers.rs | 4 +- .../src/chain_state/tests.rs | 3 +- .../sov-modules-rollup-template/Cargo.toml | 27 +++ .../sov-modules-rollup-template/README.md | 3 + .../sov-modules-rollup-template/src/lib.rs | 208 ++++++++++++++++++ .../src/app_template.rs | 4 +- .../sov-modules-stf-template/src/lib.rs | 5 +- module-system/sov-state/src/prover_storage.rs | 13 +- module-system/sov-state/src/storage.rs | 5 + packages_to_publish.yml | 1 + rollup-interface/src/state_machine/stf.rs | 2 +- rollup-interface/src/state_machine/zk/mod.rs | 2 +- .../risc0/guest-mock/src/bin/mock_da.rs | 15 +- 
sov-rollup-starter/src/rollup.rs | 2 +- sov-rollup-starter/stf/src/builder.rs | 2 +- sov-rollup-starter/stf/src/runtime.rs | 11 +- sov-rollup-starter/tests/bank/mod.rs | 1 + 45 files changed, 802 insertions(+), 228 deletions(-) create mode 100644 examples/demo-rollup/src/celestia_rollup.rs create mode 100644 examples/demo-rollup/src/common.rs create mode 100644 examples/demo-rollup/src/mock_rollup.rs create mode 100644 module-system/sov-modules-rollup-template/Cargo.toml create mode 100644 module-system/sov-modules-rollup-template/README.md create mode 100644 module-system/sov-modules-rollup-template/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 9a417591a..a24fcaf74 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8534,6 +8534,7 @@ dependencies = [ "sov-evm", "sov-ledger-rpc", "sov-modules-api", + "sov-modules-rollup-template", "sov-modules-stf-template", "sov-nft-module", "sov-risc0-adapter", @@ -8705,6 +8706,22 @@ dependencies = [ "trybuild", ] +[[package]] +name = "sov-modules-rollup-template" +version = "0.2.0" +dependencies = [ + "anyhow", + "async-trait", + "jsonrpsee 0.20.2", + "sov-db", + "sov-modules-api", + "sov-modules-stf-template", + "sov-rollup-interface", + "sov-state", + "sov-stf-runner", + "tokio", +] + [[package]] name = "sov-modules-stf-template" version = "0.2.0" diff --git a/Cargo.toml b/Cargo.toml index 2551f3872..9932ee828 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,6 +25,7 @@ members = [ "module-system/sov-cli", "module-system/sov-modules-stf-template", + "module-system/sov-modules-rollup-template", "module-system/sov-modules-macros", "module-system/sov-state", "module-system/sov-modules-api", diff --git a/examples/demo-rollup/Cargo.toml b/examples/demo-rollup/Cargo.toml index 57cc9e67b..e1479e43f 100644 --- a/examples/demo-rollup/Cargo.toml +++ b/examples/demo-rollup/Cargo.toml @@ -17,26 +17,18 @@ sov-celestia-adapter = { path = "../../adapters/celestia", features = [ "native", ] } const-rollup-config = { path = 
"../const-rollup-config" } -sov-stf-runner = { path = "../../full-node/sov-stf-runner", features = [ - "native", -] } -sov-rollup-interface = { path = "../../rollup-interface", features = [ - "native", -] } -sov-modules-stf-template = { path = "../../module-system/sov-modules-stf-template", features = [ - "native", -] } -sov-modules-api = { path = "../../module-system/sov-modules-api", features = [ - "native", -] } +sov-stf-runner = { path = "../../full-node/sov-stf-runner", features = ["native"] } +sov-rollup-interface = { path = "../../rollup-interface", features = ["native"] } + +sov-modules-rollup-template = { path = "../../module-system/sov-modules-rollup-template" } +sov-modules-stf-template = { path = "../../module-system/sov-modules-stf-template", features = ["native"] } +sov-modules-api = { path = "../../module-system/sov-modules-api", features = ["native"] } sov-nft-module = { path = "../../module-system/module-implementations/sov-nft-module" } demo-stf = { path = "./stf", features = ["native"] } -sov-ledger-rpc = { path = "../../full-node/sov-ledger-rpc", features = [ - "server", -] } +sov-ledger-rpc = { path = "../../full-node/sov-ledger-rpc", features = ["server",] } risc0 = { path = "./provers/risc0" } borsh = { workspace = true, features = ["bytes"] } -async-trait = { workspace = true, optional = true } +async-trait = { workspace = true } anyhow = { workspace = true } jsonrpsee = { workspace = true, features = ["http-client", "server"] } serde = { workspace = true, features = ["derive"] } @@ -97,7 +89,7 @@ default = [ ] # Deviate from convention by making the "native" feature active by default. This aligns with how this package is meant to be used (as a binary first, library second). 
experimental = ["default", "sov-ethereum/experimental", "reth-primitives", "secp256k1", "demo-stf/experimental", "sov-ethereum/local"] -bench = ["async-trait", "hex", "sov-risc0-adapter/bench", "sov-zk-cycle-macros/bench", "risc0/bench"] +bench = ["hex", "sov-risc0-adapter/bench", "sov-zk-cycle-macros/bench", "risc0/bench"] offchain = ["demo-stf/offchain"] [[bench]] diff --git a/examples/demo-rollup/benches/node/rollup_bench.rs b/examples/demo-rollup/benches/node/rollup_bench.rs index f20c25083..d15391dac 100644 --- a/examples/demo-rollup/benches/node/rollup_bench.rs +++ b/examples/demo-rollup/benches/node/rollup_bench.rs @@ -6,18 +6,31 @@ use std::time::Duration; use anyhow::Context; use criterion::{criterion_group, criterion_main, Criterion}; use demo_stf::genesis_config::{get_genesis_config, GenesisPaths}; -use demo_stf::App; +use demo_stf::runtime::Runtime; use sov_db::ledger_db::{LedgerDB, SlotCommit}; +use sov_modules_api::default_context::DefaultContext; +use sov_modules_stf_template::AppTemplate; use sov_risc0_adapter::host::Risc0Verifier; use sov_rng_da_service::{RngDaService, RngDaSpec}; +use sov_rollup_interface::da::DaSpec; use sov_rollup_interface::mocks::{ MockAddress, MockBlock, MockBlockHeader, MOCK_SEQUENCER_DA_ADDRESS, }; use sov_rollup_interface::services::da::DaService; use sov_rollup_interface::stf::StateTransitionFunction; +use sov_rollup_interface::zk::Zkvm; +use sov_state::{ProverStorage, Storage}; use sov_stf_runner::{from_toml_path, RollupConfig}; use tempfile::TempDir; +fn new_app( + storage_config: sov_state::config::Config, +) -> AppTemplate> { + let storage = + ProverStorage::with_config(storage_config).expect("Failed to open prover storage"); + AppTemplate::new(storage.clone()) +} + fn rollup_bench(_bench: &mut Criterion) { let start_height: u64 = 0u64; let mut end_height: u64 = 100u64; @@ -44,9 +57,8 @@ fn rollup_bench(_bench: &mut Criterion) { let storage_config = sov_state::config::Config { path: rollup_config.storage.path, }; - 
let demo_runner = App::::new(storage_config); + let mut demo = new_app::(storage_config); - let mut demo = demo_runner.stf; let sequencer_da_address = MockAddress::from(MOCK_SEQUENCER_DA_ADDRESS); let demo_genesis_config = get_genesis_config( diff --git a/examples/demo-rollup/benches/node/rollup_coarse_measure.rs b/examples/demo-rollup/benches/node/rollup_coarse_measure.rs index e44e21dac..606ce3f87 100644 --- a/examples/demo-rollup/benches/node/rollup_coarse_measure.rs +++ b/examples/demo-rollup/benches/node/rollup_coarse_measure.rs @@ -5,25 +5,37 @@ use std::time::{Duration, Instant}; use anyhow::Context; use demo_stf::genesis_config::{get_genesis_config, GenesisPaths}; -use demo_stf::App; +use demo_stf::runtime::Runtime; use prometheus::{Histogram, HistogramOpts, Registry}; use sov_db::ledger_db::{LedgerDB, SlotCommit}; +use sov_modules_api::default_context::DefaultContext; +use sov_modules_stf_template::AppTemplate; use sov_risc0_adapter::host::Risc0Verifier; use sov_rng_da_service::{RngDaService, RngDaSpec}; +use sov_rollup_interface::da::DaSpec; use sov_rollup_interface::mocks::{ MockAddress, MockBlock, MockBlockHeader, MOCK_SEQUENCER_DA_ADDRESS, }; use sov_rollup_interface::services::da::DaService; use sov_rollup_interface::stf::StateTransitionFunction; +use sov_rollup_interface::zk::Zkvm; +use sov_state::{ProverStorage, Storage}; use sov_stf_runner::{from_toml_path, RollupConfig}; use tempfile::TempDir; - #[macro_use] extern crate prettytable; use prettytable::Table; use sov_modules_stf_template::TxEffect; +fn new_app( + storage_config: sov_state::config::Config, +) -> AppTemplate> { + let storage = + ProverStorage::with_config(storage_config).expect("Failed to open prover storage"); + AppTemplate::new(storage.clone()) +} + fn print_times( total: Duration, apply_block_time: Duration, @@ -101,9 +113,8 @@ async fn main() -> Result<(), anyhow::Error> { let storage_config = sov_state::config::Config { path: rollup_config.storage.path, }; - let demo_runner = 
App::::new(storage_config); + let mut demo = new_app::(storage_config); - let mut demo = demo_runner.stf; let sequencer_da_address = MockAddress::from(MOCK_SEQUENCER_DA_ADDRESS); let demo_genesis_config = get_genesis_config( diff --git a/examples/demo-rollup/benches/prover/prover_bench.rs b/examples/demo-rollup/benches/prover/prover_bench.rs index 996b308f6..2a19dbfbe 100644 --- a/examples/demo-rollup/benches/prover/prover_bench.rs +++ b/examples/demo-rollup/benches/prover/prover_bench.rs @@ -9,7 +9,7 @@ use std::sync::{Arc, Mutex}; use anyhow::Context; use const_rollup_config::{ROLLUP_NAMESPACE_RAW, SEQUENCER_DA_ADDRESS}; use demo_stf::genesis_config::{get_genesis_config, GenesisPaths}; -use demo_stf::App; +use demo_stf::runtime::Runtime; use log4rs::config::{Appender, Config, Root}; use regex::Regex; use risc0::ROLLUP_ELF; @@ -17,14 +17,26 @@ use sov_celestia_adapter::types::{FilteredCelestiaBlock, Namespace}; use sov_celestia_adapter::verifier::address::CelestiaAddress; use sov_celestia_adapter::verifier::{CelestiaSpec, RollupParams}; use sov_celestia_adapter::CelestiaService; +use sov_modules_api::default_context::DefaultContext; use sov_modules_api::SlotData; +use sov_modules_stf_template::AppTemplate; use sov_risc0_adapter::host::Risc0Host; +use sov_rollup_interface::da::DaSpec; use sov_rollup_interface::services::da::DaService; use sov_rollup_interface::stf::StateTransitionFunction; -use sov_rollup_interface::zk::ZkvmHost; +use sov_rollup_interface::zk::{Zkvm, ZkvmHost}; +use sov_state::{ProverStorage, Storage}; use sov_stf_runner::{from_toml_path, RollupConfig}; use tempfile::TempDir; +fn new_app( + storage_config: sov_state::config::Config, +) -> AppTemplate> { + let storage = + ProverStorage::with_config(storage_config).expect("Failed to open prover storage"); + AppTemplate::new(storage.clone()) +} + #[derive(Debug)] struct RegexAppender { regex: Regex, @@ -167,7 +179,8 @@ async fn main() -> Result<(), anyhow::Error> { let storage_config = 
sov_state::config::Config { path: rollup_config.storage.path, }; - let mut app: App = App::new(storage_config); + + let mut demo = new_app::(storage_config); let sequencer_da_address = CelestiaAddress::from_str(SEQUENCER_DA_ADDRESS).unwrap(); @@ -178,9 +191,7 @@ async fn main() -> Result<(), anyhow::Error> { Default::default(), ); println!("Starting from empty storage, initialization chain"); - let mut prev_state_root = app.stf.init_chain(genesis_config); - - let mut demo = app.stf; + let mut prev_state_root = demo.init_chain(genesis_config); let hex_data = read_to_string("benches/prover/blocks.hex").expect("Failed to read data"); let bincoded_blocks: Vec = hex_data diff --git a/examples/demo-rollup/provers/risc0/guest-celestia/Cargo.lock b/examples/demo-rollup/provers/risc0/guest-celestia/Cargo.lock index 387ba3b43..8b1d3159e 100644 --- a/examples/demo-rollup/provers/risc0/guest-celestia/Cargo.lock +++ b/examples/demo-rollup/provers/risc0/guest-celestia/Cargo.lock @@ -2011,7 +2011,10 @@ dependencies = [ "risc0-zkvm", "risc0-zkvm-platform", "sov-celestia-adapter", + "sov-modules-api", + "sov-modules-stf-template", "sov-risc0-adapter", + "sov-state", ] [[package]] diff --git a/examples/demo-rollup/provers/risc0/guest-celestia/Cargo.toml b/examples/demo-rollup/provers/risc0/guest-celestia/Cargo.toml index 40d0669c5..ef86d8320 100644 --- a/examples/demo-rollup/provers/risc0/guest-celestia/Cargo.toml +++ b/examples/demo-rollup/provers/risc0/guest-celestia/Cargo.toml @@ -14,8 +14,9 @@ demo-stf = { path = "../../../stf" } sov-risc0-adapter = { path = "../../../../../adapters/risc0" } const-rollup-config = { path = "../../../../const-rollup-config" } sov-celestia-adapter = { path = "../../../../../adapters/celestia" } - - +sov-modules-api = { path = "../../../../../module-system/sov-modules-api" } +sov-state = { path = "../../../../../module-system/sov-state" } +sov-modules-stf-template = { path = "../../../../../module-system/sov-modules-stf-template" } [patch.crates-io] 
sha2 = { git = "https://github.com/risc0/RustCrypto-hashes", tag = "sha2/v0.10.6-risc0" } diff --git a/examples/demo-rollup/provers/risc0/guest-celestia/src/bin/rollup.rs b/examples/demo-rollup/provers/risc0/guest-celestia/src/bin/rollup.rs index 7d99eaa3a..a1c2d9ffc 100644 --- a/examples/demo-rollup/provers/risc0/guest-celestia/src/bin/rollup.rs +++ b/examples/demo-rollup/provers/risc0/guest-celestia/src/bin/rollup.rs @@ -3,10 +3,14 @@ #![no_main] use const_rollup_config::ROLLUP_NAMESPACE_RAW; +use demo_stf::runtime::Runtime; +use demo_stf::AppVerifier; use sov_celestia_adapter::types::Namespace; use sov_celestia_adapter::verifier::CelestiaVerifier; +use sov_modules_api::default_context::ZkDefaultContext; +use sov_modules_stf_template::AppTemplate; use sov_risc0_adapter::guest::Risc0Guest; -use demo_stf::{create_zk_app_template, AppVerifier}; +use sov_state::ZkStorage; // The rollup stores its data in the namespace b"sov-test" on Celestia const ROLLUP_NAMESPACE: Namespace = Namespace::const_v0(ROLLUP_NAMESPACE_RAW); @@ -15,8 +19,11 @@ risc0_zkvm::guest::entry!(main); pub fn main() { let guest = Risc0Guest::new(); + let storage = ZkStorage::new(); + let app: AppTemplate> = AppTemplate::new(storage); + let mut stf_verifier = AppVerifier::new( - create_zk_app_template::(), + app, CelestiaVerifier { rollup_namespace: ROLLUP_NAMESPACE, }, diff --git a/examples/demo-rollup/provers/risc0/guest-mock/Cargo.lock b/examples/demo-rollup/provers/risc0/guest-mock/Cargo.lock index 7fef493c9..a0c622f9e 100644 --- a/examples/demo-rollup/provers/risc0/guest-mock/Cargo.lock +++ b/examples/demo-rollup/provers/risc0/guest-mock/Cargo.lock @@ -925,8 +925,11 @@ dependencies = [ "demo-stf", "risc0-zkvm", "risc0-zkvm-platform", + "sov-modules-api", + "sov-modules-stf-template", "sov-risc0-adapter", "sov-rollup-interface", + "sov-state", ] [[package]] diff --git a/examples/demo-rollup/provers/risc0/guest-mock/Cargo.toml b/examples/demo-rollup/provers/risc0/guest-mock/Cargo.toml index 
bed5c1fdc..4e115298d 100644 --- a/examples/demo-rollup/provers/risc0/guest-mock/Cargo.toml +++ b/examples/demo-rollup/provers/risc0/guest-mock/Cargo.toml @@ -13,6 +13,9 @@ risc0-zkvm-platform = "0.18" sov-rollup-interface = { path = "../../../../../rollup-interface", features = ["mocks"] } demo-stf = { path = "../../../stf" } sov-risc0-adapter = { path = "../../../../../adapters/risc0" } +sov-modules-api = { path = "../../../../../module-system/sov-modules-api" } +sov-state = { path = "../../../../../module-system/sov-state" } +sov-modules-stf-template = { path = "../../../../../module-system/sov-modules-stf-template" } [patch.crates-io] sha2 = { git = "https://github.com/risc0/RustCrypto-hashes", tag = "sha2/v0.10.6-risc0" } diff --git a/examples/demo-rollup/provers/risc0/guest-mock/src/bin/mock_da.rs b/examples/demo-rollup/provers/risc0/guest-mock/src/bin/mock_da.rs index 5f5b53acb..279727fd4 100644 --- a/examples/demo-rollup/provers/risc0/guest-mock/src/bin/mock_da.rs +++ b/examples/demo-rollup/provers/risc0/guest-mock/src/bin/mock_da.rs @@ -1,15 +1,20 @@ #![no_main] +use demo_stf::runtime::Runtime; +use demo_stf::AppVerifier; +use sov_modules_api::default_context::ZkDefaultContext; +use sov_modules_stf_template::AppTemplate; use sov_risc0_adapter::guest::Risc0Guest; -use demo_stf::{create_zk_app_template, AppVerifier}; use sov_rollup_interface::mocks::MockDaVerifier; +use sov_state::ZkStorage; risc0_zkvm::guest::entry!(main); pub fn main() { let guest = Risc0Guest::new(); + let storage = ZkStorage::new(); + let app: AppTemplate> = AppTemplate::new(storage); - let mut stf_verifier = - AppVerifier::new(create_zk_app_template::(), MockDaVerifier {}); + let mut stf_verifier = AppVerifier::new(app, MockDaVerifier {}); stf_verifier .run_block(guest) diff --git a/examples/demo-rollup/src/celestia_rollup.rs b/examples/demo-rollup/src/celestia_rollup.rs new file mode 100644 index 000000000..2352f1a0b --- /dev/null +++ b/examples/demo-rollup/src/celestia_rollup.rs @@ 
-0,0 +1,110 @@ +use std::path::PathBuf; +use std::str::FromStr; + +use async_trait::async_trait; +use const_rollup_config::SEQUENCER_DA_ADDRESS; +use demo_stf::genesis_config::{get_genesis_config, GenesisPaths, StorageConfig}; +use demo_stf::runtime::Runtime; +use sov_celestia_adapter::verifier::address::CelestiaAddress; +use sov_celestia_adapter::verifier::{CelestiaSpec, CelestiaVerifier, RollupParams}; +use sov_celestia_adapter::{CelestiaService, DaServiceConfig}; +use sov_modules_api::default_context::{DefaultContext, ZkDefaultContext}; +use sov_modules_api::Spec; +use sov_modules_rollup_template::RollupTemplate; +use sov_risc0_adapter::host::Risc0Host; +use sov_rollup_interface::services::da::DaService; +use sov_state::{ProverStorage, Storage, ZkStorage}; +use sov_stf_runner::RollupConfig; + +use crate::common::create_rpc_methods; +#[cfg(feature = "experimental")] +use crate::common::read_eth_tx_signers; +use crate::ROLLUP_NAMESPACE; + +/// Rollup with CelestiaDa +pub struct CelestiaDemoRollup {} + +#[async_trait] +impl RollupTemplate for CelestiaDemoRollup { + type DaService = CelestiaService; + type GenesisPaths = GenesisPaths; + type Vm = Risc0Host<'static>; + + type ZkContext = ZkDefaultContext; + type NativeContext = DefaultContext; + + type ZkRuntime = Runtime; + type NativeRuntime = Runtime; + + type DaSpec = CelestiaSpec; + type DaConfig = DaServiceConfig; + + fn create_genesis_config( + &self, + genesis_paths: &Self::GenesisPaths, + ) -> >::GenesisConfig { + let sequencer_da_address = CelestiaAddress::from_str(SEQUENCER_DA_ADDRESS).unwrap(); + + #[cfg(feature = "experimental")] + let eth_signer = read_eth_tx_signers(); + + get_genesis_config( + sequencer_da_address, + genesis_paths, + #[cfg(feature = "experimental")] + eth_signer.signers(), + ) + } + + async fn create_da_service( + &self, + rollup_config: &RollupConfig, + ) -> Self::DaService { + CelestiaService::new( + rollup_config.da.clone(), + RollupParams { + namespace: ROLLUP_NAMESPACE, + }, + ) 
+ .await + } + + fn create_vm(&self) -> Self::Vm { + Risc0Host::new(risc0::ROLLUP_ELF) + } + + fn create_verifier(&self) -> ::Verifier { + CelestiaVerifier { + rollup_namespace: ROLLUP_NAMESPACE, + } + } + + fn create_zk_storage( + &self, + _rollup_config: &RollupConfig, + ) -> ::Storage { + ZkStorage::new() + } + + fn create_native_storage( + &self, + rollup_config: &sov_stf_runner::RollupConfig, + ) -> ::Storage { + let storage_config = StorageConfig { + path: rollup_config.storage.path.clone(), + }; + ProverStorage::with_config(storage_config).expect("Failed to open prover storage") + } + + fn create_rpc_methods( + &self, + storage: &::Storage, + ledger_db: &sov_db::ledger_db::LedgerDB, + da_service: &Self::DaService, + ) -> Result, anyhow::Error> { + create_rpc_methods(storage, ledger_db, da_service.clone()) + } +} diff --git a/examples/demo-rollup/src/common.rs b/examples/demo-rollup/src/common.rs new file mode 100644 index 000000000..0a867aed2 --- /dev/null +++ b/examples/demo-rollup/src/common.rs @@ -0,0 +1,124 @@ +#[cfg(feature = "experimental")] +use std::str::FromStr; + +use anyhow::Context as _; +use demo_stf::runtime::Runtime; +#[cfg(feature = "experimental")] +use secp256k1::SecretKey; +#[cfg(feature = "experimental")] +use sov_cli::wallet_state::PrivateKeyAndAddress; +use sov_db::ledger_db::LedgerDB; +#[cfg(feature = "experimental")] +use sov_ethereum::experimental::EthRpcConfig; +#[cfg(feature = "experimental")] +use sov_ethereum::GasPriceOracleConfig; +use sov_modules_api::default_context::DefaultContext; +#[cfg(feature = "experimental")] +use sov_modules_api::default_signature::private_key::DefaultPrivateKey; +use sov_modules_api::Spec; +use sov_modules_stf_template::{SequencerOutcome, TxEffect}; +use sov_rollup_interface::da::DaSpec; +use sov_rollup_interface::services::batch_builder::BatchBuilder; +use sov_rollup_interface::services::da::DaService; +use sov_sequencer::batch_builder::FiFoStrictBatchBuilder; +use sov_sequencer::get_sequencer_rpc; 
+use sov_state::ProverStorage; + +#[cfg(feature = "experimental")] +const TX_SIGNER_PRIV_KEY_PATH: &str = "../test-data/keys/tx_signer_private_key.json"; + +pub(crate) fn create_rpc_methods( + storage: &::Storage, + ledger_db: &LedgerDB, + da_service: Da, +) -> Result, anyhow::Error> { + let batch_builder = create_batch_builder::<::Spec>(storage.clone()); + + let mut methods = demo_stf::runtime::get_rpc_methods::::Spec>( + storage.clone(), + ); + + methods.merge( + sov_ledger_rpc::server::rpc_module::< + LedgerDB, + SequencerOutcome<<::Spec as DaSpec>::Address>, + TxEffect, + >(ledger_db.clone())? + .remove_context(), + )?; + + register_sequencer(da_service.clone(), batch_builder, &mut methods)?; + + #[cfg(feature = "experimental")] + register_ethereum::(da_service.clone(), storage.clone(), &mut methods).unwrap(); + + Ok(methods) +} + +fn register_sequencer( + da_service: Da, + batch_builder: B, + methods: &mut jsonrpsee::RpcModule<()>, +) -> Result<(), anyhow::Error> { + let sequencer_rpc = get_sequencer_rpc(batch_builder, da_service); + methods + .merge(sequencer_rpc) + .context("Failed to merge Txs RPC modules") +} + +fn create_batch_builder( + storage: ProverStorage, +) -> FiFoStrictBatchBuilder, DefaultContext> { + let batch_size_bytes = 1024 * 100; // 100 KB + FiFoStrictBatchBuilder::new( + batch_size_bytes, + u32::MAX as usize, + Runtime::default(), + storage, + ) +} + +#[cfg(feature = "experimental")] +/// Ethereum RPC wraps EVM transaction in a rollup transaction. +/// This function reads the private key of the rollup transaction signer. 
+fn read_sov_tx_signer_priv_key() -> Result { + let data = std::fs::read_to_string(TX_SIGNER_PRIV_KEY_PATH).context("Unable to read file")?; + + let key_and_address: PrivateKeyAndAddress = serde_json::from_str(&data) + .unwrap_or_else(|_| panic!("Unable to convert data {} to PrivateKeyAndAddress", &data)); + + Ok(key_and_address.private_key) +} + +// TODO: #840 +#[cfg(feature = "experimental")] +pub(crate) fn read_eth_tx_signers() -> sov_ethereum::DevSigner { + sov_ethereum::DevSigner::new(vec![SecretKey::from_str( + "ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80", + ) + .unwrap()]) +} + +#[cfg(feature = "experimental")] +// register ethereum methods. +fn register_ethereum( + da_service: Da, + storage: ProverStorage, + methods: &mut jsonrpsee::RpcModule<()>, +) -> Result<(), anyhow::Error> { + let eth_rpc_config = { + let eth_signer = read_eth_tx_signers(); + EthRpcConfig:: { + min_blob_size: Some(1), + sov_tx_signer_priv_key: read_sov_tx_signer_priv_key()?, + eth_signer, + gas_price_oracle_config: GasPriceOracleConfig::default(), + } + }; + + let ethereum_rpc = + sov_ethereum::get_ethereum_rpc::(da_service, eth_rpc_config, storage); + methods + .merge(ethereum_rpc) + .context("Failed to merge Ethereum RPC modules") +} diff --git a/examples/demo-rollup/src/lib.rs b/examples/demo-rollup/src/lib.rs index 4dbd877b9..c775c4871 100644 --- a/examples/demo-rollup/src/lib.rs +++ b/examples/demo-rollup/src/lib.rs @@ -1,22 +1,14 @@ #![deny(missing_docs)] #![doc = include_str!("../README.md")] -pub mod register_rpc; -mod rollup; - use const_rollup_config::ROLLUP_NAMESPACE_RAW; -pub use rollup::{ - new_rollup_with_celestia_da, new_rollup_with_mock_da, new_rollup_with_mock_da_from_config, - DemoProverConfig, Rollup, -}; use sov_celestia_adapter::types::Namespace; -use sov_db::ledger_db::LedgerDB; +mod mock_rollup; +pub use mock_rollup::*; +mod celestia_rollup; +pub use celestia_rollup::*; +mod common; /// The rollup stores its data in the namespace 
b"sov-test" on Celestia /// You can change this constant to point your rollup at a different namespace pub const ROLLUP_NAMESPACE: Namespace = Namespace::const_v0(ROLLUP_NAMESPACE_RAW); - -/// Initializes a [`LedgerDB`] using the provided `path`. -pub fn initialize_ledger(path: impl AsRef) -> LedgerDB { - LedgerDB::with_path(path).expect("Ledger DB failed to open") -} diff --git a/examples/demo-rollup/src/main.rs b/examples/demo-rollup/src/main.rs index 1726f21d0..25268c806 100644 --- a/examples/demo-rollup/src/main.rs +++ b/examples/demo-rollup/src/main.rs @@ -1,10 +1,16 @@ +use std::path::PathBuf; use std::str::FromStr; +use anyhow::Context as _; use clap::Parser; use demo_stf::genesis_config::GenesisPaths; -use risc0::{MOCK_DA_ELF, ROLLUP_ELF}; -use sov_demo_rollup::{new_rollup_with_celestia_da, new_rollup_with_mock_da, DemoProverConfig}; +use sov_demo_rollup::{CelestiaDemoRollup, MockDemoRollup}; +use sov_modules_rollup_template::{Rollup, RollupProverConfig, RollupTemplate}; use sov_risc0_adapter::host::Risc0Host; +use sov_rollup_interface::mocks::MockDaConfig; +use sov_rollup_interface::zk::ZkvmHost; +use sov_stf_runner::{from_toml_path, RollupConfig}; +use tracing::log::debug; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; @@ -40,26 +46,19 @@ async fn main() -> Result<(), anyhow::Error> { match args.da_layer.as_str() { "mock" => { - let _prover = Risc0Host::new(MOCK_DA_ELF); - let _config = DemoProverConfig::Execute; - - let rollup = new_rollup_with_mock_da::, _>( - rollup_config_path, - //Some((prover, config)), - None, + let rollup = new_rollup_with_mock_da::>( &GenesisPaths::from_dir("../test-data/genesis/integration-tests"), - )?; + rollup_config_path, + Some(RollupProverConfig::Execute), + ) + .await?; rollup.run().await } "celestia" => { - let _prover = Risc0Host::new(ROLLUP_ELF); - let _config = DemoProverConfig::Execute; - - let rollup = new_rollup_with_celestia_da::, _>( - rollup_config_path, - //Some((prover, config)), 
- None, + let rollup = new_rollup_with_celestia_da::>( &GenesisPaths::from_dir("../test-data/genesis/demo-tests"), + rollup_config_path, + Some(RollupProverConfig::Execute), ) .await?; rollup.run().await @@ -67,3 +66,38 @@ async fn main() -> Result<(), anyhow::Error> { da => panic!("DA Layer not supported: {}", da), } } + +pub async fn new_rollup_with_celestia_da( + genesis_paths: &GenesisPaths, + rollup_config_path: &str, + prover_config: Option, +) -> Result, anyhow::Error> { + debug!( + "Starting celestia rollup with config {}", + rollup_config_path + ); + + let rollup_config: RollupConfig = + from_toml_path(rollup_config_path).context("Failed to read rollup configuration")?; + + let mock_rollup = CelestiaDemoRollup {}; + mock_rollup + .create_new_rollup(genesis_paths, rollup_config, prover_config) + .await +} + +pub async fn new_rollup_with_mock_da( + genesis_paths: &GenesisPaths, + rollup_config_path: &str, + prover_config: Option, +) -> Result, anyhow::Error> { + debug!("Starting mock rollup with config {}", rollup_config_path); + + let rollup_config: RollupConfig = + from_toml_path(rollup_config_path).context("Failed to read rollup configuration")?; + + let mock_rollup = MockDemoRollup {}; + mock_rollup + .create_new_rollup(genesis_paths, rollup_config, prover_config) + .await +} diff --git a/examples/demo-rollup/src/mock_rollup.rs b/examples/demo-rollup/src/mock_rollup.rs new file mode 100644 index 000000000..58ef6d137 --- /dev/null +++ b/examples/demo-rollup/src/mock_rollup.rs @@ -0,0 +1,99 @@ +use std::path::PathBuf; + +use async_trait::async_trait; +use demo_stf::genesis_config::{get_genesis_config, GenesisPaths, StorageConfig}; +use demo_stf::runtime::Runtime; +use sov_db::ledger_db::LedgerDB; +use sov_modules_api::default_context::{DefaultContext, ZkDefaultContext}; +use sov_modules_api::Spec; +use sov_modules_rollup_template::RollupTemplate; +use sov_modules_stf_template::Runtime as RuntimeTrait; +use sov_risc0_adapter::host::Risc0Host; +use 
sov_rollup_interface::mocks::{ + MockAddress, MockDaConfig, MockDaService, MockDaSpec, MOCK_SEQUENCER_DA_ADDRESS, +}; +use sov_rollup_interface::services::da::DaService; +use sov_state::{ProverStorage, Storage, ZkStorage}; +use sov_stf_runner::RollupConfig; + +use crate::common::create_rpc_methods; +#[cfg(feature = "experimental")] +use crate::common::read_eth_tx_signers; + +/// Rollup with MockDa +pub struct MockDemoRollup {} + +#[async_trait] +impl RollupTemplate for MockDemoRollup { + type DaService = MockDaService; + type GenesisPaths = GenesisPaths; + type Vm = Risc0Host<'static>; + + type ZkContext = ZkDefaultContext; + type NativeContext = DefaultContext; + + type ZkRuntime = Runtime; + type NativeRuntime = Runtime; + + type DaSpec = MockDaSpec; + type DaConfig = MockDaConfig; + + fn create_genesis_config( + &self, + genesis_paths: &Self::GenesisPaths, + ) -> >::GenesisConfig + { + let sequencer_da_address = MockAddress::from(MOCK_SEQUENCER_DA_ADDRESS); + + #[cfg(feature = "experimental")] + let eth_signer = read_eth_tx_signers(); + + get_genesis_config( + sequencer_da_address, + genesis_paths, + #[cfg(feature = "experimental")] + eth_signer.signers(), + ) + } + + async fn create_da_service( + &self, + _rollup_config: &RollupConfig, + ) -> Self::DaService { + MockDaService::new(MockAddress::from(MOCK_SEQUENCER_DA_ADDRESS)) + } + + fn create_vm(&self) -> Self::Vm { + Risc0Host::new(risc0::MOCK_DA_ELF) + } + + fn create_zk_storage( + &self, + _rollup_config: &RollupConfig, + ) -> ::Storage { + ZkStorage::new() + } + + fn create_verifier(&self) -> ::Verifier { + Default::default() + } + + fn create_native_storage( + &self, + rollup_config: &RollupConfig, + ) -> ::Storage { + let storage_config = StorageConfig { + path: rollup_config.storage.path.clone(), + }; + ProverStorage::with_config(storage_config).expect("Failed to open prover storage") + } + + fn create_rpc_methods( + &self, + storage: &::Storage, + ledger_db: &LedgerDB, + da_service: &Self::DaService, + 
) -> Result, anyhow::Error> { + create_rpc_methods(storage, ledger_db, da_service.clone()) + } +} diff --git a/examples/demo-rollup/stf/src/lib.rs b/examples/demo-rollup/stf/src/lib.rs index bf30e33ed..2aa0c1008 100644 --- a/examples/demo-rollup/stf/src/lib.rs +++ b/examples/demo-rollup/stf/src/lib.rs @@ -9,74 +9,11 @@ mod hooks_impl; pub mod runtime; #[cfg(test)] mod tests; -use runtime::Runtime; -#[cfg(feature = "native")] -use sov_modules_api::default_context::DefaultContext; -use sov_modules_api::default_context::ZkDefaultContext; -#[cfg(feature = "native")] -use sov_modules_api::Spec; + use sov_modules_stf_template::AppTemplate; -use sov_rollup_interface::da::{DaSpec, DaVerifier}; -use sov_rollup_interface::zk::Zkvm; -#[cfg(feature = "native")] -use sov_sequencer::batch_builder::FiFoStrictBatchBuilder; -#[cfg(feature = "native")] -use sov_state::config::Config as StorageConfig; -use sov_state::ZkStorage; -#[cfg(feature = "native")] -use sov_state::{ProverStorage, Storage}; +use sov_rollup_interface::da::DaVerifier; use sov_stf_runner::verifier::StateTransitionVerifier; -/// A verifier for the demo rollup -pub type AppVerifier = StateTransitionVerifier< - AppTemplate< - ZkDefaultContext, - ::Spec, - Zk, - Runtime::Spec>, - >, - DA, - Zk, ->; - -/// Contains StateTransitionFunction and other necessary dependencies needed for implementing a full node. -#[cfg(feature = "native")] -pub struct App { - /// Concrete state transition function. - pub stf: AppTemplate>, - /// Batch builder. - pub batch_builder: Option, DefaultContext>>, -} - -#[cfg(feature = "native")] -impl App { - /// Creates a new `App`. 
- pub fn new(storage_config: StorageConfig) -> Self { - let storage = - ProverStorage::with_config(storage_config).expect("Failed to open prover storage"); - let app = AppTemplate::new(storage.clone(), Runtime::default()); - let batch_size_bytes = 1024 * 100; // 100 KB - let batch_builder = FiFoStrictBatchBuilder::new( - batch_size_bytes, - u32::MAX as usize, - Runtime::default(), - storage, - ); - Self { - stf: app, - batch_builder: Some(batch_builder), - } - } - - /// Gets underlying storage. - pub fn get_storage(&self) -> ::Storage { - self.stf.current_storage.clone() - } -} - -/// Create `StateTransitionFunction` for Zk context. -pub fn create_zk_app_template( -) -> AppTemplate> { - let storage = ZkStorage::new(); - AppTemplate::new(storage, Runtime::default()) -} +/// Alias for StateTransitionVerifier. +pub type AppVerifier = + StateTransitionVerifier::Spec, Vm, RT>, DA, Vm>; diff --git a/examples/demo-rollup/stf/src/runtime.rs b/examples/demo-rollup/stf/src/runtime.rs index 1d9c42a0d..3a46a4bf4 100644 --- a/examples/demo-rollup/stf/src/runtime.rs +++ b/examples/demo-rollup/stf/src/runtime.rs @@ -93,6 +93,7 @@ where C: Context, Da: DaSpec, { + type GenesisConfig = GenesisConfig; } impl BlobSelector for Runtime { diff --git a/examples/demo-rollup/stf/src/tests/mod.rs b/examples/demo-rollup/stf/src/tests/mod.rs index 478a47c3d..c2be9e6ce 100644 --- a/examples/demo-rollup/stf/src/tests/mod.rs +++ b/examples/demo-rollup/stf/src/tests/mod.rs @@ -23,9 +23,8 @@ pub(crate) fn create_new_app_template_for_tests( sov_rollup_interface::mocks::MockZkvm, Runtime, > { - let runtime = Runtime::default(); let storage = ProverStorage::with_path(path).unwrap(); - AppTemplate::new(storage, runtime) + AppTemplate::new(storage) } pub(crate) fn get_genesis_config_for_tests() -> GenesisConfig { diff --git a/examples/demo-rollup/tests/bank/mod.rs b/examples/demo-rollup/tests/bank/mod.rs index 94d39b91f..7f5de302f 100644 --- a/examples/demo-rollup/tests/bank/mod.rs +++ 
b/examples/demo-rollup/tests/bank/mod.rs @@ -5,17 +5,16 @@ use demo_stf::genesis_config::GenesisPaths; use demo_stf::runtime::RuntimeCall; use jsonrpsee::core::client::{Subscription, SubscriptionClientT}; use jsonrpsee::rpc_params; -use risc0::MOCK_DA_ELF; -use sov_demo_rollup::DemoProverConfig; use sov_modules_api::default_context::DefaultContext; use sov_modules_api::default_signature::private_key::DefaultPrivateKey; use sov_modules_api::transaction::Transaction; use sov_modules_api::{PrivateKey, Spec}; -use sov_risc0_adapter::host::Risc0Host; +use sov_modules_rollup_template::RollupProverConfig; use sov_rollup_interface::mocks::MockDaSpec; use sov_sequencer::utils::SimpleClient; -use super::test_helpers::start_rollup; +use crate::test_helpers::start_rollup; + const TOKEN_SALT: u64 = 0; const TOKEN_NAME: &str = "test_token"; @@ -71,19 +70,15 @@ async fn send_test_create_token_tx(rpc_address: SocketAddr) -> Result<(), anyhow async fn bank_tx_tests() -> Result<(), anyhow::Error> { let (port_tx, port_rx) = tokio::sync::oneshot::channel(); - let prover = Risc0Host::new(MOCK_DA_ELF); - let config = DemoProverConfig::Execute; - let rollup_task = tokio::spawn(async { start_rollup( port_tx, - Some((prover, config)), - &GenesisPaths::from_dir("../test-data/genesis/integration-tests"), + GenesisPaths::from_dir("../test-data/genesis/integration-tests"), + Some(RollupProverConfig::Execute), ) .await; }); - // Wait for rollup task to start: let port = port_rx.await.unwrap(); // If the rollup throws an error, return it and stop trying to send the transaction diff --git a/examples/demo-rollup/tests/evm/mod.rs b/examples/demo-rollup/tests/evm/mod.rs index 5c2b910c2..2a418a959 100644 --- a/examples/demo-rollup/tests/evm/mod.rs +++ b/examples/demo-rollup/tests/evm/mod.rs @@ -7,10 +7,9 @@ use demo_stf::genesis_config::GenesisPaths; use ethers_core::abi::Address; use ethers_signers::{LocalWallet, Signer}; use sov_evm::SimpleStorageContract; -use sov_risc0_adapter::host::Risc0Host; 
use test_client::TestClient; -use super::test_helpers::start_rollup; +use crate::test_helpers::start_rollup; #[cfg(feature = "experimental")] #[tokio::test] @@ -19,10 +18,10 @@ async fn evm_tx_tests() -> Result<(), anyhow::Error> { let rollup_task = tokio::spawn(async { // Don't provide a prover since the EVM is not currently provable - start_rollup::, _>( + start_rollup( port_tx, + GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, - &GenesisPaths::from_dir("../test-data/genesis/integration-tests"), ) .await; }); diff --git a/examples/demo-rollup/tests/test_helpers.rs b/examples/demo-rollup/tests/test_helpers.rs index bb4db679b..ce40dfe09 100644 --- a/examples/demo-rollup/tests/test_helpers.rs +++ b/examples/demo-rollup/tests/test_helpers.rs @@ -1,17 +1,17 @@ use std::net::SocketAddr; -use std::path::Path; +use std::path::PathBuf; use demo_stf::genesis_config::GenesisPaths; -use sov_demo_rollup::{new_rollup_with_mock_da_from_config, DemoProverConfig}; +use sov_demo_rollup::MockDemoRollup; +use sov_modules_rollup_template::{RollupProverConfig, RollupTemplate}; use sov_rollup_interface::mocks::{MockAddress, MockDaConfig}; -use sov_rollup_interface::zk::ZkvmHost; use sov_stf_runner::{RollupConfig, RpcConfig, RunnerConfig, StorageConfig}; use tokio::sync::oneshot; -pub async fn start_rollup>( +pub async fn start_rollup( rpc_reporting_channel: oneshot::Sender, - prover: Option<(Vm, DemoProverConfig)>, - genesis_paths: &GenesisPaths

, + genesis_paths: GenesisPaths, + rollup_prover_config: Option, ) { let temp_dir = tempfile::tempdir().unwrap(); let temp_path = temp_dir.path(); @@ -32,8 +32,13 @@ pub async fn start_rollup>( }, }; - let rollup = new_rollup_with_mock_da_from_config(rollup_config, prover, genesis_paths) - .expect("Rollup config is valid"); + let mock_demo_rollup = MockDemoRollup {}; + + let rollup = mock_demo_rollup + .create_new_rollup(&genesis_paths, rollup_config, rollup_prover_config) + .await + .unwrap(); + rollup .run_and_report_rpc_port(Some(rpc_reporting_channel)) .await diff --git a/examples/demo-simple-stf/src/lib.rs b/examples/demo-simple-stf/src/lib.rs index b87bfc9e8..9ca77dc55 100644 --- a/examples/demo-simple-stf/src/lib.rs +++ b/examples/demo-simple-stf/src/lib.rs @@ -27,7 +27,7 @@ impl StateTransitionFunction { // Since our rollup is stateless, we don't need to consider the StateRoot. - type StateRoot = (); + type StateRoot = [u8; 0]; // This represents the initial configuration of the rollup, but it is not supported in this tutorial. 
type InitialState = (); @@ -45,13 +45,13 @@ impl StateTransitionFunction [u8; 0] { + [] } fn apply_slot<'a, I>( &mut self, - _pre_state_root: &(), + _pre_state_root: &[u8; 0], _witness: Self::Witness, _slot_header: &Da::BlockHeader, _validity_condition: &Da::ValidityCondition, @@ -91,7 +91,7 @@ impl StateTransitionFunction::apply_slot( stf, - &(), + &[], (), &MockBlockHeader::default(), &MockValidityCond::default(), diff --git a/full-node/sov-stf-runner/src/lib.rs b/full-node/sov-stf-runner/src/lib.rs index 3f4b85506..afc203c3f 100644 --- a/full-node/sov-stf-runner/src/lib.rs +++ b/full-node/sov-stf-runner/src/lib.rs @@ -18,12 +18,9 @@ mod runner; pub use config::{from_toml_path, RollupConfig, RunnerConfig, StorageConfig}; #[cfg(feature = "native")] pub use runner::*; -#[cfg(feature = "native")] use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use sov_rollup_interface::da::DaSpec; -use sov_rollup_interface::stf::StateTransitionFunction; -use sov_rollup_interface::zk::Zkvm; /// Implements the `StateTransitionVerifier` type for checking the validity of a state transition pub mod verifier; @@ -31,14 +28,11 @@ pub mod verifier; #[derive(Serialize, BorshDeserialize, BorshSerialize, Deserialize)] // Prevent serde from generating spurious trait bounds. The correct serde bounds are already enforced by the // StateTransitionFunction, DA, and Zkvm traits. -#[serde(bound = "")] +#[serde(bound = "StateRoot: Serialize + DeserializeOwned, Witness: Serialize + DeserializeOwned")] /// Data required to verify a state transition. 
-pub struct StateTransitionData, DA: DaSpec, Zk> -where - Zk: Zkvm, -{ +pub struct StateTransitionData { /// The state root before the state transition - pub pre_state_root: ST::StateRoot, + pub pre_state_root: StateRoot, /// The header of the da block that is being processed pub da_block_header: DA::BlockHeader, /// The proof of inclusion for all blobs @@ -48,7 +42,7 @@ where /// The blobs that are being processed pub blobs: Vec<::BlobTransaction>, /// The witness for the state transition - pub state_transition_witness: ST::Witness, + pub state_transition_witness: Witness, } #[cfg(feature = "native")] diff --git a/full-node/sov-stf-runner/src/runner.rs b/full-node/sov-stf-runner/src/runner.rs index 97d4e7b44..eb5463715 100644 --- a/full-node/sov-stf-runner/src/runner.rs +++ b/full-node/sov-stf-runner/src/runner.rs @@ -13,7 +13,6 @@ use crate::verifier::StateTransitionVerifier; use crate::{RunnerConfig, StateTransitionData}; type StateRoot = >::StateRoot; - type InitialState = >::InitialState; /// Combines `DaService` with `StateTransitionFunction` and "runs" the rollup. @@ -63,15 +62,7 @@ where Da: DaService + Clone + Send + Sync + 'static, Vm: ZkvmHost, V: StateTransitionFunction, - ST: StateTransitionFunction< - Vm, - Da::Spec, - StateRoot = Root, - Condition = ::ValidityCondition, - Witness = Witness, - >, - Witness: Default, - Root: Clone + AsRef<[u8]>, + ST: StateTransitionFunction::ValidityCondition>, { /// Creates a new `StateTransitionRunner`. 
/// @@ -83,7 +74,7 @@ where da_service: Da, ledger_db: LedgerDB, mut app: ST, - prev_state_root: Option, + prev_state_root: Option>, genesis_config: InitialState, prover: Option>, ) -> Result { @@ -186,7 +177,7 @@ where .get_extraction_proof(&filtered_block, &blobs) .await; - let transition_data: StateTransitionData = + let transition_data: StateTransitionData = StateTransitionData { pre_state_root: self.state_root.clone(), da_block_header: filtered_block.header().clone(), diff --git a/full-node/sov-stf-runner/src/verifier.rs b/full-node/sov-stf-runner/src/verifier.rs index 8b3007ffb..c3e08269a 100644 --- a/full-node/sov-stf-runner/src/verifier.rs +++ b/full-node/sov-stf-runner/src/verifier.rs @@ -34,7 +34,7 @@ where /// Verify the next block pub fn run_block(&mut self, zkvm: Zk) -> Result { - let mut data: StateTransitionData = zkvm.read_from_host(); + let mut data: StateTransitionData<_, _, Da::Spec> = zkvm.read_from_host(); let validity_condition = self.da_verifier.verify_relevant_tx_list( &data.da_block_header, &data.blobs, diff --git a/module-system/module-implementations/integration-tests/src/chain_state/helpers.rs b/module-system/module-implementations/integration-tests/src/chain_state/helpers.rs index 8405b2568..379d8538b 100644 --- a/module-system/module-implementations/integration-tests/src/chain_state/helpers.rs +++ b/module-system/module-implementations/integration-tests/src/chain_state/helpers.rs @@ -112,7 +112,9 @@ where } } -impl Runtime for TestRuntime {} +impl Runtime for TestRuntime { + type GenesisConfig = GenesisConfig; +} pub(crate) fn create_chain_state_genesis_config( admin: ::Address, diff --git a/module-system/module-implementations/integration-tests/src/chain_state/tests.rs b/module-system/module-implementations/integration-tests/src/chain_state/tests.rs index 90097d7f5..91d442d5a 100644 --- a/module-system/module-implementations/integration-tests/src/chain_state/tests.rs +++ 
b/module-system/module-implementations/integration-tests/src/chain_state/tests.rs @@ -22,7 +22,6 @@ type C = DefaultContext; #[test] fn test_simple_value_setter_with_chain_state() { // Build an app template with the module configurations - let runtime = TestRuntime::default(); let tmpdir = tempfile::tempdir().unwrap(); @@ -30,7 +29,7 @@ fn test_simple_value_setter_with_chain_state() { ProverStorage::with_path(tmpdir.path()).unwrap(); let mut app_template = - AppTemplate::>::new(storage, runtime); + AppTemplate::>::new(storage); let value_setter_messages = ValueSetterMessages::default(); let value_setter = value_setter_messages.create_raw_txs::>(); diff --git a/module-system/sov-modules-rollup-template/Cargo.toml b/module-system/sov-modules-rollup-template/Cargo.toml new file mode 100644 index 000000000..deadde397 --- /dev/null +++ b/module-system/sov-modules-rollup-template/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "sov-modules-rollup-template" +description = "Defines a generic rollup for use with the Sovereign SDK module system" +authors = { workspace = true } +edition = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +repository = { workspace = true } +rust-version = { workspace = true } +version = { workspace = true } +readme = "README.md" +resolver = "2" + + +[dependencies] +sov-rollup-interface = { path = "../../rollup-interface", features = ["native"] } +sov-stf-runner = { path = "../../full-node/sov-stf-runner", features = ["native"] } +sov-state = { path = "../sov-state", version = "0.2" } + +sov-modules-api = { path = "../../module-system/sov-modules-api", features = ["native"] } +sov-modules-stf-template = { path = "../../module-system/sov-modules-stf-template", features = ["native"] } +sov-db = { path = "../../full-node/db/sov-db" } + +anyhow = { workspace = true } +async-trait = { workspace = true } +jsonrpsee = { workspace = true, features = ["http-client", "server"] } +tokio = { workspace = true } \ No 
newline at end of file
diff --git a/module-system/sov-modules-rollup-template/README.md b/module-system/sov-modules-rollup-template/README.md
new file mode 100644
index 000000000..be1287b82
--- /dev/null
+++ b/module-system/sov-modules-rollup-template/README.md
@@ -0,0 +1,3 @@
+# `sov-modules-rollup-template`
+This crate contains abstractions needed to create a new rollup.
+Any type that implements the `RollupTemplate` trait can serve as a `sov-rollup`.
diff --git a/module-system/sov-modules-rollup-template/src/lib.rs b/module-system/sov-modules-rollup-template/src/lib.rs
new file mode 100644
index 000000000..3eeef534d
--- /dev/null
+++ b/module-system/sov-modules-rollup-template/src/lib.rs
@@ -0,0 +1,208 @@
+#![deny(missing_docs)]
+#![doc = include_str!("../README.md")]
+use std::net::SocketAddr;
+
+use async_trait::async_trait;
+use sov_db::ledger_db::LedgerDB;
+use sov_modules_api::{Context, DaSpec, Spec};
+use sov_modules_stf_template::{AppTemplate, Runtime as RuntimeTrait};
+use sov_rollup_interface::da::DaVerifier;
+use sov_rollup_interface::services::da::DaService;
+use sov_rollup_interface::zk::ZkvmHost;
+use sov_state::storage::NativeStorage;
+use sov_stf_runner::verifier::StateTransitionVerifier;
+use sov_stf_runner::{ProofGenConfig, Prover, RollupConfig, StateTransitionRunner};
+use tokio::sync::oneshot;
+
+/// This trait defines how to create all the necessary dependencies required by a rollup.
+#[async_trait]
+pub trait RollupTemplate: Sized + Send + Sync {
+    /// Data Availability service.
+    type DaService: DaService + Clone + Send + Sync;
+    /// A specification for the types used by a DA layer.
+    type DaSpec: DaSpec + Send + Sync;
+    /// Data Availability config.
+    type DaConfig: Send + Sync;
+
+    /// Host of a zkVM program.
+    type Vm: ZkvmHost + Send;
+
+    /// Location of the genesis files.
+    type GenesisPaths: Send + Sync;
+
+    /// Context for Zero Knowledge environment.
+ type NativeContext: Context; + + /// Runtime for Zero Knowledge environment. + type ZkRuntime: RuntimeTrait + Default; + /// Runtime for Native environment. + type NativeRuntime: RuntimeTrait + Default + Default; + + /// Creates RPC methods for the rollup. + fn create_rpc_methods( + &self, + storage: &::Storage, + ledger_db: &LedgerDB, + da_service: &Self::DaService, + ) -> Result, anyhow::Error>; + + /// Creates GenesisConfig from genesis files. + fn create_genesis_config( + &self, + genesis_paths: &Self::GenesisPaths, + ) -> >::GenesisConfig; + + /// Creates instance of DA Service. + async fn create_da_service( + &self, + rollup_config: &RollupConfig, + ) -> Self::DaService; + + /// Creates instance of ZK storage. + fn create_zk_storage( + &self, + rollup_config: &RollupConfig, + ) -> ::Storage; + + /// Creates instance of Native storage. + fn create_native_storage( + &self, + rollup_config: &RollupConfig, + ) -> ::Storage; + + /// Creates instance of ZkVm. + fn create_vm(&self) -> Self::Vm; + + /// Creates instance of DA Verifier. + fn create_verifier(&self) -> ::Verifier; + + /// Creates instance of a LedgerDB. + fn create_ledger_db(&self, rollup_config: &RollupConfig) -> LedgerDB { + LedgerDB::with_path(&rollup_config.storage.path).expect("Ledger DB failed to open") + } + + /// Creates a new rollup. + async fn create_new_rollup( + &self, + genesis_paths: &Self::GenesisPaths, + rollup_config: RollupConfig, + prover_config: Option, + ) -> Result, anyhow::Error> + where + ::Storage: NativeStorage, + { + let da_service = self.create_da_service(&rollup_config).await; + let ledger_db = self.create_ledger_db(&rollup_config); + let genesis_config = self.create_genesis_config(genesis_paths); + + let prover = prover_config.map(|pc| { + configure_prover( + self.create_vm(), + pc, + self.create_verifier(), + self.create_zk_storage(&rollup_config), + ) + }); + + let storage = self.create_native_storage(&rollup_config); + + let prev_root = ledger_db + .get_head_slot()? 
+ .map(|(number, _)| storage.get_root_hash(number.0)) + .transpose()?; + + let rpc_methods = self.create_rpc_methods(&storage, &ledger_db, &da_service)?; + + let native_stf = AppTemplate::new(storage); + + let runner = StateTransitionRunner::new( + rollup_config.runner, + da_service, + ledger_db, + native_stf, + prev_root, + genesis_config, + prover, + )?; + + Ok(Rollup { + runner, + rpc_methods, + }) + } +} + +/// The possible configurations of the prover. +pub enum RollupProverConfig { + /// Run the rollup verification logic inside the current process + Simulate, + /// Run the rollup verifier in a zkVM executor + Execute, + /// Run the rollup verifier and create a SNARK of execution + Prove, +} + +/// Dependencies needed to run the rollup. +pub struct Rollup { + /// The State Transition Runner. + #[allow(clippy::type_complexity)] + pub runner: StateTransitionRunner< + AppTemplate, + S::DaService, + S::Vm, + AppTemplate::Guest, S::ZkRuntime>, + >, + /// Rpc methods for the rollup. + pub rpc_methods: jsonrpsee::RpcModule<()>, +} + +impl Rollup { + /// Runs the rollup. + pub async fn run(self) -> Result<(), anyhow::Error> { + self.run_and_report_rpc_port(None).await + } + + /// Runs the rollup. Reports rpc port to the caller using the provided channel. 
+ pub async fn run_and_report_rpc_port( + self, + channel: Option>, + ) -> Result<(), anyhow::Error> { + let mut runner = self.runner; + runner.start_rpc_server(self.rpc_methods, channel).await; + runner.run_in_process().await?; + Ok(()) + } +} + +type AppVerifier = + StateTransitionVerifier::Spec, Zk, RT>, DA, Zk>; + +type ZkProver = Prover< + AppTemplate::Spec, ::Guest, ZkRuntime>, + Da, + Vm, +>; + +fn configure_prover< + ZkContext: Context, + Vm: ZkvmHost, + Da: DaService, + RT: RuntimeTrait::Spec> + Default, +>( + vm: Vm, + cfg: RollupProverConfig, + da_verifier: Da::Verifier, + zk_storage: ::Storage, +) -> ZkProver { + let app = AppTemplate::new(zk_storage); + let app_verifier = AppVerifier::<_, _, ZkContext, RT>::new(app, da_verifier); + + let config = match cfg { + RollupProverConfig::Simulate => ProofGenConfig::Simulate(app_verifier), + RollupProverConfig::Execute => ProofGenConfig::Execute, + RollupProverConfig::Prove => ProofGenConfig::Prover, + }; + ZkProver { vm, config } +} diff --git a/module-system/sov-modules-stf-template/src/app_template.rs b/module-system/sov-modules-stf-template/src/app_template.rs index d440b8d3b..01a810c28 100644 --- a/module-system/sov-modules-stf-template/src/app_template.rs +++ b/module-system/sov-modules-stf-template/src/app_template.rs @@ -75,9 +75,9 @@ where RT: Runtime, { /// [`AppTemplate`] constructor. - pub fn new(storage: C::Storage, runtime: RT) -> Self { + pub fn new(storage: C::Storage) -> Self { Self { - runtime, + runtime: RT::default(), current_storage: storage, checkpoint: None, phantom_vm: PhantomData, diff --git a/module-system/sov-modules-stf-template/src/lib.rs b/module-system/sov-modules-stf-template/src/lib.rs index f50825d76..a81438f30 100644 --- a/module-system/sov-modules-stf-template/src/lib.rs +++ b/module-system/sov-modules-stf-template/src/lib.rs @@ -22,7 +22,7 @@ pub use tx_verifier::RawTx; /// This trait has to be implemented by a runtime in order to be used in `AppTemplate`. 
pub trait Runtime: DispatchCall - + Genesis + + Genesis + TxHooks + SlotHooks + FinalizeHook @@ -33,7 +33,10 @@ pub trait Runtime: <::BlobTransaction as BlobReaderTrait>::Address, >, > + BlobSelector + + Default { + /// GenesisConfig type. + type GenesisConfig: Send + Sync; } /// The receipts of all the transactions in a batch. diff --git a/module-system/sov-state/src/prover_storage.rs b/module-system/sov-state/src/prover_storage.rs index 1b1cc6c98..2e17f462f 100644 --- a/module-system/sov-state/src/prover_storage.rs +++ b/module-system/sov-state/src/prover_storage.rs @@ -60,13 +60,6 @@ impl ProverStorage { Err(e) => panic!("Unable to read value from db: {e}"), } } - - /// Get the root hash of the tree at the requested version - pub fn get_root_hash(&self, version: Version) -> Result { - let temp_merkle: JellyfishMerkleTree<'_, StateDB, S::Hasher> = - JellyfishMerkleTree::new(&self.db); - temp_merkle.get_root_hash(version) - } } impl Storage for ProverStorage { @@ -206,4 +199,10 @@ impl NativeStorage for ProverStorage { proof, } } + + fn get_root_hash(&self, version: Version) -> Result { + let temp_merkle: JellyfishMerkleTree<'_, StateDB, S::Hasher> = + JellyfishMerkleTree::new(&self.db); + temp_merkle.get_root_hash(version) + } } diff --git a/module-system/sov-state/src/storage.rs b/module-system/sov-state/src/storage.rs index a6654d29e..b538ed50c 100644 --- a/module-system/sov-state/src/storage.rs +++ b/module-system/sov-state/src/storage.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use borsh::{BorshDeserialize, BorshSerialize}; use hex; +use jmt::Version; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use sov_first_read_last_write_cache::{CacheKey, CacheValue}; @@ -168,6 +169,7 @@ pub trait Storage: Clone { + BorshSerialize + BorshDeserialize + Eq + + AsRef<[u8]> + Into<[u8; 32]>; // Require a one-way conversion from the state root to a 32-byte array. 
This can always be // implemented by hashing the state root even if the root itself is not 32 bytes. @@ -270,4 +272,7 @@ pub trait NativeStorage: Storage { /// Returns the value corresponding to the key or None if key is absent and a proof to /// get the value. fn get_with_proof(&self, key: StorageKey) -> StorageProof; + + /// Get the root hash of the tree at the requested version + fn get_root_hash(&self, version: Version) -> Result; } diff --git a/packages_to_publish.yml b/packages_to_publish.yml index 3c9c9dd1c..311aa3f45 100644 --- a/packages_to_publish.yml +++ b/packages_to_publish.yml @@ -9,6 +9,7 @@ - sov-modules-macros - sov-modules-api - sov-modules-stf-template +- sov-modules-rollup-template - sov-ledger-rpc # Modules diff --git a/rollup-interface/src/state_machine/stf.rs b/rollup-interface/src/state_machine/stf.rs index a6475d5e8..bbe14e726 100644 --- a/rollup-interface/src/state_machine/stf.rs +++ b/rollup-interface/src/state_machine/stf.rs @@ -95,7 +95,7 @@ pub struct SlotResult { /// - blob: Non serialised batch or anything else that can be posted on DA layer, like attestation or proof. pub trait StateTransitionFunction { /// Root hash of state merkle tree - type StateRoot: Serialize + DeserializeOwned + Clone; + type StateRoot: Serialize + DeserializeOwned + Clone + AsRef<[u8]>; /// The initial state of the rollup. type InitialState; diff --git a/rollup-interface/src/state_machine/zk/mod.rs b/rollup-interface/src/state_machine/zk/mod.rs index 0414f6dce..3cdb0e227 100644 --- a/rollup-interface/src/state_machine/zk/mod.rs +++ b/rollup-interface/src/state_machine/zk/mod.rs @@ -71,7 +71,7 @@ pub trait Zkvm { } /// A trait which is accessible from within a zkVM program. 
-pub trait ZkvmGuest: Zkvm { +pub trait ZkvmGuest: Zkvm + Send { /// Obtain "advice" non-deterministically from the host fn read_from_host(&self) -> T; /// Add a public output to the zkVM proof diff --git a/sov-rollup-starter/provers/risc0/guest-mock/src/bin/mock_da.rs b/sov-rollup-starter/provers/risc0/guest-mock/src/bin/mock_da.rs index 4fd9fb8c0..3ac2bcbc9 100644 --- a/sov-rollup-starter/provers/risc0/guest-mock/src/bin/mock_da.rs +++ b/sov-rollup-starter/provers/risc0/guest-mock/src/bin/mock_da.rs @@ -1,19 +1,6 @@ #![no_main] //! This binary implements the verification logic for the rollup. This is the code that runs inside //! of the zkvm in order to generate proofs for the rollup. -use stf_starter::{zk_stf, RollupVerifier}; - -use sov_risc0_adapter::guest::Risc0Guest; -use sov_rollup_interface::mocks::MockDaVerifier; - risc0_zkvm::guest::entry!(main); -pub fn main() { - let guest = Risc0Guest::new(); - - let mut stf_verifier = RollupVerifier::new(zk_stf(), MockDaVerifier {}); - - stf_verifier - .run_block(guest) - .expect("Prover must be honest"); -} +pub fn main() {} diff --git a/sov-rollup-starter/src/rollup.rs b/sov-rollup-starter/src/rollup.rs index 5619d2dff..0ef7db2e2 100644 --- a/sov-rollup-starter/src/rollup.rs +++ b/sov-rollup-starter/src/rollup.rs @@ -9,12 +9,12 @@ use sov_modules_api::Spec; use sov_modules_stf_template::{AppTemplate, SequencerOutcome, TxEffect}; use sov_rollup_interface::services::da::DaService; use sov_rollup_interface::zk::ZkvmHost; +use sov_state::storage::NativeStorage; use sov_stf_runner::{Prover, RollupConfig, RunnerConfig, StateTransitionRunner}; use stf_starter::{get_rpc_methods, GenesisConfig, Runtime, StfWithBuilder}; use tokio::sync::oneshot; use crate::register_rpc::register_sequencer; - type ZkStf = AppTemplate>; /// Dependencies needed to run the rollup. 
diff --git a/sov-rollup-starter/stf/src/builder.rs b/sov-rollup-starter/stf/src/builder.rs index be1f6f2de..87470d9fe 100644 --- a/sov-rollup-starter/stf/src/builder.rs +++ b/sov-rollup-starter/stf/src/builder.rs @@ -28,7 +28,7 @@ impl StfWithBuilder { }; let storage = ProverStorage::with_config(config).expect("Failed to open prover storage"); - let app = AppTemplate::new(storage.clone(), Runtime::default()); + let app = AppTemplate::new(storage.clone()); let batch_size_bytes = 1024 * 100; // 100 KB let batch_builder = FiFoStrictBatchBuilder::new( batch_size_bytes, diff --git a/sov-rollup-starter/stf/src/runtime.rs b/sov-rollup-starter/stf/src/runtime.rs index 6b645a783..a06ec6492 100644 --- a/sov-rollup-starter/stf/src/runtime.rs +++ b/sov-rollup-starter/stf/src/runtime.rs @@ -11,12 +11,11 @@ pub use sov_bank::{BankRpcImpl, BankRpcServer}; use sov_modules_api::capabilities::{BlobRefOrOwned, BlobSelector}; use sov_modules_api::default_context::ZkDefaultContext; use sov_modules_api::macros::DefaultRuntime; -use sov_modules_api::{Context, DaSpec, DispatchCall, Genesis, MessageCodec, Zkvm}; +use sov_modules_api::{Context, DaSpec, DispatchCall, Genesis, MessageCodec}; use sov_modules_stf_template::AppTemplate; use sov_rollup_interface::da::DaVerifier; #[cfg(feature = "native")] pub use sov_sequencer_registry::{SequencerRegistryRpcImpl, SequencerRegistryRpcServer}; -use sov_state::ZkStorage; use sov_stf_runner::verifier::StateTransitionVerifier; /// The runtime defines the logic of the rollup. @@ -74,6 +73,7 @@ where C: Context, Da: DaSpec, { + type GenesisConfig = GenesisConfig; } // Select which blobs will be executed in this slot. In this implementation simply execute all @@ -92,13 +92,6 @@ impl BlobSelector for Runtime { } } -/// Create the zk version of the STF. 
-pub fn zk_stf( -) -> AppTemplate> { - let storage = ZkStorage::new(); - AppTemplate::new(storage, Runtime::default()) -} - /// A verifier for the rollup pub type RollupVerifier = StateTransitionVerifier< AppTemplate< diff --git a/sov-rollup-starter/tests/bank/mod.rs b/sov-rollup-starter/tests/bank/mod.rs index d6f40093f..b6aabd9d0 100644 --- a/sov-rollup-starter/tests/bank/mod.rs +++ b/sov-rollup-starter/tests/bank/mod.rs @@ -13,6 +13,7 @@ use sov_sequencer::utils::SimpleClient; use stf_starter::{GenesisPaths, RuntimeCall}; use super::test_helpers::start_rollup; + const TOKEN_SALT: u64 = 0; const TOKEN_NAME: &str = "test_token"; From 893a5605d50fcfc83cd20ab0a8d67d3c63d67a9a Mon Sep 17 00:00:00 2001 From: Preston Evans <32944016+preston-evans98@users.noreply.github.com> Date: Thu, 19 Oct 2023 19:31:34 -0700 Subject: [PATCH 4/5] Re-export anyhow from rollup-interface (#1079) * Re-export anyhow from rollup-interface * fmt --- rollup-interface/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rollup-interface/src/lib.rs b/rollup-interface/src/lib.rs index 5a780ffea..859337810 100644 --- a/rollup-interface/src/lib.rs +++ b/rollup-interface/src/lib.rs @@ -9,5 +9,5 @@ pub use state_machine::*; mod node; pub use borsh::maybestd; -pub use digest; pub use node::*; +pub use {anyhow, digest}; From 08e842fc9865461375ddf1b4334f7c4411613c49 Mon Sep 17 00:00:00 2001 From: Blazej Kolad Date: Fri, 20 Oct 2023 12:23:22 +0200 Subject: [PATCH 5/5] Delete obsolete file (#1080) --- examples/demo-rollup/src/rollup.rs | 304 ----------------------------- 1 file changed, 304 deletions(-) delete mode 100644 examples/demo-rollup/src/rollup.rs diff --git a/examples/demo-rollup/src/rollup.rs b/examples/demo-rollup/src/rollup.rs deleted file mode 100644 index addccc34f..000000000 --- a/examples/demo-rollup/src/rollup.rs +++ /dev/null @@ -1,304 +0,0 @@ -use std::net::SocketAddr; -use std::path::Path; -use std::str::FromStr; - -use anyhow::Context; -use 
const_rollup_config::SEQUENCER_DA_ADDRESS; -use demo_stf::genesis_config::{get_genesis_config, GenesisPaths, StorageConfig}; -use demo_stf::runtime::{GenesisConfig, Runtime}; -use demo_stf::{create_zk_app_template, App, AppVerifier}; -use jsonrpsee::RpcModule; -#[cfg(feature = "experimental")] -use secp256k1::SecretKey; -use sov_celestia_adapter::verifier::address::CelestiaAddress; -use sov_celestia_adapter::verifier::{CelestiaVerifier, RollupParams}; -use sov_celestia_adapter::CelestiaService; -#[cfg(feature = "experimental")] -use sov_cli::wallet_state::PrivateKeyAndAddress; -use sov_db::ledger_db::LedgerDB; -#[cfg(feature = "experimental")] -use sov_ethereum::experimental::EthRpcConfig; -#[cfg(feature = "experimental")] -use sov_ethereum::GasPriceOracleConfig; -use sov_modules_api::default_context::{DefaultContext, ZkDefaultContext}; -#[cfg(feature = "experimental")] -use sov_modules_api::default_signature::private_key::DefaultPrivateKey; -use sov_modules_api::Spec; -use sov_modules_stf_template::{AppTemplate, SequencerOutcome, TxEffect}; -use sov_rollup_interface::da::DaSpec; -use sov_rollup_interface::mocks::{ - MockAddress, MockDaConfig, MockDaService, MOCK_SEQUENCER_DA_ADDRESS, -}; -use sov_rollup_interface::services::da::DaService; -use sov_rollup_interface::zk::ZkvmHost; -use sov_stf_runner::{ - from_toml_path, ProofGenConfig, Prover, RollupConfig, RunnerConfig, StateTransitionRunner, -}; -use tokio::sync::oneshot; -use tracing::debug; - -#[cfg(feature = "experimental")] -use crate::register_rpc::register_ethereum; -use crate::register_rpc::register_sequencer; -use crate::{initialize_ledger, ROLLUP_NAMESPACE}; - -#[cfg(feature = "experimental")] -const TX_SIGNER_PRIV_KEY_PATH: &str = "../test-data/keys/tx_signer_private_key.json"; - -type ZkStf = AppTemplate>; - -/// Dependencies needed to run the rollup. -pub struct Rollup { - // Implementation of the STF. - pub(crate) app: App, - // Data availability service. - pub(crate) da_service: Da, - // Ledger db. 
- pub(crate) ledger_db: LedgerDB, - // Runner configuration. - pub(crate) runner_config: RunnerConfig, - // Initial rollup configuration. - pub(crate) genesis_config: GenesisConfig, - #[cfg(feature = "experimental")] - /// Configuration for the Ethereum RPC. - pub eth_rpc_config: EthRpcConfig, - /// Prover for the rollup. - #[allow(clippy::type_complexity)] - pub(crate) prover: Option, Da, Vm>>, -} - -pub fn configure_prover( - vm: Vm, - cfg: DemoProverConfig, - da_verifier: Da::Verifier, -) -> Prover, Da, Vm> { - let config = match cfg { - DemoProverConfig::Simulate => ProofGenConfig::Simulate(AppVerifier::new( - create_zk_app_template::(), - da_verifier, - )), - DemoProverConfig::Execute => ProofGenConfig::Execute, - DemoProverConfig::Prove => ProofGenConfig::Prover, - }; - Prover { vm, config } -} - -/// The possible configurations of the demo prover -pub enum DemoProverConfig { - /// Run the rollup verification logic inside the current process - Simulate, - /// Run the rollup verifier in a zkVM executor - Execute, - /// Run the rollup verifier and create a SNARK of execution - Prove, -} - -/// Creates celestia based rollup. -pub async fn new_rollup_with_celestia_da>( - rollup_config_path: &str, - prover: Option<(Vm, DemoProverConfig)>, - genesis_paths: &GenesisPaths

, -) -> Result, anyhow::Error> { - debug!( - "Starting demo celestia rollup with config {}", - rollup_config_path - ); - - let rollup_config: RollupConfig = - from_toml_path(rollup_config_path).context("Failed to read rollup configuration")?; - - let ledger_db = initialize_ledger(&rollup_config.storage.path); - - let da_service = CelestiaService::new( - rollup_config.da.clone(), - RollupParams { - namespace: ROLLUP_NAMESPACE, - }, - ) - .await; - - let storage_config = StorageConfig { - path: rollup_config.storage.path, - }; - let app = App::new(storage_config); - let sequencer_da_address = CelestiaAddress::from_str(SEQUENCER_DA_ADDRESS)?; - - #[cfg(feature = "experimental")] - let eth_signer = read_eth_tx_signers(); - let genesis_config = get_genesis_config( - sequencer_da_address, - genesis_paths, - #[cfg(feature = "experimental")] - eth_signer.signers(), - ); - - let prover = prover.map(|(vm, config)| { - configure_prover( - vm, - config, - CelestiaVerifier { - rollup_namespace: ROLLUP_NAMESPACE, - }, - ) - }); - - Ok(Rollup { - app, - da_service, - ledger_db, - runner_config: rollup_config.runner, - genesis_config, - #[cfg(feature = "experimental")] - eth_rpc_config: EthRpcConfig { - min_blob_size: Some(1), - sov_tx_signer_priv_key: read_sov_tx_signer_priv_key()?, - eth_signer, - gas_price_oracle_config: GasPriceOracleConfig::default(), - }, - prover, - }) -} - -/// Creates MockDa based rollup. -pub fn new_rollup_with_mock_da>( - rollup_config_path: &str, - prover: Option<(Vm, DemoProverConfig)>, - genesis_paths: &GenesisPaths

, -) -> Result, anyhow::Error> { - debug!("Starting mock rollup with config {}", rollup_config_path); - - let rollup_config: RollupConfig = - from_toml_path(rollup_config_path).context("Failed to read rollup configuration")?; - - new_rollup_with_mock_da_from_config(rollup_config, prover, genesis_paths) -} - -/// Creates MockDa based rollup. -pub fn new_rollup_with_mock_da_from_config>( - rollup_config: RollupConfig, - prover: Option<(Vm, DemoProverConfig)>, - genesis_paths: &GenesisPaths

, -) -> Result, anyhow::Error> { - let ledger_db = initialize_ledger(&rollup_config.storage.path); - let sequencer_da_address = MockAddress::from(MOCK_SEQUENCER_DA_ADDRESS); - let da_service = MockDaService::new(sequencer_da_address); - - #[cfg(feature = "experimental")] - let eth_signer = read_eth_tx_signers(); - let storage_config = StorageConfig { - path: rollup_config.storage.path, - }; - let app = App::new(storage_config); - let genesis_config = get_genesis_config( - sequencer_da_address, - genesis_paths, - #[cfg(feature = "experimental")] - eth_signer.signers(), - ); - - let prover = prover.map(|(vm, config)| configure_prover(vm, config, Default::default())); - Ok(Rollup { - app, - da_service, - ledger_db, - runner_config: rollup_config.runner, - genesis_config, - #[cfg(feature = "experimental")] - eth_rpc_config: EthRpcConfig { - min_blob_size: Some(1), - sov_tx_signer_priv_key: read_sov_tx_signer_priv_key()?, - eth_signer, - gas_price_oracle_config: GasPriceOracleConfig::default(), - }, - prover, - }) -} - -#[cfg(feature = "experimental")] -/// Ethereum RPC wraps EVM transaction in a rollup transaction. -/// This function reads the private key of the rollup transaction signer. -fn read_sov_tx_signer_priv_key() -> Result { - let data = std::fs::read_to_string(TX_SIGNER_PRIV_KEY_PATH).context("Unable to read file")?; - - let key_and_address: PrivateKeyAndAddress = serde_json::from_str(&data) - .unwrap_or_else(|_| panic!("Unable to convert data {} to PrivateKeyAndAddress", &data)); - - Ok(key_and_address.private_key) -} - -// TODO: #840 -#[cfg(feature = "experimental")] -fn read_eth_tx_signers() -> sov_ethereum::DevSigner { - sov_ethereum::DevSigner::new(vec![SecretKey::from_str( - "ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80", - ) - .unwrap()]) -} - -impl + Clone> Rollup { - /// Runs the rollup. - pub async fn run(self) -> Result<(), anyhow::Error> { - self.run_and_report_rpc_port(None).await - } - - /// Runs the rollup. 
Reports rpc port to the caller using the provided channel. - pub async fn run_and_report_rpc_port( - mut self, - channel: Option>, - ) -> Result<(), anyhow::Error> { - let storage = self.app.get_storage(); - let last_slot_opt = self.ledger_db.get_head_slot()?; - let prev_root = last_slot_opt - .map(|(number, _)| storage.get_root_hash(number.0)) - .transpose()?; - - let rpc_module = self.rpc_module(storage)?; - - let mut runner = StateTransitionRunner::new( - self.runner_config, - self.da_service, - self.ledger_db, - self.app.stf, - prev_root, - self.genesis_config, - self.prover, - )?; - - runner.start_rpc_server(rpc_module, channel).await; - runner.run_in_process().await?; - - Ok(()) - } - - /// Creates a new [`jsonrpsee::RpcModule`] and registers all RPC methods - /// exposed by the node. - fn rpc_module( - &mut self, - storage: ::Storage, - ) -> anyhow::Result> { - let mut module = - demo_stf::runtime::get_rpc_methods::(storage.clone()); - - module.merge( - sov_ledger_rpc::server::rpc_module::< - LedgerDB, - SequencerOutcome<<::Spec as DaSpec>::Address>, - TxEffect, - >(self.ledger_db.clone())? - .remove_context(), - )?; - register_sequencer(self.da_service.clone(), &mut self.app, &mut module)?; - #[cfg(feature = "experimental")] - { - register_ethereum::( - self.da_service.clone(), - self.eth_rpc_config.clone(), - storage, - &mut module, - )?; - println!("Registered ethereum rpc"); - } - - Ok(module) - } -}