From 99f2e16cf2a35b7e77ba444b54b9698cc4d488f6 Mon Sep 17 00:00:00 2001
From: Victor Lopez
Date: Thu, 19 Oct 2023 20:19:20 +0200
Subject: [PATCH 01/28] feat: add `no-std` to `rollup-interface`

This commit introduces a `no-std` build for `rollup-interface`. This is
required to create WASM modules from module implementations; one use
case is the web wallet.

We follow an approach similar to `borsh` and create a `maybestd` module,
so dependent crates can rely on it for fallback implementations of
common stdlib components that are available either in `alloc` or in
commonly used crates.

Most of the requirements exist in `alloc`, but we have a couple of
exceptions. The first is `HashMap`, for which we fall back to
`hashbrown` when `std` isn't available. We don't expect issues with the
risc0 integration, as `risc0` code is compiled with `std`. The second is
`Mutex`; for that, we fall back to `spin`, another common choice in the
Rust ecosystem.

Finally, this commit changes the root `Cargo.toml` to disable default
features for crates that offer `no-std` support.
---
 Cargo.toml                                    | 19 +++++-----
 full-node/db/sov-db/Cargo.toml                |  2 +-
 rollup-interface/Cargo.toml                   | 38 ++++++++++++-------
 rollup-interface/src/lib.rs                   | 19 +++++++++-
 rollup-interface/src/node/rpc/mod.rs          | 20 ++++++----
 .../src/node/services/batch_builder.rs        |  2 +
 rollup-interface/src/node/services/da.rs      |  4 +-
 rollup-interface/src/state_machine/da.rs      | 12 ++++--
 .../src/state_machine/mocks/da.rs             | 30 ++++++++-------
 .../src/state_machine/mocks/mod.rs            |  2 +-
 .../state_machine/mocks/validity_condition.rs |  2 +-
 .../src/state_machine/mocks/zk_vm.rs          | 26 +++++++++----
 rollup-interface/src/state_machine/mod.rs     |  4 +-
 rollup-interface/src/state_machine/stf.rs     |  1 +
 .../src/state_machine/stf/fuzzing.rs          |  2 +
 rollup-interface/src/state_machine/zk/mod.rs  |  2 +-
 16 files changed, 122 insertions(+), 63 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 9932ee828..e1d36bf06 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -64,28 +64,29 @@ jmt = "0.8.0"
 
 # External dependencies
 async-trait = "0.1.71"
-anyhow = "1.0.68"
+anyhow = { version = "1.0.68", default-features = false }
 arbitrary = { version = "1.3.1", features = ["derive"] }
-borsh = { version = "0.10.3", features = ["rc", "bytes"] }
+borsh = { version = "0.10.3", default-features = false, features = ["rc", "bytes"] }
 # TODO: Consider replacing this serialization format
 # https://github.com/Sovereign-Labs/sovereign-sdk/issues/283
 bincode = "1.3.3"
 bcs = "0.1.5"
 byteorder = "1.5.0"
-bytes = "1.2.1"
+bytes = { version = "1.2.1", default-features = false }
+digest = { version = "0.10.6", default-features = false, features = ["alloc"] }
 futures = "0.3"
-hex = "0.4.3"
+hex = { version = "0.4.3", default-features = false, features = ["alloc", "serde"] }
 once_cell = "1.10.0"
 prometheus = { version = "0.13.3", default-features = false }
-proptest = "1.3.1"
+proptest = { version = "1.3.1", default-features = false, features = ["alloc"] }
 proptest-derive = "0.3.0"
 rand = "0.8"
 rayon = "1.8.0"
 rocksdb = { version = "0.21.0", features = ["lz4"] }
-serde = { version = "1.0.188", features = ["derive", "rc"] }
-serde_json = { version = "1.0" }
-sha2 = "0.10.6"
-digest = "0.10.6"
+serde = { version = "1.0.188", default-features = false, features = ["alloc", "derive", "rc"] }
+serde_json = { version = "1.0", default-features = false, features = ["alloc"] }
+sha2 = { version = "0.10.6", default-features = false }
+spin = "0.9.8"
 thiserror = "1.0.38"
 tiny-keccak = "2.0.2"
 tracing = "0.1.37"
diff --git a/full-node/db/sov-db/Cargo.toml b/full-node/db/sov-db/Cargo.toml index 650ae3b9f..8a4a653dc 100644 --- a/full-node/db/sov-db/Cargo.toml +++ b/full-node/db/sov-db/Cargo.toml @@ -17,7 +17,7 @@ resolver = "2" # Maintained by sovereign labs jmt = { workspace = true } sov-schema-db = { path = "../sov-schema-db", version = "0.2" } -sov-rollup-interface = { path = "../../../rollup-interface", version = "0.2", features = ["native", "mocks"] } +sov-rollup-interface = { path = "../../../rollup-interface", version = "0.2", features = ["native", "mocks", "tokio"] } # External anyhow = { workspace = true } diff --git a/rollup-interface/Cargo.toml b/rollup-interface/Cargo.toml index 56161665e..55f704be5 100644 --- a/rollup-interface/Cargo.toml +++ b/rollup-interface/Cargo.toml @@ -15,38 +15,48 @@ exclude = [ ] -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - [dependencies] +anyhow = { workspace = true, default-features = false } async-trait = { workspace = true } -borsh = { workspace = true, features = ["rc"] } -serde = { workspace = true } +borsh = { workspace = true } bytes = { workspace = true } -hex = { workspace = true, features = ["serde"] } digest = { workspace = true } +hex = { workspace = true } +serde = { workspace = true } sha2 = { workspace = true, optional = true } -thiserror = { workspace = true } +spin = { workspace = true } +thiserror = { workspace = true, optional = true } +tokio = { workspace = true, optional = true } # TODO: Replace with serde-compatible borsh implementation when it becomes availabile # see https://github.com/Sovereign-Labs/sovereign-sdk/issues/215 -bincode = { workspace = true } - -anyhow = { workspace = true } +bincode = { workspace = true, optional = true } # Proptest should be a dev-dependency, but those can't be optional proptest = { workspace = true, optional = true } proptest-derive = { workspace = true, optional = true } -tokio = { workspace = true, optional = true} - [dev-dependencies] -serde_json = "1" +serde_json = { workspace = true } proptest = { workspace = true } proptest-derive = { workspace = true } [features] -default = [] -native = ["tokio"] +default = ["std"] +native = [] fuzzing = ["proptest", "proptest-derive", "sha2"] mocks = ["sha2", "bytes/serde"] +std = [ + "anyhow/default", + "bincode", + "borsh/default", + "bytes/default", + "digest/default", + "hex/default", + "proptest?/default", + "serde/default", + "sha2?/default", + "thiserror" +] +tokio = ["dep:tokio", "std"] diff --git a/rollup-interface/src/lib.rs b/rollup-interface/src/lib.rs index 5a780ffea..7f4195d7f 100644 --- a/rollup-interface/src/lib.rs +++ b/rollup-interface/src/lib.rs @@ -2,12 +2,29 @@ //! It specifies the interfaces which allow the same "business logic" to run on different //! DA layers and be proven with different zkVMS, all while retaining compatibility //! with the same basic full node implementation. + +#![cfg_attr(not(feature = "std"), no_std)] #![deny(missing_docs)] + mod state_machine; pub use state_machine::*; mod node; -pub use borsh::maybestd; pub use digest; pub use node::*; + +/// A facade for the `std` crate. +pub mod maybestd { + pub use borsh::maybestd::{borrow, boxed, collections, format, io, rc, string, vec}; + + /// A facade for the `sync` std module. 
+ pub mod sync { + #[cfg(feature = "std")] + pub use std::sync::Mutex; + + pub use borsh::maybestd::sync::*; + #[cfg(not(feature = "std"))] + pub use spin::Mutex; + } +} diff --git a/rollup-interface/src/node/rpc/mod.rs b/rollup-interface/src/node/rpc/mod.rs index a3cead72c..1a7a598e1 100644 --- a/rollup-interface/src/node/rpc/mod.rs +++ b/rollup-interface/src/node/rpc/mod.rs @@ -3,9 +3,8 @@ #[cfg(feature = "native")] use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; -#[cfg(feature = "native")] -use tokio::sync::broadcast::Receiver; +use crate::maybestd::vec::Vec; #[cfg(feature = "native")] use crate::stf::Event; use crate::stf::EventKey; @@ -143,7 +142,7 @@ pub struct SlotResponse { #[serde(with = "utils::rpc_hex")] pub hash: [u8; 32], /// The range of batches in this slot. - pub batch_range: std::ops::Range, + pub batch_range: core::ops::Range, /// The batches in this slot, if the [`QueryMode`] of the request is not `Compact` #[serde(skip_serializing_if = "Option::is_none")] pub batches: Option>>>, @@ -156,7 +155,7 @@ pub struct BatchResponse { #[serde(with = "utils::rpc_hex")] pub hash: [u8; 32], /// The range of transactions in this batch. - pub tx_range: std::ops::Range, + pub tx_range: core::ops::Range, /// The transactions in this batch, if the [`QueryMode`] of the request is not `Compact`. #[serde(skip_serializing_if = "Option::is_none")] pub txs: Option>>>, @@ -172,7 +171,7 @@ pub struct TxResponse { #[serde(with = "utils::rpc_hex")] pub hash: [u8; 32], /// The range of events occurring in this transaction. - pub event_range: std::ops::Range, + pub event_range: core::ops::Range, /// The transaction body, if stored by the rollup. #[serde(skip_serializing_if = "Option::is_none")] pub body: Option>, @@ -303,7 +302,8 @@ pub trait LedgerRpcProvider { ) -> Result>>, anyhow::Error>; /// Get a notification each time a slot is processed - fn subscribe_slots(&self) -> Result, anyhow::Error>; + #[cfg(feature = "tokio")] + fn subscribe_slots(&self) -> Result, anyhow::Error>; } /// JSON-RPC -related utilities. Occasionally useful but unimportant for most @@ -312,12 +312,15 @@ pub mod utils { /// Serialization and deserialization logic for `0x`-prefixed hex strings. pub mod rpc_hex { use core::fmt; - use std::marker::PhantomData; + use core::marker::PhantomData; use hex::{FromHex, ToHex}; use serde::de::{Error, Visitor}; use serde::{Deserializer, Serializer}; + use crate::maybestd::format; + use crate::maybestd::string::String; + /// Serializes `data` as hex string using lowercase characters and prefixing with '0x'. /// /// Lowercase characters are used (e.g. `f9b4ca`). The resulting string's length @@ -382,6 +385,9 @@ pub mod utils { mod rpc_hex_tests { use serde::{Deserialize, Serialize}; + use crate::maybestd::vec; + use crate::maybestd::vec::Vec; + #[derive(Serialize, Deserialize, PartialEq, Debug)] struct TestStruct { #[serde(with = "super::utils::rpc_hex")] diff --git a/rollup-interface/src/node/services/batch_builder.rs b/rollup-interface/src/node/services/batch_builder.rs index bc53b626b..90037aada 100644 --- a/rollup-interface/src/node/services/batch_builder.rs +++ b/rollup-interface/src/node/services/batch_builder.rs @@ -1,5 +1,7 @@ //! This module defines the trait that is used to build batches of transactions. +use crate::maybestd::vec::Vec; + /// BlockBuilder trait is responsible for managing mempool and building batches. pub trait BatchBuilder { /// Accept a new transaction. 
diff --git a/rollup-interface/src/node/services/da.rs b/rollup-interface/src/node/services/da.rs index 11897803e..29c9b9047 100644 --- a/rollup-interface/src/node/services/da.rs +++ b/rollup-interface/src/node/services/da.rs @@ -1,11 +1,13 @@ //! The da module defines traits used by the full node to interact with the DA layer. -use std::fmt::{self, Display}; +use core::fmt::{self, Display}; use async_trait::async_trait; use serde::de::DeserializeOwned; use serde::Serialize; use crate::da::{BlockHeaderTrait, DaSpec, DaVerifier}; +use crate::maybestd::boxed::Box; +use crate::maybestd::vec::Vec; use crate::zk::ValidityCondition; /// A DaService is the local side of an RPC connection talking to a node of the DA layer diff --git a/rollup-interface/src/state_machine/da.rs b/rollup-interface/src/state_machine/da.rs index 6153b06f0..95c866c71 100644 --- a/rollup-interface/src/state_machine/da.rs +++ b/rollup-interface/src/state_machine/da.rs @@ -1,14 +1,14 @@ //! Defines traits and types used by the rollup to verify claims about the //! DA layer. +use core::cmp::min; use core::fmt::Debug; -use std::cmp::min; use borsh::{BorshDeserialize, BorshSerialize}; use bytes::Buf; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; -use thiserror::Error; +use crate::maybestd::vec::Vec; use crate::zk::ValidityCondition; use crate::BasicAddress; @@ -212,8 +212,12 @@ pub struct Time { nanos: u32, } -#[derive(Debug, Error)] -#[error("Only intervals less than one second may be represented as nanoseconds")] +#[derive(Debug)] +#[cfg_attr( + feature = "std", + derive(thiserror::Error), + error("Only intervals less than one second may be represented as nanoseconds") +)] /// An error that occurs when trying to create a `NanoSeconds` representing more than one second pub struct ErrTooManyNanos; diff --git a/rollup-interface/src/state_machine/mocks/da.rs b/rollup-interface/src/state_machine/mocks/da.rs index 5b27dc048..170f23a49 100644 --- a/rollup-interface/src/state_machine/mocks/da.rs +++ b/rollup-interface/src/state_machine/mocks/da.rs @@ -1,9 +1,11 @@ -use std::fmt::Display; -use std::str::FromStr; -#[cfg(feature = "native")] -use std::sync::Arc; - -#[cfg(feature = "native")] +use core::fmt::Display; +use core::str::FromStr; + +use crate::maybestd::string::String; +#[cfg(all(feature = "native", feature = "tokio"))] +use crate::maybestd::sync::Arc; +use crate::maybestd::vec::Vec; +#[cfg(all(feature = "native", feature = "tokio"))] use async_trait::async_trait; use borsh::{BorshDeserialize, BorshSerialize}; use bytes::Bytes; @@ -13,7 +15,7 @@ use crate::da::{ BlobReaderTrait, BlockHashTrait, BlockHeaderTrait, CountedBufReader, DaSpec, DaVerifier, Time, }; use crate::mocks::MockValidityCond; -#[cfg(feature = "native")] +#[cfg(all(feature = "native", feature = "tokio"))] use crate::services::da::DaService; use crate::services::da::SlotData; use crate::{BasicAddress, RollupAddress}; @@ -71,7 +73,7 @@ impl FromStr for MockAddress { type Err = anyhow::Error; fn from_str(s: &str) -> Result { - let addr = hex::decode(s)?; + let addr = hex::decode(s).map_err(anyhow::Error::msg)?; if addr.len() != 32 { return Err(anyhow::anyhow!("Invalid address length")); } @@ -108,7 +110,7 @@ impl From<[u8; 32]> for MockAddress { } impl Display for MockAddress { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!(f, "{}", hex::encode(self.addr)) } } @@ -302,12 +304,12 @@ impl DaSpec for MockDaSpec { type ChainParams = (); } 
-#[cfg(feature = "native")] +#[cfg(all(feature = "native", feature = "tokio"))] use tokio::sync::mpsc::{self, Receiver, Sender}; -#[cfg(feature = "native")] +#[cfg(all(feature = "native", feature = "tokio"))] use tokio::sync::Mutex; -#[cfg(feature = "native")] +#[cfg(all(feature = "native", feature = "tokio"))] #[derive(Clone)] /// DaService used in tests. pub struct MockDaService { @@ -316,7 +318,7 @@ pub struct MockDaService { sequencer_da_address: MockAddress, } -#[cfg(feature = "native")] +#[cfg(all(feature = "native", feature = "tokio"))] impl MockDaService { /// Creates a new MockDaService. pub fn new(sequencer_da_address: MockAddress) -> Self { @@ -329,7 +331,7 @@ impl MockDaService { } } -#[cfg(feature = "native")] +#[cfg(all(feature = "native", feature = "tokio"))] #[async_trait] impl DaService for MockDaService { type Spec = MockDaSpec; diff --git a/rollup-interface/src/state_machine/mocks/mod.rs b/rollup-interface/src/state_machine/mocks/mod.rs index 3afbfd835..613e5f750 100644 --- a/rollup-interface/src/state_machine/mocks/mod.rs +++ b/rollup-interface/src/state_machine/mocks/mod.rs @@ -4,7 +4,7 @@ mod da; mod validity_condition; mod zk_vm; -#[cfg(feature = "native")] +#[cfg(all(feature = "native", feature = "tokio"))] pub use da::MockDaService; pub use da::{ MockAddress, MockBlob, MockBlock, MockBlockHeader, MockDaConfig, MockDaSpec, MockDaVerifier, diff --git a/rollup-interface/src/state_machine/mocks/validity_condition.rs b/rollup-interface/src/state_machine/mocks/validity_condition.rs index f7ad4ceb2..c0cbd00f1 100644 --- a/rollup-interface/src/state_machine/mocks/validity_condition.rs +++ b/rollup-interface/src/state_machine/mocks/validity_condition.rs @@ -1,4 +1,4 @@ -use std::marker::PhantomData; +use core::marker::PhantomData; use anyhow::Error; use borsh::{BorshDeserialize, BorshSerialize}; diff --git a/rollup-interface/src/state_machine/mocks/zk_vm.rs b/rollup-interface/src/state_machine/mocks/zk_vm.rs index ca9eb4607..8b33e0eee 100644 --- a/rollup-interface/src/state_machine/mocks/zk_vm.rs +++ b/rollup-interface/src/state_machine/mocks/zk_vm.rs @@ -1,11 +1,10 @@ -use std::io::Write; - use anyhow::ensure; use borsh::{BorshDeserialize, BorshSerialize}; -use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; -use crate::zk::{Matches, Zkvm}; +use crate::maybestd::io; +use crate::maybestd::vec::Vec; +use crate::zk::Matches; /// A mock commitment to a particular zkVM program. #[derive(Debug, Clone, PartialEq, Eq, BorshDeserialize, BorshSerialize, Serialize, Deserialize)] @@ -30,7 +29,7 @@ pub struct MockProof<'a> { impl<'a> MockProof<'a> { /// Serializes a proof into a writer. - pub fn encode(&self, mut writer: impl Write) { + pub fn encode(&self, mut writer: impl io::Write) { writer.write_all(&self.program_id.0).unwrap(); let is_valid_byte = if self.is_valid { 1 } else { 0 }; writer.write_all(&[is_valid_byte]).unwrap(); @@ -61,7 +60,7 @@ impl<'a> MockProof<'a> { /// A mock implementing the zkVM trait. 
pub struct MockZkvm; -impl Zkvm for MockZkvm { +impl crate::zk::Zkvm for MockZkvm { type CodeCommitment = MockCodeCommitment; type Error = anyhow::Error; @@ -79,10 +78,11 @@ impl Zkvm for MockZkvm { Ok(proof.log) } + #[cfg(feature = "std")] fn verify_and_extract_output< Add: crate::RollupAddress, Da: crate::da::DaSpec, - Root: Serialize + DeserializeOwned, + Root: Serialize + serde::de::DeserializeOwned, >( serialized_proof: &[u8], code_commitment: &Self::CodeCommitment, @@ -90,6 +90,18 @@ impl Zkvm for MockZkvm { let output = Self::verify(serialized_proof, code_commitment)?; Ok(bincode::deserialize(output)?) } + + #[cfg(not(feature = "std"))] + fn verify_and_extract_output< + Add: crate::RollupAddress, + Da: crate::da::DaSpec, + Root: Serialize + serde::de::DeserializeOwned, + >( + _serialized_proof: &[u8], + _code_commitment: &Self::CodeCommitment, + ) -> Result, Self::Error> { + todo!("the current version of bincode doesn't support no-std; however, the next version is scheduled to") + } } #[test] diff --git a/rollup-interface/src/state_machine/mod.rs b/rollup-interface/src/state_machine/mod.rs index e5242e2e1..95810e5cc 100644 --- a/rollup-interface/src/state_machine/mod.rs +++ b/rollup-interface/src/state_machine/mod.rs @@ -23,10 +23,10 @@ pub trait BasicAddress: + Send + Sync + Clone - + std::hash::Hash + + core::hash::Hash + AsRef<[u8]> + for<'a> TryFrom<&'a [u8], Error = anyhow::Error> - + std::str::FromStr + + core::str::FromStr + Serialize + DeserializeOwned + 'static diff --git a/rollup-interface/src/state_machine/stf.rs b/rollup-interface/src/state_machine/stf.rs index bbe14e726..76ed6a30b 100644 --- a/rollup-interface/src/state_machine/stf.rs +++ b/rollup-interface/src/state_machine/stf.rs @@ -8,6 +8,7 @@ use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use crate::da::DaSpec; +use crate::maybestd::vec::Vec; use crate::zk::{ValidityCondition, Zkvm}; #[cfg(any(all(test, feature = "sha2"), feature = "fuzzing"))] diff --git a/rollup-interface/src/state_machine/stf/fuzzing.rs b/rollup-interface/src/state_machine/stf/fuzzing.rs index 96199f6b2..5d90e5821 100644 --- a/rollup-interface/src/state_machine/stf/fuzzing.rs +++ b/rollup-interface/src/state_machine/stf/fuzzing.rs @@ -6,6 +6,8 @@ use proptest::prelude::{any, Arbitrary}; use proptest::strategy::{BoxedStrategy, Strategy}; use super::{BatchReceipt, Event, TransactionReceipt}; +use crate::maybestd::boxed::Box; +use crate::maybestd::vec::Vec; /// An object-safe hashing trait, which is blanket implemented for all /// [`digest::Digest`] implementors. diff --git a/rollup-interface/src/state_machine/zk/mod.rs b/rollup-interface/src/state_machine/zk/mod.rs index 3cdb0e227..9e20cca55 100644 --- a/rollup-interface/src/state_machine/zk/mod.rs +++ b/rollup-interface/src/state_machine/zk/mod.rs @@ -48,7 +48,7 @@ pub trait Zkvm { + DeserializeOwned; /// The error type which is returned when a proof fails to verify - type Error: Debug + From; + type Error: Debug; /// Interpret a sequence of a bytes as a proof and attempt to verify it against the code commitment. /// If the proof is valid, return a reference to the public outputs of the proof. 
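A note on the `ErrTooManyNanos` change in this patch: `thiserror` 1.x requires `std`, so the `Error` derive (and the `Display` impl it generates from the `error(...)` attribute) is only attached when the `std` feature is enabled; without `std`, the type is left `Debug`-only. A minimal sketch of the same pattern, assuming a hypothetical `ErrOutOfRange` type and adding a hand-written `Display` for the `no-std` case (which the patch itself does not do):

    use core::fmt;

    /// A hypothetical unit error following the same `std`-gated pattern.
    #[derive(Debug)]
    #[cfg_attr(
        feature = "std",
        derive(thiserror::Error),
        error("value is out of range")
    )]
    pub struct ErrOutOfRange;

    // Without `std` there is no `thiserror`, so `Display` is written by hand;
    // `core::fmt` is available on every target.
    #[cfg(not(feature = "std"))]
    impl fmt::Display for ErrOutOfRange {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.write_str("value is out of range")
        }
    }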
From 955a38ad83e0b6d853866bbfa792fb02cf35f707 Mon Sep 17 00:00:00 2001 From: Victor Lopez Date: Thu, 19 Oct 2023 22:51:03 +0200 Subject: [PATCH 02/28] update fmt --- Cargo.lock | 1 + .../provers/risc0/guest-celestia/Cargo.lock | 80 +++++++++ .../provers/risc0/guest-mock/Cargo.lock | 153 ++++++++++++++++++ module-system/sov-modules-api/Cargo.toml | 1 + .../src/state_machine/mocks/da.rs | 8 +- .../provers/risc0/guest-mock/Cargo.lock | 153 ++++++++++++++++++ 6 files changed, 392 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a24fcaf74..0446da770 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8830,6 +8830,7 @@ dependencies = [ "serde", "serde_json", "sha2 0.10.8", + "spin 0.9.8", "thiserror", "tokio", ] diff --git a/examples/demo-rollup/provers/risc0/guest-celestia/Cargo.lock b/examples/demo-rollup/provers/risc0/guest-celestia/Cargo.lock index 8b1d3159e..d72bc1bf2 100644 --- a/examples/demo-rollup/provers/risc0/guest-celestia/Cargo.lock +++ b/examples/demo-rollup/provers/risc0/guest-celestia/Cargo.lock @@ -247,6 +247,21 @@ dependencies = [ "serde", ] +[[package]] +name = "bit-set" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" + [[package]] name = "bitflags" version = "1.3.2" @@ -822,6 +837,12 @@ dependencies = [ "paste", ] +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + [[package]] name = "funty" version = "2.0.0" @@ -1091,6 +1112,16 @@ version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3852614a3bd9ca9804678ba6be5e3b8ce76dfc902cae004e3e0c44051b6e88db" +[[package]] +name = "lock_api" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +dependencies = [ + "autocfg", + "scopeguard", +] + [[package]] name = "log" version = "0.4.20" @@ -1393,6 +1424,8 @@ version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c003ac8c77cb07bb74f5f198bce836a689bcd5a42574612bf14d17bfd08c20e" dependencies = [ + "bit-set", + "bit-vec", "bitflags 2.4.0", "lazy_static", "num-traits", @@ -1400,6 +1433,8 @@ dependencies = [ "rand_chacha", "rand_xorshift", "regex-syntax", + "rusty-fork", + "tempfile", "unarray", ] @@ -1480,6 +1515,12 @@ dependencies = [ "prost 0.12.1", ] +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + [[package]] name = "quote" version = "1.0.33" @@ -1747,6 +1788,18 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + [[package]] name = "ryu" version = "1.0.15" @@ -1777,6 +1830,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "scopeguard" +version = "1.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + [[package]] name = "semver" version = "0.11.0" @@ -2110,7 +2169,10 @@ dependencies = [ "bytes", "digest 0.10.7", "hex", + "proptest", "serde", + "sha2 0.10.8", + "spin", "thiserror", ] @@ -2186,6 +2248,15 @@ dependencies = [ "risc0-zkvm-platform", ] +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] + [[package]] name = "spki" version = "0.7.2" @@ -2469,6 +2540,15 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" diff --git a/examples/demo-rollup/provers/risc0/guest-mock/Cargo.lock b/examples/demo-rollup/provers/risc0/guest-mock/Cargo.lock index a0c622f9e..0caaaa282 100644 --- a/examples/demo-rollup/provers/risc0/guest-mock/Cargo.lock +++ b/examples/demo-rollup/provers/risc0/guest-mock/Cargo.lock @@ -67,6 +67,21 @@ dependencies = [ "serde", ] +[[package]] +name = "bit-set" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" + [[package]] name = "bitflags" version = "1.3.2" @@ -367,6 +382,12 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0870c84016d4b481be5c9f323c24f65e31e901ae618f0e80f4308fb00de1d2d" +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + [[package]] name = "generic-array" version = "0.14.7" @@ -479,6 +500,12 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + [[package]] name = "libc" version = "0.2.148" @@ -497,6 +524,16 @@ version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128" +[[package]] +name = "lock_api" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +dependencies = [ + "autocfg", + "scopeguard", +] + [[package]] name = "log" version = "0.4.20" @@ -538,6 +575,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" dependencies = [ "autocfg", + "libm", ] [[package]] @@ -564,6 +602,12 @@ version = "3.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"4503fa043bf02cee09a9582e9554b4c6403b2ef55e4612e96561d294419429f8" +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + [[package]] name = "proc-macro-crate" version = "0.1.5" @@ -582,6 +626,26 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "proptest" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c003ac8c77cb07bb74f5f198bce836a689bcd5a42574612bf14d17bfd08c20e" +dependencies = [ + "bit-set", + "bit-vec", + "bitflags 2.4.0", + "lazy_static", + "num-traits", + "rand", + "rand_chacha", + "rand_xorshift", + "regex-syntax", + "rusty-fork", + "tempfile", + "unarray", +] + [[package]] name = "prost" version = "0.11.9" @@ -605,6 +669,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + [[package]] name = "quote" version = "1.0.33" @@ -614,11 +684,44 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core", +] [[package]] name = "redox_syscall" @@ -629,6 +732,12 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "regex-syntax" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" + [[package]] name = "ripemd" version = "0.1.3" @@ -750,6 +859,18 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + [[package]] name = "ryu" version = "1.0.15" @@ -780,6 +901,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + [[package]] name = "semver" version = "1.0.18" @@ -1025,8 +1152,10 @@ dependencies = [ "bytes", "digest", "hex", + "proptest", "serde", "sha2", + "spin", "thiserror", ] @@ -1091,6 +1220,15 @@ dependencies = [ "risc0-zkvm-platform", ] +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] + [[package]] name = "subtle" version = "2.5.0" @@ -1199,6 +1337,12 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + [[package]] name = "unicode-ident" version = "1.0.12" @@ -1211,6 +1355,15 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" diff --git a/module-system/sov-modules-api/Cargo.toml b/module-system/sov-modules-api/Cargo.toml index dcb6a207a..62b2a3e1d 100644 --- a/module-system/sov-modules-api/Cargo.toml +++ b/module-system/sov-modules-api/Cargo.toml @@ -51,6 +51,7 @@ arbitrary = [ "dep:arbitrary", "dep:proptest", "dep:proptest-derive", + "proptest/default", "sov-state/arbitrary", ] bench = ["sov-zk-cycle-macros", "risc0-zkvm", "risc0-zkvm-platform"] diff --git a/rollup-interface/src/state_machine/mocks/da.rs b/rollup-interface/src/state_machine/mocks/da.rs index 170f23a49..7e68af7e1 100644 --- a/rollup-interface/src/state_machine/mocks/da.rs +++ b/rollup-interface/src/state_machine/mocks/da.rs @@ -1,10 +1,6 @@ use core::fmt::Display; use core::str::FromStr; -use crate::maybestd::string::String; -#[cfg(all(feature = "native", feature = "tokio"))] -use crate::maybestd::sync::Arc; -use crate::maybestd::vec::Vec; #[cfg(all(feature = "native", feature = "tokio"))] use async_trait::async_trait; use borsh::{BorshDeserialize, BorshSerialize}; @@ -14,6 +10,10 @@ use serde::{Deserialize, Serialize}; use crate::da::{ BlobReaderTrait, BlockHashTrait, BlockHeaderTrait, CountedBufReader, DaSpec, DaVerifier, Time, }; +use crate::maybestd::string::String; +#[cfg(all(feature = "native", feature = "tokio"))] +use crate::maybestd::sync::Arc; +use crate::maybestd::vec::Vec; use crate::mocks::MockValidityCond; #[cfg(all(feature = "native", feature = "tokio"))] use crate::services::da::DaService; diff --git a/sov-rollup-starter/provers/risc0/guest-mock/Cargo.lock b/sov-rollup-starter/provers/risc0/guest-mock/Cargo.lock index 2d87daccd..694c6650d 100644 --- a/sov-rollup-starter/provers/risc0/guest-mock/Cargo.lock +++ b/sov-rollup-starter/provers/risc0/guest-mock/Cargo.lock @@ -67,6 +67,21 @@ dependencies = [ "serde", ] +[[package]] +name = "bit-set" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" + [[package]] name = "bitflags" version = "1.3.2" @@ -343,6 +358,12 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0870c84016d4b481be5c9f323c24f65e31e901ae618f0e80f4308fb00de1d2d" +[[package]] +name = 
"fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + [[package]] name = "generic-array" version = "0.14.7" @@ -467,6 +488,12 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + [[package]] name = "libc" version = "0.2.148" @@ -485,6 +512,16 @@ version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128" +[[package]] +name = "lock_api" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +dependencies = [ + "autocfg", + "scopeguard", +] + [[package]] name = "log" version = "0.4.20" @@ -526,6 +563,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" dependencies = [ "autocfg", + "libm", ] [[package]] @@ -552,6 +590,12 @@ version = "3.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4503fa043bf02cee09a9582e9554b4c6403b2ef55e4612e96561d294419429f8" +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + [[package]] name = "proc-macro-crate" version = "0.1.5" @@ -570,6 +614,26 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "proptest" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c003ac8c77cb07bb74f5f198bce836a689bcd5a42574612bf14d17bfd08c20e" +dependencies = [ + "bit-set", + "bit-vec", + "bitflags 2.4.0", + "lazy_static", + "num-traits", + "rand", + "rand_chacha", + "rand_xorshift", + "regex-syntax", + "rusty-fork", + "tempfile", + "unarray", +] + [[package]] name = "prost" version = "0.11.9" @@ -593,6 +657,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + [[package]] name = "quote" version = "1.0.33" @@ -602,11 +672,44 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core", +] [[package]] name = "redox_syscall" @@ -617,6 +720,12 
@@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "regex-syntax" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" + [[package]] name = "ripemd" version = "0.1.3" @@ -738,6 +847,18 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + [[package]] name = "ryu" version = "1.0.15" @@ -768,6 +889,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + [[package]] name = "semver" version = "1.0.20" @@ -948,8 +1075,10 @@ dependencies = [ "bytes", "digest", "hex", + "proptest", "serde", "sha2", + "spin", "thiserror", ] @@ -1002,6 +1131,15 @@ dependencies = [ "risc0-zkvm-platform", ] +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] + [[package]] name = "stf-starter" version = "0.2.0" @@ -1128,6 +1266,12 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + [[package]] name = "unicode-ident" version = "1.0.12" @@ -1140,6 +1284,15 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" From 5bf960838f8cec86cf78165fe7f9f5c08267a078 Mon Sep 17 00:00:00 2001 From: Victor Lopez Date: Thu, 19 Oct 2023 23:07:38 +0200 Subject: [PATCH 03/28] add to_string to mock da test --- rollup-interface/src/state_machine/mocks/da.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/rollup-interface/src/state_machine/mocks/da.rs b/rollup-interface/src/state_machine/mocks/da.rs index 7e68af7e1..597baf57b 100644 --- a/rollup-interface/src/state_machine/mocks/da.rs +++ b/rollup-interface/src/state_machine/mocks/da.rs @@ -414,6 +414,7 @@ impl DaVerifier for MockDaVerifier { #[cfg(test)] mod tests { use super::*; + use crate::maybestd::string::ToString; #[test] fn test_mock_address_string() { From 70283d80044d9e2a3d6cf5b55bbd6ced758fc246 Mon Sep 17 00:00:00 2001 From: Victor Lopez Date: Fri, 20 Oct 2023 00:26:14 +0200 Subject: [PATCH 04/28] fix sov-schema-db --- full-node/db/sov-schema-db/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/full-node/db/sov-schema-db/Cargo.toml b/full-node/db/sov-schema-db/Cargo.toml index bd9a5745c..273d035bd 100644 --- a/full-node/db/sov-schema-db/Cargo.toml +++ b/full-node/db/sov-schema-db/Cargo.toml @@ -14,7 +14,7 @@ readme = 
"README.md" [dependencies] # External dependencies -anyhow = { workspace = true } +anyhow = { workspace = true, default-features = true } once_cell = { workspace = true } prometheus = { workspace = true } rocksdb = { workspace = true } From a9a3944fc19ec7cc38d0fc3d6f8195d0e8317aa1 Mon Sep 17 00:00:00 2001 From: Victor Lopez Date: Fri, 20 Oct 2023 11:10:23 +0200 Subject: [PATCH 05/28] add `no-std` to `sov-schema-db` --- Cargo.lock | 1 + Cargo.toml | 4 +- full-node/db/sov-schema-db/Cargo.toml | 32 +- full-node/db/sov-schema-db/src/db.rs | 251 ++++++++++++++++ full-node/db/sov-schema-db/src/iterator.rs | 31 +- full-node/db/sov-schema-db/src/lib.rs | 328 +++++---------------- 6 files changed, 369 insertions(+), 278 deletions(-) create mode 100644 full-node/db/sov-schema-db/src/db.rs diff --git a/Cargo.lock b/Cargo.lock index 0446da770..3219d2642 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8874,6 +8874,7 @@ dependencies = [ "once_cell", "prometheus 0.13.3", "rocksdb", + "sov-rollup-interface", "tempfile", "thiserror", "tracing", diff --git a/Cargo.toml b/Cargo.toml index e1d36bf06..5bc459b93 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -76,7 +76,7 @@ bytes = { version = "1.2.1", default-features = false } digest = { version = "0.10.6", default-features = false, features = ["alloc"] } futures = "0.3" hex = { version = "0.4.3", default-features = false, features = ["alloc", "serde"] } -once_cell = "1.10.0" +once_cell = { version = "1.10.0", default-features = false, features = ["alloc"] } prometheus = { version = "0.13.3", default-features = false } proptest = { version = "1.3.1", default-features = false, features = ["alloc"] } proptest-derive = "0.3.0" @@ -89,7 +89,7 @@ sha2 = { version = "0.10.6", default-features = false } spin = "0.9.8" thiserror = "1.0.38" tiny-keccak = "2.0.2" -tracing = "0.1.37" +tracing = { version = "0.1.37", default-features = false } bech32 = "0.9.1" derive_more = "0.99.11" clap = { version = "4.2.7", features = ["derive"] } diff --git a/full-node/db/sov-schema-db/Cargo.toml b/full-node/db/sov-schema-db/Cargo.toml index 273d035bd..3e2255819 100644 --- a/full-node/db/sov-schema-db/Cargo.toml +++ b/full-node/db/sov-schema-db/Cargo.toml @@ -14,13 +14,35 @@ readme = "README.md" [dependencies] # External dependencies -anyhow = { workspace = true, default-features = true } -once_cell = { workspace = true } -prometheus = { workspace = true } -rocksdb = { workspace = true } +anyhow = { workspace = true } +once_cell = { workspace = true, optional = true, default-features = true } +prometheus = { workspace = true, optional = true } +rocksdb = { workspace = true, optional = true } tracing = { workspace = true } -thiserror = { workspace = true } +thiserror = { workspace = true, optional = true } + +sov-rollup-interface = { path = "../../../rollup-interface", default-features = false } [dev-dependencies] byteorder = { workspace = true } tempfile = { workspace = true } + +[features] +default = ["std"] +std = [ + "anyhow/default", + "once_cell", + "prometheus", + "rocksdb", + "sov-rollup-interface/default", + "thiserror", + "tracing/default", +] + +[[test]] +name = "db_test" +required-features = ["std"] + +[[test]] +name = "iterator_test" +required-features = ["std"] diff --git a/full-node/db/sov-schema-db/src/db.rs b/full-node/db/sov-schema-db/src/db.rs new file mode 100644 index 000000000..c7c2dbb07 --- /dev/null +++ b/full-node/db/sov-schema-db/src/db.rs @@ -0,0 +1,251 @@ +use std::path::Path; + +use anyhow::format_err; +use rocksdb::ReadOptions; +use tracing::info; + +use 
crate::iterator::{ScanDirection, SchemaIterator}; +use crate::metrics::{ + SCHEMADB_BATCH_COMMIT_BYTES, SCHEMADB_BATCH_COMMIT_LATENCY_SECONDS, SCHEMADB_DELETES, + SCHEMADB_GET_BYTES, SCHEMADB_GET_LATENCY_SECONDS, SCHEMADB_PUT_BYTES, +}; +use crate::schema::Schema; +use crate::schema::{ColumnFamilyName, KeyCodec, ValueCodec}; +use crate::{SchemaBatch, WriteOp}; + +/// This DB is a schematized RocksDB wrapper where all data passed in and out are typed according to +/// [`Schema`]s. +#[derive(Debug)] +pub struct DB { + name: &'static str, // for logging + inner: rocksdb::DB, +} + +impl DB { + /// Opens a database backed by RocksDB, using the provided column family names and default + /// column family options. + pub fn open( + path: impl AsRef, + name: &'static str, + column_families: impl IntoIterator>, + db_opts: &rocksdb::Options, + ) -> anyhow::Result { + let db = DB::open_with_cfds( + db_opts, + path, + name, + column_families.into_iter().map(|cf_name| { + let mut cf_opts = rocksdb::Options::default(); + cf_opts.set_compression_type(rocksdb::DBCompressionType::Lz4); + rocksdb::ColumnFamilyDescriptor::new(cf_name, cf_opts) + }), + )?; + Ok(db) + } + + /// Open RocksDB with the provided column family descriptors. + /// This allows configuring options for each column family. + pub fn open_with_cfds( + db_opts: &rocksdb::Options, + path: impl AsRef, + name: &'static str, + cfds: impl IntoIterator, + ) -> anyhow::Result { + let inner = rocksdb::DB::open_cf_descriptors(db_opts, path, cfds)?; + Ok(Self::log_construct(name, inner)) + } + + /// Open db in readonly mode. This db is completely static, so any writes that occur on the primary + /// after it has been opened will not be visible to the readonly instance. + pub fn open_cf_readonly( + opts: &rocksdb::Options, + path: impl AsRef, + name: &'static str, + cfs: Vec, + ) -> anyhow::Result { + let error_if_log_file_exists = false; + let inner = rocksdb::DB::open_cf_for_read_only(opts, path, cfs, error_if_log_file_exists)?; + + Ok(Self::log_construct(name, inner)) + } + + /// Open db in secondary mode. A secondary db does not support writes, but can be dynamically caught up + /// to the primary instance by a manual call. See + /// for more details. + pub fn open_cf_as_secondary>( + opts: &rocksdb::Options, + primary_path: P, + secondary_path: P, + name: &'static str, + cfs: Vec, + ) -> anyhow::Result { + let inner = rocksdb::DB::open_cf_as_secondary(opts, primary_path, secondary_path, cfs)?; + Ok(Self::log_construct(name, inner)) + } + + fn log_construct(name: &'static str, inner: rocksdb::DB) -> DB { + info!(rocksdb_name = name, "Opened RocksDB."); + DB { name, inner } + } + + /// Reads single record by key. + pub fn get( + &self, + schema_key: &impl KeyCodec, + ) -> anyhow::Result> { + let _timer = SCHEMADB_GET_LATENCY_SECONDS + .with_label_values(&[S::COLUMN_FAMILY_NAME]) + .start_timer(); + + let k = schema_key.encode_key()?; + let cf_handle = self.get_cf_handle(S::COLUMN_FAMILY_NAME)?; + + let result = self.inner.get_cf(cf_handle, k)?; + SCHEMADB_GET_BYTES + .with_label_values(&[S::COLUMN_FAMILY_NAME]) + .observe(result.as_ref().map_or(0.0, |v| v.len() as f64)); + + result + .map(|raw_value| >::decode_value(&raw_value)) + .transpose() + .map_err(|err| err.into()) + } + + /// Writes single record. + pub fn put( + &self, + key: &impl KeyCodec, + value: &impl ValueCodec, + ) -> anyhow::Result<()> { + // Not necessary to use a batch, but we'd like a central place to bump counters. + // Used in tests only anyway.
+ let batch = SchemaBatch::new(); + batch.put::(key, value)?; + self.write_schemas(batch) + } + + fn iter_with_direction( + &self, + opts: ReadOptions, + direction: ScanDirection, + ) -> anyhow::Result> { + let cf_handle = self.get_cf_handle(S::COLUMN_FAMILY_NAME)?; + Ok(SchemaIterator::new( + self.inner.raw_iterator_cf_opt(cf_handle, opts), + direction, + )) + } + + /// Returns a forward [`SchemaIterator`] on a certain schema with the default read options. + pub fn iter(&self) -> anyhow::Result> { + self.iter_with_direction::(Default::default(), ScanDirection::Forward) + } + + /// Returns a forward [`SchemaIterator`] on a certain schema with the provided read options. + pub fn iter_with_opts( + &self, + opts: ReadOptions, + ) -> anyhow::Result> { + self.iter_with_direction::(opts, ScanDirection::Forward) + } + + /// Returns a backward [`SchemaIterator`] on a certain schema with the default read options. + pub fn rev_iter(&self) -> anyhow::Result> { + self.iter_with_direction::(Default::default(), ScanDirection::Backward) + } + + /// Returns a backward [`SchemaIterator`] on a certain schema with the provided read options. + pub fn rev_iter_with_opts( + &self, + opts: ReadOptions, + ) -> anyhow::Result> { + self.iter_with_direction::(opts, ScanDirection::Backward) + } + + /// Writes a group of records wrapped in a [`SchemaBatch`]. + pub fn write_schemas(&self, batch: SchemaBatch) -> anyhow::Result<()> { + let _timer = SCHEMADB_BATCH_COMMIT_LATENCY_SECONDS + .with_label_values(&[self.name]) + .start_timer(); + let rows_locked = batch.rows.lock().expect("Lock must not be poisoned"); + + let mut db_batch = rocksdb::WriteBatch::default(); + for (cf_name, rows) in rows_locked.iter() { + let cf_handle = self.get_cf_handle(cf_name)?; + for write_op in rows { + match write_op { + WriteOp::Value { key, value } => db_batch.put_cf(cf_handle, key, value), + WriteOp::Deletion { key } => db_batch.delete_cf(cf_handle, key), + } + } + } + let serialized_size = db_batch.size_in_bytes(); + + self.inner.write_opt(db_batch, &default_write_options())?; + + // Bump counters only after DB write succeeds. + for (cf_name, rows) in rows_locked.iter() { + for write_op in rows { + match write_op { + WriteOp::Value { key, value } => { + SCHEMADB_PUT_BYTES + .with_label_values(&[cf_name]) + .observe((key.len() + value.len()) as f64); + } + WriteOp::Deletion { key: _ } => { + SCHEMADB_DELETES.with_label_values(&[cf_name]).inc(); + } + } + } + } + SCHEMADB_BATCH_COMMIT_BYTES + .with_label_values(&[self.name]) + .observe(serialized_size as f64); + + Ok(()) + } + + fn get_cf_handle(&self, cf_name: &str) -> anyhow::Result<&rocksdb::ColumnFamily> { + self.inner.cf_handle(cf_name).ok_or_else(|| { + format_err!( + "DB::cf_handle not found for column family name: {}", + cf_name + ) + }) + } + + /// Flushes [MemTable](https://github.com/facebook/rocksdb/wiki/MemTable) data. + /// This is only used for testing `get_approximate_sizes_cf` in unit tests. + pub fn flush_cf(&self, cf_name: &str) -> anyhow::Result<()> { + Ok(self.inner.flush_cf(self.get_cf_handle(cf_name)?)?) + } + + /// Returns the current RocksDB property value for the provided column family name + /// and property name. + pub fn get_property(&self, cf_name: &str, property_name: &str) -> anyhow::Result { + self.inner + .property_int_value_cf(self.get_cf_handle(cf_name)?, property_name)? 
+ .ok_or_else(|| { + format_err!( + "Unable to get property \"{}\" of column family \"{}\".", + property_name, + cf_name, + ) + }) + } + + /// Creates new physical DB checkpoint in directory specified by `path`. + pub fn create_checkpoint>(&self, path: P) -> anyhow::Result<()> { + rocksdb::checkpoint::Checkpoint::new(&self.inner)?.create_checkpoint(path)?; + Ok(()) + } +} + +/// For now we always use synchronous writes. This makes sure that once the operation returns +/// `Ok(())` the data is persisted even if the machine crashes. In the future we might consider +/// selectively turning this off for some non-critical writes to improve performance. +fn default_write_options() -> rocksdb::WriteOptions { + let mut opts = rocksdb::WriteOptions::default(); + opts.set_sync(true); + opts +} diff --git a/full-node/db/sov-schema-db/src/iterator.rs b/full-node/db/sov-schema-db/src/iterator.rs index be288e639..3e0e88c64 100644 --- a/full-node/db/sov-schema-db/src/iterator.rs +++ b/full-node/db/sov-schema-db/src/iterator.rs @@ -1,10 +1,8 @@ -use std::iter::FusedIterator; -use std::marker::PhantomData; - -use anyhow::Result; - +#[cfg(feature = "std")] use crate::metrics::{SCHEMADB_ITER_BYTES, SCHEMADB_ITER_LATENCY_SECONDS}; -use crate::schema::{KeyDecoder, Schema, ValueCodec}; +use crate::schema::Schema; +#[cfg(feature = "std")] +use crate::schema::{KeyDecoder, ValueCodec}; /// This defines a type that can be used to seek a [`SchemaIterator`], via /// interfaces like [`SchemaIterator::seek`]. Mind you, not all @@ -28,6 +26,8 @@ pub trait SeekKeyEncoder: Sized { fn encode_seek_key(&self) -> crate::schema::Result>; } +// unused outside `std` +#[cfg(feature = "std")] pub(crate) enum ScanDirection { Forward, Backward, @@ -35,12 +35,14 @@ pub(crate) enum ScanDirection { /// DB Iterator parameterized on [`Schema`] that seeks with [`Schema::Key`] and yields /// [`Schema::Key`] and [`Schema::Value`] pairs. +#[cfg(feature = "std")] pub struct SchemaIterator<'a, S> { db_iter: rocksdb::DBRawIterator<'a>, direction: ScanDirection, - phantom: PhantomData, + phantom: core::marker::PhantomData, } +#[cfg(feature = "std")] impl<'a, S> SchemaIterator<'a, S> where S: Schema, @@ -49,7 +51,7 @@ where SchemaIterator { db_iter, direction, - phantom: PhantomData, + phantom: core::marker::PhantomData, } } @@ -65,7 +67,7 @@ where /// Seeks to the first key whose binary representation is equal to or greater than that of the /// `seek_key`. - pub fn seek(&mut self, seek_key: &impl SeekKeyEncoder) -> Result<()> { + pub fn seek(&mut self, seek_key: &impl SeekKeyEncoder) -> anyhow::Result<()> { let key = seek_key.encode_seek_key()?; self.db_iter.seek(&key); Ok(()) @@ -75,13 +77,13 @@ where /// `seek_key`. /// /// See example in [`RocksDB doc`](https://github.com/facebook/rocksdb/wiki/SeekForPrev). 
- pub fn seek_for_prev(&mut self, seek_key: &impl SeekKeyEncoder) -> Result<()> { + pub fn seek_for_prev(&mut self, seek_key: &impl SeekKeyEncoder) -> anyhow::Result<()> { let key = seek_key.encode_seek_key()?; self.db_iter.seek_for_prev(&key); Ok(()) } - fn next_impl(&mut self) -> Result> { + fn next_impl(&mut self) -> anyhow::Result> { let _timer = SCHEMADB_ITER_LATENCY_SECONDS .with_label_values(&[S::COLUMN_FAMILY_NAME]) .start_timer(); @@ -93,6 +95,7 @@ where let raw_key = self.db_iter.key().expect("db_iter.key() failed."); let raw_value = self.db_iter.value().expect("db_iter.value() failed."); + SCHEMADB_ITER_BYTES .with_label_values(&[S::COLUMN_FAMILY_NAME]) .observe((raw_key.len() + raw_value.len()) as f64); @@ -109,15 +112,17 @@ where } } +#[cfg(feature = "std")] impl<'a, S> Iterator for SchemaIterator<'a, S> where S: Schema, { - type Item = Result<(S::Key, S::Value)>; + type Item = anyhow::Result<(S::Key, S::Value)>; fn next(&mut self) -> Option { self.next_impl().transpose() } } -impl<'a, S> FusedIterator for SchemaIterator<'a, S> where S: Schema {} +#[cfg(feature = "std")] +impl<'a, S> core::iter::FusedIterator for SchemaIterator<'a, S> where S: Schema {} diff --git a/full-node/db/sov-schema-db/src/lib.rs b/full-node/db/sov-schema-db/src/lib.rs index 3a39d64d3..afe4d5530 100644 --- a/full-node/db/sov-schema-db/src/lib.rs +++ b/full-node/db/sov-schema-db/src/lib.rs @@ -14,259 +14,28 @@ //! [`define_schema!`] macro to define the schema name, the types of key and value, and name of the //! column family. +#[cfg(feature = "std")] +mod db; +#[cfg(feature = "std")] +pub use db::DB; mod iterator; +#[cfg(feature = "std")] mod metrics; pub mod schema; -use std::collections::HashMap; -use std::path::Path; -use std::sync::Mutex; - -use anyhow::format_err; -use iterator::ScanDirection; -pub use iterator::{SchemaIterator, SeekKeyEncoder}; -use metrics::{ - SCHEMADB_BATCH_COMMIT_BYTES, SCHEMADB_BATCH_COMMIT_LATENCY_SECONDS, - SCHEMADB_BATCH_PUT_LATENCY_SECONDS, SCHEMADB_DELETES, SCHEMADB_GET_BYTES, - SCHEMADB_GET_LATENCY_SECONDS, SCHEMADB_PUT_BYTES, -}; -use rocksdb::ReadOptions; -pub use rocksdb::DEFAULT_COLUMN_FAMILY_NAME; -use thiserror::Error; -use tracing::info; +#[cfg(feature = "std")] +pub use iterator::SchemaIterator; +pub use iterator::SeekKeyEncoder; +use sov_rollup_interface::maybestd::collections::HashMap; +use sov_rollup_interface::maybestd::io; +use sov_rollup_interface::maybestd::sync::Mutex; +use sov_rollup_interface::maybestd::vec::Vec; pub use crate::schema::Schema; use crate::schema::{ColumnFamilyName, KeyCodec, ValueCodec}; -/// This DB is a schematized RocksDB wrapper where all data passed in and out are typed according to -/// [`Schema`]s. -#[derive(Debug)] -pub struct DB { - name: &'static str, // for logging - inner: rocksdb::DB, -} - -impl DB { - /// Opens a database backed by RocksDB, using the provided column family names and default - /// column family options. - pub fn open( - path: impl AsRef, - name: &'static str, - column_families: impl IntoIterator>, - db_opts: &rocksdb::Options, - ) -> anyhow::Result { - let db = DB::open_with_cfds( - db_opts, - path, - name, - column_families.into_iter().map(|cf_name| { - let mut cf_opts = rocksdb::Options::default(); - cf_opts.set_compression_type(rocksdb::DBCompressionType::Lz4); - rocksdb::ColumnFamilyDescriptor::new(cf_name, cf_opts) - }), - )?; - Ok(db) - } - - /// Open RocksDB with the provided column family descriptors. - /// This allows to configure options for each column family. 
- pub fn open_with_cfds( - db_opts: &rocksdb::Options, - path: impl AsRef, - name: &'static str, - cfds: impl IntoIterator, - ) -> anyhow::Result { - let inner = rocksdb::DB::open_cf_descriptors(db_opts, path, cfds)?; - Ok(Self::log_construct(name, inner)) - } - - /// Open db in readonly mode. This db is completely static, so any writes that occur on the primary - /// after it has been opened will not be visible to the readonly instance. - pub fn open_cf_readonly( - opts: &rocksdb::Options, - path: impl AsRef, - name: &'static str, - cfs: Vec, - ) -> anyhow::Result { - let error_if_log_file_exists = false; - let inner = rocksdb::DB::open_cf_for_read_only(opts, path, cfs, error_if_log_file_exists)?; - - Ok(Self::log_construct(name, inner)) - } - - /// Open db in secondary mode. A secondary db is does not support writes, but can be dynamically caught up - /// to the primary instance by a manual call. See - /// for more details. - pub fn open_cf_as_secondary>( - opts: &rocksdb::Options, - primary_path: P, - secondary_path: P, - name: &'static str, - cfs: Vec, - ) -> anyhow::Result { - let inner = rocksdb::DB::open_cf_as_secondary(opts, primary_path, secondary_path, cfs)?; - Ok(Self::log_construct(name, inner)) - } - - fn log_construct(name: &'static str, inner: rocksdb::DB) -> DB { - info!(rocksdb_name = name, "Opened RocksDB."); - DB { name, inner } - } - - /// Reads single record by key. - pub fn get( - &self, - schema_key: &impl KeyCodec, - ) -> anyhow::Result> { - let _timer = SCHEMADB_GET_LATENCY_SECONDS - .with_label_values(&[S::COLUMN_FAMILY_NAME]) - .start_timer(); - - let k = schema_key.encode_key()?; - let cf_handle = self.get_cf_handle(S::COLUMN_FAMILY_NAME)?; - - let result = self.inner.get_cf(cf_handle, k)?; - SCHEMADB_GET_BYTES - .with_label_values(&[S::COLUMN_FAMILY_NAME]) - .observe(result.as_ref().map_or(0.0, |v| v.len() as f64)); - - result - .map(|raw_value| >::decode_value(&raw_value)) - .transpose() - .map_err(|err| err.into()) - } - - /// Writes single record. - pub fn put( - &self, - key: &impl KeyCodec, - value: &impl ValueCodec, - ) -> anyhow::Result<()> { - // Not necessary to use a batch, but we'd like a central place to bump counters. - // Used in tests only anyway. - let batch = SchemaBatch::new(); - batch.put::(key, value)?; - self.write_schemas(batch) - } - - fn iter_with_direction( - &self, - opts: ReadOptions, - direction: ScanDirection, - ) -> anyhow::Result> { - let cf_handle = self.get_cf_handle(S::COLUMN_FAMILY_NAME)?; - Ok(SchemaIterator::new( - self.inner.raw_iterator_cf_opt(cf_handle, opts), - direction, - )) - } - - /// Returns a forward [`SchemaIterator`] on a certain schema with the default read options. - pub fn iter(&self) -> anyhow::Result> { - self.iter_with_direction::(Default::default(), ScanDirection::Forward) - } - - /// Returns a forward [`SchemaIterator`] on a certain schema with the provided read options. - pub fn iter_with_opts( - &self, - opts: ReadOptions, - ) -> anyhow::Result> { - self.iter_with_direction::(opts, ScanDirection::Forward) - } - - /// Returns a backward [`SchemaIterator`] on a certain schema with the default read options. - pub fn rev_iter(&self) -> anyhow::Result> { - self.iter_with_direction::(Default::default(), ScanDirection::Backward) - } - - /// Returns a backward [`SchemaIterator`] on a certain schema with the provided read options. 
- pub fn rev_iter_with_opts( - &self, - opts: ReadOptions, - ) -> anyhow::Result> { - self.iter_with_direction::(opts, ScanDirection::Backward) - } - - /// Writes a group of records wrapped in a [`SchemaBatch`]. - pub fn write_schemas(&self, batch: SchemaBatch) -> anyhow::Result<()> { - let _timer = SCHEMADB_BATCH_COMMIT_LATENCY_SECONDS - .with_label_values(&[self.name]) - .start_timer(); - let rows_locked = batch.rows.lock().expect("Lock must not be poisoned"); - - let mut db_batch = rocksdb::WriteBatch::default(); - for (cf_name, rows) in rows_locked.iter() { - let cf_handle = self.get_cf_handle(cf_name)?; - for write_op in rows { - match write_op { - WriteOp::Value { key, value } => db_batch.put_cf(cf_handle, key, value), - WriteOp::Deletion { key } => db_batch.delete_cf(cf_handle, key), - } - } - } - let serialized_size = db_batch.size_in_bytes(); - - self.inner.write_opt(db_batch, &default_write_options())?; - - // Bump counters only after DB write succeeds. - for (cf_name, rows) in rows_locked.iter() { - for write_op in rows { - match write_op { - WriteOp::Value { key, value } => { - SCHEMADB_PUT_BYTES - .with_label_values(&[cf_name]) - .observe((key.len() + value.len()) as f64); - } - WriteOp::Deletion { key: _ } => { - SCHEMADB_DELETES.with_label_values(&[cf_name]).inc(); - } - } - } - } - SCHEMADB_BATCH_COMMIT_BYTES - .with_label_values(&[self.name]) - .observe(serialized_size as f64); - - Ok(()) - } - - fn get_cf_handle(&self, cf_name: &str) -> anyhow::Result<&rocksdb::ColumnFamily> { - self.inner.cf_handle(cf_name).ok_or_else(|| { - format_err!( - "DB::cf_handle not found for column family name: {}", - cf_name - ) - }) - } - - /// Flushes [MemTable](https://github.com/facebook/rocksdb/wiki/MemTable) data. - /// This is only used for testing `get_approximate_sizes_cf` in unit tests. - pub fn flush_cf(&self, cf_name: &str) -> anyhow::Result<()> { - Ok(self.inner.flush_cf(self.get_cf_handle(cf_name)?)?) - } - - /// Returns the current RocksDB property value for the provided column family name - /// and property name. - pub fn get_property(&self, cf_name: &str, property_name: &str) -> anyhow::Result { - self.inner - .property_int_value_cf(self.get_cf_handle(cf_name)?, property_name)? - .ok_or_else(|| { - format_err!( - "Unable to get property \"{}\" of column family \"{}\".", - property_name, - cf_name, - ) - }) - } - - /// Creates new physical DB checkpoint in directory specified by `path`. - pub fn create_checkpoint>(&self, path: P) -> anyhow::Result<()> { - rocksdb::checkpoint::Checkpoint::new(&self.inner)?.create_checkpoint(path)?; - Ok(()) - } -} - #[derive(Debug)] +#[cfg_attr(not(feature = "std"), allow(dead_code))] enum WriteOp { Value { key: Vec, value: Vec }, Deletion { key: Vec }, @@ -292,11 +61,15 @@ impl SchemaBatch { key: &impl KeyCodec, value: &impl ValueCodec, ) -> anyhow::Result<()> { - let _timer = SCHEMADB_BATCH_PUT_LATENCY_SECONDS + #[cfg(feature = "std")] + let _timer = metrics::SCHEMADB_BATCH_PUT_LATENCY_SECONDS .with_label_values(&["unknown"]) .start_timer(); + let key = key.encode_key()?; let value = value.encode_value()?; + + #[cfg(feature = "std")] self.rows .lock() .expect("Lock must not be poisoned") @@ -304,12 +77,21 @@ impl SchemaBatch { .or_default() .push(WriteOp::Value { key, value }); + #[cfg(not(feature = "std"))] + self.rows + .lock() + .entry(S::COLUMN_FAMILY_NAME) + .or_default() + .push(WriteOp::Value { key, value }); + Ok(()) } /// Adds a delete operation to the batch. 
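The duplicated `put` bodies above are deliberate: under `std` the `Mutex` is `std::sync::Mutex`, whose `lock()` returns a `Result` because of poisoning, while the `no-std` fallback's `lock()` hands back the guard directly, so there is no `expect` to call. Reduced to a sketch (the helper is hypothetical, shown only to isolate the difference):

    use sov_rollup_interface::maybestd::sync::Mutex;

    #[cfg(feature = "std")]
    fn rows_guard<T>(m: &Mutex<T>) -> impl core::ops::DerefMut<Target = T> + '_ {
        m.lock().expect("Lock must not be poisoned") // poisoning is an std-only concept
    }

    #[cfg(not(feature = "std"))]
    fn rows_guard<T>(m: &Mutex<T>) -> impl core::ops::DerefMut<Target = T> + '_ {
        m.lock() // the fallback mutex cannot be poisoned; no Result to unwrap
    }
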
pub fn delete(&self, key: &impl KeyCodec) -> anyhow::Result<()> { let key = key.encode_key()?; + + #[cfg(feature = "std")] self.rows .lock() .expect("Lock must not be poisoned") @@ -317,33 +99,63 @@ impl SchemaBatch { .or_default() .push(WriteOp::Deletion { key }); + #[cfg(not(feature = "std"))] + self.rows + .lock() + .entry(S::COLUMN_FAMILY_NAME) + .or_default() + .push(WriteOp::Deletion { key }); + Ok(()) } } /// An error that occurred during (de)serialization of a [`Schema`]'s keys or /// values. -#[derive(Error, Debug)] +#[derive(Debug)] +#[cfg_attr(feature = "std", derive(thiserror::Error))] pub enum CodecError { /// Unable to deserialize a key because it has a different length than /// expected. - #[error("Invalid key length. Expected {expected:}, got {got:}")] + #[cfg_attr( + feature = "std", + error("Invalid key length. Expected {expected:}, got {got:}") + )] #[allow(missing_docs)] // The fields' names are self-explanatory. InvalidKeyLength { expected: usize, got: usize }, /// Some other error occurred when (de)serializing a key or value. Inspect /// the inner [`anyhow::Error`] for more details. - #[error(transparent)] - Wrapped(#[from] anyhow::Error), + #[cfg_attr(feature = "std", error(transparent))] + Wrapped(#[cfg_attr(feature = "std", from)] anyhow::Error), /// I/O error. - #[error(transparent)] - Io(#[from] std::io::Error), + #[cfg_attr(feature = "std", error(transparent))] + Io(#[cfg_attr(feature = "std", from)] io::Error), } -/// For now we always use synchronous writes. This makes sure that once the operation returns -/// `Ok(())` the data is persisted even if the machine crashes. In the future we might consider -/// selectively turning this off for some non-critical writes to improve performance. -fn default_write_options() -> rocksdb::WriteOptions { - let mut opts = rocksdb::WriteOptions::default(); - opts.set_sync(true); - opts +#[cfg(not(feature = "std"))] +impl core::fmt::Display for CodecError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "{:?}", self) + } +} + +#[cfg(not(feature = "std"))] +impl From for anyhow::Error { + fn from(e: CodecError) -> Self { + anyhow::Error::msg(e) + } +} + +#[cfg(not(feature = "std"))] +impl From for CodecError { + fn from(e: anyhow::Error) -> Self { + CodecError::Wrapped(e) + } +} + +#[cfg(not(feature = "std"))] +impl From for CodecError { + fn from(e: io::Error) -> Self { + CodecError::Io(e) + } } From 58395685f718439ff0f8904d7440f763cd0e7a34 Mon Sep 17 00:00:00 2001 From: Victor Lopez Date: Fri, 20 Oct 2023 11:17:03 +0200 Subject: [PATCH 06/28] restore test-data --- .../genesis/demo-tests/accounts.json | 3 +++ .../test-data/genesis/demo-tests/bank.json | 17 +++++++++++++ .../genesis/demo-tests/chain_state.json | 7 ++++++ .../test-data/genesis/demo-tests/evm.json | 25 +++++++++++++++++++ .../test-data/genesis/demo-tests/nft.json | 1 + .../demo-tests/sequencer_registry.json | 9 +++++++ .../genesis/demo-tests/value_setter.json | 3 +++ .../genesis/integration-tests/accounts.json | 3 +++ .../genesis/integration-tests/bank.json | 17 +++++++++++++ .../integration-tests/chain_state.json | 7 ++++++ .../genesis/integration-tests/evm.json | 25 +++++++++++++++++++ .../genesis/integration-tests/nft.json | 1 + .../integration-tests/sequencer_registry.json | 9 +++++++ .../integration-tests/value_setter.json | 3 +++ .../test-data/keys/minter_private_key.json | 9 +++++++ .../keys/token_deployer_private_key.json | 9 +++++++ .../test-data/keys/tx_signer_private_key.json | 9 +++++++ 
examples/test-data/requests/burn.json | 8 ++++++ examples/test-data/requests/create_token.json | 12 +++++++++ examples/test-data/requests/mint.json | 9 +++++++ .../requests/nft/create_collection.json | 6 +++++ .../requests/nft/freeze_collection.json | 5 ++++ examples/test-data/requests/nft/mint_nft.json | 9 +++++++ .../test-data/requests/nft/transfer_nft.json | 7 ++++++ .../requests/nft/update_token_uri.json | 8 ++++++ .../requests/register_sequencer.json | 8 ++++++ examples/test-data/requests/transfer.json | 9 +++++++ 27 files changed, 238 insertions(+) create mode 100644 examples/test-data/genesis/demo-tests/accounts.json create mode 100644 examples/test-data/genesis/demo-tests/bank.json create mode 100644 examples/test-data/genesis/demo-tests/chain_state.json create mode 100644 examples/test-data/genesis/demo-tests/evm.json create mode 100644 examples/test-data/genesis/demo-tests/nft.json create mode 100644 examples/test-data/genesis/demo-tests/sequencer_registry.json create mode 100644 examples/test-data/genesis/demo-tests/value_setter.json create mode 100644 examples/test-data/genesis/integration-tests/accounts.json create mode 100644 examples/test-data/genesis/integration-tests/bank.json create mode 100644 examples/test-data/genesis/integration-tests/chain_state.json create mode 100644 examples/test-data/genesis/integration-tests/evm.json create mode 100644 examples/test-data/genesis/integration-tests/nft.json create mode 100644 examples/test-data/genesis/integration-tests/sequencer_registry.json create mode 100644 examples/test-data/genesis/integration-tests/value_setter.json create mode 100644 examples/test-data/keys/minter_private_key.json create mode 100644 examples/test-data/keys/token_deployer_private_key.json create mode 100644 examples/test-data/keys/tx_signer_private_key.json create mode 100644 examples/test-data/requests/burn.json create mode 100644 examples/test-data/requests/create_token.json create mode 100644 examples/test-data/requests/mint.json create mode 100644 examples/test-data/requests/nft/create_collection.json create mode 100644 examples/test-data/requests/nft/freeze_collection.json create mode 100644 examples/test-data/requests/nft/mint_nft.json create mode 100644 examples/test-data/requests/nft/transfer_nft.json create mode 100644 examples/test-data/requests/nft/update_token_uri.json create mode 100644 examples/test-data/requests/register_sequencer.json create mode 100644 examples/test-data/requests/transfer.json diff --git a/examples/test-data/genesis/demo-tests/accounts.json b/examples/test-data/genesis/demo-tests/accounts.json new file mode 100644 index 000000000..f65b078f6 --- /dev/null +++ b/examples/test-data/genesis/demo-tests/accounts.json @@ -0,0 +1,3 @@ +{ + "pub_keys": [] +} diff --git a/examples/test-data/genesis/demo-tests/bank.json b/examples/test-data/genesis/demo-tests/bank.json new file mode 100644 index 000000000..d03fcc10f --- /dev/null +++ b/examples/test-data/genesis/demo-tests/bank.json @@ -0,0 +1,17 @@ +{ + "tokens": [ + { + "token_name": "sov-demo-token", + "address_and_balances": [ + [ + "sov1l6n2cku82yfqld30lanm2nfw43n2auc8clw7r5u5m6s7p8jrm4zqrr8r94", + 100000000 + ] + ], + "authorized_minters": [ + "sov1l6n2cku82yfqld30lanm2nfw43n2auc8clw7r5u5m6s7p8jrm4zqrr8r94" + ], + "salt": 0 + } + ] +} diff --git a/examples/test-data/genesis/demo-tests/chain_state.json b/examples/test-data/genesis/demo-tests/chain_state.json new file mode 100644 index 000000000..d439a6854 --- /dev/null +++ b/examples/test-data/genesis/demo-tests/chain_state.json 
@@ -0,0 +1,7 @@ +{ + "initial_slot_height": 0, + "current_time": { + "secs": 0, + "nanos": 0 + } +} diff --git a/examples/test-data/genesis/demo-tests/evm.json b/examples/test-data/genesis/demo-tests/evm.json new file mode 100644 index 000000000..7f5b2b108 --- /dev/null +++ b/examples/test-data/genesis/demo-tests/evm.json @@ -0,0 +1,25 @@ +{ + "data": [ + { + "address": "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266", + "balance": "0xffffffffffffffff", + "code_hash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", + "code": "0x", + "nonce": 0 + } + ], + "chain_id": 1, + "limit_contract_code_size": null, + "spec": { + "0": "SHANGHAI" + }, + "coinbase": "0x0000000000000000000000000000000000000000", + "starting_base_fee": 7, + "block_gas_limit": 30000000, + "genesis_timestamp": 0, + "block_timestamp_delta": 1, + "base_fee_params": { + "max_change_denominator": 8, + "elasticity_multiplier": 2 + } +} diff --git a/examples/test-data/genesis/demo-tests/nft.json b/examples/test-data/genesis/demo-tests/nft.json new file mode 100644 index 000000000..0967ef424 --- /dev/null +++ b/examples/test-data/genesis/demo-tests/nft.json @@ -0,0 +1 @@ +{} diff --git a/examples/test-data/genesis/demo-tests/sequencer_registry.json b/examples/test-data/genesis/demo-tests/sequencer_registry.json new file mode 100644 index 000000000..29a5c1fac --- /dev/null +++ b/examples/test-data/genesis/demo-tests/sequencer_registry.json @@ -0,0 +1,9 @@ +{ + "seq_rollup_address": "sov1l6n2cku82yfqld30lanm2nfw43n2auc8clw7r5u5m6s7p8jrm4zqrr8r94", + "seq_da_address": "celestia1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqzf30as", + "coins_to_lock": { + "amount": 50, + "token_address": "sov1zsnx7n2wjvtkr0ttscfgt06pjca3v2e6stxeu49qwynavmk7a8xqlxkkjp" + }, + "is_preferred_sequencer": true +} diff --git a/examples/test-data/genesis/demo-tests/value_setter.json b/examples/test-data/genesis/demo-tests/value_setter.json new file mode 100644 index 000000000..4e209004a --- /dev/null +++ b/examples/test-data/genesis/demo-tests/value_setter.json @@ -0,0 +1,3 @@ +{ + "admin": "sov1l6n2cku82yfqld30lanm2nfw43n2auc8clw7r5u5m6s7p8jrm4zqrr8r94" +} diff --git a/examples/test-data/genesis/integration-tests/accounts.json b/examples/test-data/genesis/integration-tests/accounts.json new file mode 100644 index 000000000..f65b078f6 --- /dev/null +++ b/examples/test-data/genesis/integration-tests/accounts.json @@ -0,0 +1,3 @@ +{ + "pub_keys": [] +} diff --git a/examples/test-data/genesis/integration-tests/bank.json b/examples/test-data/genesis/integration-tests/bank.json new file mode 100644 index 000000000..d03fcc10f --- /dev/null +++ b/examples/test-data/genesis/integration-tests/bank.json @@ -0,0 +1,17 @@ +{ + "tokens": [ + { + "token_name": "sov-demo-token", + "address_and_balances": [ + [ + "sov1l6n2cku82yfqld30lanm2nfw43n2auc8clw7r5u5m6s7p8jrm4zqrr8r94", + 100000000 + ] + ], + "authorized_minters": [ + "sov1l6n2cku82yfqld30lanm2nfw43n2auc8clw7r5u5m6s7p8jrm4zqrr8r94" + ], + "salt": 0 + } + ] +} diff --git a/examples/test-data/genesis/integration-tests/chain_state.json b/examples/test-data/genesis/integration-tests/chain_state.json new file mode 100644 index 000000000..d439a6854 --- /dev/null +++ b/examples/test-data/genesis/integration-tests/chain_state.json @@ -0,0 +1,7 @@ +{ + "initial_slot_height": 0, + "current_time": { + "secs": 0, + "nanos": 0 + } +} diff --git a/examples/test-data/genesis/integration-tests/evm.json b/examples/test-data/genesis/integration-tests/evm.json new file mode 100644 index 000000000..7f5b2b108 --- /dev/null +++ 
b/examples/test-data/genesis/integration-tests/evm.json @@ -0,0 +1,25 @@ +{ + "data": [ + { + "address": "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266", + "balance": "0xffffffffffffffff", + "code_hash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", + "code": "0x", + "nonce": 0 + } + ], + "chain_id": 1, + "limit_contract_code_size": null, + "spec": { + "0": "SHANGHAI" + }, + "coinbase": "0x0000000000000000000000000000000000000000", + "starting_base_fee": 7, + "block_gas_limit": 30000000, + "genesis_timestamp": 0, + "block_timestamp_delta": 1, + "base_fee_params": { + "max_change_denominator": 8, + "elasticity_multiplier": 2 + } +} diff --git a/examples/test-data/genesis/integration-tests/nft.json b/examples/test-data/genesis/integration-tests/nft.json new file mode 100644 index 000000000..0967ef424 --- /dev/null +++ b/examples/test-data/genesis/integration-tests/nft.json @@ -0,0 +1 @@ +{} diff --git a/examples/test-data/genesis/integration-tests/sequencer_registry.json b/examples/test-data/genesis/integration-tests/sequencer_registry.json new file mode 100644 index 000000000..fe2696ae5 --- /dev/null +++ b/examples/test-data/genesis/integration-tests/sequencer_registry.json @@ -0,0 +1,9 @@ +{ + "seq_rollup_address": "sov1l6n2cku82yfqld30lanm2nfw43n2auc8clw7r5u5m6s7p8jrm4zqrr8r94", + "seq_da_address": "0000000000000000000000000000000000000000000000000000000000000000", + "coins_to_lock": { + "amount": 50, + "token_address": "sov1zsnx7n2wjvtkr0ttscfgt06pjca3v2e6stxeu49qwynavmk7a8xqlxkkjp" + }, + "is_preferred_sequencer": true +} diff --git a/examples/test-data/genesis/integration-tests/value_setter.json b/examples/test-data/genesis/integration-tests/value_setter.json new file mode 100644 index 000000000..4e209004a --- /dev/null +++ b/examples/test-data/genesis/integration-tests/value_setter.json @@ -0,0 +1,3 @@ +{ + "admin": "sov1l6n2cku82yfqld30lanm2nfw43n2auc8clw7r5u5m6s7p8jrm4zqrr8r94" +} diff --git a/examples/test-data/keys/minter_private_key.json b/examples/test-data/keys/minter_private_key.json new file mode 100644 index 000000000..ab9e24319 --- /dev/null +++ b/examples/test-data/keys/minter_private_key.json @@ -0,0 +1,9 @@ +{ + "private_key": { + "key_pair": [ + 35, 110, 128, 203, 34, 44, 78, 208, 67, 27, 9, 59, 58, 197, 62, 106, 167, + 162, 39, 63, 225, 244, 53, 28, 211, 84, 152, 154, 130, 52, 50, 162 + ] + }, + "address": "sov15vspj48hpttzyvxu8kzq5klhvaczcpyxn6z6k0hwpwtzs4a6wkvqwr57gc" +} diff --git a/examples/test-data/keys/token_deployer_private_key.json b/examples/test-data/keys/token_deployer_private_key.json new file mode 100644 index 000000000..b09514aa4 --- /dev/null +++ b/examples/test-data/keys/token_deployer_private_key.json @@ -0,0 +1,9 @@ +{ + "private_key": { + "key_pair": [ + 117, 251, 248, 217, 135, 70, 194, 105, 46, 80, 41, 66, 185, 56, 200, 35, + 121, 253, 9, 234, 159, 91, 96, 212, 211, 158, 135, 225, 180, 36, 104, 253 + ] + }, + "address": "sov1l6n2cku82yfqld30lanm2nfw43n2auc8clw7r5u5m6s7p8jrm4zqrr8r94" +} diff --git a/examples/test-data/keys/tx_signer_private_key.json b/examples/test-data/keys/tx_signer_private_key.json new file mode 100644 index 000000000..231d87a28 --- /dev/null +++ b/examples/test-data/keys/tx_signer_private_key.json @@ -0,0 +1,9 @@ +{ + "private_key": { + "key_pair": [ + 39, 195, 119, 77, 82, 231, 30, 162, 102, 169, 197, 37, 108, 217, 139, 154, + 230, 126, 98, 242, 174, 94, 211, 74, 102, 141, 184, 234, 168, 62, 27, 172 + ] + }, + "address": "sov1dnhqk4mdsj2kwv4xymt8a624xuahfx8906j9usdkx7ensfghndkq8p33f7" +} diff --git 
a/examples/test-data/requests/burn.json b/examples/test-data/requests/burn.json new file mode 100644 index 000000000..991b5aaa0 --- /dev/null +++ b/examples/test-data/requests/burn.json @@ -0,0 +1,8 @@ +{ + "Burn": { + "coins": { + "amount": 300, + "token_address": "sov16m8fxq0x5wc5aw75fx9rus2p7g2l22zf4re72c3m058g77cdjemsavg2ft" + } + } +} diff --git a/examples/test-data/requests/create_token.json b/examples/test-data/requests/create_token.json new file mode 100644 index 000000000..7b792ac5f --- /dev/null +++ b/examples/test-data/requests/create_token.json @@ -0,0 +1,12 @@ +{ + "CreateToken": { + "salt": 11, + "token_name": "sov-test-token", + "initial_balance": 1000, + "minter_address": "sov15vspj48hpttzyvxu8kzq5klhvaczcpyxn6z6k0hwpwtzs4a6wkvqwr57gc", + "authorized_minters": [ + "sov1l6n2cku82yfqld30lanm2nfw43n2auc8clw7r5u5m6s7p8jrm4zqrr8r94", + "sov15vspj48hpttzyvxu8kzq5klhvaczcpyxn6z6k0hwpwtzs4a6wkvqwr57gc" + ] + } +} diff --git a/examples/test-data/requests/mint.json b/examples/test-data/requests/mint.json new file mode 100644 index 000000000..aeb6e9236 --- /dev/null +++ b/examples/test-data/requests/mint.json @@ -0,0 +1,9 @@ +{ + "Mint": { + "coins": { + "amount": 3000, + "token_address": "sov1zdwj8thgev2u3yyrrlekmvtsz4av4tp3m7dm5mx5peejnesga27svq9m72" + }, + "minter_address": "sov15vspj48hpttzyvxu8kzq5klhvaczcpyxn6z6k0hwpwtzs4a6wkvqwr57gc" + } +} diff --git a/examples/test-data/requests/nft/create_collection.json b/examples/test-data/requests/nft/create_collection.json new file mode 100644 index 000000000..b51a9e409 --- /dev/null +++ b/examples/test-data/requests/nft/create_collection.json @@ -0,0 +1,6 @@ +{ + "CreateCollection": { + "name": "Test Collection", + "collection_uri": "https://foo.bar/test_collection" + } +} diff --git a/examples/test-data/requests/nft/freeze_collection.json b/examples/test-data/requests/nft/freeze_collection.json new file mode 100644 index 000000000..bd6715c49 --- /dev/null +++ b/examples/test-data/requests/nft/freeze_collection.json @@ -0,0 +1,5 @@ +{ + "FreezeCollection": { + "collection_name": "Test Collection" + } +} diff --git a/examples/test-data/requests/nft/mint_nft.json b/examples/test-data/requests/nft/mint_nft.json new file mode 100644 index 000000000..da4674dd6 --- /dev/null +++ b/examples/test-data/requests/nft/mint_nft.json @@ -0,0 +1,9 @@ +{ + "MintNft": { + "collection_name": "Test Collection", + "token_uri": "https://foo.bar/test_collection/nft/42", + "token_id": 42, + "owner": "sov15vspj48hpttzyvxu8kzq5klhvaczcpyxn6z6k0hwpwtzs4a6wkvqwr57gc", + "frozen": false + } +} diff --git a/examples/test-data/requests/nft/transfer_nft.json b/examples/test-data/requests/nft/transfer_nft.json new file mode 100644 index 000000000..9f7c24b58 --- /dev/null +++ b/examples/test-data/requests/nft/transfer_nft.json @@ -0,0 +1,7 @@ +{ + "TransferNft": { + "collection_address": "sov1j2e3dh76nmuw4gctrqduh0wzqdny8c62z36r2q3883rknw3ky3vsk9g02a", + "token_id": 42, + "to": "sov14fs0gdya9tgwkpglx3gsg3h37r9f5e2pz6qyk87n3ywqch5fqe5s0xyxea" + } +} diff --git a/examples/test-data/requests/nft/update_token_uri.json b/examples/test-data/requests/nft/update_token_uri.json new file mode 100644 index 000000000..f4bedf769 --- /dev/null +++ b/examples/test-data/requests/nft/update_token_uri.json @@ -0,0 +1,8 @@ +{ + "UpdateNft": { + "collection_address": "sov1j2e3dh76nmuw4gctrqduh0wzqdny8c62z36r2q3883rknw3ky3vsk9g02a", + "token_id": 42, + "token_uri": "https://foo.bar/test_collection/nft_new/42", + "frozen": null + } +} diff --git 
a/examples/test-data/requests/register_sequencer.json b/examples/test-data/requests/register_sequencer.json new file mode 100644 index 000000000..1b8f0fa9c --- /dev/null +++ b/examples/test-data/requests/register_sequencer.json @@ -0,0 +1,8 @@ +{ + "Register": { + "da_address": [ + 13, 5, 25, 31, 28, 30, 5, 27, 20, 6, 29, 10, 14, 29, 20, 12, 22, 13, 19, + 1, 0, 11, 9, 15, 23, 13, 14, 1, 9, 27, 9, 14 + ] + } +} diff --git a/examples/test-data/requests/transfer.json b/examples/test-data/requests/transfer.json new file mode 100644 index 000000000..dac1d8638 --- /dev/null +++ b/examples/test-data/requests/transfer.json @@ -0,0 +1,9 @@ +{ + "Transfer": { + "to": "sov1l6n2cku82yfqld30lanm2nfw43n2auc8clw7r5u5m6s7p8jrm4zqklh0qh", + "coins": { + "amount": 200, + "token_address": "sov1zdwj8thgev2u3yyrrlekmvtsz4av4tp3m7dm5mx5peejnesga27svq9m72" + } + } +} From dd65550dad0b292965076e8087e3da95f9b8e729 Mon Sep 17 00:00:00 2001 From: Victor Lopez Date: Fri, 20 Oct 2023 11:43:57 +0200 Subject: [PATCH 07/28] fix format on sov-schema-db --- full-node/db/sov-schema-db/src/db.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/full-node/db/sov-schema-db/src/db.rs b/full-node/db/sov-schema-db/src/db.rs index c7c2dbb07..c245337c1 100644 --- a/full-node/db/sov-schema-db/src/db.rs +++ b/full-node/db/sov-schema-db/src/db.rs @@ -9,8 +9,7 @@ use crate::metrics::{ SCHEMADB_BATCH_COMMIT_BYTES, SCHEMADB_BATCH_COMMIT_LATENCY_SECONDS, SCHEMADB_DELETES, SCHEMADB_GET_BYTES, SCHEMADB_GET_LATENCY_SECONDS, SCHEMADB_PUT_BYTES, }; -use crate::schema::Schema; -use crate::schema::{ColumnFamilyName, KeyCodec, ValueCodec}; +use crate::schema::{ColumnFamilyName, KeyCodec, Schema, ValueCodec}; use crate::{SchemaBatch, WriteOp}; /// This DB is a schematized RocksDB wrapper where all data passed in and out are typed according to From 4018ec195f75e7038d694015618fee6eb35fba02 Mon Sep 17 00:00:00 2001 From: Victor Lopez Date: Fri, 20 Oct 2023 11:49:39 +0200 Subject: [PATCH 08/28] add no-std directive to sov-schema-db --- full-node/db/sov-schema-db/src/iterator.rs | 2 ++ full-node/db/sov-schema-db/src/lib.rs | 1 + full-node/db/sov-schema-db/src/schema.rs | 4 +++- 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/full-node/db/sov-schema-db/src/iterator.rs b/full-node/db/sov-schema-db/src/iterator.rs index 3e0e88c64..f7cd57ff5 100644 --- a/full-node/db/sov-schema-db/src/iterator.rs +++ b/full-node/db/sov-schema-db/src/iterator.rs @@ -1,3 +1,5 @@ +use sov_rollup_interface::maybestd::vec::Vec; + #[cfg(feature = "std")] use crate::metrics::{SCHEMADB_ITER_BYTES, SCHEMADB_ITER_LATENCY_SECONDS}; use crate::schema::Schema; diff --git a/full-node/db/sov-schema-db/src/lib.rs b/full-node/db/sov-schema-db/src/lib.rs index afe4d5530..e3280649d 100644 --- a/full-node/db/sov-schema-db/src/lib.rs +++ b/full-node/db/sov-schema-db/src/lib.rs @@ -3,6 +3,7 @@ #![forbid(unsafe_code)] #![deny(missing_docs)] +#![cfg_attr(not(feature = "std"), no_std)] //! This library implements a schematized DB on top of [RocksDB](https://rocksdb.org/). It makes //! sure all data passed in and out are structured according to predefined schemas and prevents diff --git a/full-node/db/sov-schema-db/src/schema.rs b/full-node/db/sov-schema-db/src/schema.rs index 8e0b732e6..3d28524a7 100644 --- a/full-node/db/sov-schema-db/src/schema.rs +++ b/full-node/db/sov-schema-db/src/schema.rs @@ -5,7 +5,9 @@ //! A type-safe interface over [`DB`](crate::DB) column families. 
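The crate-level recipe applied here is the same three moves each time: a conditional `no_std` attribute at the root, `core` instead of `std` for items that never needed allocation, and the `maybestd` facade for items that do. A minimal sketch of a gated crate root (the function is illustrative only):

    #![cfg_attr(not(feature = "std"), no_std)]

    use core::fmt::Debug; // `core` is always available, even without `std`
    use sov_rollup_interface::maybestd::vec::Vec; // `Vec` via the facade

    /// Dump a debuggable key to owned bytes.
    pub fn to_bytes<K: Debug + AsRef<[u8]>>(key: &K) -> Vec<u8> {
        key.as_ref().to_vec()
    }
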
-use std::fmt::Debug; +use core::fmt::Debug; + +use sov_rollup_interface::maybestd::vec::Vec; use crate::CodecError; From 90047cbc92741ec6336d09eb1b444aff52641f11 Mon Sep 17 00:00:00 2001 From: Victor Lopez Date: Fri, 20 Oct 2023 13:44:42 +0200 Subject: [PATCH 09/28] add `no-std` to `sov-db` --- Cargo.toml | 2 +- docker/credentials/bridge-0.addr | 1 + docker/credentials/bridge-0.key | 9 ++++ full-node/db/sov-db/Cargo.toml | 24 +++++++-- full-node/db/sov-db/src/lib.rs | 6 +++ full-node/db/sov-db/src/schema/tables.rs | 69 ++++++++++++++++-------- full-node/db/sov-db/src/schema/types.rs | 25 ++++++--- 7 files changed, 102 insertions(+), 34 deletions(-) create mode 100644 docker/credentials/bridge-0.addr create mode 100644 docker/credentials/bridge-0.key diff --git a/Cargo.toml b/Cargo.toml index 5bc459b93..bf3e2e80c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -71,7 +71,7 @@ borsh = { version = "0.10.3", default-features = false, features = ["rc", "bytes # https://github.com/Sovereign-Labs/sovereign-sdk/issues/283 bincode = "1.3.3" bcs = "0.1.5" -byteorder = "1.5.0" +byteorder = { version = "1.5.0", default-features = false } bytes = { version = "1.2.1", default-features = false } digest = { version = "0.10.6", default-features = false, features = ["alloc"] } futures = "0.3" diff --git a/docker/credentials/bridge-0.addr b/docker/credentials/bridge-0.addr new file mode 100644 index 000000000..251e6a8cf --- /dev/null +++ b/docker/credentials/bridge-0.addr @@ -0,0 +1 @@ +celestia1a68m2l85zn5xh0l07clk4rfvnezhywc53g8x7s diff --git a/docker/credentials/bridge-0.key b/docker/credentials/bridge-0.key new file mode 100644 index 000000000..fda19e624 --- /dev/null +++ b/docker/credentials/bridge-0.key @@ -0,0 +1,9 @@ +-----BEGIN TENDERMINT PRIVATE KEY----- +kdf: bcrypt +salt: 68B0092F4FC5386C20DA96ECEE1BFE09 +type: secp256k1 + +JEOTdvPjs/4G+Jhz0hyNgdN5CgOCCcf5zvECY6zp+AN6IT6rTW0xbGqgFqbX6Yyi +NUV8e1fo9zhoytjrHjcCgHRnGiBGFQf1Ld4sBzE= +=TmEg +-----END TENDERMINT PRIVATE KEY----- diff --git a/full-node/db/sov-db/Cargo.toml b/full-node/db/sov-db/Cargo.toml index 8a4a653dc..73b08eff0 100644 --- a/full-node/db/sov-db/Cargo.toml +++ b/full-node/db/sov-db/Cargo.toml @@ -22,15 +22,15 @@ sov-rollup-interface = { path = "../../../rollup-interface", version = "0.2", fe # External anyhow = { workspace = true } arbitrary = { workspace = true, optional = true } +bincode = { workspace = true, optional = true } byteorder = { workspace = true } borsh = { workspace = true } proptest = { workspace = true, optional = true } proptest-derive = { workspace = true, optional = true } -serde = { workspace = true, features = ["derive"] } +rocksdb = { workspace = true, optional = true } +serde = { workspace = true } tempfile = { workspace = true, optional = true } -rocksdb = { workspace = true } -bincode = { workspace = true } -tokio = { workspace = true } +tokio = { workspace = true, optional = true } [dev-dependencies] @@ -38,9 +38,23 @@ tempfile = { workspace = true } [features] +default = ["std"] arbitrary = [ "dep:arbitrary", "dep:proptest", "dep:proptest-derive", - "dep:tempfile" + "dep:tempfile", + "std" ] +std = [ + "anyhow/default", + "bincode", + "byteorder/default", + "borsh/default", + "proptest?/default", + "rocksdb", + "serde/default", + "sov-schema-db/default", + "sov-rollup-interface/default", +] +tokio = ["dep:tokio", "std"] diff --git a/full-node/db/sov-db/src/lib.rs b/full-node/db/sov-db/src/lib.rs index b1e99aec4..a88520f56 100644 --- a/full-node/db/sov-db/src/lib.rs +++ b/full-node/db/sov-db/src/lib.rs @@ -4,22 
+4,28 @@ //! - DB "Table" definitions can be found in the [`schema`] module //! - Types and traits for storing state data can be found in the [`state_db`] module //! - The default db configuration is generated in the [`rocks_db_config`] module + #![forbid(unsafe_code)] #![deny(missing_docs)] +#![cfg_attr(not(feature = "std"), no_std)] /// Implements a wrapper around RocksDB meant for storing rollup history ("the ledger"). /// This wrapper implements helper traits for writing blocks to the ledger, and for /// serving historical data via RPC +#[cfg(all(feature = "std", feature = "tokio"))] pub mod ledger_db; /// Implements helpers for configuring RocksDB. +#[cfg(feature = "std")] pub mod rocks_db_config; /// Defines the tables used by the Sovereign SDK. pub mod schema; /// Implements a wrapper around [RocksDB](https://rocksdb.org/) meant for storing rollup state. /// This is primarily used as the backing store for the [JMT(JellyfishMerkleTree)](https://docs.rs/jmt/latest/jmt/). +#[cfg(feature = "std")] pub mod state_db; /// Implements a wrapper around RocksDB meant for storing state only accessible /// outside of the zkVM execution environment, as this data is not included in /// the JMT and does not contribute to proofs of execution. +#[cfg(feature = "std")] pub mod native_db; diff --git a/full-node/db/sov-db/src/schema/tables.rs b/full-node/db/sov-db/src/schema/tables.rs index 6f267d971..1f1fd1183 100644 --- a/full-node/db/sov-db/src/schema/tables.rs +++ b/full-node/db/sov-db/src/schema/tables.rs @@ -25,23 +25,27 @@ //! Module Accessory State Table: //! - `(ModuleAddress, Key) -> Value` -use borsh::{maybestd, BorshDeserialize, BorshSerialize}; -use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; +use borsh::{BorshDeserialize, BorshSerialize}; use jmt::storage::{Node, NodeKey}; +#[cfg(feature = "std")] use jmt::Version; -use sov_rollup_interface::stf::{Event, EventKey}; +use sov_rollup_interface::maybestd::vec::Vec; +use sov_rollup_interface::stf::EventKey; use sov_schema_db::schema::{KeyDecoder, KeyEncoder, ValueCodec}; -use sov_schema_db::{CodecError, SeekKeyEncoder}; +use sov_schema_db::CodecError; use super::types::{ - AccessoryKey, AccessoryStateValue, BatchNumber, DbHash, EventNumber, JmtValue, SlotNumber, - StateKey, StoredBatch, StoredSlot, StoredTransaction, TxNumber, + AccessoryKey, AccessoryStateValue, BatchNumber, DbHash, EventNumber, SlotNumber, StateKey, + TxNumber, }; +#[cfg(feature = "std")] +use super::types::{JmtValue, StoredBatch, StoredSlot, StoredTransaction}; /// A list of all tables used by the StateDB. These tables store rollup state - meaning /// account balances, nonces, etc. pub const STATE_TABLES: &[&str] = &[ KeyHashToKey::table_name(), + #[cfg(feature = "std")] JmtValues::table_name(), JmtNodes::table_name(), ]; @@ -49,13 +53,17 @@ pub const STATE_TABLES: &[&str] = &[ /// A list of all tables used by the LedgerDB. These tables store rollup "history" - meaning /// transaction, events, receipts, etc. pub const LEDGER_TABLES: &[&str] = &[ + #[cfg(feature = "std")] SlotByNumber::table_name(), SlotByHash::table_name(), BatchByHash::table_name(), + #[cfg(feature = "std")] BatchByNumber::table_name(), TxByHash::table_name(), + #[cfg(feature = "std")] TxByNumber::table_name(), EventByKey::table_name(), + #[cfg(feature = "std")] EventByNumber::table_name(), ]; @@ -104,8 +112,8 @@ macro_rules! 
define_table_without_codec { } } - impl ::std::fmt::Display for $table_name { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + impl ::core::fmt::Display for $table_name { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { ::core::write!(f, "{}", stringify!($table_name)) } } @@ -117,7 +125,7 @@ macro_rules! impl_borsh_value_codec { impl ::sov_schema_db::schema::ValueCodec<$table_name> for $value { fn encode_value( &self, - ) -> ::std::result::Result< + ) -> ::core::result::Result< ::sov_rollup_interface::maybestd::vec::Vec, ::sov_schema_db::CodecError, > { @@ -126,7 +134,7 @@ macro_rules! impl_borsh_value_codec { fn decode_value( data: &[u8], - ) -> ::std::result::Result { + ) -> ::core::result::Result { ::borsh::BorshDeserialize::deserialize_reader(&mut &data[..]).map_err(Into::into) } } @@ -148,13 +156,13 @@ macro_rules! define_table_with_default_codec { define_table_without_codec!($(#[$docs])+ ( $table_name ) $key => $value); impl ::sov_schema_db::schema::KeyEncoder<$table_name> for $key { - fn encode_key(&self) -> ::std::result::Result<::sov_rollup_interface::maybestd::vec::Vec, ::sov_schema_db::CodecError> { + fn encode_key(&self) -> ::core::result::Result<::sov_rollup_interface::maybestd::vec::Vec, ::sov_schema_db::CodecError> { ::borsh::BorshSerialize::try_to_vec(self).map_err(Into::into) } } impl ::sov_schema_db::schema::KeyDecoder<$table_name> for $key { - fn decode_key(data: &[u8]) -> ::std::result::Result { + fn decode_key(data: &[u8]) -> ::core::result::Result { ::borsh::BorshDeserialize::deserialize_reader(&mut &data[..]).map_err(Into::into) } } @@ -168,12 +176,14 @@ macro_rules! define_table_with_default_codec { /// little-endian, but RocksDB uses lexicographic ordering which is only /// compatible with big-endian, so we use [`bincode`] with the big-endian option /// here. +// TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` +#[cfg(feature = "std")] macro_rules! define_table_with_seek_key_codec { ($(#[$docs:meta])+ ($table_name:ident) $key:ty => $value:ty) => { define_table_without_codec!($(#[$docs])+ ( $table_name ) $key => $value); impl ::sov_schema_db::schema::KeyEncoder<$table_name> for $key { - fn encode_key(&self) -> ::std::result::Result<::sov_rollup_interface::maybestd::vec::Vec, ::sov_schema_db::CodecError> { + fn encode_key(&self) -> ::core::result::Result<::sov_rollup_interface::maybestd::vec::Vec, ::sov_schema_db::CodecError> { use ::anyhow::Context as _; use ::bincode::Options as _; @@ -186,7 +196,7 @@ macro_rules! define_table_with_seek_key_codec { } impl ::sov_schema_db::schema::KeyDecoder<$table_name> for $key { - fn decode_key(data: &[u8]) -> ::std::result::Result { + fn decode_key(data: &[u8]) -> ::core::result::Result { use ::anyhow::Context as _; use ::bincode::Options as _; @@ -199,7 +209,7 @@ macro_rules! define_table_with_seek_key_codec { } impl ::sov_schema_db::SeekKeyEncoder<$table_name> for $key { - fn encode_seek_key(&self) -> ::std::result::Result<::sov_rollup_interface::maybestd::vec::Vec, ::sov_schema_db::CodecError> { + fn encode_seek_key(&self) -> ::core::result::Result<::sov_rollup_interface::maybestd::vec::Vec, ::sov_schema_db::CodecError> { >::encode_key(self) } } @@ -209,6 +219,8 @@ macro_rules! 
define_table_with_seek_key_codec { } // fn deser(target: &mut &[u8]) -> Result; +// TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` +#[cfg(feature = "std")] define_table_with_seek_key_codec!( /// The primary source for slot data (SlotByNumber) SlotNumber => StoredSlot @@ -224,6 +236,8 @@ define_table_with_default_codec!( (ModuleAccessoryState) AccessoryKey => AccessoryStateValue ); +// TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` +#[cfg(feature = "std")] define_table_with_seek_key_codec!( /// The primary source for batch data (BatchByNumber) BatchNumber => StoredBatch @@ -234,6 +248,8 @@ define_table_with_default_codec!( (BatchByHash) DbHash => BatchNumber ); +// TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` +#[cfg(feature = "std")] define_table_with_seek_key_codec!( /// The primary source for transaction data (TxByNumber) TxNumber => StoredTransaction @@ -244,9 +260,11 @@ define_table_with_default_codec!( (TxByHash) DbHash => TxNumber ); +// TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` +#[cfg(feature = "std")] define_table_with_seek_key_codec!( /// The primary store for event data - (EventByNumber) EventNumber => Event + (EventByNumber) EventNumber => sov_rollup_interface::stf::Event ); define_table_with_default_codec!( @@ -280,41 +298,50 @@ impl ValueCodec for Node { } } +#[cfg(feature = "std")] define_table_without_codec!( /// The source of truth for JMT values by version (JmtValues) (StateKey, Version) => JmtValue ); +#[cfg(feature = "std")] impl + PartialEq + core::fmt::Debug> KeyEncoder for (T, Version) { fn encode_key(&self) -> sov_schema_db::schema::Result> { + use byteorder::WriteBytesExt; let mut out = - Vec::with_capacity(self.0.as_ref().len() + std::mem::size_of::() + 8); + Vec::with_capacity(self.0.as_ref().len() + core::mem::size_of::() + 8); self.0 .as_ref() .serialize(&mut out) .map_err(CodecError::from)?; // Write the version in big-endian order so that sorting order is based on the most-significant bytes of the key - out.write_u64::(self.1) + out.write_u64::(self.1) .expect("serialization to vec is infallible"); Ok(out) } } -impl + PartialEq + core::fmt::Debug> SeekKeyEncoder for (T, Version) { +#[cfg(feature = "std")] +impl + PartialEq + core::fmt::Debug> sov_schema_db::SeekKeyEncoder + for (T, Version) +{ fn encode_seek_key(&self) -> sov_schema_db::schema::Result> { self.encode_key() } } +#[cfg(feature = "std")] impl KeyDecoder for (StateKey, Version) { fn decode_key(data: &[u8]) -> sov_schema_db::schema::Result { - let mut cursor = maybestd::io::Cursor::new(data); + use byteorder::ReadBytesExt; + let mut cursor = std::io::Cursor::new(data); let key = Vec::::deserialize_reader(&mut cursor)?; - let version = cursor.read_u64::()?; + let version = cursor.read_u64::()?; Ok((key, version)) } } +#[cfg(feature = "std")] impl ValueCodec for JmtValue { fn encode_value(&self) -> sov_schema_db::schema::Result> { self.try_to_vec().map_err(CodecError::from) diff --git a/full-node/db/sov-db/src/schema/types.rs b/full-node/db/sov-db/src/schema/types.rs index d46d5d3d0..35591c1c7 100644 --- a/full-node/db/sov-db/src/schema/types.rs +++ b/full-node/db/sov-db/src/schema/types.rs @@ -1,10 +1,15 @@ -use std::sync::Arc; - use borsh::{BorshDeserialize, BorshSerialize}; +#[cfg(feature = "std")] use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; -use sov_rollup_interface::rpc::{BatchResponse, TxIdentifier, TxResponse}; -use 
sov_rollup_interface::stf::{Event, EventKey, TransactionReceipt}; +use sov_rollup_interface::maybestd::sync::Arc; +use sov_rollup_interface::maybestd::vec::Vec; +use sov_rollup_interface::rpc::TxIdentifier; +#[cfg(feature = "std")] +use sov_rollup_interface::rpc::{BatchResponse, TxResponse}; +use sov_rollup_interface::stf::EventKey; +#[cfg(feature = "std")] +use sov_rollup_interface::stf::{Event, TransactionReceipt}; /// A cheaply cloneable bytes abstraction for use within the trust boundary of the node /// (i.e. when interfacing with the database). Serializes and deserializes more efficiently, @@ -64,7 +69,7 @@ pub struct StoredSlot { /// Any extra data which the rollup decides to store relating to this slot. pub extra_data: DbBytes, /// The range of batches which occurred in this slot. - pub batches: std::ops::Range, + pub batches: core::ops::Range, } /// The on-disk format for a batch. Stores the hash and identifies the range of transactions @@ -75,11 +80,13 @@ pub struct StoredBatch { /// The hash of the batch, as reported by the DA layer. pub hash: DbHash, /// The range of transactions which occurred in this batch. - pub txs: std::ops::Range, + pub txs: core::ops::Range, /// A customer "receipt" for this batch defined by the rollup. pub custom_receipt: DbBytes, } +// TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` +#[cfg(feature = "std")] impl TryFrom for BatchResponse { type Error = anyhow::Error; fn try_from(value: StoredBatch) -> Result { @@ -99,13 +106,15 @@ pub struct StoredTransaction { /// The hash of the transaction. pub hash: DbHash, /// The range of event-numbers emitted by this transaction. - pub events: std::ops::Range, + pub events: core::ops::Range, /// The serialized transaction data, if the rollup decides to store it. pub body: Option>, /// A customer "receipt" for this transaction defined by the rollup. pub custom_receipt: DbBytes, } +// TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` +#[cfg(feature = "std")] impl TryFrom for TxResponse { type Error = anyhow::Error; fn try_from(value: StoredTransaction) -> Result { @@ -119,6 +128,8 @@ impl TryFrom for TxResponse { } /// Split a `TransactionReceipt` into a `StoredTransaction` and a list of `Event`s for storage in the database. 
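The `(StateKey, Version)` encoder earlier in this diff writes the version big-endian for a reason worth spelling out: RocksDB orders keys by comparing raw bytes lexicographically, and only the big-endian encoding of an integer sorts the same way as the integer itself. A self-contained check of that claim:

    #[test]
    fn big_endian_matches_integer_order() {
        let (a, b) = (1u64, 256u64);
        assert!(a.to_be_bytes() < b.to_be_bytes()); // big-endian preserves numeric order
        assert!(a.to_le_bytes() > b.to_le_bytes()); // little-endian reverses it here
    }
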
+// TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` +#[cfg(feature = "std")] pub fn split_tx_for_storage( tx: TransactionReceipt, event_offset: u64, From b8ca399ca82aa65f84da93e53c336cd54f3731ea Mon Sep 17 00:00:00 2001 From: Victor Lopez Date: Fri, 20 Oct 2023 13:54:59 +0200 Subject: [PATCH 10/28] add tokio to sov-stf-runner/sov-db --- full-node/sov-stf-runner/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/full-node/sov-stf-runner/Cargo.toml b/full-node/sov-stf-runner/Cargo.toml index 6f3931ca0..e34a812b7 100644 --- a/full-node/sov-stf-runner/Cargo.toml +++ b/full-node/sov-stf-runner/Cargo.toml @@ -22,7 +22,7 @@ tokio = { workspace = true, optional = true } hex = { workspace = true } tracing = { workspace = true, optional = true } futures = { workspace = true, optional = true } -sov-db = { path = "../db/sov-db", version = "0.2", optional = true } +sov-db = { path = "../db/sov-db", version = "0.2", optional = true, features = ["tokio"] } sov-rollup-interface = { path = "../../rollup-interface", version = "0.2" } From 5dd592b965bbecb1d3eeb609964e62e193455068 Mon Sep 17 00:00:00 2001 From: Victor Lopez Date: Fri, 20 Oct 2023 14:04:46 +0200 Subject: [PATCH 11/28] add default-features to byteorder --- full-node/db/sov-schema-db/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/full-node/db/sov-schema-db/Cargo.toml b/full-node/db/sov-schema-db/Cargo.toml index 3e2255819..cbc0ebe31 100644 --- a/full-node/db/sov-schema-db/Cargo.toml +++ b/full-node/db/sov-schema-db/Cargo.toml @@ -24,7 +24,7 @@ thiserror = { workspace = true, optional = true } sov-rollup-interface = { path = "../../../rollup-interface", default-features = false } [dev-dependencies] -byteorder = { workspace = true } +byteorder = { workspace = true, default-features = true } tempfile = { workspace = true } [features] From 55422884f093863dd0f0d793d83a668c016ad00e Mon Sep 17 00:00:00 2001 From: Victor Lopez Date: Fri, 20 Oct 2023 14:30:34 +0200 Subject: [PATCH 12/28] add `tokio` as default feature of `sov-db` --- full-node/db/sov-db/Cargo.toml | 2 +- full-node/sov-stf-runner/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/full-node/db/sov-db/Cargo.toml b/full-node/db/sov-db/Cargo.toml index 73b08eff0..0f1c08ec3 100644 --- a/full-node/db/sov-db/Cargo.toml +++ b/full-node/db/sov-db/Cargo.toml @@ -38,7 +38,7 @@ tempfile = { workspace = true } [features] -default = ["std"] +default = ["std", "tokio"] arbitrary = [ "dep:arbitrary", "dep:proptest", diff --git a/full-node/sov-stf-runner/Cargo.toml b/full-node/sov-stf-runner/Cargo.toml index e34a812b7..6f3931ca0 100644 --- a/full-node/sov-stf-runner/Cargo.toml +++ b/full-node/sov-stf-runner/Cargo.toml @@ -22,7 +22,7 @@ tokio = { workspace = true, optional = true } hex = { workspace = true } tracing = { workspace = true, optional = true } futures = { workspace = true, optional = true } -sov-db = { path = "../db/sov-db", version = "0.2", optional = true, features = ["tokio"] } +sov-db = { path = "../db/sov-db", version = "0.2", optional = true } sov-rollup-interface = { path = "../../rollup-interface", version = "0.2" } From 74f5760534c65df87d782ef480f9be13894d232c Mon Sep 17 00:00:00 2001 From: Victor Lopez Date: Fri, 20 Oct 2023 14:59:38 +0200 Subject: [PATCH 13/28] restore nightly json files --- module-system/sov-cli/test-data/requests/burn.json | 8 ++++++++ .../sov-cli/test-data/requests/create_token.json | 13 +++++++++++++ 
module-system/sov-cli/test-data/requests/mint.json | 9 +++++++++ .../sov-cli/test-data/requests/transfer.json | 9 +++++++++ 4 files changed, 39 insertions(+) create mode 100644 module-system/sov-cli/test-data/requests/burn.json create mode 100644 module-system/sov-cli/test-data/requests/create_token.json create mode 100644 module-system/sov-cli/test-data/requests/mint.json create mode 100644 module-system/sov-cli/test-data/requests/transfer.json diff --git a/module-system/sov-cli/test-data/requests/burn.json b/module-system/sov-cli/test-data/requests/burn.json new file mode 100644 index 000000000..991b5aaa0 --- /dev/null +++ b/module-system/sov-cli/test-data/requests/burn.json @@ -0,0 +1,8 @@ +{ + "Burn": { + "coins": { + "amount": 300, + "token_address": "sov16m8fxq0x5wc5aw75fx9rus2p7g2l22zf4re72c3m058g77cdjemsavg2ft" + } + } +} diff --git a/module-system/sov-cli/test-data/requests/create_token.json b/module-system/sov-cli/test-data/requests/create_token.json new file mode 100644 index 000000000..16c27afd1 --- /dev/null +++ b/module-system/sov-cli/test-data/requests/create_token.json @@ -0,0 +1,13 @@ +{ + "CreateToken": { + "salt": 11, + "token_name": "sov-test-token", + "initial_balance": 1000, + "minter_address": "sov1x3jtvq0zwhj2ucsc4hqugskvralrulxvf53vwtkred93s2x9gmzs04jvyr", + "authorized_minters": [ + "sov1l6n2cku82yfqld30lanm2nfw43n2auc8clw7r5u5m6s7p8jrm4zqrr8r94", + "sov1x3jtvq0zwhj2ucsc4hqugskvralrulxvf53vwtkred93s2x9gmzs04jvyr", + "sov15vspj48hpttzyvxu8kzq5klhvaczcpyxn6z6k0hwpwtzs4a6wkvqwr57gc" + ] + } +} diff --git a/module-system/sov-cli/test-data/requests/mint.json b/module-system/sov-cli/test-data/requests/mint.json new file mode 100644 index 000000000..10436e602 --- /dev/null +++ b/module-system/sov-cli/test-data/requests/mint.json @@ -0,0 +1,9 @@ +{ + "Mint": { + "coins": { + "amount": 3000, + "token_address": "sov16m8fxq0x5wc5aw75fx9rus2p7g2l22zf4re72c3m058g77cdjemsavg2ft" + }, + "minter_address": "sov15vspj48hpttzyvxu8kzq5klhvaczcpyxn6z6k0hwpwtzs4a6wkvqwr57gc" + } +} diff --git a/module-system/sov-cli/test-data/requests/transfer.json b/module-system/sov-cli/test-data/requests/transfer.json new file mode 100644 index 000000000..92411d14c --- /dev/null +++ b/module-system/sov-cli/test-data/requests/transfer.json @@ -0,0 +1,9 @@ +{ + "Transfer": { + "to": "sov1l6n2cku82yfqld30lanm2nfw43n2auc8clw7r5u5m6s7p8jrm4zqklh0qh", + "coins": { + "amount": 200, + "token_address": "sov16m8fxq0x5wc5aw75fx9rus2p7g2l22zf4re72c3m058g77cdjemsavg2ft" + } + } +} From 2f1d537982c224fa69749e2c3b42aef812dd7706 Mon Sep 17 00:00:00 2001 From: Victor Lopez Date: Fri, 20 Oct 2023 15:19:03 +0200 Subject: [PATCH 14/28] restore more json files --- .../test-data/genesis/accounts.json | 3 +++ sov-rollup-starter/test-data/genesis/bank.json | 17 +++++++++++++++++ .../test-data/genesis/sequencer_registry.json | 9 +++++++++ 3 files changed, 29 insertions(+) create mode 100644 sov-rollup-starter/test-data/genesis/accounts.json create mode 100644 sov-rollup-starter/test-data/genesis/bank.json create mode 100644 sov-rollup-starter/test-data/genesis/sequencer_registry.json diff --git a/sov-rollup-starter/test-data/genesis/accounts.json b/sov-rollup-starter/test-data/genesis/accounts.json new file mode 100644 index 000000000..f65b078f6 --- /dev/null +++ b/sov-rollup-starter/test-data/genesis/accounts.json @@ -0,0 +1,3 @@ +{ + "pub_keys": [] +} diff --git a/sov-rollup-starter/test-data/genesis/bank.json b/sov-rollup-starter/test-data/genesis/bank.json new file mode 100644 index 000000000..d03fcc10f --- /dev/null 
+++ b/sov-rollup-starter/test-data/genesis/bank.json @@ -0,0 +1,17 @@ +{ + "tokens": [ + { + "token_name": "sov-demo-token", + "address_and_balances": [ + [ + "sov1l6n2cku82yfqld30lanm2nfw43n2auc8clw7r5u5m6s7p8jrm4zqrr8r94", + 100000000 + ] + ], + "authorized_minters": [ + "sov1l6n2cku82yfqld30lanm2nfw43n2auc8clw7r5u5m6s7p8jrm4zqrr8r94" + ], + "salt": 0 + } + ] +} diff --git a/sov-rollup-starter/test-data/genesis/sequencer_registry.json b/sov-rollup-starter/test-data/genesis/sequencer_registry.json new file mode 100644 index 000000000..fe2696ae5 --- /dev/null +++ b/sov-rollup-starter/test-data/genesis/sequencer_registry.json @@ -0,0 +1,9 @@ +{ + "seq_rollup_address": "sov1l6n2cku82yfqld30lanm2nfw43n2auc8clw7r5u5m6s7p8jrm4zqrr8r94", + "seq_da_address": "0000000000000000000000000000000000000000000000000000000000000000", + "coins_to_lock": { + "amount": 50, + "token_address": "sov1zsnx7n2wjvtkr0ttscfgt06pjca3v2e6stxeu49qwynavmk7a8xqlxkkjp" + }, + "is_preferred_sequencer": true +} From 5c5e320a8ff6080f5d34b83308034fbf83c5be64 Mon Sep 17 00:00:00 2001 From: Victor Lopez Date: Tue, 24 Oct 2023 14:52:42 +0200 Subject: [PATCH 15/28] move std feature shield bloat code to dedicated sub-modules --- full-node/db/sov-schema-db/Cargo.toml | 6 +- full-node/db/sov-schema-db/src/iterator.rs | 154 +++++++++++---------- full-node/db/sov-schema-db/src/lib.rs | 37 ++--- 3 files changed, 102 insertions(+), 95 deletions(-) diff --git a/full-node/db/sov-schema-db/Cargo.toml b/full-node/db/sov-schema-db/Cargo.toml index b65c506e5..8454ff30c 100644 --- a/full-node/db/sov-schema-db/Cargo.toml +++ b/full-node/db/sov-schema-db/Cargo.toml @@ -31,7 +31,11 @@ tempfile = { workspace = true } [features] default = ["std"] -arbitrary = ["dep:proptest", "dep:proptest-derive"] +arbitrary = [ + "dep:proptest", + "dep:proptest-derive", + "std" +] std = [ "anyhow/default", "once_cell", diff --git a/full-node/db/sov-schema-db/src/iterator.rs b/full-node/db/sov-schema-db/src/iterator.rs index f7cd57ff5..50ffb21af 100644 --- a/full-node/db/sov-schema-db/src/iterator.rs +++ b/full-node/db/sov-schema-db/src/iterator.rs @@ -1,10 +1,9 @@ use sov_rollup_interface::maybestd::vec::Vec; -#[cfg(feature = "std")] -use crate::metrics::{SCHEMADB_ITER_BYTES, SCHEMADB_ITER_LATENCY_SECONDS}; use crate::schema::Schema; + #[cfg(feature = "std")] -use crate::schema::{KeyDecoder, ValueCodec}; +pub(crate) use use_std::ScanDirection; /// This defines a type that can be used to seek a [`SchemaIterator`], via /// interfaces like [`SchemaIterator::seek`]. Mind you, not all @@ -28,13 +27,6 @@ pub trait SeekKeyEncoder: Sized { fn encode_seek_key(&self) -> crate::schema::Result>; } -// unused outside `std` -#[cfg(feature = "std")] -pub(crate) enum ScanDirection { - Forward, - Backward, -} - /// DB Iterator parameterized on [`Schema`] that seeks with [`Schema::Key`] and yields /// [`Schema::Key`] and [`Schema::Value`] pairs. #[cfg(feature = "std")] @@ -45,86 +37,96 @@ pub struct SchemaIterator<'a, S> { } #[cfg(feature = "std")] -impl<'a, S> SchemaIterator<'a, S> -where - S: Schema, -{ - pub(crate) fn new(db_iter: rocksdb::DBRawIterator<'a>, direction: ScanDirection) -> Self { - SchemaIterator { - db_iter, - direction, - phantom: core::marker::PhantomData, - } - } +mod use_std { + use super::*; - /// Seeks to the first key. - pub fn seek_to_first(&mut self) { - self.db_iter.seek_to_first(); - } + use crate::metrics::{SCHEMADB_ITER_BYTES, SCHEMADB_ITER_LATENCY_SECONDS}; + use crate::schema::{KeyDecoder, ValueCodec}; - /// Seeks to the last key. 
- pub fn seek_to_last(&mut self) { - self.db_iter.seek_to_last(); + pub(crate) enum ScanDirection { + Forward, + Backward, } - /// Seeks to the first key whose binary representation is equal to or greater than that of the - /// `seek_key`. - pub fn seek(&mut self, seek_key: &impl SeekKeyEncoder) -> anyhow::Result<()> { - let key = seek_key.encode_seek_key()?; - self.db_iter.seek(&key); - Ok(()) - } + impl<'a, S> core::iter::FusedIterator for SchemaIterator<'a, S> where S: Schema {} - /// Seeks to the last key whose binary representation is less than or equal to that of the - /// `seek_key`. - /// - /// See example in [`RocksDB doc`](https://github.com/facebook/rocksdb/wiki/SeekForPrev). - pub fn seek_for_prev(&mut self, seek_key: &impl SeekKeyEncoder) -> anyhow::Result<()> { - let key = seek_key.encode_seek_key()?; - self.db_iter.seek_for_prev(&key); - Ok(()) - } + impl<'a, S> Iterator for SchemaIterator<'a, S> + where + S: Schema, + { + type Item = anyhow::Result<(S::Key, S::Value)>; - fn next_impl(&mut self) -> anyhow::Result> { - let _timer = SCHEMADB_ITER_LATENCY_SECONDS - .with_label_values(&[S::COLUMN_FAMILY_NAME]) - .start_timer(); + fn next(&mut self) -> Option { + self.next_impl().transpose() + } + } - if !self.db_iter.valid() { - self.db_iter.status()?; - return Ok(None); + impl<'a, S> SchemaIterator<'a, S> + where + S: Schema, + { + pub(crate) fn new(db_iter: rocksdb::DBRawIterator<'a>, direction: ScanDirection) -> Self { + SchemaIterator { + db_iter, + direction, + phantom: core::marker::PhantomData, + } } - let raw_key = self.db_iter.key().expect("db_iter.key() failed."); - let raw_value = self.db_iter.value().expect("db_iter.value() failed."); + /// Seeks to the first key. + pub fn seek_to_first(&mut self) { + self.db_iter.seek_to_first(); + } - SCHEMADB_ITER_BYTES - .with_label_values(&[S::COLUMN_FAMILY_NAME]) - .observe((raw_key.len() + raw_value.len()) as f64); + /// Seeks to the last key. + pub fn seek_to_last(&mut self) { + self.db_iter.seek_to_last(); + } - let key = >::decode_key(raw_key)?; - let value = >::decode_value(raw_value)?; + /// Seeks to the first key whose binary representation is equal to or greater than that of the + /// `seek_key`. + pub fn seek(&mut self, seek_key: &impl SeekKeyEncoder) -> anyhow::Result<()> { + let key = seek_key.encode_seek_key()?; + self.db_iter.seek(&key); + Ok(()) + } - match self.direction { - ScanDirection::Forward => self.db_iter.next(), - ScanDirection::Backward => self.db_iter.prev(), + /// Seeks to the last key whose binary representation is less than or equal to that of the + /// `seek_key`. + /// + /// See example in [`RocksDB doc`](https://github.com/facebook/rocksdb/wiki/SeekForPrev). 
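The shape of this refactor, reduced to its essentials: one `#[cfg]` on a private module replaces a long run of per-item attributes, and a `pub(crate) use` re-export keeps every call site unchanged. Schematically (mirroring the hunks above):

    // Gate the whole std-only surface once...
    #[cfg(feature = "std")]
    mod use_std {
        pub(crate) enum ScanDirection {
            Forward,
            Backward,
        }
        // ...the iterator impls live here too...
    }

    // ...and re-export so callers never mention the module name.
    #[cfg(feature = "std")]
    pub(crate) use use_std::ScanDirection;
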
+ pub fn seek_for_prev(&mut self, seek_key: &impl SeekKeyEncoder) -> anyhow::Result<()> { + let key = seek_key.encode_seek_key()?; + self.db_iter.seek_for_prev(&key); + Ok(()) } - Ok(Some((key, value))) - } -} + fn next_impl(&mut self) -> anyhow::Result> { + let _timer = SCHEMADB_ITER_LATENCY_SECONDS + .with_label_values(&[S::COLUMN_FAMILY_NAME]) + .start_timer(); -#[cfg(feature = "std")] -impl<'a, S> Iterator for SchemaIterator<'a, S> -where - S: Schema, -{ - type Item = anyhow::Result<(S::Key, S::Value)>; - - fn next(&mut self) -> Option { - self.next_impl().transpose() + if !self.db_iter.valid() { + self.db_iter.status()?; + return Ok(None); + } + + let raw_key = self.db_iter.key().expect("db_iter.key() failed."); + let raw_value = self.db_iter.value().expect("db_iter.value() failed."); + + SCHEMADB_ITER_BYTES + .with_label_values(&[S::COLUMN_FAMILY_NAME]) + .observe((raw_key.len() + raw_value.len()) as f64); + + let key = >::decode_key(raw_key)?; + let value = >::decode_value(raw_value)?; + + match self.direction { + ScanDirection::Forward => self.db_iter.next(), + ScanDirection::Backward => self.db_iter.prev(), + } + + Ok(Some((key, value))) + } } } - -#[cfg(feature = "std")] -impl<'a, S> core::iter::FusedIterator for SchemaIterator<'a, S> where S: Schema {} diff --git a/full-node/db/sov-schema-db/src/lib.rs b/full-node/db/sov-schema-db/src/lib.rs index 5c121c9ac..7983c1ab0 100644 --- a/full-node/db/sov-schema-db/src/lib.rs +++ b/full-node/db/sov-schema-db/src/lib.rs @@ -158,29 +158,30 @@ pub enum CodecError { } #[cfg(not(feature = "std"))] -impl core::fmt::Display for CodecError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "{:?}", self) +mod no_std { + use super::*; + + impl core::fmt::Display for CodecError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "{:?}", self) + } } -} -#[cfg(not(feature = "std"))] -impl From for anyhow::Error { - fn from(e: CodecError) -> Self { - anyhow::Error::msg(e) + impl From for anyhow::Error { + fn from(e: CodecError) -> Self { + anyhow::Error::msg(e) + } } -} -#[cfg(not(feature = "std"))] -impl From for CodecError { - fn from(e: anyhow::Error) -> Self { - CodecError::Wrapped(e) + impl From for CodecError { + fn from(e: anyhow::Error) -> Self { + CodecError::Wrapped(e) + } } -} -#[cfg(not(feature = "std"))] -impl From for CodecError { - fn from(e: io::Error) -> Self { - CodecError::Io(e) + impl From for CodecError { + fn from(e: io::Error) -> Self { + CodecError::Io(e) + } } } From ae46f734adc03f531666918de3f72b2950b9d4d1 Mon Sep 17 00:00:00 2001 From: Victor Lopez Date: Tue, 24 Oct 2023 15:08:30 +0200 Subject: [PATCH 16/28] move bloat feature shield to dedicated use_std modules --- full-node/db/sov-db/src/schema/tables.rs | 252 ++++++++++----------- full-node/db/sov-db/src/schema/types.rs | 107 ++++----- full-node/db/sov-schema-db/src/iterator.rs | 6 +- 3 files changed, 181 insertions(+), 184 deletions(-) diff --git a/full-node/db/sov-db/src/schema/tables.rs b/full-node/db/sov-db/src/schema/tables.rs index 1f1fd1183..17136a3c2 100644 --- a/full-node/db/sov-db/src/schema/tables.rs +++ b/full-node/db/sov-db/src/schema/tables.rs @@ -27,19 +27,17 @@ use borsh::{BorshDeserialize, BorshSerialize}; use jmt::storage::{Node, NodeKey}; -#[cfg(feature = "std")] -use jmt::Version; use sov_rollup_interface::maybestd::vec::Vec; use sov_rollup_interface::stf::EventKey; use sov_schema_db::schema::{KeyDecoder, KeyEncoder, ValueCodec}; use sov_schema_db::CodecError; +#[cfg(feature = 
"std")] +pub(crate) use use_std::*; use super::types::{ AccessoryKey, AccessoryStateValue, BatchNumber, DbHash, EventNumber, SlotNumber, StateKey, TxNumber, }; -#[cfg(feature = "std")] -use super::types::{JmtValue, StoredBatch, StoredSlot, StoredTransaction}; /// A list of all tables used by the StateDB. These tables store rollup state - meaning /// account balances, nonces, etc. @@ -171,61 +169,6 @@ macro_rules! define_table_with_default_codec { }; } -/// Macro similar to [`define_table_with_default_codec`], but to be used when -/// your key type should be [`SeekKeyEncoder`]. Borsh serializes integers as -/// little-endian, but RocksDB uses lexicographic ordering which is only -/// compatible with big-endian, so we use [`bincode`] with the big-endian option -/// here. -// TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` -#[cfg(feature = "std")] -macro_rules! define_table_with_seek_key_codec { - ($(#[$docs:meta])+ ($table_name:ident) $key:ty => $value:ty) => { - define_table_without_codec!($(#[$docs])+ ( $table_name ) $key => $value); - - impl ::sov_schema_db::schema::KeyEncoder<$table_name> for $key { - fn encode_key(&self) -> ::core::result::Result<::sov_rollup_interface::maybestd::vec::Vec, ::sov_schema_db::CodecError> { - use ::anyhow::Context as _; - use ::bincode::Options as _; - - let bincode_options = ::bincode::options() - .with_fixint_encoding() - .with_big_endian(); - - bincode_options.serialize(self).context("Failed to serialize key").map_err(Into::into) - } - } - - impl ::sov_schema_db::schema::KeyDecoder<$table_name> for $key { - fn decode_key(data: &[u8]) -> ::core::result::Result { - use ::anyhow::Context as _; - use ::bincode::Options as _; - - let bincode_options = ::bincode::options() - .with_fixint_encoding() - .with_big_endian(); - - bincode_options.deserialize_from(&mut &data[..]).context("Failed to deserialize key").map_err(Into::into) - } - } - - impl ::sov_schema_db::SeekKeyEncoder<$table_name> for $key { - fn encode_seek_key(&self) -> ::core::result::Result<::sov_rollup_interface::maybestd::vec::Vec, ::sov_schema_db::CodecError> { - >::encode_key(self) - } - } - - impl_borsh_value_codec!($table_name, $value); - }; -} - -// fn deser(target: &mut &[u8]) -> Result; -// TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` -#[cfg(feature = "std")] -define_table_with_seek_key_codec!( - /// The primary source for slot data - (SlotByNumber) SlotNumber => StoredSlot -); - define_table_with_default_codec!( /// A "secondary index" for slot data by hash (SlotByHash) DbHash => SlotNumber @@ -236,37 +179,16 @@ define_table_with_default_codec!( (ModuleAccessoryState) AccessoryKey => AccessoryStateValue ); -// TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` -#[cfg(feature = "std")] -define_table_with_seek_key_codec!( - /// The primary source for batch data - (BatchByNumber) BatchNumber => StoredBatch -); - define_table_with_default_codec!( /// A "secondary index" for batch data by hash (BatchByHash) DbHash => BatchNumber ); -// TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` -#[cfg(feature = "std")] -define_table_with_seek_key_codec!( - /// The primary source for transaction data - (TxByNumber) TxNumber => StoredTransaction -); - define_table_with_default_codec!( /// A "secondary index" for transaction data by hash (TxByHash) DbHash => TxNumber ); -// TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` -#[cfg(feature = 
"std")] -define_table_with_seek_key_codec!( - /// The primary store for event data - (EventByNumber) EventNumber => sov_rollup_interface::stf::Event -); - define_table_with_default_codec!( /// A "secondary index" for event data by key (EventByKey) (EventKey, TxNumber, EventNumber) => () @@ -298,64 +220,138 @@ impl ValueCodec for Node { } } -#[cfg(feature = "std")] -define_table_without_codec!( - /// The source of truth for JMT values by version - (JmtValues) (StateKey, Version) => JmtValue +define_table_with_default_codec!( + /// A mapping from key-hashes to their preimages and latest version. Since we store raw + /// key-value pairs instead of keyHash->value pairs, + /// this table is required to implement the `jmt::TreeReader` trait, + /// which requires the ability to fetch values by hash. + (KeyHashToKey) [u8;32] => StateKey ); #[cfg(feature = "std")] -impl + PartialEq + core::fmt::Debug> KeyEncoder for (T, Version) { - fn encode_key(&self) -> sov_schema_db::schema::Result> { - use byteorder::WriteBytesExt; - let mut out = - Vec::with_capacity(self.0.as_ref().len() + core::mem::size_of::() + 8); - self.0 - .as_ref() - .serialize(&mut out) - .map_err(CodecError::from)?; - // Write the version in big-endian order so that sorting order is based on the most-significant bytes of the key - out.write_u64::(self.1) - .expect("serialization to vec is infallible"); - Ok(out) - } +mod use_std { + use jmt::Version; + + use super::super::types::{JmtValue, StoredBatch, StoredSlot, StoredTransaction}; + use super::*; + + /// Macro similar to [`define_table_with_default_codec`], but to be used when + /// your key type should be [`SeekKeyEncoder`]. Borsh serializes integers as + /// little-endian, but RocksDB uses lexicographic ordering which is only + /// compatible with big-endian, so we use [`bincode`] with the big-endian option + /// here. + // TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` + macro_rules! 
define_table_with_seek_key_codec { + ($(#[$docs:meta])+ ($table_name:ident) $key:ty => $value:ty) => { + define_table_without_codec!($(#[$docs])+ ( $table_name ) $key => $value); + + impl ::sov_schema_db::schema::KeyEncoder<$table_name> for $key { + fn encode_key(&self) -> ::core::result::Result<::sov_rollup_interface::maybestd::vec::Vec, ::sov_schema_db::CodecError> { + use ::anyhow::Context as _; + use ::bincode::Options as _; + + let bincode_options = ::bincode::options() + .with_fixint_encoding() + .with_big_endian(); + + bincode_options.serialize(self).context("Failed to serialize key").map_err(Into::into) + } + } + + impl ::sov_schema_db::schema::KeyDecoder<$table_name> for $key { + fn decode_key(data: &[u8]) -> ::core::result::Result { + use ::anyhow::Context as _; + use ::bincode::Options as _; + + let bincode_options = ::bincode::options() + .with_fixint_encoding() + .with_big_endian(); + + bincode_options.deserialize_from(&mut &data[..]).context("Failed to deserialize key").map_err(Into::into) + } + } + + impl ::sov_schema_db::SeekKeyEncoder<$table_name> for $key { + fn encode_seek_key(&self) -> ::core::result::Result<::sov_rollup_interface::maybestd::vec::Vec, ::sov_schema_db::CodecError> { + >::encode_key(self) + } + } + + impl_borsh_value_codec!($table_name, $value); + }; } -#[cfg(feature = "std")] -impl + PartialEq + core::fmt::Debug> sov_schema_db::SeekKeyEncoder - for (T, Version) -{ - fn encode_seek_key(&self) -> sov_schema_db::schema::Result> { - self.encode_key() + // fn deser(target: &mut &[u8]) -> Result; + // TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` + define_table_with_seek_key_codec!( + /// The primary source for slot data + (SlotByNumber) SlotNumber => StoredSlot + ); + + // TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` + define_table_with_seek_key_codec!( + /// The primary source for batch data + (BatchByNumber) BatchNumber => StoredBatch + ); + + // TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` + define_table_with_seek_key_codec!( + /// The primary source for transaction data + (TxByNumber) TxNumber => StoredTransaction + ); + + // TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` + define_table_with_seek_key_codec!( + /// The primary store for event data + (EventByNumber) EventNumber => sov_rollup_interface::stf::Event + ); + + define_table_without_codec!( + /// The source of truth for JMT values by version + (JmtValues) (StateKey, Version) => JmtValue + ); + + impl + PartialEq + core::fmt::Debug> KeyEncoder for (T, Version) { + fn encode_key(&self) -> sov_schema_db::schema::Result> { + use byteorder::WriteBytesExt; + let mut out = + Vec::with_capacity(self.0.as_ref().len() + core::mem::size_of::() + 8); + self.0 + .as_ref() + .serialize(&mut out) + .map_err(CodecError::from)?; + // Write the version in big-endian order so that sorting order is based on the most-significant bytes of the key + out.write_u64::(self.1) + .expect("serialization to vec is infallible"); + Ok(out) + } } -} -#[cfg(feature = "std")] -impl KeyDecoder for (StateKey, Version) { - fn decode_key(data: &[u8]) -> sov_schema_db::schema::Result { - use byteorder::ReadBytesExt; - let mut cursor = std::io::Cursor::new(data); - let key = Vec::::deserialize_reader(&mut cursor)?; - let version = cursor.read_u64::()?; - Ok((key, version)) + impl + PartialEq + core::fmt::Debug> sov_schema_db::SeekKeyEncoder + for (T, Version) + { + fn encode_seek_key(&self) -> 
sov_schema_db::schema::Result> { + self.encode_key() + } } -} -#[cfg(feature = "std")] -impl ValueCodec for JmtValue { - fn encode_value(&self) -> sov_schema_db::schema::Result> { - self.try_to_vec().map_err(CodecError::from) + impl KeyDecoder for (StateKey, Version) { + fn decode_key(data: &[u8]) -> sov_schema_db::schema::Result { + use byteorder::ReadBytesExt; + let mut cursor = std::io::Cursor::new(data); + let key = Vec::::deserialize_reader(&mut cursor)?; + let version = cursor.read_u64::()?; + Ok((key, version)) + } } - fn decode_value(data: &[u8]) -> sov_schema_db::schema::Result { - Ok(Self::deserialize_reader(&mut &data[..])?) + impl ValueCodec for JmtValue { + fn encode_value(&self) -> sov_schema_db::schema::Result> { + self.try_to_vec().map_err(CodecError::from) + } + + fn decode_value(data: &[u8]) -> sov_schema_db::schema::Result { + Ok(Self::deserialize_reader(&mut &data[..])?) + } } } - -define_table_with_default_codec!( - /// A mapping from key-hashes to their preimages and latest version. Since we store raw - /// key-value pairs instead of keyHash->value pairs, - /// this table is required to implement the `jmt::TreeReader` trait, - /// which requires the ability to fetch values by hash. - (KeyHashToKey) [u8;32] => StateKey -); diff --git a/full-node/db/sov-db/src/schema/types.rs b/full-node/db/sov-db/src/schema/types.rs index 35591c1c7..fdd7955f2 100644 --- a/full-node/db/sov-db/src/schema/types.rs +++ b/full-node/db/sov-db/src/schema/types.rs @@ -1,15 +1,11 @@ use borsh::{BorshDeserialize, BorshSerialize}; -#[cfg(feature = "std")] -use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use sov_rollup_interface::maybestd::sync::Arc; use sov_rollup_interface::maybestd::vec::Vec; use sov_rollup_interface::rpc::TxIdentifier; -#[cfg(feature = "std")] -use sov_rollup_interface::rpc::{BatchResponse, TxResponse}; use sov_rollup_interface::stf::EventKey; #[cfg(feature = "std")] -use sov_rollup_interface::stf::{Event, TransactionReceipt}; +pub use use_std::split_tx_for_storage; /// A cheaply cloneable bytes abstraction for use within the trust boundary of the node /// (i.e. when interfacing with the database). Serializes and deserializes more efficiently, @@ -85,20 +81,6 @@ pub struct StoredBatch { pub custom_receipt: DbBytes, } -// TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` -#[cfg(feature = "std")] -impl TryFrom for BatchResponse { - type Error = anyhow::Error; - fn try_from(value: StoredBatch) -> Result { - Ok(Self { - hash: value.hash, - custom_receipt: bincode::deserialize(&value.custom_receipt.0)?, - tx_range: value.txs.start.into()..value.txs.end.into(), - txs: None, - }) - } -} - /// The on-disk format of a transaction. Includes the txhash, the serialized tx data, /// and identifies the events emitted by this transaction #[derive(Debug, PartialEq, BorshSerialize, BorshDeserialize, Clone)] @@ -113,39 +95,6 @@ pub struct StoredTransaction { pub custom_receipt: DbBytes, } -// TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` -#[cfg(feature = "std")] -impl TryFrom for TxResponse { - type Error = anyhow::Error; - fn try_from(value: StoredTransaction) -> Result { - Ok(Self { - hash: value.hash, - event_range: value.events.start.into()..value.events.end.into(), - body: value.body, - custom_receipt: bincode::deserialize(&value.custom_receipt.0)?, - }) - } -} - -/// Split a `TransactionReceipt` into a `StoredTransaction` and a list of `Event`s for storage in the database. 
-// TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` -#[cfg(feature = "std")] -pub fn split_tx_for_storage( - tx: TransactionReceipt, - event_offset: u64, -) -> (StoredTransaction, Vec) { - let event_range = EventNumber(event_offset)..EventNumber(event_offset + tx.events.len() as u64); - let tx_for_storage = StoredTransaction { - hash: tx.tx_hash, - events: event_range, - body: tx.body_to_save, - custom_receipt: DbBytes::new( - bincode::serialize(&tx.receipt).expect("Serialization to vec is infallible"), - ), - }; - (tx_for_storage, tx.events) -} - /// An identifier that specifies a single event #[derive(Debug, PartialEq, Serialize, Deserialize)] pub enum EventIdentifier { @@ -209,6 +158,60 @@ u64_wrapper!(BatchNumber); u64_wrapper!(TxNumber); u64_wrapper!(EventNumber); +#[cfg(feature = "std")] +mod use_std { + use serde::de::DeserializeOwned; + use sov_rollup_interface::rpc::{BatchResponse, TxResponse}; + use sov_rollup_interface::stf::{Event, TransactionReceipt}; + + use super::*; + + // TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` + impl TryFrom for BatchResponse { + type Error = anyhow::Error; + fn try_from(value: StoredBatch) -> Result { + Ok(Self { + hash: value.hash, + custom_receipt: bincode::deserialize(&value.custom_receipt.0)?, + tx_range: value.txs.start.into()..value.txs.end.into(), + txs: None, + }) + } + } + + // TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` + impl TryFrom for TxResponse { + type Error = anyhow::Error; + fn try_from(value: StoredTransaction) -> Result { + Ok(Self { + hash: value.hash, + event_range: value.events.start.into()..value.events.end.into(), + body: value.body, + custom_receipt: bincode::deserialize(&value.custom_receipt.0)?, + }) + } + } + + /// Split a `TransactionReceipt` into a `StoredTransaction` and a list of `Event`s for storage in the database. + // TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` + pub fn split_tx_for_storage( + tx: TransactionReceipt, + event_offset: u64, + ) -> (StoredTransaction, Vec) { + let event_range = + EventNumber(event_offset)..EventNumber(event_offset + tx.events.len() as u64); + let tx_for_storage = StoredTransaction { + hash: tx.tx_hash, + events: event_range, + body: tx.body_to_save, + custom_receipt: DbBytes::new( + bincode::serialize(&tx.receipt).expect("Serialization to vec is infallible"), + ), + }; + (tx_for_storage, tx.events) + } +} + #[cfg(feature = "arbitrary")] pub mod arbitrary { //! Arbitrary definitions for the types. diff --git a/full-node/db/sov-schema-db/src/iterator.rs b/full-node/db/sov-schema-db/src/iterator.rs index 50ffb21af..7c8aae725 100644 --- a/full-node/db/sov-schema-db/src/iterator.rs +++ b/full-node/db/sov-schema-db/src/iterator.rs @@ -1,10 +1,9 @@ use sov_rollup_interface::maybestd::vec::Vec; - -use crate::schema::Schema; - #[cfg(feature = "std")] pub(crate) use use_std::ScanDirection; +use crate::schema::Schema; + /// This defines a type that can be used to seek a [`SchemaIterator`], via /// interfaces like [`SchemaIterator::seek`]. 
Mind you, not all /// [`KeyEncoder`](crate::schema::KeyEncoder)s shall be [`SeekKeyEncoder`]s, and @@ -39,7 +38,6 @@ pub struct SchemaIterator<'a, S> { #[cfg(feature = "std")] mod use_std { use super::*; - use crate::metrics::{SCHEMADB_ITER_BYTES, SCHEMADB_ITER_LATENCY_SECONDS}; use crate::schema::{KeyDecoder, ValueCodec}; From 1296c19d28c0e1dcf657189e6d6839de2f618207 Mon Sep 17 00:00:00 2001 From: Victor Lopes Date: Tue, 24 Oct 2023 15:48:27 +0200 Subject: [PATCH 17/28] Update rollup-interface/src/state_machine/mocks/zk_vm.rs Co-authored-by: Filippo Neysofu Costa --- rollup-interface/src/state_machine/mocks/zk_vm.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/rollup-interface/src/state_machine/mocks/zk_vm.rs b/rollup-interface/src/state_machine/mocks/zk_vm.rs index 8b33e0eee..92dabcacb 100644 --- a/rollup-interface/src/state_machine/mocks/zk_vm.rs +++ b/rollup-interface/src/state_machine/mocks/zk_vm.rs @@ -91,18 +91,21 @@ impl crate::zk::Zkvm for MockZkvm { Ok(bincode::deserialize(output)?) } - #[cfg(not(feature = "std"))] fn verify_and_extract_output< Add: crate::RollupAddress, Da: crate::da::DaSpec, Root: Serialize + serde::de::DeserializeOwned, >( - _serialized_proof: &[u8], - _code_commitment: &Self::CodeCommitment, + serialized_proof: &[u8], + code_commitment: &Self::CodeCommitment, ) -> Result, Self::Error> { - todo!("the current version of bincode doesn't support no-std; however, the next version is scheduled to") + if cfg!(feature = "std") { + let output = Self::verify(serialized_proof, code_commitment)?; + Ok(bincode::deserialize(output)?) + } else { + todo!("the current version of bincode doesn't support no-std; however, the next version is scheduled to") + } } -} #[test] fn test_mock_proof_roundtrip() { From b687b0c4b980c455ecb8d7da7b7bfcc7118b8755 Mon Sep 17 00:00:00 2001 From: Victor Lopez Date: Tue, 24 Oct 2023 16:39:46 +0200 Subject: [PATCH 18/28] revert sov-db & sov-schema-db --- Cargo.lock | 1 - full-node/db/sov-db/Cargo.toml | 32 +- full-node/db/sov-db/src/lib.rs | 6 - full-node/db/sov-db/src/schema/tables.rs | 261 +++++++------- full-node/db/sov-db/src/schema/types.rs | 112 +++--- full-node/db/sov-schema-db/Cargo.toml | 37 +- full-node/db/sov-schema-db/src/iterator.rs | 155 ++++---- full-node/db/sov-schema-db/src/lib.rs | 332 ++++++++++++++---- full-node/db/sov-schema-db/src/schema.rs | 4 +- .../src/state_machine/mocks/zk_vm.rs | 14 +- 10 files changed, 519 insertions(+), 435 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 70b376ed8..c2bb5b7c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8891,7 +8891,6 @@ dependencies = [ "proptest", "proptest-derive", "rocksdb", - "sov-rollup-interface", "tempfile", "thiserror", "tracing", diff --git a/full-node/db/sov-db/Cargo.toml b/full-node/db/sov-db/Cargo.toml index 06aa54283..a1d661422 100644 --- a/full-node/db/sov-db/Cargo.toml +++ b/full-node/db/sov-db/Cargo.toml @@ -20,17 +20,17 @@ sov-schema-db = { path = "../sov-schema-db", version = "0.3" } sov-rollup-interface = { path = "../../../rollup-interface", version = "0.3", features = ["native", "mocks", "tokio"] } # External -anyhow = { workspace = true } +anyhow = { workspace = true, default-features = true } arbitrary = { workspace = true, optional = true } -bincode = { workspace = true, optional = true } -byteorder = { workspace = true } -borsh = { workspace = true } -proptest = { workspace = true, optional = true } +byteorder = { workspace = true, default-features = true } +borsh = { workspace = true, default-features = true } 
+proptest = { workspace = true, optional = true, default-features = true } proptest-derive = { workspace = true, optional = true } -rocksdb = { workspace = true, optional = true } -serde = { workspace = true } +serde = { workspace = true, default-features = true } tempfile = { workspace = true, optional = true } -tokio = { workspace = true, optional = true } +rocksdb = { workspace = true } +bincode = { workspace = true } +tokio = { workspace = true } [dev-dependencies] @@ -38,23 +38,9 @@ tempfile = { workspace = true } [features] -default = ["std", "tokio"] arbitrary = [ "dep:arbitrary", "dep:proptest", "dep:proptest-derive", - "dep:tempfile", - "std" + "dep:tempfile" ] -std = [ - "anyhow/default", - "bincode", - "byteorder/default", - "borsh/default", - "proptest?/default", - "rocksdb", - "serde/default", - "sov-schema-db/default", - "sov-rollup-interface/default", -] -tokio = ["dep:tokio", "std"] diff --git a/full-node/db/sov-db/src/lib.rs b/full-node/db/sov-db/src/lib.rs index a88520f56..b1e99aec4 100644 --- a/full-node/db/sov-db/src/lib.rs +++ b/full-node/db/sov-db/src/lib.rs @@ -4,28 +4,22 @@ //! - DB "Table" definitions can be found in the [`schema`] module //! - Types and traits for storing state data can be found in the [`state_db`] module //! - The default db configuration is generated in the [`rocks_db_config`] module - #![forbid(unsafe_code)] #![deny(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] /// Implements a wrapper around RocksDB meant for storing rollup history ("the ledger"). /// This wrapper implements helper traits for writing blocks to the ledger, and for /// serving historical data via RPC -#[cfg(all(feature = "std", feature = "tokio"))] pub mod ledger_db; /// Implements helpers for configuring RocksDB. -#[cfg(feature = "std")] pub mod rocks_db_config; /// Defines the tables used by the Sovereign SDK. pub mod schema; /// Implements a wrapper around [RocksDB](https://rocksdb.org/) meant for storing rollup state. /// This is primarily used as the backing store for the [JMT(JellyfishMerkleTree)](https://docs.rs/jmt/latest/jmt/). -#[cfg(feature = "std")] pub mod state_db; /// Implements a wrapper around RocksDB meant for storing state only accessible /// outside of the zkVM execution environment, as this data is not included in /// the JMT and does not contribute to proofs of execution. -#[cfg(feature = "std")] pub mod native_db; diff --git a/full-node/db/sov-db/src/schema/tables.rs b/full-node/db/sov-db/src/schema/tables.rs index 17136a3c2..6f267d971 100644 --- a/full-node/db/sov-db/src/schema/tables.rs +++ b/full-node/db/sov-db/src/schema/tables.rs @@ -25,25 +25,23 @@ //! Module Accessory State Table: //! 
- `(ModuleAddress, Key) -> Value` -use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::{maybestd, BorshDeserialize, BorshSerialize}; +use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use jmt::storage::{Node, NodeKey}; -use sov_rollup_interface::maybestd::vec::Vec; -use sov_rollup_interface::stf::EventKey; +use jmt::Version; +use sov_rollup_interface::stf::{Event, EventKey}; use sov_schema_db::schema::{KeyDecoder, KeyEncoder, ValueCodec}; -use sov_schema_db::CodecError; -#[cfg(feature = "std")] -pub(crate) use use_std::*; +use sov_schema_db::{CodecError, SeekKeyEncoder}; use super::types::{ - AccessoryKey, AccessoryStateValue, BatchNumber, DbHash, EventNumber, SlotNumber, StateKey, - TxNumber, + AccessoryKey, AccessoryStateValue, BatchNumber, DbHash, EventNumber, JmtValue, SlotNumber, + StateKey, StoredBatch, StoredSlot, StoredTransaction, TxNumber, }; /// A list of all tables used by the StateDB. These tables store rollup state - meaning /// account balances, nonces, etc. pub const STATE_TABLES: &[&str] = &[ KeyHashToKey::table_name(), - #[cfg(feature = "std")] JmtValues::table_name(), JmtNodes::table_name(), ]; @@ -51,17 +49,13 @@ pub const STATE_TABLES: &[&str] = &[ /// A list of all tables used by the LedgerDB. These tables store rollup "history" - meaning /// transaction, events, receipts, etc. pub const LEDGER_TABLES: &[&str] = &[ - #[cfg(feature = "std")] SlotByNumber::table_name(), SlotByHash::table_name(), BatchByHash::table_name(), - #[cfg(feature = "std")] BatchByNumber::table_name(), TxByHash::table_name(), - #[cfg(feature = "std")] TxByNumber::table_name(), EventByKey::table_name(), - #[cfg(feature = "std")] EventByNumber::table_name(), ]; @@ -110,8 +104,8 @@ macro_rules! define_table_without_codec { } } - impl ::core::fmt::Display for $table_name { - fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + impl ::std::fmt::Display for $table_name { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { ::core::write!(f, "{}", stringify!($table_name)) } } @@ -123,7 +117,7 @@ macro_rules! impl_borsh_value_codec { impl ::sov_schema_db::schema::ValueCodec<$table_name> for $value { fn encode_value( &self, - ) -> ::core::result::Result< + ) -> ::std::result::Result< ::sov_rollup_interface::maybestd::vec::Vec, ::sov_schema_db::CodecError, > { @@ -132,7 +126,7 @@ macro_rules! impl_borsh_value_codec { fn decode_value( data: &[u8], - ) -> ::core::result::Result { + ) -> ::std::result::Result { ::borsh::BorshDeserialize::deserialize_reader(&mut &data[..]).map_err(Into::into) } } @@ -154,13 +148,13 @@ macro_rules! define_table_with_default_codec { define_table_without_codec!($(#[$docs])+ ( $table_name ) $key => $value); impl ::sov_schema_db::schema::KeyEncoder<$table_name> for $key { - fn encode_key(&self) -> ::core::result::Result<::sov_rollup_interface::maybestd::vec::Vec, ::sov_schema_db::CodecError> { + fn encode_key(&self) -> ::std::result::Result<::sov_rollup_interface::maybestd::vec::Vec, ::sov_schema_db::CodecError> { ::borsh::BorshSerialize::try_to_vec(self).map_err(Into::into) } } impl ::sov_schema_db::schema::KeyDecoder<$table_name> for $key { - fn decode_key(data: &[u8]) -> ::core::result::Result { + fn decode_key(data: &[u8]) -> ::std::result::Result { ::borsh::BorshDeserialize::deserialize_reader(&mut &data[..]).map_err(Into::into) } } @@ -169,6 +163,57 @@ macro_rules! 
define_table_with_default_codec { }; } +/// Macro similar to [`define_table_with_default_codec`], but to be used when +/// your key type should be [`SeekKeyEncoder`]. Borsh serializes integers as +/// little-endian, but RocksDB uses lexicographic ordering which is only +/// compatible with big-endian, so we use [`bincode`] with the big-endian option +/// here. +macro_rules! define_table_with_seek_key_codec { + ($(#[$docs:meta])+ ($table_name:ident) $key:ty => $value:ty) => { + define_table_without_codec!($(#[$docs])+ ( $table_name ) $key => $value); + + impl ::sov_schema_db::schema::KeyEncoder<$table_name> for $key { + fn encode_key(&self) -> ::std::result::Result<::sov_rollup_interface::maybestd::vec::Vec, ::sov_schema_db::CodecError> { + use ::anyhow::Context as _; + use ::bincode::Options as _; + + let bincode_options = ::bincode::options() + .with_fixint_encoding() + .with_big_endian(); + + bincode_options.serialize(self).context("Failed to serialize key").map_err(Into::into) + } + } + + impl ::sov_schema_db::schema::KeyDecoder<$table_name> for $key { + fn decode_key(data: &[u8]) -> ::std::result::Result { + use ::anyhow::Context as _; + use ::bincode::Options as _; + + let bincode_options = ::bincode::options() + .with_fixint_encoding() + .with_big_endian(); + + bincode_options.deserialize_from(&mut &data[..]).context("Failed to deserialize key").map_err(Into::into) + } + } + + impl ::sov_schema_db::SeekKeyEncoder<$table_name> for $key { + fn encode_seek_key(&self) -> ::std::result::Result<::sov_rollup_interface::maybestd::vec::Vec, ::sov_schema_db::CodecError> { + >::encode_key(self) + } + } + + impl_borsh_value_codec!($table_name, $value); + }; +} + +// fn deser(target: &mut &[u8]) -> Result; +define_table_with_seek_key_codec!( + /// The primary source for slot data + (SlotByNumber) SlotNumber => StoredSlot +); + define_table_with_default_codec!( /// A "secondary index" for slot data by hash (SlotByHash) DbHash => SlotNumber @@ -179,16 +224,31 @@ define_table_with_default_codec!( (ModuleAccessoryState) AccessoryKey => AccessoryStateValue ); +define_table_with_seek_key_codec!( + /// The primary source for batch data + (BatchByNumber) BatchNumber => StoredBatch +); + define_table_with_default_codec!( /// A "secondary index" for batch data by hash (BatchByHash) DbHash => BatchNumber ); +define_table_with_seek_key_codec!( + /// The primary source for transaction data + (TxByNumber) TxNumber => StoredTransaction +); + define_table_with_default_codec!( /// A "secondary index" for transaction data by hash (TxByHash) DbHash => TxNumber ); +define_table_with_seek_key_codec!( + /// The primary store for event data + (EventByNumber) EventNumber => Event +); + define_table_with_default_codec!( /// A "secondary index" for event data by key (EventByKey) (EventKey, TxNumber, EventNumber) => () @@ -220,138 +280,55 @@ impl ValueCodec for Node { } } -define_table_with_default_codec!( - /// A mapping from key-hashes to their preimages and latest version. Since we store raw - /// key-value pairs instead of keyHash->value pairs, - /// this table is required to implement the `jmt::TreeReader` trait, - /// which requires the ability to fetch values by hash. 
- (KeyHashToKey) [u8;32] => StateKey +define_table_without_codec!( + /// The source of truth for JMT values by version + (JmtValues) (StateKey, Version) => JmtValue ); -#[cfg(feature = "std")] -mod use_std { - use jmt::Version; - - use super::super::types::{JmtValue, StoredBatch, StoredSlot, StoredTransaction}; - use super::*; - - /// Macro similar to [`define_table_with_default_codec`], but to be used when - /// your key type should be [`SeekKeyEncoder`]. Borsh serializes integers as - /// little-endian, but RocksDB uses lexicographic ordering which is only - /// compatible with big-endian, so we use [`bincode`] with the big-endian option - /// here. - // TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` - macro_rules! define_table_with_seek_key_codec { - ($(#[$docs:meta])+ ($table_name:ident) $key:ty => $value:ty) => { - define_table_without_codec!($(#[$docs])+ ( $table_name ) $key => $value); - - impl ::sov_schema_db::schema::KeyEncoder<$table_name> for $key { - fn encode_key(&self) -> ::core::result::Result<::sov_rollup_interface::maybestd::vec::Vec, ::sov_schema_db::CodecError> { - use ::anyhow::Context as _; - use ::bincode::Options as _; - - let bincode_options = ::bincode::options() - .with_fixint_encoding() - .with_big_endian(); - - bincode_options.serialize(self).context("Failed to serialize key").map_err(Into::into) - } - } - - impl ::sov_schema_db::schema::KeyDecoder<$table_name> for $key { - fn decode_key(data: &[u8]) -> ::core::result::Result { - use ::anyhow::Context as _; - use ::bincode::Options as _; - - let bincode_options = ::bincode::options() - .with_fixint_encoding() - .with_big_endian(); - - bincode_options.deserialize_from(&mut &data[..]).context("Failed to deserialize key").map_err(Into::into) - } - } - - impl ::sov_schema_db::SeekKeyEncoder<$table_name> for $key { - fn encode_seek_key(&self) -> ::core::result::Result<::sov_rollup_interface::maybestd::vec::Vec, ::sov_schema_db::CodecError> { - >::encode_key(self) - } - } - - impl_borsh_value_codec!($table_name, $value); - }; +impl + PartialEq + core::fmt::Debug> KeyEncoder for (T, Version) { + fn encode_key(&self) -> sov_schema_db::schema::Result> { + let mut out = + Vec::with_capacity(self.0.as_ref().len() + std::mem::size_of::() + 8); + self.0 + .as_ref() + .serialize(&mut out) + .map_err(CodecError::from)?; + // Write the version in big-endian order so that sorting order is based on the most-significant bytes of the key + out.write_u64::(self.1) + .expect("serialization to vec is infallible"); + Ok(out) + } } - // fn deser(target: &mut &[u8]) -> Result; - // TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` - define_table_with_seek_key_codec!( - /// The primary source for slot data - (SlotByNumber) SlotNumber => StoredSlot - ); - - // TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` - define_table_with_seek_key_codec!( - /// The primary source for batch data - (BatchByNumber) BatchNumber => StoredBatch - ); - - // TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` - define_table_with_seek_key_codec!( - /// The primary source for transaction data - (TxByNumber) TxNumber => StoredTransaction - ); - - // TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` - define_table_with_seek_key_codec!( - /// The primary store for event data - (EventByNumber) EventNumber => sov_rollup_interface::stf::Event - ); - - define_table_without_codec!( - /// The source of truth 
for JMT values by version - (JmtValues) (StateKey, Version) => JmtValue - ); - - impl + PartialEq + core::fmt::Debug> KeyEncoder for (T, Version) { - fn encode_key(&self) -> sov_schema_db::schema::Result> { - use byteorder::WriteBytesExt; - let mut out = - Vec::with_capacity(self.0.as_ref().len() + core::mem::size_of::() + 8); - self.0 - .as_ref() - .serialize(&mut out) - .map_err(CodecError::from)?; - // Write the version in big-endian order so that sorting order is based on the most-significant bytes of the key - out.write_u64::(self.1) - .expect("serialization to vec is infallible"); - Ok(out) - } +impl + PartialEq + core::fmt::Debug> SeekKeyEncoder for (T, Version) { + fn encode_seek_key(&self) -> sov_schema_db::schema::Result> { + self.encode_key() } +} - impl + PartialEq + core::fmt::Debug> sov_schema_db::SeekKeyEncoder - for (T, Version) - { - fn encode_seek_key(&self) -> sov_schema_db::schema::Result> { - self.encode_key() - } +impl KeyDecoder for (StateKey, Version) { + fn decode_key(data: &[u8]) -> sov_schema_db::schema::Result { + let mut cursor = maybestd::io::Cursor::new(data); + let key = Vec::::deserialize_reader(&mut cursor)?; + let version = cursor.read_u64::()?; + Ok((key, version)) } +} - impl KeyDecoder for (StateKey, Version) { - fn decode_key(data: &[u8]) -> sov_schema_db::schema::Result { - use byteorder::ReadBytesExt; - let mut cursor = std::io::Cursor::new(data); - let key = Vec::::deserialize_reader(&mut cursor)?; - let version = cursor.read_u64::()?; - Ok((key, version)) - } +impl ValueCodec for JmtValue { + fn encode_value(&self) -> sov_schema_db::schema::Result> { + self.try_to_vec().map_err(CodecError::from) } - impl ValueCodec for JmtValue { - fn encode_value(&self) -> sov_schema_db::schema::Result> { - self.try_to_vec().map_err(CodecError::from) - } - - fn decode_value(data: &[u8]) -> sov_schema_db::schema::Result { - Ok(Self::deserialize_reader(&mut &data[..])?) - } + fn decode_value(data: &[u8]) -> sov_schema_db::schema::Result { + Ok(Self::deserialize_reader(&mut &data[..])?) } } + +define_table_with_default_codec!( + /// A mapping from key-hashes to their preimages and latest version. Since we store raw + /// key-value pairs instead of keyHash->value pairs, + /// this table is required to implement the `jmt::TreeReader` trait, + /// which requires the ability to fetch values by hash. + (KeyHashToKey) [u8;32] => StateKey +); diff --git a/full-node/db/sov-db/src/schema/types.rs b/full-node/db/sov-db/src/schema/types.rs index fdd7955f2..d46d5d3d0 100644 --- a/full-node/db/sov-db/src/schema/types.rs +++ b/full-node/db/sov-db/src/schema/types.rs @@ -1,11 +1,10 @@ +use std::sync::Arc; + use borsh::{BorshDeserialize, BorshSerialize}; +use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; -use sov_rollup_interface::maybestd::sync::Arc; -use sov_rollup_interface::maybestd::vec::Vec; -use sov_rollup_interface::rpc::TxIdentifier; -use sov_rollup_interface::stf::EventKey; -#[cfg(feature = "std")] -pub use use_std::split_tx_for_storage; +use sov_rollup_interface::rpc::{BatchResponse, TxIdentifier, TxResponse}; +use sov_rollup_interface::stf::{Event, EventKey, TransactionReceipt}; /// A cheaply cloneable bytes abstraction for use within the trust boundary of the node /// (i.e. when interfacing with the database). Serializes and deserializes more efficiently, @@ -65,7 +64,7 @@ pub struct StoredSlot { /// Any extra data which the rollup decides to store relating to this slot. pub extra_data: DbBytes, /// The range of batches which occurred in this slot. 
- pub batches: core::ops::Range, + pub batches: std::ops::Range, } /// The on-disk format for a batch. Stores the hash and identifies the range of transactions @@ -76,11 +75,23 @@ pub struct StoredBatch { /// The hash of the batch, as reported by the DA layer. pub hash: DbHash, /// The range of transactions which occurred in this batch. - pub txs: core::ops::Range, + pub txs: std::ops::Range, /// A customer "receipt" for this batch defined by the rollup. pub custom_receipt: DbBytes, } +impl TryFrom for BatchResponse { + type Error = anyhow::Error; + fn try_from(value: StoredBatch) -> Result { + Ok(Self { + hash: value.hash, + custom_receipt: bincode::deserialize(&value.custom_receipt.0)?, + tx_range: value.txs.start.into()..value.txs.end.into(), + txs: None, + }) + } +} + /// The on-disk format of a transaction. Includes the txhash, the serialized tx data, /// and identifies the events emitted by this transaction #[derive(Debug, PartialEq, BorshSerialize, BorshDeserialize, Clone)] @@ -88,13 +99,42 @@ pub struct StoredTransaction { /// The hash of the transaction. pub hash: DbHash, /// The range of event-numbers emitted by this transaction. - pub events: core::ops::Range, + pub events: std::ops::Range, /// The serialized transaction data, if the rollup decides to store it. pub body: Option>, /// A customer "receipt" for this transaction defined by the rollup. pub custom_receipt: DbBytes, } +impl TryFrom for TxResponse { + type Error = anyhow::Error; + fn try_from(value: StoredTransaction) -> Result { + Ok(Self { + hash: value.hash, + event_range: value.events.start.into()..value.events.end.into(), + body: value.body, + custom_receipt: bincode::deserialize(&value.custom_receipt.0)?, + }) + } +} + +/// Split a `TransactionReceipt` into a `StoredTransaction` and a list of `Event`s for storage in the database. 
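+///
+/// The stored transaction records the half-open event range
+/// `event_offset..event_offset + tx.events.len()`, so the caller is expected to
+/// persist the returned `Event`s under exactly those consecutive `EventNumber`s.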
+pub fn split_tx_for_storage( + tx: TransactionReceipt, + event_offset: u64, +) -> (StoredTransaction, Vec) { + let event_range = EventNumber(event_offset)..EventNumber(event_offset + tx.events.len() as u64); + let tx_for_storage = StoredTransaction { + hash: tx.tx_hash, + events: event_range, + body: tx.body_to_save, + custom_receipt: DbBytes::new( + bincode::serialize(&tx.receipt).expect("Serialization to vec is infallible"), + ), + }; + (tx_for_storage, tx.events) +} + /// An identifier that specifies a single event #[derive(Debug, PartialEq, Serialize, Deserialize)] pub enum EventIdentifier { @@ -158,60 +198,6 @@ u64_wrapper!(BatchNumber); u64_wrapper!(TxNumber); u64_wrapper!(EventNumber); -#[cfg(feature = "std")] -mod use_std { - use serde::de::DeserializeOwned; - use sov_rollup_interface::rpc::{BatchResponse, TxResponse}; - use sov_rollup_interface::stf::{Event, TransactionReceipt}; - - use super::*; - - // TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` - impl TryFrom for BatchResponse { - type Error = anyhow::Error; - fn try_from(value: StoredBatch) -> Result { - Ok(Self { - hash: value.hash, - custom_receipt: bincode::deserialize(&value.custom_receipt.0)?, - tx_range: value.txs.start.into()..value.txs.end.into(), - txs: None, - }) - } - } - - // TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` - impl TryFrom for TxResponse { - type Error = anyhow::Error; - fn try_from(value: StoredTransaction) -> Result { - Ok(Self { - hash: value.hash, - event_range: value.events.start.into()..value.events.end.into(), - body: value.body, - custom_receipt: bincode::deserialize(&value.custom_receipt.0)?, - }) - } - } - - /// Split a `TransactionReceipt` into a `StoredTransaction` and a list of `Event`s for storage in the database. - // TODO `bincode` is expected to have `no-std` soon; should be usable under `no-std` - pub fn split_tx_for_storage( - tx: TransactionReceipt, - event_offset: u64, - ) -> (StoredTransaction, Vec) { - let event_range = - EventNumber(event_offset)..EventNumber(event_offset + tx.events.len() as u64); - let tx_for_storage = StoredTransaction { - hash: tx.tx_hash, - events: event_range, - body: tx.body_to_save, - custom_receipt: DbBytes::new( - bincode::serialize(&tx.receipt).expect("Serialization to vec is infallible"), - ), - }; - (tx_for_storage, tx.events) - } -} - #[cfg(feature = "arbitrary")] pub mod arbitrary { //! Arbitrary definitions for the types. 
diff --git a/full-node/db/sov-schema-db/Cargo.toml b/full-node/db/sov-schema-db/Cargo.toml index 8454ff30c..a480672bc 100644 --- a/full-node/db/sov-schema-db/Cargo.toml +++ b/full-node/db/sov-schema-db/Cargo.toml @@ -15,41 +15,18 @@ readme = "README.md" [dependencies] # External dependencies anyhow = { workspace = true } -once_cell = { workspace = true, optional = true, default-features = true } -prometheus = { workspace = true, optional = true } +once_cell = { workspace = true } +prometheus = { workspace = true } proptest = { workspace = true, optional = true } proptest-derive = { workspace = true, optional = true } -rocksdb = { workspace = true, optional = true } +rocksdb = { workspace = true } tracing = { workspace = true } -thiserror = { workspace = true, optional = true } - -sov-rollup-interface = { path = "../../../rollup-interface", default-features = false } +thiserror = { workspace = true } [dev-dependencies] -byteorder = { workspace = true, default-features = true } +byteorder = { workspace = true } tempfile = { workspace = true } [features] -default = ["std"] -arbitrary = [ - "dep:proptest", - "dep:proptest-derive", - "std" -] -std = [ - "anyhow/default", - "once_cell", - "prometheus", - "rocksdb", - "sov-rollup-interface/default", - "thiserror", - "tracing/default", -] - -[[test]] -name = "db_test" -required-features = ["std"] - -[[test]] -name = "iterator_test" -required-features = ["std"] +default = [] +arbitrary = ["dep:proptest", "dep:proptest-derive"] diff --git a/full-node/db/sov-schema-db/src/iterator.rs b/full-node/db/sov-schema-db/src/iterator.rs index 7c8aae725..be288e639 100644 --- a/full-node/db/sov-schema-db/src/iterator.rs +++ b/full-node/db/sov-schema-db/src/iterator.rs @@ -1,8 +1,10 @@ -use sov_rollup_interface::maybestd::vec::Vec; -#[cfg(feature = "std")] -pub(crate) use use_std::ScanDirection; +use std::iter::FusedIterator; +use std::marker::PhantomData; -use crate::schema::Schema; +use anyhow::Result; + +use crate::metrics::{SCHEMADB_ITER_BYTES, SCHEMADB_ITER_LATENCY_SECONDS}; +use crate::schema::{KeyDecoder, Schema, ValueCodec}; /// This defines a type that can be used to seek a [`SchemaIterator`], via /// interfaces like [`SchemaIterator::seek`]. Mind you, not all @@ -26,105 +28,96 @@ pub trait SeekKeyEncoder: Sized { fn encode_seek_key(&self) -> crate::schema::Result>; } +pub(crate) enum ScanDirection { + Forward, + Backward, +} + /// DB Iterator parameterized on [`Schema`] that seeks with [`Schema::Key`] and yields /// [`Schema::Key`] and [`Schema::Value`] pairs. -#[cfg(feature = "std")] pub struct SchemaIterator<'a, S> { db_iter: rocksdb::DBRawIterator<'a>, direction: ScanDirection, - phantom: core::marker::PhantomData, + phantom: PhantomData, } -#[cfg(feature = "std")] -mod use_std { - use super::*; - use crate::metrics::{SCHEMADB_ITER_BYTES, SCHEMADB_ITER_LATENCY_SECONDS}; - use crate::schema::{KeyDecoder, ValueCodec}; +impl<'a, S> SchemaIterator<'a, S> +where + S: Schema, +{ + pub(crate) fn new(db_iter: rocksdb::DBRawIterator<'a>, direction: ScanDirection) -> Self { + SchemaIterator { + db_iter, + direction, + phantom: PhantomData, + } + } - pub(crate) enum ScanDirection { - Forward, - Backward, + /// Seeks to the first key. + pub fn seek_to_first(&mut self) { + self.db_iter.seek_to_first(); } - impl<'a, S> core::iter::FusedIterator for SchemaIterator<'a, S> where S: Schema {} + /// Seeks to the last key. 
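+    /// (i.e. it positions the iterator on the greatest key currently in the column family).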
+    pub fn seek_to_last(&mut self) {
+        self.db_iter.seek_to_last();
+    }
 
-    impl<'a, S> core::iter::FusedIterator for SchemaIterator<'a, S> where S: Schema {}
+    /// Seeks to the first key whose binary representation is equal to or greater than that of the
+    /// `seek_key`.
+    pub fn seek(&mut self, seek_key: &impl SeekKeyEncoder<S>) -> Result<()> {
+        let key = seek_key.encode_seek_key()?;
+        self.db_iter.seek(&key);
+        Ok(())
+    }
 
-    impl<'a, S> Iterator for SchemaIterator<'a, S>
-    where
-        S: Schema,
-    {
-        type Item = anyhow::Result<(S::Key, S::Value)>;
+    /// Seeks to the last key whose binary representation is less than or equal to that of the
+    /// `seek_key`.
+    ///
+    /// See example in [`RocksDB doc`](https://github.com/facebook/rocksdb/wiki/SeekForPrev).
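+    /// For instance, with keys `1` and `3` present, `seek_for_prev(&2)` positions
+    /// the iterator on `1`, whereas a plain `seek(&2)` would land on `3`.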
- pub fn seek_for_prev(&mut self, seek_key: &impl SeekKeyEncoder) -> anyhow::Result<()> { - let key = seek_key.encode_seek_key()?; - self.db_iter.seek_for_prev(&key); - Ok(()) + match self.direction { + ScanDirection::Forward => self.db_iter.next(), + ScanDirection::Backward => self.db_iter.prev(), } - fn next_impl(&mut self) -> anyhow::Result> { - let _timer = SCHEMADB_ITER_LATENCY_SECONDS - .with_label_values(&[S::COLUMN_FAMILY_NAME]) - .start_timer(); - - if !self.db_iter.valid() { - self.db_iter.status()?; - return Ok(None); - } - - let raw_key = self.db_iter.key().expect("db_iter.key() failed."); - let raw_value = self.db_iter.value().expect("db_iter.value() failed."); - - SCHEMADB_ITER_BYTES - .with_label_values(&[S::COLUMN_FAMILY_NAME]) - .observe((raw_key.len() + raw_value.len()) as f64); - - let key = >::decode_key(raw_key)?; - let value = >::decode_value(raw_value)?; + Ok(Some((key, value))) + } +} - match self.direction { - ScanDirection::Forward => self.db_iter.next(), - ScanDirection::Backward => self.db_iter.prev(), - } +impl<'a, S> Iterator for SchemaIterator<'a, S> +where + S: Schema, +{ + type Item = Result<(S::Key, S::Value)>; - Ok(Some((key, value))) - } + fn next(&mut self) -> Option { + self.next_impl().transpose() } } + +impl<'a, S> FusedIterator for SchemaIterator<'a, S> where S: Schema {} diff --git a/full-node/db/sov-schema-db/src/lib.rs b/full-node/db/sov-schema-db/src/lib.rs index 7983c1ab0..97d0ca3b8 100644 --- a/full-node/db/sov-schema-db/src/lib.rs +++ b/full-node/db/sov-schema-db/src/lib.rs @@ -3,7 +3,6 @@ #![forbid(unsafe_code)] #![deny(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] //! This library implements a schematized DB on top of [RocksDB](https://rocksdb.org/). It makes //! sure all data passed in and out are structured according to predefined schemas and prevents @@ -15,29 +14,260 @@ //! [`define_schema!`] macro to define the schema name, the types of key and value, and name of the //! column family. -#[cfg(feature = "std")] -mod db; -#[cfg(feature = "std")] -pub use db::DB; mod iterator; -#[cfg(feature = "std")] mod metrics; pub mod schema; -#[cfg(feature = "std")] -pub use iterator::SchemaIterator; -pub use iterator::SeekKeyEncoder; -use sov_rollup_interface::maybestd::collections::HashMap; -use sov_rollup_interface::maybestd::io; -use sov_rollup_interface::maybestd::sync::Mutex; -use sov_rollup_interface::maybestd::vec::Vec; +use std::collections::HashMap; +use std::path::Path; +use std::sync::Mutex; + +use anyhow::format_err; +use iterator::ScanDirection; +pub use iterator::{SchemaIterator, SeekKeyEncoder}; +use metrics::{ + SCHEMADB_BATCH_COMMIT_BYTES, SCHEMADB_BATCH_COMMIT_LATENCY_SECONDS, + SCHEMADB_BATCH_PUT_LATENCY_SECONDS, SCHEMADB_DELETES, SCHEMADB_GET_BYTES, + SCHEMADB_GET_LATENCY_SECONDS, SCHEMADB_PUT_BYTES, +}; +use rocksdb::ReadOptions; +pub use rocksdb::DEFAULT_COLUMN_FAMILY_NAME; +use thiserror::Error; +use tracing::info; pub use crate::schema::Schema; use crate::schema::{ColumnFamilyName, KeyCodec, ValueCodec}; -#[derive(Debug, PartialEq, Eq, Hash)] -#[cfg_attr(not(feature = "std"), allow(dead_code))] +/// This DB is a schematized RocksDB wrapper where all data passed in and out are typed according to +/// [`Schema`]s. +#[derive(Debug)] +pub struct DB { + name: &'static str, // for logging + inner: rocksdb::DB, +} + +impl DB { + /// Opens a database backed by RocksDB, using the provided column family names and default + /// column family options. 
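+    ///
+    /// A minimal usage sketch (illustrative only; the path, logging name, and
+    /// column family list below are placeholders, not part of this crate):
+    ///
+    /// ```ignore
+    /// let mut opts = rocksdb::Options::default();
+    /// opts.create_if_missing(true);
+    /// opts.create_missing_column_families(true);
+    /// let db = DB::open("/tmp/example-db", "example-db", ["default"], &opts)?;
+    /// ```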
+    pub fn open(
+        path: impl AsRef<Path>,
+        name: &'static str,
+        column_families: impl IntoIterator<Item = impl Into<String>>,
+        db_opts: &rocksdb::Options,
+    ) -> anyhow::Result<Self> {
+        let db = DB::open_with_cfds(
+            db_opts,
+            path,
+            name,
+            column_families.into_iter().map(|cf_name| {
+                let mut cf_opts = rocksdb::Options::default();
+                cf_opts.set_compression_type(rocksdb::DBCompressionType::Lz4);
+                rocksdb::ColumnFamilyDescriptor::new(cf_name, cf_opts)
+            }),
+        )?;
+        Ok(db)
+    }
+
+    /// Open RocksDB with the provided column family descriptors.
+    /// This allows configuring options for each column family.
+    pub fn open_with_cfds(
+        db_opts: &rocksdb::Options,
+        path: impl AsRef<Path>,
+        name: &'static str,
+        cfds: impl IntoIterator<Item = rocksdb::ColumnFamilyDescriptor>,
+    ) -> anyhow::Result<DB> {
+        let inner = rocksdb::DB::open_cf_descriptors(db_opts, path, cfds)?;
+        Ok(Self::log_construct(name, inner))
+    }
+
+    /// Open db in readonly mode. This db is completely static, so any writes that occur on the primary
+    /// after it has been opened will not be visible to the readonly instance.
+    pub fn open_cf_readonly(
+        opts: &rocksdb::Options,
+        path: impl AsRef<Path>,
+        name: &'static str,
+        cfs: Vec<ColumnFamilyName>,
+    ) -> anyhow::Result<DB> {
+        let error_if_log_file_exists = false;
+        let inner = rocksdb::DB::open_cf_for_read_only(opts, path, cfs, error_if_log_file_exists)?;
+
+        Ok(Self::log_construct(name, inner))
+    }
+
+    /// Open db in secondary mode. A secondary db does not support writes, but can be dynamically caught up
+    /// to the primary instance by a manual call. See
+    /// <https://github.com/facebook/rocksdb/wiki/Read-only-and-Secondary-instances>
+    /// for more details.
+    pub fn open_cf_as_secondary<P: AsRef<Path>>(
+        opts: &rocksdb::Options,
+        primary_path: P,
+        secondary_path: P,
+        name: &'static str,
+        cfs: Vec<ColumnFamilyName>,
+    ) -> anyhow::Result<Self> {
+        let inner = rocksdb::DB::open_cf_as_secondary(opts, primary_path, secondary_path, cfs)?;
+        Ok(Self::log_construct(name, inner))
+    }
+
+    fn log_construct(name: &'static str, inner: rocksdb::DB) -> DB {
+        info!(rocksdb_name = name, "Opened RocksDB.");
+        DB { name, inner }
+    }
+
+    /// Reads a single record by key.
+    pub fn get<S: Schema>(
+        &self,
+        schema_key: &impl KeyCodec<S>,
+    ) -> anyhow::Result<Option<S::Value>> {
+        let _timer = SCHEMADB_GET_LATENCY_SECONDS
+            .with_label_values(&[S::COLUMN_FAMILY_NAME])
+            .start_timer();
+
+        let k = schema_key.encode_key()?;
+        let cf_handle = self.get_cf_handle(S::COLUMN_FAMILY_NAME)?;
+
+        let result = self.inner.get_cf(cf_handle, k)?;
+        SCHEMADB_GET_BYTES
+            .with_label_values(&[S::COLUMN_FAMILY_NAME])
+            .observe(result.as_ref().map_or(0.0, |v| v.len() as f64));
+
+        result
+            .map(|raw_value| <S::Value as ValueCodec<S>>::decode_value(&raw_value))
+            .transpose()
+            .map_err(|err| err.into())
+    }
+
+    /// Writes a single record.
+    pub fn put<S: Schema>(
+        &self,
+        key: &impl KeyCodec<S>,
+        value: &impl ValueCodec<S>,
+    ) -> anyhow::Result<()> {
+        // Not necessary to use a batch, but we'd like a central place to bump counters.
+        // Used in tests only anyway.
+        let batch = SchemaBatch::new();
+        batch.put::<S>(key, value)?;
+        self.write_schemas(batch)
+    }
+
+    fn iter_with_direction<S: Schema>(
+        &self,
+        opts: ReadOptions,
+        direction: ScanDirection,
+    ) -> anyhow::Result<SchemaIterator<S>> {
+        let cf_handle = self.get_cf_handle(S::COLUMN_FAMILY_NAME)?;
+        Ok(SchemaIterator::new(
+            self.inner.raw_iterator_cf_opt(cf_handle, opts),
+            direction,
+        ))
+    }
+
+    /// Returns a forward [`SchemaIterator`] on a certain schema with the default read options.
+    pub fn iter<S: Schema>(&self) -> anyhow::Result<SchemaIterator<S>> {
+        self.iter_with_direction::<S>(Default::default(), ScanDirection::Forward)
+    }
+
+    /// Returns a forward [`SchemaIterator`] on a certain schema with the provided read options.
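+    ///
+    /// For example, to scan only keys below some encoded upper bound (sketch;
+    /// `MySchema` and `upper_bound` are placeholders, not part of this crate):
+    ///
+    /// ```ignore
+    /// let mut opts = rocksdb::ReadOptions::default();
+    /// opts.set_iterate_upper_bound(upper_bound);
+    /// let mut iter = db.iter_with_opts::<MySchema>(opts)?;
+    /// iter.seek_to_first();
+    /// ```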
+    pub fn iter_with_opts<S: Schema>(
+        &self,
+        opts: ReadOptions,
+    ) -> anyhow::Result<SchemaIterator<S>> {
+        self.iter_with_direction::<S>(opts, ScanDirection::Forward)
+    }
+
+    /// Returns a backward [`SchemaIterator`] on a certain schema with the default read options.
+    pub fn rev_iter<S: Schema>(&self) -> anyhow::Result<SchemaIterator<S>> {
+        self.iter_with_direction::<S>(Default::default(), ScanDirection::Backward)
+    }
+
+    /// Returns a backward [`SchemaIterator`] on a certain schema with the provided read options.
+    pub fn rev_iter_with_opts<S: Schema>(
+        &self,
+        opts: ReadOptions,
+    ) -> anyhow::Result<SchemaIterator<S>> {
+        self.iter_with_direction::<S>(opts, ScanDirection::Backward)
+    }
+
+    /// Writes a group of records wrapped in a [`SchemaBatch`].
+    pub fn write_schemas(&self, batch: SchemaBatch) -> anyhow::Result<()> {
+        let _timer = SCHEMADB_BATCH_COMMIT_LATENCY_SECONDS
+            .with_label_values(&[self.name])
+            .start_timer();
+        let rows_locked = batch.rows.lock().expect("Lock must not be poisoned");
+
+        let mut db_batch = rocksdb::WriteBatch::default();
+        for (cf_name, rows) in rows_locked.iter() {
+            let cf_handle = self.get_cf_handle(cf_name)?;
+            for write_op in rows {
+                match write_op {
+                    WriteOp::Value { key, value } => db_batch.put_cf(cf_handle, key, value),
+                    WriteOp::Deletion { key } => db_batch.delete_cf(cf_handle, key),
+                }
+            }
+        }
+        let serialized_size = db_batch.size_in_bytes();
+
+        self.inner.write_opt(db_batch, &default_write_options())?;
+
+        // Bump counters only after DB write succeeds.
+        for (cf_name, rows) in rows_locked.iter() {
+            for write_op in rows {
+                match write_op {
+                    WriteOp::Value { key, value } => {
+                        SCHEMADB_PUT_BYTES
+                            .with_label_values(&[cf_name])
+                            .observe((key.len() + value.len()) as f64);
+                    }
+                    WriteOp::Deletion { key: _ } => {
+                        SCHEMADB_DELETES.with_label_values(&[cf_name]).inc();
+                    }
+                }
+            }
+        }
+        SCHEMADB_BATCH_COMMIT_BYTES
+            .with_label_values(&[self.name])
+            .observe(serialized_size as f64);
+
+        Ok(())
+    }
+
+    fn get_cf_handle(&self, cf_name: &str) -> anyhow::Result<&rocksdb::ColumnFamily> {
+        self.inner.cf_handle(cf_name).ok_or_else(|| {
+            format_err!(
+                "DB::cf_handle not found for column family name: {}",
+                cf_name
+            )
+        })
+    }
+
+    /// Flushes [MemTable](https://github.com/facebook/rocksdb/wiki/MemTable) data.
+    /// This is only used for testing `get_approximate_sizes_cf` in unit tests.
+    pub fn flush_cf(&self, cf_name: &str) -> anyhow::Result<()> {
+        Ok(self.inner.flush_cf(self.get_cf_handle(cf_name)?)?)
+    }
+
+    /// Returns the current RocksDB property value for the provided column family name
+    /// and property name.
+    pub fn get_property(&self, cf_name: &str, property_name: &str) -> anyhow::Result<u64> {
+        self.inner
+            .property_int_value_cf(self.get_cf_handle(cf_name)?, property_name)?
+            .ok_or_else(|| {
+                format_err!(
+                    "Unable to get property \"{}\" of column family \"{}\".",
+                    property_name,
+                    cf_name,
+                )
+            })
+    }
+
+    /// Creates a new physical DB checkpoint in the directory specified by `path`.
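+    ///
+    /// ```ignore
+    /// // Illustrative only: the destination directory is a placeholder.
+    /// db.create_checkpoint("/tmp/my-checkpoint")?;
+    /// ```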
+    pub fn create_checkpoint<P: AsRef<Path>>(&self, path: P) -> anyhow::Result<()> {
+        rocksdb::checkpoint::Checkpoint::new(&self.inner)?.create_checkpoint(path)?;
+        Ok(())
+    }
+}
+
 #[cfg_attr(feature = "arbitrary", derive(proptest_derive::Arbitrary))]
+#[derive(Debug, PartialEq, Eq, Hash)]
 enum WriteOp {
     Value { key: Vec<u8>, value: Vec<u8> },
     Deletion { key: Vec<u8> },
@@ -63,15 +293,11 @@ impl SchemaBatch {
         key: &impl KeyCodec<S>,
         value: &impl ValueCodec<S>,
     ) -> anyhow::Result<()> {
-        #[cfg(feature = "std")]
-        let _timer = metrics::SCHEMADB_BATCH_PUT_LATENCY_SECONDS
+        let _timer = SCHEMADB_BATCH_PUT_LATENCY_SECONDS
             .with_label_values(&["unknown"])
             .start_timer();
-
         let key = key.encode_key()?;
         let value = value.encode_value()?;
-
-        #[cfg(feature = "std")]
         self.rows
             .lock()
             .expect("Lock must not be poisoned")
@@ -79,21 +305,12 @@ impl SchemaBatch {
             .or_default()
             .push(WriteOp::Value { key, value });

-        #[cfg(not(feature = "std"))]
-        self.rows
-            .lock()
-            .entry(S::COLUMN_FAMILY_NAME)
-            .or_default()
-            .push(WriteOp::Value { key, value });
-
         Ok(())
     }

     /// Adds a delete operation to the batch.
     pub fn delete<S: Schema>(&self, key: &impl KeyCodec<S>) -> anyhow::Result<()> {
         let key = key.encode_key()?;
-
-        #[cfg(feature = "std")]
         self.rows
             .lock()
             .expect("Lock must not be poisoned")
@@ -101,13 +318,6 @@ impl SchemaBatch {
             .or_default()
             .push(WriteOp::Deletion { key });

-        #[cfg(not(feature = "std"))]
-        self.rows
-            .lock()
-            .entry(S::COLUMN_FAMILY_NAME)
-            .or_default()
-            .push(WriteOp::Deletion { key });
-
         Ok(())
     }
 }
@@ -137,51 +347,27 @@ impl proptest::arbitrary::Arbitrary for SchemaBatch {

 /// An error that occurred during (de)serialization of a [`Schema`]'s keys or
 /// values.
-#[derive(Debug)]
-#[cfg_attr(feature = "std", derive(thiserror::Error))]
+#[derive(Error, Debug)]
 pub enum CodecError {
     /// Unable to deserialize a key because it has a different length than
     /// expected.
-    #[cfg_attr(
-        feature = "std",
-        error("Invalid key length. Expected {expected:}, got {got:}")
-    )]
+    #[error("Invalid key length. Expected {expected:}, got {got:}")]
     #[allow(missing_docs)] // The fields' names are self-explanatory.
     InvalidKeyLength { expected: usize, got: usize },
     /// Some other error occurred when (de)serializing a key or value. Inspect
     /// the inner [`anyhow::Error`] for more details.
-    #[cfg_attr(feature = "std", error(transparent))]
-    Wrapped(#[cfg_attr(feature = "std", from)] anyhow::Error),
+    #[error(transparent)]
+    Wrapped(#[from] anyhow::Error),
     /// I/O error.
-    #[cfg_attr(feature = "std", error(transparent))]
-    Io(#[cfg_attr(feature = "std", from)] io::Error),
+    #[error(transparent)]
+    Io(#[from] std::io::Error),
 }

-#[cfg(not(feature = "std"))]
-mod no_std {
-    use super::*;
-
-    impl core::fmt::Display for CodecError {
-        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-            write!(f, "{:?}", self)
-        }
-    }
-
-    impl From<CodecError> for anyhow::Error {
-        fn from(e: CodecError) -> Self {
-            anyhow::Error::msg(e)
-        }
-    }
-
-    impl From<anyhow::Error> for CodecError {
-        fn from(e: anyhow::Error) -> Self {
-            CodecError::Wrapped(e)
-        }
-    }
-
-    impl From<io::Error> for CodecError {
-        fn from(e: io::Error) -> Self {
-            CodecError::Io(e)
-        }
-    }
+/// For now we always use synchronous writes. This makes sure that once the operation returns
+/// `Ok(())` the data is persisted even if the machine crashes. In the future we might consider
+/// selectively turning this off for some non-critical writes to improve performance.
+fn default_write_options() -> rocksdb::WriteOptions {
+    let mut opts = rocksdb::WriteOptions::default();
+    opts.set_sync(true);
+    opts
 }
diff --git a/full-node/db/sov-schema-db/src/schema.rs b/full-node/db/sov-schema-db/src/schema.rs
index fd45c2f29..37ae39e76 100644
--- a/full-node/db/sov-schema-db/src/schema.rs
+++ b/full-node/db/sov-schema-db/src/schema.rs
@@ -5,9 +5,7 @@

 //! A type-safe interface over [`DB`](crate::DB) column families.

-use core::fmt::Debug;
-
-use sov_rollup_interface::maybestd::vec::Vec;
+use std::fmt::Debug;

 use crate::CodecError;

diff --git a/rollup-interface/src/state_machine/mocks/zk_vm.rs b/rollup-interface/src/state_machine/mocks/zk_vm.rs
index 92dabcacb..35c96c4ca 100644
--- a/rollup-interface/src/state_machine/mocks/zk_vm.rs
+++ b/rollup-interface/src/state_machine/mocks/zk_vm.rs
@@ -78,19 +78,6 @@ impl crate::zk::Zkvm for MockZkvm {
         Ok(proof.log)
     }

-    #[cfg(feature = "std")]
-    fn verify_and_extract_output<
-        Add: crate::RollupAddress,
-        Da: crate::da::DaSpec,
-        Root: Serialize + serde::de::DeserializeOwned,
-    >(
-        serialized_proof: &[u8],
-        code_commitment: &Self::CodeCommitment,
-    ) -> Result<crate::zk::StateTransition<Da, Add, Root>, Self::Error> {
-        let output = Self::verify(serialized_proof, code_commitment)?;
-        Ok(bincode::deserialize(output)?)
-    }
-
     fn verify_and_extract_output<
         Add: crate::RollupAddress,
         Da: crate::da::DaSpec,
         Root: Serialize + serde::de::DeserializeOwned,
     >(
         serialized_proof: &[u8],
         code_commitment: &Self::CodeCommitment,
     ) -> Result<crate::zk::StateTransition<Da, Add, Root>, Self::Error> {
         if cfg!(feature = "std") {
             let output = Self::verify(serialized_proof, code_commitment)?;
             Ok(bincode::deserialize(output)?)
         } else {
             todo!("the current version of bincode doesn't support no-std; however, the next version is scheduled to")
         }
     }
+}

 #[test]
 fn test_mock_proof_roundtrip() {

From 0fe421d31cbb2a9d08a165a96684be5dd9e687ba Mon Sep 17 00:00:00 2001
From: Victor Lopez
Date: Tue, 24 Oct 2023 16:44:23 +0200
Subject: [PATCH 19/28] restore split impl for std bincode

---
 .../src/state_machine/mocks/zk_vm.rs | 21 +++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/rollup-interface/src/state_machine/mocks/zk_vm.rs b/rollup-interface/src/state_machine/mocks/zk_vm.rs
index 35c96c4ca..8b33e0eee 100644
--- a/rollup-interface/src/state_machine/mocks/zk_vm.rs
+++ b/rollup-interface/src/state_machine/mocks/zk_vm.rs
@@ -78,6 +78,7 @@ impl crate::zk::Zkvm for MockZkvm {
         Ok(proof.log)
     }

+    #[cfg(feature = "std")]
     fn verify_and_extract_output<
         Add: crate::RollupAddress,
         Da: crate::da::DaSpec,
@@ -86,12 +87,20 @@ impl crate::zk::Zkvm for MockZkvm {
         serialized_proof: &[u8],
         code_commitment: &Self::CodeCommitment,
     ) -> Result<crate::zk::StateTransition<Da, Add, Root>, Self::Error> {
-        if cfg!(feature = "std") {
-            let output = Self::verify(serialized_proof, code_commitment)?;
-            Ok(bincode::deserialize(output)?)
-        } else {
-            todo!("the current version of bincode doesn't support no-std; however, the next version is scheduled to")
-        }
+        let output = Self::verify(serialized_proof, code_commitment)?;
+        Ok(bincode::deserialize(output)?)
+    }
+
+    #[cfg(not(feature = "std"))]
+    fn verify_and_extract_output<
+        Add: crate::RollupAddress,
+        Da: crate::da::DaSpec,
+        Root: Serialize + serde::de::DeserializeOwned,
+    >(
+        _serialized_proof: &[u8],
+        _code_commitment: &Self::CodeCommitment,
+    ) -> Result<crate::zk::StateTransition<Da, Add, Root>, Self::Error> {
+        todo!("the current version of bincode doesn't support no-std; however, the next version is scheduled to")
     }
 }

From 57eee14c6d5afb9302b30b237a51948647f1d51a Mon Sep 17 00:00:00 2001
From: Victor Lopez
Date: Tue, 24 Oct 2023 17:03:12 +0200
Subject: [PATCH 20/28] reorg sov-schema-db manifest w/ default-features

---
 full-node/db/sov-schema-db/Cargo.toml | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/full-node/db/sov-schema-db/Cargo.toml b/full-node/db/sov-schema-db/Cargo.toml
index a480672bc..9a8d0b936 100644
--- a/full-node/db/sov-schema-db/Cargo.toml
+++ b/full-node/db/sov-schema-db/Cargo.toml
@@ -14,17 +14,17 @@ readme = "README.md"

 [dependencies]
 # External dependencies
-anyhow = { workspace = true }
-once_cell = { workspace = true }
+anyhow = { workspace = true, default-features = true }
+once_cell = { workspace = true, default-features = true }
 prometheus = { workspace = true }
-proptest = { workspace = true, optional = true }
+proptest = { workspace = true, optional = true, default-features = true }
 proptest-derive = { workspace = true, optional = true }
 rocksdb = { workspace = true }
-tracing = { workspace = true }
+tracing = { workspace = true, default-features = true }
 thiserror = { workspace = true }

 [dev-dependencies]
-byteorder = { workspace = true }
+byteorder = { workspace = true, default-features = true }
 tempfile = { workspace = true }

 [features]

From cf50df4eb31636630a92442db42a96f35c71ab22 Mon Sep 17 00:00:00 2001
From: Victor Lopez
Date: Tue, 24 Oct 2023 20:29:25 +0200
Subject: [PATCH 21/28] move da-mock-service to its own module

---
 .../src/state_machine/mocks/da.rs | 82 ------------------
 .../src/state_machine/mocks/mod.rs | 6 +-
 .../src/state_machine/mocks/service.rs | 76 +++++++++++++++++
 3 files changed, 80 insertions(+), 84 deletions(-)
 create mode 100644 rollup-interface/src/state_machine/mocks/service.rs

diff --git a/rollup-interface/src/state_machine/mocks/da.rs b/rollup-interface/src/state_machine/mocks/da.rs
index 597baf57b..c88eb0c36 100644
--- a/rollup-interface/src/state_machine/mocks/da.rs
+++ b/rollup-interface/src/state_machine/mocks/da.rs
@@ -1,8 +1,6 @@
 use core::fmt::Display;
 use core::str::FromStr;

-#[cfg(all(feature = "native", feature = "tokio"))]
-use async_trait::async_trait;
 use borsh::{BorshDeserialize, BorshSerialize};
 use bytes::Bytes;
 use serde::{Deserialize, Serialize};
@@ -11,12 +9,8 @@ use crate::da::{
     BlobReaderTrait, BlockHashTrait, BlockHeaderTrait, CountedBufReader, DaSpec, DaVerifier, Time,
 };
 use crate::maybestd::string::String;
-#[cfg(all(feature = "native", feature = "tokio"))]
-use crate::maybestd::sync::Arc;
 use crate::maybestd::vec::Vec;
 use crate::mocks::MockValidityCond;
-#[cfg(all(feature = "native", feature = "tokio"))]
-use crate::services::da::DaService;
 use crate::services::da::SlotData;
 use crate::{BasicAddress, RollupAddress};

@@ -304,82 +298,6 @@ impl DaSpec for MockDaSpec {
     type ChainParams = ();
 }

-#[cfg(all(feature = "native", feature = "tokio"))]
-use tokio::sync::mpsc::{self, Receiver, Sender};
-#[cfg(all(feature = "native", feature = "tokio"))]
-use tokio::sync::Mutex;
-
-#[cfg(all(feature = "native", feature = "tokio"))]
-#[derive(Clone)]
-/// DaService used in tests.
-pub struct MockDaService {
-    sender: Sender<Vec<u8>>,
-    receiver: Arc<Mutex<Receiver<Vec<u8>>>>,
-    sequencer_da_address: MockAddress,
-}
-
-#[cfg(all(feature = "native", feature = "tokio"))]
-impl MockDaService {
-    /// Creates a new MockDaService.
-    pub fn new(sequencer_da_address: MockAddress) -> Self {
-        let (sender, receiver) = mpsc::channel(100);
-        Self {
-            sender,
-            receiver: Arc::new(Mutex::new(receiver)),
-            sequencer_da_address,
-        }
-    }
-}
-
-#[cfg(all(feature = "native", feature = "tokio"))]
-#[async_trait]
-impl DaService for MockDaService {
-    type Spec = MockDaSpec;
-    type Verifier = MockDaVerifier;
-    type FilteredBlock = MockBlock;
-    type Error = anyhow::Error;
-
-    async fn get_finalized_at(&self, _height: u64) -> Result<Self::FilteredBlock, Self::Error> {
-        let data = self.receiver.lock().await.recv().await;
-        let data = data.unwrap();
-        let hash = [0; 32];
-
-        let blob = MockBlob::new(data, self.sequencer_da_address, hash);
-
-        Ok(MockBlock {
-            blobs: vec![blob],
-            ..Default::default()
-        })
-    }
-
-    async fn get_block_at(&self, height: u64) -> Result<Self::FilteredBlock, Self::Error> {
-        self.get_finalized_at(height).await
-    }
-
-    fn extract_relevant_blobs(
-        &self,
-        block: &Self::FilteredBlock,
-    ) -> Vec<<Self::Spec as DaSpec>::BlobTransaction> {
-        block.blobs.clone()
-    }
-
-    async fn get_extraction_proof(
-        &self,
-        _block: &Self::FilteredBlock,
-        _blobs: &[<Self::Spec as DaSpec>::BlobTransaction],
-    ) -> (
-        <Self::Spec as DaSpec>::InclusionMultiProof,
-        <Self::Spec as DaSpec>::CompletenessProof,
-    ) {
-        ([0u8; 32], ())
-    }
-
-    async fn send_transaction(&self, blob: &[u8]) -> Result<(), Self::Error> {
-        self.sender.send(blob.to_vec()).await.unwrap();
-        Ok(())
-    }
-}
-
 /// The configuration for mock da
 #[derive(Debug, Clone, PartialEq, serde::Deserialize, serde::Serialize)]
 pub struct MockDaConfig {
diff --git a/rollup-interface/src/state_machine/mocks/mod.rs b/rollup-interface/src/state_machine/mocks/mod.rs
index 613e5f750..da23f093a 100644
--- a/rollup-interface/src/state_machine/mocks/mod.rs
+++ b/rollup-interface/src/state_machine/mocks/mod.rs
@@ -2,13 +2,15 @@
 //! for testing, fuzzing, and benchmarking.

 mod da;
+#[cfg(all(feature = "native", feature = "tokio"))]
+mod service;
 mod validity_condition;
 mod zk_vm;

-#[cfg(all(feature = "native", feature = "tokio"))]
-pub use da::MockDaService;
 pub use da::{
     MockAddress, MockBlob, MockBlock, MockBlockHeader, MockDaConfig, MockDaSpec, MockDaVerifier,
     MockHash, MOCK_SEQUENCER_DA_ADDRESS,
 };
+#[cfg(all(feature = "native", feature = "tokio"))]
+pub use service::MockDaService;
 pub use validity_condition::{MockValidityCond, MockValidityCondChecker};
 pub use zk_vm::{MockCodeCommitment, MockProof, MockZkvm};
diff --git a/rollup-interface/src/state_machine/mocks/service.rs b/rollup-interface/src/state_machine/mocks/service.rs
new file mode 100644
index 000000000..31292cdd1
--- /dev/null
+++ b/rollup-interface/src/state_machine/mocks/service.rs
@@ -0,0 +1,76 @@
+use async_trait::async_trait;
+use tokio::sync::mpsc::{self, Receiver, Sender};
+use tokio::sync::Mutex;
+
+use crate::da::DaSpec;
+use crate::maybestd::sync::Arc;
+use crate::mocks::{MockAddress, MockBlob, MockBlock, MockDaSpec, MockDaVerifier};
+use crate::services::da::DaService;
+
+#[derive(Clone)]
+/// DaService used in tests.
+pub struct MockDaService {
+    sender: Sender<Vec<u8>>,
+    receiver: Arc<Mutex<Receiver<Vec<u8>>>>,
+    sequencer_da_address: MockAddress,
+}
+
+impl MockDaService {
+    /// Creates a new MockDaService.
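+    ///
+    /// Illustrative usage in an async test (the address value and conversion
+    /// are hypothetical, not taken from this patch):
+    ///
+    /// ```ignore
+    /// let da = MockDaService::new(MockAddress::from([42; 32]));
+    /// da.send_transaction(b"some blob").await?;
+    /// // The blob comes back as the body of the next fetched block.
+    /// let block = da.get_block_at(0).await?;
+    /// ```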
+    pub fn new(sequencer_da_address: MockAddress) -> Self {
+        let (sender, receiver) = mpsc::channel(100);
+        Self {
+            sender,
+            receiver: Arc::new(Mutex::new(receiver)),
+            sequencer_da_address,
+        }
+    }
+}
+
+#[async_trait]
+impl DaService for MockDaService {
+    type Spec = MockDaSpec;
+    type Verifier = MockDaVerifier;
+    type FilteredBlock = MockBlock;
+    type Error = anyhow::Error;
+
+    async fn get_finalized_at(&self, _height: u64) -> Result<Self::FilteredBlock, Self::Error> {
+        let data = self.receiver.lock().await.recv().await;
+        let data = data.unwrap();
+        let hash = [0; 32];
+
+        let blob = MockBlob::new(data, self.sequencer_da_address, hash);
+
+        Ok(MockBlock {
+            blobs: vec![blob],
+            ..Default::default()
+        })
+    }
+
+    async fn get_block_at(&self, height: u64) -> Result<Self::FilteredBlock, Self::Error> {
+        self.get_finalized_at(height).await
+    }
+
+    fn extract_relevant_blobs(
+        &self,
+        block: &Self::FilteredBlock,
+    ) -> Vec<<Self::Spec as DaSpec>::BlobTransaction> {
+        block.blobs.clone()
+    }
+
+    async fn get_extraction_proof(
+        &self,
+        _block: &Self::FilteredBlock,
+        _blobs: &[<Self::Spec as DaSpec>::BlobTransaction],
+    ) -> (
+        <Self::Spec as DaSpec>::InclusionMultiProof,
+        <Self::Spec as DaSpec>::CompletenessProof,
+    ) {
+        ([0u8; 32], ())
+    }
+
+    async fn send_transaction(&self, blob: &[u8]) -> Result<(), Self::Error> {
+        self.sender.send(blob.to_vec()).await.unwrap();
+        Ok(())
+    }
+}

From ed95780d887ed144e2618b2e932da13932c92428 Mon Sep 17 00:00:00 2001
From: Victor Lopez
Date: Tue, 24 Oct 2023 21:17:29 +0200
Subject: [PATCH 22/28] upd cargo lock

---
 Cargo.lock | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Cargo.lock b/Cargo.lock
index 711305036..9bbdcc687 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8835,6 +8835,7 @@ dependencies = [
 "serde",
 "serde_json",
 "sha2 0.10.8",
+ "spin 0.9.8",
 "thiserror",
 "tokio",
 ]

From 31e4245fc06886bac4bb217ce06bd5483eb2f9cc Mon Sep 17 00:00:00 2001
From: Victor Lopez
Date: Wed, 25 Oct 2023 12:28:34 +0200
Subject: [PATCH 23/28] remove stale file

---
 full-node/db/sov-schema-db/src/db.rs | 250 ---------------------------
 1 file changed, 250 deletions(-)
 delete mode 100644 full-node/db/sov-schema-db/src/db.rs

diff --git a/full-node/db/sov-schema-db/src/db.rs b/full-node/db/sov-schema-db/src/db.rs
deleted file mode 100644
index c245337c1..000000000
--- a/full-node/db/sov-schema-db/src/db.rs
+++ /dev/null
@@ -1,250 +0,0 @@
-use std::path::Path;
-
-use anyhow::format_err;
-use rocksdb::ReadOptions;
-use tracing::info;
-
-use crate::iterator::{ScanDirection, SchemaIterator};
-use crate::metrics::{
-    SCHEMADB_BATCH_COMMIT_BYTES, SCHEMADB_BATCH_COMMIT_LATENCY_SECONDS, SCHEMADB_DELETES,
-    SCHEMADB_GET_BYTES, SCHEMADB_GET_LATENCY_SECONDS, SCHEMADB_PUT_BYTES,
-};
-use crate::schema::{ColumnFamilyName, KeyCodec, Schema, ValueCodec};
-use crate::{SchemaBatch, WriteOp};
-
-/// This DB is a schematized RocksDB wrapper where all data passed in and out are typed according to
-/// [`Schema`]s.
-#[derive(Debug)]
-pub struct DB {
-    name: &'static str, // for logging
-    inner: rocksdb::DB,
-}
-
-impl DB {
-    /// Opens a database backed by RocksDB, using the provided column family names and default
-    /// column family options.
-    pub fn open(
-        path: impl AsRef<Path>,
-        name: &'static str,
-        column_families: impl IntoIterator<Item = impl Into<String>>,
-        db_opts: &rocksdb::Options,
-    ) -> anyhow::Result<DB> {
-        let db = DB::open_with_cfds(
-            db_opts,
-            path,
-            name,
-            column_families.into_iter().map(|cf_name| {
-                let mut cf_opts = rocksdb::Options::default();
-                cf_opts.set_compression_type(rocksdb::DBCompressionType::Lz4);
-                rocksdb::ColumnFamilyDescriptor::new(cf_name, cf_opts)
-            }),
-        )?;
-        Ok(db)
-    }
-
-    /// Open RocksDB with the provided column family descriptors.
-    /// This allows configuring options for each column family.
-    pub fn open_with_cfds(
-        db_opts: &rocksdb::Options,
-        path: impl AsRef<Path>,
-        name: &'static str,
-        cfds: impl IntoIterator<Item = rocksdb::ColumnFamilyDescriptor>,
-    ) -> anyhow::Result<DB> {
-        let inner = rocksdb::DB::open_cf_descriptors(db_opts, path, cfds)?;
-        Ok(Self::log_construct(name, inner))
-    }
-
-    /// Open db in readonly mode. This db is completely static, so any writes that occur on the primary
-    /// after it has been opened will not be visible to the readonly instance.
-    pub fn open_cf_readonly(
-        opts: &rocksdb::Options,
-        path: impl AsRef<Path>,
-        name: &'static str,
-        cfs: Vec<ColumnFamilyName>,
-    ) -> anyhow::Result<DB> {
-        let error_if_log_file_exists = false;
-        let inner = rocksdb::DB::open_cf_for_read_only(opts, path, cfs, error_if_log_file_exists)?;
-
-        Ok(Self::log_construct(name, inner))
-    }
-
-    /// Open db in secondary mode. A secondary db does not support writes, but can be dynamically caught up
-    /// to the primary instance by a manual call. See <https://github.com/facebook/rocksdb/wiki/Read-only-and-Secondary-instances>
-    /// for more details.
-    pub fn open_cf_as_secondary<P: AsRef<Path>>(
-        opts: &rocksdb::Options,
-        primary_path: P,
-        secondary_path: P,
-        name: &'static str,
-        cfs: Vec<ColumnFamilyName>,
-    ) -> anyhow::Result<DB> {
-        let inner = rocksdb::DB::open_cf_as_secondary(opts, primary_path, secondary_path, cfs)?;
-        Ok(Self::log_construct(name, inner))
-    }
-
-    fn log_construct(name: &'static str, inner: rocksdb::DB) -> DB {
-        info!(rocksdb_name = name, "Opened RocksDB.");
-        DB { name, inner }
-    }
-
-    /// Reads single record by key.
-    pub fn get<S: Schema>(
-        &self,
-        schema_key: &impl KeyCodec<S>,
-    ) -> anyhow::Result<Option<S::Value>> {
-        let _timer = SCHEMADB_GET_LATENCY_SECONDS
-            .with_label_values(&[S::COLUMN_FAMILY_NAME])
-            .start_timer();
-
-        let k = schema_key.encode_key()?;
-        let cf_handle = self.get_cf_handle(S::COLUMN_FAMILY_NAME)?;
-
-        let result = self.inner.get_cf(cf_handle, k)?;
-        SCHEMADB_GET_BYTES
-            .with_label_values(&[S::COLUMN_FAMILY_NAME])
-            .observe(result.as_ref().map_or(0.0, |v| v.len() as f64));
-
-        result
-            .map(|raw_value| <S::Value as ValueCodec<S>>::decode_value(&raw_value))
-            .transpose()
-            .map_err(|err| err.into())
-    }
-
-    /// Writes single record.
-    pub fn put<S: Schema>(
-        &self,
-        key: &impl KeyCodec<S>,
-        value: &impl ValueCodec<S>,
-    ) -> anyhow::Result<()> {
-        // Not necessary to use a batch, but we'd like a central place to bump counters.
-        // Used in tests only anyway.
-        let batch = SchemaBatch::new();
-        batch.put::<S>(key, value)?;
-        self.write_schemas(batch)
-    }
-
-    fn iter_with_direction<S: Schema>(
-        &self,
-        opts: ReadOptions,
-        direction: ScanDirection,
-    ) -> anyhow::Result<SchemaIterator<S>> {
-        let cf_handle = self.get_cf_handle(S::COLUMN_FAMILY_NAME)?;
-        Ok(SchemaIterator::new(
-            self.inner.raw_iterator_cf_opt(cf_handle, opts),
-            direction,
-        ))
-    }
-
-    /// Returns a forward [`SchemaIterator`] on a certain schema with the default read options.
-    pub fn iter<S: Schema>(&self) -> anyhow::Result<SchemaIterator<S>> {
-        self.iter_with_direction::<S>(Default::default(), ScanDirection::Forward)
-    }
-
-    /// Returns a forward [`SchemaIterator`] on a certain schema with the provided read options.
-    pub fn iter_with_opts<S: Schema>(
-        &self,
-        opts: ReadOptions,
-    ) -> anyhow::Result<SchemaIterator<S>> {
-        self.iter_with_direction::<S>(opts, ScanDirection::Forward)
-    }
-
-    /// Returns a backward [`SchemaIterator`] on a certain schema with the default read options.
-    pub fn rev_iter<S: Schema>(&self) -> anyhow::Result<SchemaIterator<S>> {
-        self.iter_with_direction::<S>(Default::default(), ScanDirection::Backward)
-    }
-
-    /// Returns a backward [`SchemaIterator`] on a certain schema with the provided read options.
-    pub fn rev_iter_with_opts<S: Schema>(
-        &self,
-        opts: ReadOptions,
-    ) -> anyhow::Result<SchemaIterator<S>> {
-        self.iter_with_direction::<S>(opts, ScanDirection::Backward)
-    }
-
-    /// Writes a group of records wrapped in a [`SchemaBatch`].
-    pub fn write_schemas(&self, batch: SchemaBatch) -> anyhow::Result<()> {
-        let _timer = SCHEMADB_BATCH_COMMIT_LATENCY_SECONDS
-            .with_label_values(&[self.name])
-            .start_timer();
-        let rows_locked = batch.rows.lock().expect("Lock must not be poisoned");
-
-        let mut db_batch = rocksdb::WriteBatch::default();
-        for (cf_name, rows) in rows_locked.iter() {
-            let cf_handle = self.get_cf_handle(cf_name)?;
-            for write_op in rows {
-                match write_op {
-                    WriteOp::Value { key, value } => db_batch.put_cf(cf_handle, key, value),
-                    WriteOp::Deletion { key } => db_batch.delete_cf(cf_handle, key),
-                }
-            }
-        }
-        let serialized_size = db_batch.size_in_bytes();
-
-        self.inner.write_opt(db_batch, &default_write_options())?;
-
-        // Bump counters only after DB write succeeds.
-        for (cf_name, rows) in rows_locked.iter() {
-            for write_op in rows {
-                match write_op {
-                    WriteOp::Value { key, value } => {
-                        SCHEMADB_PUT_BYTES
-                            .with_label_values(&[cf_name])
-                            .observe((key.len() + value.len()) as f64);
-                    }
-                    WriteOp::Deletion { key: _ } => {
-                        SCHEMADB_DELETES.with_label_values(&[cf_name]).inc();
-                    }
-                }
-            }
-        }
-        SCHEMADB_BATCH_COMMIT_BYTES
-            .with_label_values(&[self.name])
-            .observe(serialized_size as f64);
-
-        Ok(())
-    }
-
-    fn get_cf_handle(&self, cf_name: &str) -> anyhow::Result<&rocksdb::ColumnFamily> {
-        self.inner.cf_handle(cf_name).ok_or_else(|| {
-            format_err!(
-                "DB::cf_handle not found for column family name: {}",
-                cf_name
-            )
-        })
-    }
-
-    /// Flushes [MemTable](https://github.com/facebook/rocksdb/wiki/MemTable) data.
-    /// This is only used for testing `get_approximate_sizes_cf` in unit tests.
-    pub fn flush_cf(&self, cf_name: &str) -> anyhow::Result<()> {
-        Ok(self.inner.flush_cf(self.get_cf_handle(cf_name)?)?)
-    }
-
-    /// Returns the current RocksDB property value for the provided column family name
-    /// and property name.
-    pub fn get_property(&self, cf_name: &str, property_name: &str) -> anyhow::Result<u64> {
-        self.inner
-            .property_int_value_cf(self.get_cf_handle(cf_name)?, property_name)?
-            .ok_or_else(|| {
-                format_err!(
-                    "Unable to get property \"{}\" of column family \"{}\".",
-                    property_name,
-                    cf_name,
-                )
-            })
-    }
-
-    /// Creates new physical DB checkpoint in directory specified by `path`.
-    pub fn create_checkpoint<P: AsRef<Path>>(&self, path: P) -> anyhow::Result<()> {
-        rocksdb::checkpoint::Checkpoint::new(&self.inner)?.create_checkpoint(path)?;
-        Ok(())
-    }
-}
-
-/// For now we always use synchronous writes. This makes sure that once the operation returns
-/// `Ok(())` the data is persisted even if the machine crashes. In the future we might consider
-/// selectively turning this off for some non-critical writes to improve performance.
-fn default_write_options() -> rocksdb::WriteOptions { - let mut opts = rocksdb::WriteOptions::default(); - opts.set_sync(true); - opts -} From 5320fd82855c021db47f73268db11ab4b71ed053 Mon Sep 17 00:00:00 2001 From: Victor Lopez Date: Wed, 25 Oct 2023 14:56:20 +0200 Subject: [PATCH 24/28] add no-std CI check --- .github/workflows/rust.yml | 22 +++ Cargo.lock | 1 - Cargo.toml | 5 +- Makefile | 16 +++ .../provers/risc0/guest-celestia/Cargo.lock | 26 ---- .../provers/risc0/guest-mock/Cargo.lock | 26 ---- module-system/sov-state/Cargo.toml | 4 +- rollup-interface/Cargo.toml | 8 +- rollup-interface/src/lib.rs | 16 +-- rollup-interface/src/state_machine/da.rs | 11 +- .../src/state_machine/mocks/da.rs | 135 +----------------- .../src/state_machine/mocks/mod.rs | 8 +- .../src/state_machine/mocks/use_std.rs | 134 +++++++++++++++++ rollup-interface/src/state_machine/mod.rs | 1 + 14 files changed, 199 insertions(+), 214 deletions(-) create mode 100644 rollup-interface/src/state_machine/mocks/use_std.rs diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 085ac1848..dc5a0a293 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -122,6 +122,28 @@ jobs: echo "Linting or formatting errors detected, please run 'make lint-fix' to fix it"; exit 1 fi + + check_no_std: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + target: thumbv6m-none-eabi + override: true + - uses: Swatinem/rust-cache@v2 + with: + cache-provider: "buildjet" + shared-key: cargo-check-cache + save-if: ${{ github.ref == 'refs/heads/nightly' }} + workspaces: | + . + fuzz + - name: Run check + run: make check-no-std + # Check that every combination of features is working properly. 
hack: name: features diff --git a/Cargo.lock b/Cargo.lock index 9bbdcc687..711305036 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8835,7 +8835,6 @@ dependencies = [ "serde", "serde_json", "sha2 0.10.8", - "spin 0.9.8", "thiserror", "tokio", ] diff --git a/Cargo.toml b/Cargo.toml index 1d3fd5a57..5f613d275 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -63,7 +63,7 @@ jmt = "0.8.0" async-trait = "0.1.71" anyhow = { version = "1.0.68", default-features = false } arbitrary = { version = "1.3.1", features = ["derive"] } -borsh = { version = "0.10.3", default-features = false, features = ["rc", "bytes"] } +borsh = { version = "0.10.3", default-features = false } # TODO: Consider replacing this serialization format # https://github.com/Sovereign-Labs/sovereign-sdk/issues/283 bincode = "1.3.3" @@ -80,10 +80,9 @@ proptest-derive = "0.3.0" rand = "0.8" rayon = "1.8.0" rocksdb = { version = "0.21.0", features = ["lz4"] } -serde = { version = "1.0.188", default-features = false, features = ["alloc", "derive", "rc"] } +serde = { version = "1.0.188", default-features = false, features = ["alloc", "derive"] } serde_json = { version = "1.0", default-features = false, features = ["alloc"] } sha2 = { version = "0.10.6", default-features = false } -spin = "0.9.8" thiserror = "1.0.50" tiny-keccak = "2.0.2" tracing = { version = "0.1.40", default-features = false } diff --git a/Makefile b/Makefile index cd154eced..d2f9c8b26 100644 --- a/Makefile +++ b/Makefile @@ -1,3 +1,6 @@ +# no-std packages to be checked +NO_STD := "sov-rollup-interface" + .PHONY: help help: ## Display this help message @@ -23,6 +26,7 @@ install-dev-tools: ## Installs all necessary cargo helpers cargo install cargo-nextest --locked cargo install cargo-risczero cargo risczero install + rustup target add thumbv6m-none-eabi lint: ## cargo check and clippy. Skip clippy on guest code since it's not supported by risc0 ## fmt first, because it's the cheapest @@ -42,6 +46,18 @@ check-features: ## Checks that project compiles with all combinations of feature check-fuzz: ## Checks that fuzz member compiles $(MAKE) -C fuzz check +check-no-std: ## Checks that project compiles without std + @for package in $(NO_STD); do \ + echo "Checking no-std $${package}..."; \ + cargo check -p $$package \ + --target thumbv6m-none-eabi \ + --no-default-features ; \ + cargo check -p $$package \ + --target thumbv6m-none-eabi \ + --no-default-features \ + --features native ; \ + done + find-unused-deps: ## Prints unused dependencies for project. 
Note: requires nightly cargo udeps --all-targets --all-features diff --git a/examples/demo-rollup/provers/risc0/guest-celestia/Cargo.lock b/examples/demo-rollup/provers/risc0/guest-celestia/Cargo.lock index 1ba672c50..07b25135e 100644 --- a/examples/demo-rollup/provers/risc0/guest-celestia/Cargo.lock +++ b/examples/demo-rollup/provers/risc0/guest-celestia/Cargo.lock @@ -1112,16 +1112,6 @@ version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3852614a3bd9ca9804678ba6be5e3b8ce76dfc902cae004e3e0c44051b6e88db" -[[package]] -name = "lock_api" -version = "0.4.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" -dependencies = [ - "autocfg", - "scopeguard", -] - [[package]] name = "log" version = "0.4.20" @@ -1830,12 +1820,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "scopeguard" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" - [[package]] name = "semver" version = "0.11.0" @@ -2172,7 +2156,6 @@ dependencies = [ "proptest", "serde", "sha2 0.10.8", - "spin", "thiserror", ] @@ -2248,15 +2231,6 @@ dependencies = [ "risc0-zkvm-platform", ] -[[package]] -name = "spin" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" -dependencies = [ - "lock_api", -] - [[package]] name = "spki" version = "0.7.2" diff --git a/examples/demo-rollup/provers/risc0/guest-mock/Cargo.lock b/examples/demo-rollup/provers/risc0/guest-mock/Cargo.lock index f053cecf0..c18d9b680 100644 --- a/examples/demo-rollup/provers/risc0/guest-mock/Cargo.lock +++ b/examples/demo-rollup/provers/risc0/guest-mock/Cargo.lock @@ -524,16 +524,6 @@ version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128" -[[package]] -name = "lock_api" -version = "0.4.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" -dependencies = [ - "autocfg", - "scopeguard", -] - [[package]] name = "log" version = "0.4.20" @@ -901,12 +891,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "scopeguard" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" - [[package]] name = "semver" version = "1.0.18" @@ -1155,7 +1139,6 @@ dependencies = [ "proptest", "serde", "sha2", - "spin", "thiserror", ] @@ -1220,15 +1203,6 @@ dependencies = [ "risc0-zkvm-platform", ] -[[package]] -name = "spin" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" -dependencies = [ - "lock_api", -] - [[package]] name = "subtle" version = "2.5.0" diff --git a/module-system/sov-state/Cargo.toml b/module-system/sov-state/Cargo.toml index 6e4874419..89596a8e3 100644 --- a/module-system/sov-state/Cargo.toml +++ b/module-system/sov-state/Cargo.toml @@ -14,11 +14,11 @@ resolver = "2" [dependencies] anyhow = { workspace = true } arbitrary = { workspace = true, optional = true } -borsh = { workspace = true } +borsh = { workspace = true, features = ["rc", "bytes"] } bcs = { workspace = true } proptest = { 
workspace = true, optional = true }
 proptest-derive = { workspace = true, optional = true }
-serde = { workspace = true }
+serde = { workspace = true, features = ["rc"] }
 serde_json = { workspace = true }
 thiserror = { workspace = true }
 sov-rollup-interface = { path = "../../rollup-interface", version = "0.3" }
diff --git a/rollup-interface/Cargo.toml b/rollup-interface/Cargo.toml
index 98ec97e08..14e4ff41f 100644
--- a/rollup-interface/Cargo.toml
+++ b/rollup-interface/Cargo.toml
@@ -19,12 +19,11 @@ exclude = [
 anyhow = { workspace = true, default-features = false }
 async-trait = { workspace = true }
 borsh = { workspace = true }
-bytes = { workspace = true }
+bytes = { workspace = true, optional = true, default-features = true }
 digest = { workspace = true }
 hex = { workspace = true }
 serde = { workspace = true }
 sha2 = { workspace = true, optional = true }
-spin = { workspace = true }
 thiserror = { workspace = true, optional = true }
 tokio = { workspace = true, optional = true }

@@ -45,13 +44,14 @@ proptest-derive = { workspace = true }
 [features]
 default = ["std"]
 native = []
-fuzzing = ["proptest", "proptest-derive", "sha2"]
+fuzzing = ["proptest", "proptest-derive", "sha2", "std"]
 mocks = ["sha2", "bytes/serde"]
 std = [
 "anyhow/default",
 "bincode",
 "borsh/default",
-    "bytes/default",
+    "borsh/bytes",
+    "bytes",
 "digest/default",
 "hex/default",
 "proptest?/default",
diff --git a/rollup-interface/src/lib.rs b/rollup-interface/src/lib.rs
index 38baf257d..de096fd2c 100644
--- a/rollup-interface/src/lib.rs
+++ b/rollup-interface/src/lib.rs
@@ -6,6 +6,8 @@
 #![cfg_attr(not(feature = "std"), no_std)]
 #![deny(missing_docs)]

+extern crate alloc;
+
 mod state_machine;
 pub use state_machine::*;

@@ -16,15 +18,9 @@ pub use {anyhow, digest};

 /// A facade for the `std` crate.
 pub mod maybestd {
-    pub use borsh::maybestd::{borrow, boxed, collections, format, io, rc, string, vec};
-
-    /// A facade for the `sync` std module.
-    pub mod sync {
-        #[cfg(feature = "std")]
-        pub use std::sync::Mutex;
+    // sync will be available only when the target supports atomic operations
+    #[cfg(target_has_atomic = "ptr")]
+    pub use alloc::sync;

-        pub use borsh::maybestd::sync::*;
-        #[cfg(not(feature = "std"))]
-        pub use spin::Mutex;
-    }
+    pub use borsh::maybestd::{borrow, boxed, collections, format, io, string, vec};
 }
diff --git a/rollup-interface/src/state_machine/da.rs b/rollup-interface/src/state_machine/da.rs
index 95c866c71..cf85dd21b 100644
--- a/rollup-interface/src/state_machine/da.rs
+++ b/rollup-interface/src/state_machine/da.rs
@@ -1,14 +1,11 @@
 //! Defines traits and types used by the rollup to verify claims about the
 //! DA layer.
-use core::cmp::min;
 use core::fmt::Debug;

 use borsh::{BorshDeserialize, BorshSerialize};
-use bytes::Buf;
 use serde::de::DeserializeOwned;
 use serde::{Deserialize, Serialize};

-use crate::maybestd::vec::Vec;
 use crate::zk::ValidityCondition;
 use crate::BasicAddress;

@@ -70,12 +67,13 @@ pub trait DaVerifier {
     ) -> Result<<Self::Spec as DaSpec>::ValidityCondition, Self::Error>;
 }

+#[cfg(feature = "std")]
 #[derive(Debug, Clone, Serialize, Deserialize, BorshDeserialize, BorshSerialize, PartialEq)]
 /// Simple structure that implements the Read trait for a buffer and counts the number of bytes read from the beginning.
 /// Useful for the partial blob reading optimization: we know for each blob how many bytes have been read from the beginning.
 ///
 /// Because of soundness issues we cannot implement the Buf trait because the prover could get unproved blob data using the chunk method.
-pub struct CountedBufReader<B: Buf> {
+pub struct CountedBufReader<B: bytes::Buf> {
     /// The original blob data.
     inner: B,

@@ -84,7 +82,8 @@ impl<B: Buf> CountedBufReader<B> {
     accumulator: Vec<u8>,
 }

-impl<B: Buf> CountedBufReader<B> {
+#[cfg(feature = "std")]
+impl<B: bytes::Buf> CountedBufReader<B> {
     /// Creates a new buffer reader with counter from an object that implements the buffer trait
     pub fn new(inner: B) -> Self {
         let buf_size = inner.remaining();
@@ -104,7 +103,7 @@ impl<B: Buf> CountedBufReader<B> {
     }
     // `Buf::advance` would panic if `num_bytes` was greater than the length of the remaining unverified data,
     // but we just advance to the end of the buffer.
-        let num_to_read = min(remaining, requested);
+        let num_to_read = core::cmp::min(remaining, requested);
     // Extend the inner vector with zeros (copy_to_slice requires the vector to have
     // the correct *length* not just capacity)
     self.accumulator
diff --git a/rollup-interface/src/state_machine/mocks/da.rs b/rollup-interface/src/state_machine/mocks/da.rs
index c88eb0c36..64ae0d481 100644
--- a/rollup-interface/src/state_machine/mocks/da.rs
+++ b/rollup-interface/src/state_machine/mocks/da.rs
@@ -2,16 +2,10 @@ use core::fmt::Display;
 use core::str::FromStr;

 use borsh::{BorshDeserialize, BorshSerialize};
-use bytes::Bytes;
 use serde::{Deserialize, Serialize};

-use crate::da::{
-    BlobReaderTrait, BlockHashTrait, BlockHeaderTrait, CountedBufReader, DaSpec, DaVerifier, Time,
-};
+use crate::da::{BlockHashTrait, BlockHeaderTrait, Time};
 use crate::maybestd::string::String;
-use crate::maybestd::vec::Vec;
-use crate::mocks::MockValidityCond;
-use crate::services::da::SlotData;
 use crate::{BasicAddress, RollupAddress};

 const JAN_1_2023: i64 = 1672531200;
@@ -112,60 +106,6 @@ impl Display for MockAddress {
 impl BasicAddress for MockAddress {}
 impl RollupAddress for MockAddress {}

-#[derive(
-    Debug,
-    Clone,
-    PartialEq,
-    borsh::BorshDeserialize,
-    borsh::BorshSerialize,
-    serde::Serialize,
-    serde::Deserialize,
-)]
-
-/// A mock BlobTransaction from a DA layer used for testing.
-pub struct MockBlob {
-    address: MockAddress,
-    hash: [u8; 32],
-    data: CountedBufReader<Bytes>,
-}
-
-impl BlobReaderTrait for MockBlob {
-    type Address = MockAddress;
-
-    fn sender(&self) -> Self::Address {
-        self.address
-    }
-
-    fn hash(&self) -> [u8; 32] {
-        self.hash
-    }
-
-    fn verified_data(&self) -> &[u8] {
-        self.data.accumulator()
-    }
-
-    fn total_len(&self) -> usize {
-        self.data.total_len()
-    }
-
-    #[cfg(feature = "native")]
-    fn advance(&mut self, num_bytes: usize) -> &[u8] {
-        self.data.advance(num_bytes);
-        self.verified_data()
-    }
-}
-
-impl MockBlob {
-    /// Creates a new mock blob with the given data, claiming to have been published by the provided address.
-    pub fn new(data: Vec<u8>, address: MockAddress, hash: [u8; 32]) -> Self {
-        Self {
-            address,
-            data: CountedBufReader::new(bytes::Bytes::from(data)),
-            hash,
-        }
-    }
-}
-
 /// A mock hash digest.
 #[derive(
 Clone,
@@ -241,63 +181,10 @@ impl BlockHeaderTrait for MockBlockHeader {
 }
 }

-/// A mock block type used for testing.
-#[derive(Serialize, Deserialize, PartialEq, core::fmt::Debug, Clone)]
-pub struct MockBlock {
-    /// The header of this block.
-    pub header: MockBlockHeader,
-    /// Validity condition
-    pub validity_cond: MockValidityCond,
-    /// Blobs
-    pub blobs: Vec<MockBlob>,
-}
-
-impl Default for MockBlock {
-    fn default() -> Self {
-        Self {
-            header: MockBlockHeader {
-                prev_hash: [0; 32].into(),
-                hash: [1; 32].into(),
-                height: 0,
-            },
-            validity_cond: Default::default(),
-            blobs: Default::default(),
-        }
-    }
-}
-
-impl SlotData for MockBlock {
-    type BlockHeader = MockBlockHeader;
-    type Cond = MockValidityCond;
-
-    fn hash(&self) -> [u8; 32] {
-        self.header.hash.0
-    }
-
-    fn header(&self) -> &Self::BlockHeader {
-        &self.header
-    }
-
-    fn validity_condition(&self) -> MockValidityCond {
-        self.validity_cond
-    }
-}
-
 /// A [`DaSpec`] suitable for testing.
 #[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq, Eq)]
 pub struct MockDaSpec;

-impl DaSpec for MockDaSpec {
-    type SlotHash = MockHash;
-    type BlockHeader = MockBlockHeader;
-    type BlobTransaction = MockBlob;
-    type Address = MockAddress;
-    type ValidityCondition = MockValidityCond;
-    type InclusionMultiProof = [u8; 32];
-    type CompletenessProof = ();
-    type ChainParams = ();
-}
-
 /// The configuration for mock da
 #[derive(Debug, Clone, PartialEq, serde::Deserialize, serde::Serialize)]
 pub struct MockDaConfig {
@@ -309,26 +196,6 @@ pub struct MockDaConfig {

 /// DaVerifier used in tests.
 pub struct MockDaVerifier {}

-impl DaVerifier for MockDaVerifier {
-    type Spec = MockDaSpec;
-
-    type Error = anyhow::Error;
-
-    fn new(_params: <Self::Spec as DaSpec>::ChainParams) -> Self {
-        Self {}
-    }
-
-    fn verify_relevant_tx_list(
-        &self,
-        _block_header: &<Self::Spec as DaSpec>::BlockHeader,
-        _txs: &[<Self::Spec as DaSpec>::BlobTransaction],
-        _inclusion_proof: <Self::Spec as DaSpec>::InclusionMultiProof,
-        _completeness_proof: <Self::Spec as DaSpec>::CompletenessProof,
-    ) -> Result<<Self::Spec as DaSpec>::ValidityCondition, Self::Error> {
-        Ok(Default::default())
-    }
-}
-
 #[cfg(test)]
 mod tests {
 use super::*;
diff --git a/rollup-interface/src/state_machine/mocks/mod.rs b/rollup-interface/src/state_machine/mocks/mod.rs
index da23f093a..f463f3566 100644
--- a/rollup-interface/src/state_machine/mocks/mod.rs
+++ b/rollup-interface/src/state_machine/mocks/mod.rs
@@ -4,13 +4,17 @@
 mod da;
 #[cfg(all(feature = "native", feature = "tokio"))]
 mod service;
+#[cfg(feature = "std")]
+mod use_std;
 mod validity_condition;
 mod zk_vm;

 pub use da::{
-    MockAddress, MockBlob, MockBlock, MockBlockHeader, MockDaConfig, MockDaSpec, MockDaVerifier,
-    MockHash, MOCK_SEQUENCER_DA_ADDRESS,
+    MockAddress, MockBlockHeader, MockDaConfig, MockDaSpec, MockDaVerifier, MockHash,
+    MOCK_SEQUENCER_DA_ADDRESS,
 };
 #[cfg(all(feature = "native", feature = "tokio"))]
 pub use service::MockDaService;
+#[cfg(feature = "std")]
+pub use use_std::{MockBlob, MockBlock};
 pub use validity_condition::{MockValidityCond, MockValidityCondChecker};
 pub use zk_vm::{MockCodeCommitment, MockProof, MockZkvm};
diff --git a/rollup-interface/src/state_machine/mocks/use_std.rs b/rollup-interface/src/state_machine/mocks/use_std.rs
new file mode 100644
index 000000000..ad3f5a704
--- /dev/null
+++ b/rollup-interface/src/state_machine/mocks/use_std.rs
@@ -0,0 +1,134 @@
+use bytes::Bytes;
+use serde::{Deserialize, Serialize};
+
+use crate::da::{BlobReaderTrait, CountedBufReader, DaSpec, DaVerifier};
+use crate::mocks::{
+    MockAddress, MockBlockHeader, MockDaSpec, MockDaVerifier, MockHash, MockValidityCond,
+};
+use crate::services::da::SlotData;
+
+#[derive(
+    Debug,
+    Clone,
+    PartialEq,
+    borsh::BorshDeserialize,
+    borsh::BorshSerialize,
+    serde::Serialize,
+    serde::Deserialize,
+)]
+/// A mock BlobTransaction from a DA layer used for testing.
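+///
+/// Illustrative construction (the `address` value is hypothetical, not taken
+/// from this patch):
+///
+/// ```ignore
+/// let blob = MockBlob::new(vec![1, 2, 3], address, [0u8; 32]);
+/// assert_eq!(blob.total_len(), 3);
+/// ```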
+pub struct MockBlob {
+    address: MockAddress,
+    hash: [u8; 32],
+    data: CountedBufReader<Bytes>,
+}
+
+impl MockBlob {
+    /// Creates a new mock blob with the given data, claiming to have been published by the provided address.
+    pub fn new(data: Vec<u8>, address: MockAddress, hash: [u8; 32]) -> Self {
+        Self {
+            address,
+            data: CountedBufReader::new(bytes::Bytes::from(data)),
+            hash,
+        }
+    }
+}
+
+impl BlobReaderTrait for MockBlob {
+    type Address = MockAddress;
+
+    fn sender(&self) -> Self::Address {
+        self.address
+    }
+
+    fn hash(&self) -> [u8; 32] {
+        self.hash
+    }
+
+    fn verified_data(&self) -> &[u8] {
+        self.data.accumulator()
+    }
+
+    fn total_len(&self) -> usize {
+        self.data.total_len()
+    }
+
+    #[cfg(feature = "native")]
+    fn advance(&mut self, num_bytes: usize) -> &[u8] {
+        self.data.advance(num_bytes);
+        self.verified_data()
+    }
+}
+
+/// A mock block type used for testing.
+#[derive(Serialize, Deserialize, PartialEq, core::fmt::Debug, Clone)]
+pub struct MockBlock {
+    /// The header of this block.
+    pub header: MockBlockHeader,
+    /// Validity condition
+    pub validity_cond: MockValidityCond,
+    /// Blobs
+    pub blobs: Vec<MockBlob>,
+}
+
+impl Default for MockBlock {
+    fn default() -> Self {
+        Self {
+            header: MockBlockHeader {
+                prev_hash: [0; 32].into(),
+                hash: [1; 32].into(),
+                height: 0,
+            },
+            validity_cond: Default::default(),
+            blobs: Default::default(),
+        }
+    }
+}
+
+impl SlotData for MockBlock {
+    type BlockHeader = MockBlockHeader;
+    type Cond = MockValidityCond;
+
+    fn hash(&self) -> [u8; 32] {
+        self.header.hash.0
+    }
+
+    fn header(&self) -> &Self::BlockHeader {
+        &self.header
+    }
+
+    fn validity_condition(&self) -> MockValidityCond {
+        self.validity_cond
+    }
+}
+
+impl DaSpec for MockDaSpec {
+    type SlotHash = MockHash;
+    type BlockHeader = MockBlockHeader;
+    type BlobTransaction = MockBlob;
+    type Address = MockAddress;
+    type ValidityCondition = MockValidityCond;
+    type InclusionMultiProof = [u8; 32];
+    type CompletenessProof = ();
+    type ChainParams = ();
+}
+
+impl DaVerifier for MockDaVerifier {
+    type Spec = MockDaSpec;
+
+    type Error = anyhow::Error;
+
+    fn new(_params: <Self::Spec as DaSpec>::ChainParams) -> Self {
+        Self {}
+    }
+
+    fn verify_relevant_tx_list(
+        &self,
+        _block_header: &<Self::Spec as DaSpec>::BlockHeader,
+        _txs: &[<Self::Spec as DaSpec>::BlobTransaction],
+        _inclusion_proof: <Self::Spec as DaSpec>::InclusionMultiProof,
+        _completeness_proof: <Self::Spec as DaSpec>::CompletenessProof,
+    ) -> Result<<Self::Spec as DaSpec>::ValidityCondition, Self::Error> {
+        Ok(Default::default())
+    }
+}
diff --git a/rollup-interface/src/state_machine/mod.rs b/rollup-interface/src/state_machine/mod.rs
index 95810e5cc..4b4a90c0b 100644
--- a/rollup-interface/src/state_machine/mod.rs
+++ b/rollup-interface/src/state_machine/mod.rs
@@ -5,6 +5,7 @@ pub mod da;
 pub mod stf;
 pub mod zk;

+#[cfg(feature = "std")]
 pub use bytes::{Buf, BufMut, Bytes, BytesMut};
 use serde::de::DeserializeOwned;
 use serde::Serialize;

From 3ea123515347a95506d7865eae15f721dc6d821d Mon Sep 17 00:00:00 2001
From: Victor Lopez
Date: Wed, 25 Oct 2023 15:03:11 +0200
Subject: [PATCH 25/28] fix cargo docs

---
 rollup-interface/src/state_machine/mocks/da.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/rollup-interface/src/state_machine/mocks/da.rs b/rollup-interface/src/state_machine/mocks/da.rs
index 64ae0d481..8cb006935 100644
--- a/rollup-interface/src/state_machine/mocks/da.rs
+++ b/rollup-interface/src/state_machine/mocks/da.rs
@@ -181,7 +181,7 @@ impl BlockHeaderTrait for MockBlockHeader {
 }
 }

-/// A [`DaSpec`] suitable for testing.
+/// A [`crate::da::DaSpec`] suitable for testing.
#[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq, Eq)] pub struct MockDaSpec; From 31939eae5c680ea38aa5932dae91195d1012cc86 Mon Sep 17 00:00:00 2001 From: Victor Lopez Date: Wed, 25 Oct 2023 15:37:57 +0200 Subject: [PATCH 26/28] require borsh features for sov-db --- full-node/db/sov-db/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/full-node/db/sov-db/Cargo.toml b/full-node/db/sov-db/Cargo.toml index a1d661422..9f234d27c 100644 --- a/full-node/db/sov-db/Cargo.toml +++ b/full-node/db/sov-db/Cargo.toml @@ -23,10 +23,10 @@ sov-rollup-interface = { path = "../../../rollup-interface", version = "0.3", fe anyhow = { workspace = true, default-features = true } arbitrary = { workspace = true, optional = true } byteorder = { workspace = true, default-features = true } -borsh = { workspace = true, default-features = true } +borsh = { workspace = true, default-features = true, features = ["bytes", "rc"] } proptest = { workspace = true, optional = true, default-features = true } proptest-derive = { workspace = true, optional = true } -serde = { workspace = true, default-features = true } +serde = { workspace = true, default-features = true, features = ["rc"] } tempfile = { workspace = true, optional = true } rocksdb = { workspace = true } bincode = { workspace = true } From c3dcf9114aa4fae38798bc1cebd48f778f3b8820 Mon Sep 17 00:00:00 2001 From: Victor Lopez Date: Wed, 25 Oct 2023 16:07:03 +0200 Subject: [PATCH 27/28] add check_no_std to required all-green ci jobs --- .github/workflows/rust.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index dc5a0a293..be6aa44e5 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -71,6 +71,7 @@ jobs: # which should be required to pass before merging a PR, don't forget to # update this list! - check + - check_no_std - hack - nextest - test From fce8c53e0ea5b28828ea7572eed94a271acda37c Mon Sep 17 00:00:00 2001 From: Victor Lopez Date: Wed, 25 Oct 2023 17:20:18 +0200 Subject: [PATCH 28/28] clean check_no_std ci cache commands --- .github/workflows/rust.yml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index be6aa44e5..0e6ebb265 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -136,12 +136,7 @@ jobs: override: true - uses: Swatinem/rust-cache@v2 with: - cache-provider: "buildjet" - shared-key: cargo-check-cache save-if: ${{ github.ref == 'refs/heads/nightly' }} - workspaces: | - . - fuzz - name: Run check run: make check-no-std
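
A closing note for reviewers on how the series is meant to be consumed downstream. This is an illustrative sketch, not part of any patch above: the consumer crate and its function are hypothetical, but the feature flags and the `maybestd` paths are the ones introduced by the series.

    # Cargo.toml of a hypothetical no-std consumer
    [dependencies]
    sov-rollup-interface = { version = "0.3", default-features = false }

    // src/lib.rs
    #![cfg_attr(not(feature = "std"), no_std)]

    use sov_rollup_interface::maybestd::vec::Vec;

    /// Compiles identically with and without `std`, since `Vec` resolves
    /// to `alloc::vec::Vec` on no-std targets.
    pub fn double(data: &[u8]) -> Vec<u8> {
        data.iter().map(|b| b.wrapping_mul(2)).collect()
    }

Such a crate can then be checked the same way the new CI job does, e.g. `cargo check --target thumbv6m-none-eabi --no-default-features`.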