diff --git a/bin/magi.rs b/bin/magi.rs index 2fd7d692..6d523dfd 100644 --- a/bin/magi.rs +++ b/bin/magi.rs @@ -74,21 +74,7 @@ pub struct Cli { impl Cli { pub fn to_config(self) -> Config { - let chain = match self.network.as_str() { - "optimism" => ChainConfig::optimism(), - "optimism-goerli" => ChainConfig::optimism_goerli(), - "optimism-sepolia" => ChainConfig::optimism_sepolia(), - "base" => ChainConfig::base(), - "base-goerli" => ChainConfig::base_goerli(), - "base-sepolia" => ChainConfig::base_sepolia(), - file if file.ends_with(".json") => ChainConfig::from_json(file), - _ => panic!( - "Invalid network name. \\ - Please use one of the following: 'optimism', 'optimism-goerli', 'base-goerli'. \\ - You can also use a JSON file path for custom configuration." - ), - }; - + let chain = ChainConfig::from_network_name(&self.network); let config_path = home_dir().unwrap().join(".magi/magi.toml"); let cli_config = CliConfig::from(self); Config::new(&config_path, cli_config, chain) diff --git a/docker/README.md b/docker/README.md index 83e2fe6c..0bc1879d 100644 --- a/docker/README.md +++ b/docker/README.md @@ -6,7 +6,7 @@ This contains a simple docker setup for running magi and op-geth. Begin by copying `.env.default` to `.env`. You can set the network to sync to by changing the `NETWORK` value (supported options are optimism-goerli and base-goerli). Make sure to set the `L1_RPC_URL` value to a valid RPC URL for the L1 being used by the given network. If you are running in production, you may also want to set a secure `JWT_SECRET` value. You can create a new secret by running `openssl rand -hex 32`. -To run both magi and op-geth together, run `docker compose up`. To run just op-geth without magi for local developement, run `COMPOSE_PROFILES=no-magi docker compose up` +To run both magi and op-geth together, run `docker compose up`. To run just op-geth without magi for local development, run `COMPOSE_PROFILES=no-magi docker compose up` ## Troubleshooting If you are getting `permission denied` errors when attempting to run `docker-compose`, try `sudo docker compose` instead. This is often required when running docker depending on how it was installed. diff --git a/docs/architecture.md b/docs/architecture.md index 377a5907..a4de9397 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -34,7 +34,7 @@ The Pipeline is broken up into [stages](../src/derive/stages/mod.rs) as follows. ##### Batcher Transactions -The [Batcher Transactions](../src/derive/stages/batcher_transactions.rs) stage pulls transactions from its configured channel receiver, passed down from the [Pipeline](../src/derive/mod.rs) parent. To construct a [Batcher Transaction](../src/derive/stages/batcher_transactions.rs) from the raw transaction data, it constructs [Frames](../src/derive/stages/batcher_transactions.rs) following the [Batch Submission Wire Format](https://github.com/ethereum-optimism/optimism/blob/develop/specs/derivation.md#batch-submission-wire-format) documented in the [Optimism Specs](https://github.com/ethereum-optimism/optimism/blob/develop/specs/README.md). +The [Batcher Transactions](../src/derive/stages/batcher_transactions.rs) stage pulls transactions from its configured channel receiver, passed down from the [Pipeline](../src/derive/mod.rs) parent. 
To construct a [Batcher Transaction](../src/derive/stages/batcher_transactions.rs) from the raw transaction data, it constructs [Frames](../src/derive/stages/batcher_transactions.rs) following the [Batch Submission Wire Format](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/derivation.md#batch-submission-wire-format) documented in the [Optimism Specs](https://github.com/ethereum-optimism/specs/tree/main). ##### Channels @@ -44,7 +44,7 @@ Remember, since the [L1 Chain Watcher](#l1-chain-watcher) is spawned as a separa ##### Batches -Next up, the [Batches](../src/derive/stages/batches.rs) stage iterates over the prior [Channel](../src/derive/stages/channels.rs) stage, decoding [Batch](../src/derive/stages/batches.rs) objects from the inner channel data. [Batch](../src/derive/stages/batches.rs) objects are RLP-decoded from the channel data following the [Batch Encoding Format](https://github.com/ethereum-optimism/optimism/blob/develop/specs/derivation.md#batch-format), detailed below. +Next up, the [Batches](../src/derive/stages/batches.rs) stage iterates over the prior [Channel](../src/derive/stages/channels.rs) stage, decoding [Batch](../src/derive/stages/batches.rs) objects from the inner channel data. [Batch](../src/derive/stages/batches.rs) objects are RLP-decoded from the channel data following the [Batch Encoding Format](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/derivation.md#batch-format), detailed below. For version 0, [Batch](../src/derive/stages/batches.rs) objects are encoded as follows: diff --git a/docs/devnet.md b/docs/devnet.md index 02e9f36b..cf5415d5 100644 --- a/docs/devnet.md +++ b/docs/devnet.md @@ -38,7 +38,7 @@ NETWORK=devnet # To avoid potential conflicts with the default ports in the OP devnet, it's recommended to modify the RPC ports. -# The exeuction client Auth RPC port. +# The execution client Auth RPC port. EXECUTION_CLIENT_AUTH_RPC_PORT=5551 # The execution client RPC port. diff --git a/src/common/mod.rs b/src/common/mod.rs index 34dce8f3..d671aea0 100644 --- a/src/common/mod.rs +++ b/src/common/mod.rs @@ -15,9 +15,13 @@ use crate::engine::ExecutionPayload; /// Selected block header info #[derive(Debug, Clone, Copy, Eq, PartialEq, Default, Serialize, Deserialize)] pub struct BlockInfo { + /// The block hash pub hash: H256, + /// The block number pub number: u64, + /// The parent block hash pub parent_hash: H256, + /// The block timestamp pub timestamp: u64, } @@ -28,8 +32,11 @@ pub struct RawTransaction(pub Vec); /// L1 epoch block #[derive(Copy, Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)] pub struct Epoch { + /// The block number pub number: u64, + /// The block hash pub hash: H256, + /// The block timestamp pub timestamp: u64, } @@ -50,6 +57,7 @@ impl From for Value { impl TryFrom> for BlockInfo { type Error = eyre::Report; + /// Converts a [Block] to [BlockInfo] fn try_from(block: Block) -> Result { let number = block .number @@ -78,6 +86,7 @@ impl From for Value { } impl From<&ExecutionPayload> for BlockInfo { + /// Converts an [ExecutionPayload] to [BlockInfo] fn from(value: &ExecutionPayload) -> Self { Self { number: value.block_number.as_u64(), @@ -88,18 +97,29 @@ impl From<&ExecutionPayload> for BlockInfo { } } +/// Represents the `setL1BlockValues` transaction inputs included in the first transaction of every L2 block. pub struct AttributesDepositedCall { + /// The L1 block number of the corresponding epoch this belongs to. 
pub number: u64, + /// The L1 block timestamp of the corresponding epoch this belongs to. pub timestamp: u64, + /// The L1 block basefee of the corresponding epoch this belongs to. pub basefee: U256, + /// The L1 block hash of the corresponding epoch this belongs to. pub hash: H256, + /// The L2 block's position within the epoch. pub sequence_number: u64, + /// A versioned hash of the current authorized batcher sender. pub batcher_hash: H256, + /// The current L1 fee overhead to apply to L2 transactions cost computation. Unused after Ecotone hard fork. pub fee_overhead: U256, + /// The current L1 fee scalar to apply to L2 transactions cost computation. Unused after Ecotone hard fork. pub fee_scalar: U256, } +/// A type alias for the `setL1BlockValues` function parameter types type SetL1BlockValueInput = (u64, u64, U256, H256, u64, H256, U256, U256); +/// The `setL1BlockValues` human-readable ABI const L1_BLOCK_CONTRACT_ABI: &str = r#"[ function setL1BlockValues(uint64 _number,uint64 _timestamp, uint256 _basefee, bytes32 _hash,uint64 _sequenceNumber,bytes32 _batcherHash,uint256 _l1FeeOverhead,uint256 _l1FeeScalar) external ]"#; @@ -107,6 +127,7 @@ const L1_BLOCK_CONTRACT_ABI: &str = r#"[ impl TryFrom for AttributesDepositedCall { type Error = eyre::Report; + /// Decodes and converts the given bytes (calldata) into [AttributesDepositedCall]. fn try_from(value: Bytes) -> Result { let abi = BaseContract::from(parse_abi_str(L1_BLOCK_CONTRACT_ABI)?); @@ -135,6 +156,7 @@ impl TryFrom for AttributesDepositedCall { } impl From<&AttributesDepositedCall> for Epoch { + /// Converts [AttributesDepositedCall] to an [Epoch] consisting of the number, hash & timestamp of the corresponding L1 epoch block. fn from(call: &AttributesDepositedCall) -> Self { Self { number: call.number, @@ -145,6 +167,7 @@ impl From<&AttributesDepositedCall> for Epoch { } impl Decodable for RawTransaction { + /// Decodes RLP encoded bytes into [RawTransaction] bytes fn decode(rlp: &Rlp) -> Result { let tx_bytes: Vec = rlp.as_val()?; Ok(Self(tx_bytes)) diff --git a/src/config/mod.rs b/src/config/mod.rs index 234a5df4..5bcfd89a 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -36,31 +36,35 @@ impl FromStr for SyncMode { } } -/// A system configuration +/// The global `Magi` configuration. #[derive(Debug, Clone, Deserialize, Default)] pub struct Config { - /// The base chain RPC URL + /// The L1 chain RPC URL pub l1_rpc_url: String, /// The L2 chain RPC URL pub l2_rpc_url: String, /// The L2 engine API URL pub l2_engine_url: String, - /// The base chain config + /// The L2 chain config pub chain: ChainConfig, - /// Engine API JWT Secret + /// Engine API JWT Secret. /// This is used to authenticate with the engine API pub jwt_secret: String, /// A trusted L2 RPC URL to use for fast/checkpoint syncing pub checkpoint_sync_url: Option, - /// The port of RPC server + /// The port of the `Magi` RPC server pub rpc_port: u16, /// The socket address of RPC server pub rpc_addr: String, /// The devnet mode. + /// If devnet is enabled. pub devnet: bool, } impl Config { + /// Creates a new [Config], based on a config TOML and/or CLI flags. + /// + /// If a setting exists in the TOML and is also passed via CLI, the CLI will take priority. 
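For illustration only (not part of this patch), a minimal sketch of the priority rule described above, built from the types introduced in this diff; the URL, secret, and path literals are placeholder assumptions:

use std::path::PathBuf;

// Values passed on the CLI are expected to override the same keys in magi.toml.
let cli_config = CliConfig {
    l1_rpc_url: Some("http://localhost:8545".to_string()),
    jwt_secret: Some("placeholder-jwt-secret".to_string()),
    l2_rpc_url: None,
    l2_engine_url: None,
    checkpoint_sync_url: None,
    rpc_port: None,
    rpc_addr: None,
    devnet: false,
};
let chain = ChainConfig::from_network_name("optimism");
let config = Config::new(&PathBuf::from("magi.toml"), cli_config, chain);
// The CLI-supplied L1 RPC URL wins over any value in the TOML file.
assert_eq!(config.l1_rpc_url, "http://localhost:8545");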
pub fn new(config_path: &PathBuf, cli_config: CliConfig, chain: ChainConfig) -> Self { let defaults_provider = Serialized::defaults(DefaultsProvider::default()); let chain_provider: Serialized = chain.into(); @@ -81,8 +85,8 @@ impl Config { figment::error::Kind::MissingField(field) => { let field = field.replace('_', "-"); println!("\x1b[91merror\x1b[0m: missing configuration field: {field}"); - println!("\n\ttry supplying the propoper command line argument: --{field}"); - println!("\talternatively, you can add the field to your magi.toml file or as an environment variable"); + println!("\n\ttry supplying the proper command line argument: --{field}"); + println!("\talternatively, you can add the field to your magi.toml file"); println!("\nfor more information, check the github README"); } _ => println!("cannot parse configuration: {err}"), @@ -93,28 +97,35 @@ impl Config { } } -/// Chain config items derived from the CLI +/// Magi config items derived from the CLI #[derive(Debug, Clone, Serialize, Deserialize)] pub struct CliConfig { + /// The L1 RPC #[serde(skip_serializing_if = "Option::is_none")] pub l1_rpc_url: Option, + /// The L2 execution client RPC #[serde(skip_serializing_if = "Option::is_none")] pub l2_rpc_url: Option, + /// The L2 engine RPC #[serde(skip_serializing_if = "Option::is_none")] pub l2_engine_url: Option, + /// The JWT secret used to authenticate with the engine #[serde(skip_serializing_if = "Option::is_none")] pub jwt_secret: Option, + /// A trusted L2 RPC used to obtain data from when using checkpoint sync mode. #[serde(skip_serializing_if = "Option::is_none")] pub checkpoint_sync_url: Option, + /// The port to serve the Magi RPC on. #[serde(skip_serializing_if = "Option::is_none")] pub rpc_port: Option, #[serde(skip_serializing_if = "Option::is_none")] pub rpc_addr: Option, + /// If Magi is running in devnet mode. #[serde(default)] pub devnet: bool, } -/// A Chain Configuration +/// Configurations for a blockchain. 
#[derive(Debug, Clone, Serialize, Deserialize)] pub struct ChainConfig { /// The network name @@ -123,7 +134,7 @@ pub struct ChainConfig { pub l1_chain_id: u64, /// The L2 chain id pub l2_chain_id: u64, - /// The L1 block referenced by the L2 chain + /// The L1 genesis block referenced by the L2 chain pub l1_start_epoch: Epoch, /// The L2 genesis block info pub l2_genesis: BlockInfo, @@ -133,7 +144,7 @@ pub struct ChainConfig { pub batch_inbox: Address, /// The deposit contract address pub deposit_contract: Address, - /// The L1 system config contract + /// The L1 system config contract address pub system_config_contract: Address, /// The maximum byte size of all pending channels pub max_channel_size: u64, @@ -157,6 +168,7 @@ pub struct ChainConfig { } impl Default for ChainConfig { + /// Defaults to the Optimism [ChainConfig] fn default() -> Self { ChainConfig::optimism() } @@ -178,7 +190,7 @@ pub struct SystemConfig { } impl SystemConfig { - /// Encoded batch sender as a H256 + /// Encodes batch sender as a H256 pub fn batcher_hash(&self) -> H256 { let mut batch_sender_bytes = self.batch_sender.as_bytes().to_vec(); let mut batcher_hash = iter::repeat(0).take(12).collect::>(); @@ -190,13 +202,18 @@ impl SystemConfig { /// System accounts #[derive(Debug, Clone)] pub struct SystemAccounts { + /// The address that submits attributes deposited transactions in every L2 block pub attributes_depositor: Address, + /// The contract address that attributes deposited transactions are submitted to pub attributes_predeploy: Address, + /// The contract address that holds fees paid to the sequencer during transaction execution & block production pub fee_vault: Address, } +/// Wrapper around a [ChainConfig] #[derive(Debug, Clone, Serialize, Deserialize)] struct ChainProvider { + /// The [ChainConfig] which is unique for each blockchain chain: ChainConfig, } @@ -206,15 +223,20 @@ impl From for Serialized { } } +/// Provides default values for the L2 RPC & engine. #[derive(Debug, Clone, Serialize, Deserialize)] struct DefaultsProvider { + /// The L2 execution node RPC l2_rpc_url: String, + /// The L2 engine RPC l2_engine_url: String, + /// The port to serve the Magi RPC server on rpc_port: u16, rpc_addr: String, } impl Default for DefaultsProvider { + /// Provides default values for the L2 RPC & engine. fn default() -> Self { Self { l2_rpc_url: "http://127.0.0.1:8545".to_string(), @@ -226,13 +248,32 @@ impl Default for DefaultsProvider { } impl ChainConfig { - /// Read and parse a chain config object from a JSON file path + /// Read and parse the [ChainConfig] from a JSON file path pub fn from_json(path: &str) -> Self { let file = std::fs::File::open(path).unwrap(); let external: ExternalChainConfig = serde_json::from_reader(file).unwrap(); external.into() } + /// Generates a [ChainConfig] instance from a given network name. + pub fn from_network_name(network: &str) -> Self { + match network.to_lowercase().as_str() { + "optimism" => Self::optimism(), + "optimism-goerli" => Self::optimism_goerli(), + "optimism-sepolia" => Self::optimism_sepolia(), + "base" => Self::base(), + "base-goerli" => Self::base_goerli(), + "base-sepolia" => Self::base_sepolia(), + file if file.ends_with(".json") => Self::from_json(file), + _ => panic!( + "Invalid network name. \\ + Please use one of the following: 'optimism', 'optimism-goerli', 'optimism-sepolia', 'base-goerli', 'base-sepolia', 'base'. \\ + You can also use a JSON file path for custom configuration." 
+ ), + } + } + + /// [ChainConfig] for Optimism pub fn optimism() -> Self { Self { network: "optimism".to_string(), @@ -269,10 +310,11 @@ impl ChainConfig { blocktime: 2, regolith_time: 0, canyon_time: 170499240, - delta_time: u64::MAX, + delta_time: 1708560000, } } + /// [ChainConfig] for Optimism Goerli pub fn optimism_goerli() -> Self { Self { network: "optimism-goerli".to_string(), @@ -312,6 +354,8 @@ impl ChainConfig { blocktime: 2, } } + + /// [ChainConfig] for Optimism Sepolia pub fn optimism_sepolia() -> Self { Self { network: "optimism-sepolia".to_string(), @@ -352,6 +396,7 @@ impl ChainConfig { } } + /// [ChainConfig] for Base pub fn base() -> Self { Self { network: "base".to_string(), @@ -386,10 +431,11 @@ impl ChainConfig { blocktime: 2, regolith_time: 0, canyon_time: 1704992401, - delta_time: u64::MAX, + delta_time: 1708560000, } } + /// [ChainConfig] for Base Goerli pub fn base_goerli() -> Self { Self { network: "base-goerli".to_string(), @@ -428,6 +474,7 @@ impl ChainConfig { } } + /// [ChainConfig] for Base Sepolia pub fn base_sepolia() -> Self { Self { network: "base-sepolia".to_string(), @@ -468,6 +515,7 @@ impl ChainConfig { } impl Default for SystemAccounts { + /// The default system addresses fn default() -> Self { Self { attributes_depositor: addr("0xdeaddeaddeaddeaddeaddeaddeaddeaddead0001"), @@ -477,14 +525,17 @@ impl Default for SystemAccounts { } } +/// Converts a [str] to an [Address] fn addr(s: &str) -> Address { Address::from_str(s).unwrap() } +/// Converts a [str] to a [H256]. fn hash(s: &str) -> H256 { H256::from_str(s).unwrap() } +/// Returns default blocktime of 2 (seconds). fn default_blocktime() -> u64 { 2 } @@ -496,46 +547,73 @@ fn default_blocktime() -> u64 { /// genesis devnet setup command `--outfile.rollup` flag. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ExternalChainConfig { + /// Genesis settings genesis: ExternalGenesisInfo, + /// Block time of the chain block_time: u64, + /// Maximum timestamp drift max_sequencer_drift: u64, + /// Number of L1 blocks in a sequence window seq_window_size: u64, + /// The max timeout for a channel (as measured by the frame L1 block number) channel_timeout: u64, + /// The L1 chain id l1_chain_id: u64, + /// The L2 chain id l2_chain_id: u64, + /// Timestamp of the regolith hardfork regolith_time: u64, + /// Timestamp of the canyon hardfork canyon_time: u64, + /// Timestamp of the delta hardfork delta_time: u64, + /// The batch inbox address batch_inbox_address: Address, + /// The deposit contract address deposit_contract_address: Address, + /// The L1 system config contract address l1_system_config_address: Address, } +/// The Genesis property of the `rollup.json` file used in `op-node`. #[derive(Debug, Clone, Serialize, Deserialize)] struct ExternalGenesisInfo { + /// L1 genesis block hash & number l1: ChainGenesisInfo, + /// L2 genesis block hash & number l2: ChainGenesisInfo, + /// L2 genesis block timestamp l2_time: u64, + /// Genesis [SystemConfigInfo] settings system_config: SystemConfigInfo, } +/// System config settings #[derive(Debug, Clone, Serialize, Deserialize)] struct SystemConfigInfo { + /// The authorized batch sender that sends batcher transactions to the batch inbox on L1 #[serde(rename = "batcherAddr")] batcher_addr: Address, + /// The current L1 fee overhead to apply to L2 transactions cost computation. Unused after Ecotone hard fork. overhead: H256, + /// The current L1 fee scalar to apply to L2 transactions cost computation. Unused after Ecotone hard fork. 
scalar: H256, + /// The gas limit for L2 blocks #[serde(rename = "gasLimit")] gas_limit: u64, } +/// Genesis block hash & number #[derive(Debug, Clone, Serialize, Deserialize)] struct ChainGenesisInfo { + /// Genesis block number hash: H256, + /// Genesis block hash number: u64, } impl From for ChainConfig { + /// Converts an [ExternalChainConfig] to [ChainConfig]. fn from(external: ExternalChainConfig) -> Self { Self { network: "external".to_string(), @@ -576,6 +654,8 @@ impl From for ChainConfig { } impl From for ExternalChainConfig { + /// Converts [ChainConfig] into [ExternalChainConfig] + /// which is the format used in ``rollup.json`` by `op-node` fn from(chain_config: ChainConfig) -> Self { let mut overhead = [0; 32]; let mut scalar = [0; 32]; @@ -798,4 +878,89 @@ mod test { addr("0x4200000000000000000000000000000000000016") ); } + + #[test] + fn test_chain_config_from_name() { + let optimism_config = ChainConfig::optimism(); + let desired_config = ChainConfig::from_network_name("opTimIsm"); + + assert_eq!(optimism_config.max_seq_drift, desired_config.max_seq_drift); + + assert_eq!( + optimism_config.seq_window_size, + desired_config.seq_window_size + ); + assert_eq!( + optimism_config.channel_timeout, + desired_config.channel_timeout + ); + assert_eq!(optimism_config.l1_chain_id, desired_config.l1_chain_id); + assert_eq!(optimism_config.l2_chain_id, desired_config.l2_chain_id); + assert_eq!(optimism_config.blocktime, desired_config.blocktime); + assert_eq!(optimism_config.regolith_time, desired_config.regolith_time); + assert_eq!(optimism_config.batch_inbox, desired_config.batch_inbox); + assert_eq!( + optimism_config.deposit_contract, + desired_config.deposit_contract + ); + assert_eq!( + optimism_config.system_config_contract, + desired_config.system_config_contract + ); + + assert_eq!( + optimism_config.l1_start_epoch.hash, + desired_config.l1_start_epoch.hash + ); + assert_eq!( + optimism_config.l1_start_epoch.number, + desired_config.l1_start_epoch.number + ); + assert_eq!( + optimism_config.l2_genesis.hash, + desired_config.l2_genesis.hash + ); + assert_eq!( + optimism_config.l2_genesis.number, + desired_config.l2_genesis.number + ); + assert_eq!( + optimism_config.l2_genesis.timestamp, + desired_config.l2_genesis.timestamp + ); + + assert_eq!( + optimism_config.system_config.batch_sender, + desired_config.system_config.batch_sender + ); + + assert_eq!( + optimism_config.system_config.l1_fee_overhead, + desired_config.system_config.l1_fee_overhead + ); + assert_eq!( + optimism_config.system_config.l1_fee_scalar, + desired_config.system_config.l1_fee_scalar + ); + + assert_eq!( + optimism_config.system_config.gas_limit, + desired_config.system_config.gas_limit + ); + + // Generate Base config and compare with optimism config + let desired_config = ChainConfig::from_network_name("base"); + assert_ne!(optimism_config.l2_chain_id, desired_config.l2_chain_id); + assert_ne!( + optimism_config.deposit_contract, + desired_config.deposit_contract + ); + } + + #[test] + #[should_panic(expected = "Invalid network name")] + fn test_chain_config_unknown_chain() { + // Should panic if chain isn't recognized + _ = ChainConfig::from_network_name("magichain"); + } } diff --git a/src/derive/mod.rs b/src/derive/mod.rs index a13df2d9..3bc11be8 100644 --- a/src/derive/mod.rs +++ b/src/derive/mod.rs @@ -14,21 +14,30 @@ use self::{ state::State, }; +/// A module that handles the block derivation stages pub mod stages; +/// A module that keeps track of the current derivation state, caching previous 
L1 and L2 blocks pub mod state; +/// A module that extends the [Iterator] trait with a `purge` method mod purgeable; pub use purgeable::PurgeableIterator; +/// The derivation pipeline is iterated on to update attributes for new blocks. pub struct Pipeline { + /// A channel sender to send a `BatcherTransactionMessage` batcher_transaction_sender: mpsc::Sender, + /// An `Attributes` object attributes: Attributes, + /// Pending `PayloadAttributes` pending_attributes: Option, } impl Iterator for Pipeline { type Item = PayloadAttributes; + /// Returns the pending [PayloadAttributes]. + /// If none exist it will call `Attributes::next()` to advance to the next block and return those attributes instead. fn next(&mut self) -> Option { if self.pending_attributes.is_some() { self.pending_attributes.take() @@ -39,6 +48,7 @@ impl Iterator for Pipeline { } impl Pipeline { + /// Creates a new [Pipeline] and initializes [BatcherTransactions], [Channels], [Batches], and [Attributes] pub fn new(state: Arc>, config: Arc, seq: u64) -> Result { let (tx, rx) = mpsc::channel(); let batcher_transactions = BatcherTransactions::new(rx); @@ -53,12 +63,15 @@ impl Pipeline { }) } + /// Sends [BatcherTransactions] & the L1 block they were received in to the [BatcherTransactions] receiver. pub fn push_batcher_transactions(&self, txs: Vec>, l1_origin: u64) -> Result<()> { self.batcher_transaction_sender .send(BatcherTransactionMessage { txs, l1_origin })?; Ok(()) } + /// Returns a reference to the pending [PayloadAttributes]. + /// If none are pending, it will call `self.next()` to advance to the next block and return those attributes instead. pub fn peek(&mut self) -> Option<&PayloadAttributes> { if self.pending_attributes.is_none() { let next_attributes = self.next(); @@ -68,6 +81,7 @@ impl Pipeline { self.pending_attributes.as_ref() } + /// Resets the state of `self.attributes` by calling `Attributes::purge()` pub fn purge(&mut self) -> Result<()> { self.attributes.purge(); Ok(()) diff --git a/src/derive/purgeable.rs b/src/derive/purgeable.rs index fe7b8957..27c2d107 100644 --- a/src/derive/purgeable.rs +++ b/src/derive/purgeable.rs @@ -1,4 +1,5 @@ /// Iterator that can purge itself pub trait PurgeableIterator: Iterator { + /// Purges and resets an iterator fn purge(&mut self); } diff --git a/src/derive/stages/attributes.rs b/src/derive/stages/attributes.rs index 3f1b3959..d4bec3bf 100644 --- a/src/derive/stages/attributes.rs +++ b/src/derive/stages/attributes.rs @@ -15,17 +15,24 @@ use crate::l1::L1Info; use super::block_input::BlockInput; +/// Represents the `Payload Attributes Derivation` stage. pub struct Attributes { + /// An iterator over [BlockInput]: used to derive [PayloadAttributes] block_input_iter: Box>>, + /// The current derivation [State]. Contains cached L1 & L2 blocks and details of the current safe head & safe epoch. state: Arc>, + /// The sequence number of the block being processed sequence_number: u64, + /// The block hash of the corresponding L1 epoch block. epoch_hash: H256, + /// The global Magi [Config] config: Arc, } impl Iterator for Attributes { type Item = PayloadAttributes; + /// Iterates over the next [BlockInput] and returns the [PayLoadAttributes](struct@PayloadAttributes) from this block. fn next(&mut self) -> Option { self.block_input_iter .next() @@ -35,6 +42,7 @@ impl Iterator for Attributes { } impl PurgeableIterator for Attributes { + /// Purges the [BlockInput] iterator, and sets the [epoch_hash](Attributes::epoch_hash) to the [safe_epoch](State::safe_epoch) hash. 
fn purge(&mut self) { self.block_input_iter.purge(); self.sequence_number = 0; @@ -43,6 +51,7 @@ impl PurgeableIterator for Attributes { } impl Attributes { + /// Creates new [Attributes] and sets the `epoch_hash` to the current L1 safe epoch block hash. pub fn new( block_input_iter: Box>>, state: Arc>, @@ -60,6 +69,9 @@ impl Attributes { } } + /// Processes a given [BlockInput] and returns [PayloadAttributes] for the block. + /// + /// Calls `derive_transactions` to generate the raw transactions fn derive_attributes(&mut self, input: BlockInput) -> PayloadAttributes { tracing::debug!("attributes derived from block {}", input.epoch.number); tracing::debug!("batch epoch hash {:?}", input.epoch.hash); @@ -97,6 +109,11 @@ impl Attributes { } } + /// Derives the deposited transactions and all other L2 user transactions from a given block. Deposited txs include: + /// - L1 Attributes Deposited (exists as the first tx in every block) + /// - User deposits sent to the L1 deposit contract (0 or more and will only exist in the first block of the epoch) + /// + /// Returns a [RawTransaction] vector containing all of the transactions for the L2 block. fn derive_transactions( &self, input: BlockInput, @@ -118,6 +135,7 @@ impl Attributes { transactions } + /// Derives the attributes deposited transaction for a given block and converts this to a [RawTransaction]. fn derive_attributes_deposited( &self, l1_info: &L1Info, @@ -130,6 +148,7 @@ impl Attributes { RawTransaction(attributes_tx.rlp_bytes().to_vec()) } + /// Derives the user deposited txs for the current epoch, and returns a [RawTransaction] vector fn derive_user_deposited(&self) -> Vec { let state = self.state.read().unwrap(); state @@ -147,6 +166,9 @@ impl Attributes { .unwrap_or_default() } + /// Sets the current sequence number. If `self.epoch_hash` != `batch_epoch_hash` this is set to 0; otherwise it increments by 1. + /// + /// Also sets `self.epoch_hash` to `batch_epoch_hash` fn update_sequence_number(&mut self, batch_epoch_hash: H256) { if self.epoch_hash != batch_epoch_hash { self.sequence_number = 0; @@ -158,19 +180,29 @@ impl Attributes { } } +/// Represents a deposited transaction #[derive(Debug)] struct DepositedTransaction { + /// Unique identifier to identify the origin of the deposit source_hash: H256, + /// Address of the sender from: Address, + /// Address of the recipient, or None if the transaction is a contract creation to: Option
, + /// ETH value to mint on L2 mint: U256, + /// ETH value to send to the recipient value: U256, + /// Gas limit for the L2 transaction gas: u64, + /// If true, does not use L2 gas. Always False post-Regolith. is_system_tx: bool, + /// Any additional calldata or contract creation code. data: Vec, } impl From for DepositedTransaction { + /// Converts [AttributesDeposited] to a [DepositedTransaction] fn from(attributes_deposited: AttributesDeposited) -> Self { let hash = attributes_deposited.hash.to_fixed_bytes(); let seq = H256::from_low_u64_be(attributes_deposited.sequence_number).to_fixed_bytes(); @@ -199,6 +231,7 @@ impl From for DepositedTransaction { } impl From for DepositedTransaction { + /// Converts [UserDeposited] to a [DepositedTransaction] fn from(user_deposited: UserDeposited) -> Self { let hash = user_deposited.l1_block_hash.to_fixed_bytes(); let log_index = user_deposited.log_index.into(); @@ -227,6 +260,7 @@ impl From for DepositedTransaction { } impl Encodable for DepositedTransaction { + /// Converts a [DepositedTransaction] to RLP bytes and appends to the stream. fn rlp_append(&self, s: &mut RlpStream) { s.append_raw(&[0x7E], 1); s.begin_list(8); @@ -247,21 +281,33 @@ impl Encodable for DepositedTransaction { } } +/// Represents the attributes provided as calldata in an attributes deposited transaction. #[derive(Debug)] struct AttributesDeposited { + /// The L1 epoch block number number: u64, + /// The L1 epoch block timestamp timestamp: u64, + /// The L1 epoch base fee base_fee: U256, + /// The L1 epoch block hash hash: H256, + /// The L2 block's position in the epoch sequence_number: u64, + /// A versioned hash of the current authorized batcher sender. batcher_hash: H256, + /// The current L1 fee overhead to apply to L2 transactions cost computation. Unused after Ecotone hard fork. fee_overhead: U256, + /// The current L1 fee scalar to apply to L2 transactions cost computation. Unused after Ecotone hard fork. fee_scalar: U256, + /// Gas limit: 1_000_000 if post-Regolith, otherwise 150_000_000 gas: u64, + /// False if post-Regolith, otherwise true is_system_tx: bool, } impl AttributesDeposited { + /// Creates [AttributesDeposited] from the given data. fn from_block_info(l1_info: &L1Info, seq: u64, batch_timestamp: u64, config: &Config) -> Self { let is_regolith = batch_timestamp >= config.chain.regolith_time; let is_system_tx = !is_regolith; @@ -282,6 +328,7 @@ impl AttributesDeposited { } } + /// Encodes [AttributesDeposited] into `setL1BlockValues` transaction calldata, including the selector. fn encode(&self) -> Vec { let tokens = vec![ Token::Uint(self.number.into()), @@ -301,23 +348,35 @@ impl AttributesDeposited { } } +/// Represents a user deposited transaction. #[derive(Debug, Clone)] pub struct UserDeposited { + /// Address of the sender pub from: Address, + /// Address of the recipient, or None if the transaction is a contract creation pub to: Address, + /// ETH value to mint on L2 pub mint: U256, + /// ETH value to send to the recipient pub value: U256, + /// Gas limit for the L2 transaction pub gas: u64, + /// If this is a contract creation pub is_creation: bool, + /// Calldata or contract creation code if `is_creation` is true. pub data: Vec, + /// The L1 block number this was submitted in. pub l1_block_num: u64, + /// The L1 block hash this was submitted in. pub l1_block_hash: H256, + /// The index of the emitted deposit event log in the L1 block. 
pub log_index: U256, } impl TryFrom for UserDeposited { type Error = eyre::Report; + /// Converts the emitted L1 deposit event log into [UserDeposited] fn try_from(log: Log) -> Result { let opaque_data = decode(&[ParamType::Bytes], &log.data)?[0] .clone() diff --git a/src/derive/stages/batcher_transactions.rs b/src/derive/stages/batcher_transactions.rs index ccd72809..65c11a80 100644 --- a/src/derive/stages/batcher_transactions.rs +++ b/src/derive/stages/batcher_transactions.rs @@ -5,19 +5,26 @@ use std::collections::VecDeque; use crate::derive::PurgeableIterator; +/// Represents a transaction sent to the `Batch Inbox` on L1. pub struct BatcherTransactionMessage { + /// The L2 transactions included in this batch pub txs: Vec>, + /// The L1 block number this transaction was included in pub l1_origin: u64, } +/// Receives [BatcherTransactionMessage] messages from a channel and stores these in a [VecDeque]. pub struct BatcherTransactions { + /// [VecDeque] containing [BatcherTransaction] txs: VecDeque, + /// [BatcherTransactionMessage] channel [receiver](mpsc::Receiver) transaction_rx: mpsc::Receiver, } impl Iterator for BatcherTransactions { type Item = BatcherTransaction; + /// Receives new [BatcherTransactionMessage] messages from the channel and adds these to the deque. Pops and returns the first deque element. fn next(&mut self) -> Option { self.process_incoming(); self.txs.pop_front() @@ -25,6 +32,7 @@ impl Iterator for BatcherTransactions { } impl PurgeableIterator for BatcherTransactions { + /// Resets itself by clearing the channel and deque fn purge(&mut self) { // drain the channel first while self.transaction_rx.try_recv().is_ok() {} @@ -33,6 +41,7 @@ impl PurgeableIterator for BatcherTransactions { } impl BatcherTransactions { + /// Creates a new [BatcherTransactions] pub fn new(transaction_rx: mpsc::Receiver) -> Self { Self { transaction_rx, @@ -40,6 +49,7 @@ impl BatcherTransactions { } } + /// Receives new [BatcherTransactionMessage] messages from the channel and adds these to the end of the deque. pub fn process_incoming(&mut self) { while let Ok(BatcherTransactionMessage { txs, l1_origin }) = self.transaction_rx.try_recv() { @@ -56,13 +66,17 @@ impl BatcherTransactions { } } +/// A single batcher transaction #[derive(Debug, Clone)] pub struct BatcherTransaction { + /// The version byte. pub version: u8, + /// The rollup payload consisting of 1 or more frames. pub frames: Vec, } impl BatcherTransaction { + /// Creates a new [BatcherTransaction] pub fn new(data: &[u8], l1_origin: u64) -> Result { let version = data[0]; let frame_data = data.get(1..).ok_or(eyre::eyre!("No frame data"))?; @@ -79,17 +93,25 @@ impl BatcherTransaction { } } +/// A channel frame. #[derive(Debug, Default, Clone)] pub struct Frame { + /// A unique identifier for the channel containing the frame. pub channel_id: u128, + /// The index of the frame within the channel pub frame_number: u16, + /// The byte length of frame_data. Capped to 1,000,000 bytes. pub frame_data_len: u32, + /// A sequence of bytes belonging to the channel, logically after the previous frames pub frame_data: Vec, + /// If the frame is the last in the channel pub is_last: bool, + /// The L1 block number this frame was submitted in. 
pub l1_inclusion_block: u64, } impl Frame { + /// Converts a sequence of bytes into a [Frame] fn from_data(data: &[u8], offset: usize, l1_inclusion_block: u64) -> Result<(Self, usize)> { let data = &data[offset..]; diff --git a/src/derive/stages/batches.rs b/src/derive/stages/batches.rs index 768c98ab..6d2a6a67 100644 --- a/src/derive/stages/batches.rs +++ b/src/derive/stages/batches.rs @@ -17,13 +17,17 @@ use super::channels::Channel; use super::single_batch::SingleBatch; use super::span_batch::SpanBatch; +/// Tracks and processes batches pub struct Batches { /// Mapping of timestamps to batches batches: BTreeMap, - /// Pending block inputs to be outputed + /// Pending block inputs to be outputted pending_inputs: Vec>, + /// A [Channels](super::channels::Channels) iterator, which iterates over [BatcherTransaction](super::batcher_transactions::BatcherTransaction) and processes channel frames. channel_iter: I, + /// The current derivation [State]. Contains cached L1 & L2 blocks and details of the current safe head & safe epoch. state: Arc>, + /// The global Magi [Config] config: Arc, } @@ -33,6 +37,7 @@ where { type Item = BlockInput; + /// Attempts to decode batches in the next channel and returns a [BlockInput](struct@BlockInput) fn next(&mut self) -> Option { self.try_next().unwrap_or_else(|_| { tracing::debug!("Failed to decode batch"); @@ -45,6 +50,7 @@ impl PurgeableIterator for Batches where I: PurgeableIterator, { + /// Clears the channels iterator, batches mapping & pending block inputs fn purge(&mut self) { self.channel_iter.purge(); self.batches.clear(); @@ -53,6 +59,7 @@ where } impl Batches { + /// Creates a new [Batches] instance pub fn new(channel_iter: I, state: Arc>, config: Arc) -> Self { Self { batches: BTreeMap::new(), @@ -68,6 +75,15 @@ impl Batches where I: Iterator, { + /// Initiates the process of building a channel from channel frames. + /// + /// Decodes batches from the built channel and inserts them into the batches mapping. + /// + /// Checks validity of batches in batches mapping, removing any that are invalid. + /// + /// Attempts to derive the first valid batch and returns the first [BlockInput] in the batch. Remaining [BlockInput]s are inserted into `pending_inputs`, + /// + /// If there are already pending inputs, it will skip the above and simply return the first pending [BlockInput] fn try_next(&mut self) -> Result>> { if !self.pending_inputs.is_empty() { return Ok(Some(self.pending_inputs.remove(0))); @@ -115,6 +131,7 @@ where None } } else { + // No valid batches were found. If we are past the epoch number + sequencer window size, and aware of the next epoch -> return a new BlockInput with no transactions. let state = self.state.read().unwrap(); let current_l1_block = state.current_epoch_num; @@ -147,6 +164,7 @@ where }) } + /// Returns [BlockInput] elements that are newer than the current `safe_head`. 
fn filter_inputs(&self, inputs: Vec>) -> Vec> { inputs .into_iter() @@ -154,6 +172,7 @@ where .collect() } + /// Returns the validity of a [Batch] fn batch_status(&self, batch: &Batch) -> BatchStatus { match batch { Batch::Single(batch) => self.single_batch_status(batch), @@ -161,6 +180,7 @@ where } } + /// Returns the validity of a [SingleBatch] fn single_batch_status(&self, batch: &SingleBatch) -> BatchStatus { let state = self.state.read().unwrap(); let epoch = state.safe_epoch; @@ -243,6 +263,7 @@ where BatchStatus::Accept } + /// Returns the validity of a [SpanBatch] fn span_batch_status(&self, batch: &SpanBatch) -> BatchStatus { let state = self.state.read().unwrap(); let epoch = state.safe_epoch; @@ -378,6 +399,7 @@ where } } +/// Attempts to decode channel data into a [Batch] vector fn decode_batches(channel: &Channel, chain_id: u64) -> Result> { let mut channel_data = Vec::new(); let d = Decoder::new(channel.data.as_slice())?; @@ -423,13 +445,17 @@ fn decode_batches(channel: &Channel, chain_id: u64) -> Result> { Ok(batches) } +/// The type of batch - either a [SingleBatch] or [SpanBatch] #[derive(Debug, Clone)] pub enum Batch { + /// A [SingleBatch] Single(SingleBatch), + /// A [SpanBatch] Span(SpanBatch), } impl Batch { + /// Returns the batch timestamp pub fn timestamp(&self, config: &Config) -> u64 { match self { Batch::Single(batch) => batch.timestamp, @@ -437,6 +463,7 @@ impl Batch { } } + /// Returns a [BlockInput] vector of the blocks in a batch. pub fn as_inputs(&self, config: &Config) -> Vec> { match self { Batch::Single(batch) => vec![batch.block_input()], @@ -445,10 +472,15 @@ impl Batch { } } +/// The status of a batch. #[derive(Debug, Clone, PartialEq)] enum BatchStatus { + /// The batch is invalid Drop, + /// The batch is valid Accept, + /// Not enough data to decide Undecided, + /// Batch is for a future block Future, } diff --git a/src/derive/stages/block_input.rs b/src/derive/stages/block_input.rs index 403cb3e3..6d982af8 100644 --- a/src/derive/stages/block_input.rs +++ b/src/derive/stages/block_input.rs @@ -7,19 +7,26 @@ use crate::{ derive::state::State, }; +/// A marker trait to allow representing an epoch as either a block number or an [Epoch] pub trait EpochType {} impl EpochType for u64 {} impl EpochType for Epoch {} +/// A single L2 block derived from a batch. #[derive(Debug)] pub struct BlockInput { + /// Timestamp of the L2 block pub timestamp: u64, + /// The corresponding epoch pub epoch: E, + /// Transactions included in this block pub transactions: Vec, + /// The L1 block this batch was fully derived from pub l1_inclusion_block: u64, } impl BlockInput { + /// Returns the [BlockInput] with full [Epoch] details. 
pub fn with_full_epoch(self, state: &Arc>) -> Result> { let state = state.read().map_err(|_| eyre::eyre!("lock poisoned"))?; let epoch = state diff --git a/src/derive/stages/channels.rs b/src/derive/stages/channels.rs index b28ec2bc..923e27a0 100644 --- a/src/derive/stages/channels.rs +++ b/src/derive/stages/channels.rs @@ -3,7 +3,9 @@ use std::sync::Arc; use super::batcher_transactions::{BatcherTransaction, Frame}; use crate::{config::Config, derive::PurgeableIterator}; +/// Represents the `channel bank` stage to track & process channels pub struct Channels { + /// A [BatcherTransaction] iterator batcher_tx_iter: I, /// List of incomplete channels pending_channels: Vec, @@ -21,6 +23,7 @@ where { type Item = Channel; + /// Processes frames until there are either none left or a channel is ready fn next(&mut self) -> Option { self.process_frames() } @@ -30,6 +33,7 @@ impl PurgeableIterator for Channels where I: PurgeableIterator, { + /// Clears the iterator, `pending_channels` & `frame_bank` fn purge(&mut self) { self.batcher_tx_iter.purge(); self.pending_channels.clear(); @@ -38,6 +42,7 @@ where } impl Channels { + /// Creates a new [Channels] instance pub fn new(batcher_tx_iter: I, config: Arc) -> Self { Self { batcher_tx_iter, @@ -143,10 +148,15 @@ where /// An intermediate pending channel #[derive(Debug)] pub struct PendingChannel { + /// A unique identifier for this channel channel_id: u128, + /// Frames seen so far frames: Vec, + /// The number of frames seen size: Option, + /// The highest L1 block number frames in this channel were submitted in highest_l1_block: u64, + /// The lowest L1 block number frames in this channel were submitted in lowest_l1_block: u64, } @@ -168,6 +178,7 @@ impl PendingChannel { } } + /// Checks if we have seen the last frame for this channel pub fn is_complete(&self) -> bool { self.size == Some(self.frames.len() as u16) } @@ -186,6 +197,7 @@ impl PendingChannel { .fold(Vec::new(), |a, b| [a, b.frame_data.clone()].concat()) } + /// The highest L1 inclusion block of frames in the channel. pub fn l1_inclusion_block(&self) -> u64 { self.frames .iter() @@ -222,12 +234,16 @@ impl PendingChannel { /// A Channel #[derive(Debug, Clone, Eq, PartialEq)] pub struct Channel { + /// A unique identifier for the channel pub id: u128, + /// Data from all of the frames in the channel pub data: Vec, + /// The L1 block that the channel can be fully built from. This is the inclusion block of the last frame in the channel. 
pub l1_inclusion_block: u64, } impl From for Channel { + /// Converts a [PendingChannel] to a [Channel] fn from(pc: PendingChannel) -> Self { Channel { id: pc.channel_id, diff --git a/src/derive/stages/mod.rs b/src/derive/stages/mod.rs index 561ecc37..b8586b77 100644 --- a/src/derive/stages/mod.rs +++ b/src/derive/stages/mod.rs @@ -1,7 +1,20 @@ +/// A module to handle the payload attributes derivation stage pub mod attributes; + +/// A module to handle batcher transactions and frames pub mod batcher_transactions; + +/// A module to handle processing of a [Batch](crate::derive::stages::batches::Batch) pub mod batches; + +/// A module to handle building a [BlockInput](crate::derive::stages::block_input::BlockInput) mod block_input; + +/// A module to handle the channel bank derivation stage pub mod channels; + +/// A module to handle processing of a [SingleBatch](crate::derive::stages::single_batch::SingleBatch) mod single_batch; + +/// A module to handle processing of a [SpanBatch](crate::derive::stages::span_batch::SpanBatch) mod span_batch; diff --git a/src/derive/stages/single_batch.rs b/src/derive/stages/single_batch.rs index b9e2570d..bb6baea7 100644 --- a/src/derive/stages/single_batch.rs +++ b/src/derive/stages/single_batch.rs @@ -7,17 +7,25 @@ use crate::common::RawTransaction; use super::block_input::BlockInput; +/// Represents a single batch: a single encoded L2 block #[derive(Debug, Clone)] pub struct SingleBatch { + /// Block hash of the previous L2 block pub parent_hash: H256, + /// The batch epoch number. Same as the first L1 block number in the epoch. pub epoch_num: u64, + /// The block hash of the first L1 block in the epoch pub epoch_hash: H256, + /// The L2 block timestamp of this batch pub timestamp: u64, + /// The L2 block transactions in this batch pub transactions: Vec, + /// The L1 block number this batch was fully derived from. pub l1_inclusion_block: u64, } impl SingleBatch { + /// Decodes RLP bytes into a [SingleBatch] pub fn decode(rlp: &Rlp, l1_inclusion_block: u64) -> Result { let parent_hash = rlp.val_at(0)?; let epoch_num = rlp.val_at(1)?; @@ -35,12 +43,14 @@ impl SingleBatch { }) } + /// If any transactions are empty or deposited transaction types. pub fn has_invalid_transactions(&self) -> bool { self.transactions .iter() .any(|tx| tx.0.is_empty() || tx.0[0] == 0x7E) } + /// Returns a [BlockInput] instance for this batch. Represents a single L2 block. pub fn block_input(&self) -> BlockInput { BlockInput { timestamp: self.timestamp, diff --git a/src/derive/stages/span_batch.rs b/src/derive/stages/span_batch.rs index 349a6dcf..ddf8166e 100644 --- a/src/derive/stages/span_batch.rs +++ b/src/derive/stages/span_batch.rs @@ -8,20 +8,31 @@ use crate::{common::RawTransaction, config::Config}; use super::block_input::BlockInput; +/// Represents a span batch: a range of encoded L2 blocks #[derive(Debug, Clone)] pub struct SpanBatch { + /// Uvarint encoded relative timestamp since L2 genesis pub rel_timestamp: u64, + /// Uvarint encoded L1 origin number of the last L2 block in the batch pub l1_origin_num: u64, + /// First 20 bytes of the parent hash of the first L2 block in the batch. pub parent_check: [u8; 20], + /// Last 20 bytes of the L1 origin hash of the last L2 block in the batch. pub l1_origin_check: [u8; 20], + /// Uvarint encoded number of L2 blocks in the batch. pub block_count: u64, + /// Bitlist of [SpanBatch.block_count] bits: 1 bit per block. 
pub origin_bits: Vec, + /// Uvarint encoded number of L2 transactions in this batch pub block_tx_counts: Vec, + /// The L2 transactions in this batch pub transactions: Vec, + /// The L1 block number this batch was derived from. pub l1_inclusion_block: u64, } impl SpanBatch { + /// Decodes a sequence of bytes into a [SpanBatch] pub fn decode(data: &[u8], l1_inclusion_block: u64, chain_id: u64) -> Result { let (rel_timestamp, data) = unsigned_varint::decode::u64(data)?; let (l1_origin_num, data) = unsigned_varint::decode::u64(data)?; @@ -47,6 +58,7 @@ impl SpanBatch { }) } + /// Returns a [BlockInput] vector for this batch. Contains all L2 block in the batch. pub fn block_inputs(&self, config: &Config) -> Vec> { let init_epoch_num = self.l1_origin_num - self @@ -84,6 +96,7 @@ impl SpanBatch { inputs } + /// Returns the L1 origin number of the last L2 block in the batch pub fn start_epoch_num(&self) -> u64 { self.l1_origin_num - self @@ -95,10 +108,12 @@ impl SpanBatch { } } +/// Splits a byte slice at the specified index (length) into a tuple of 2 byte slices fn take_data(data: &[u8], length: usize) -> (&[u8], &[u8]) { (&data[0..length], &data[length..]) } +/// Decodes a bitlist into boolean values and returns a tuple of booleans + the original bitlist. fn decode_bitlist(data: &[u8], len: u64) -> (Vec, &[u8]) { let mut bitlist = Vec::new(); @@ -117,6 +132,7 @@ fn decode_bitlist(data: &[u8], len: u64) -> (Vec, &[u8]) { (bitlist, data) } +/// Decodes the number of transactions in the batch into a U64 vector fn decode_block_tx_counts(data: &[u8], block_count: u64) -> Result<(Vec, &[u8])> { let mut tx_counts = Vec::new(); let mut data_ref = data; @@ -129,6 +145,7 @@ fn decode_block_tx_counts(data: &[u8], block_count: u64) -> Result<(Vec, &[ Ok((tx_counts, data_ref)) } +/// Decodes transactions in a batch and returns a [RawTransaction] vector fn decode_transactions( chain_id: u64, data: &[u8], @@ -272,6 +289,7 @@ fn decode_transactions( Ok((txs, data)) } +/// Decodes transaction nonces in the batch into a U64 vector fn decode_uvarint_list(data: &[u8], count: u64) -> (Vec, &[u8]) { let mut list = Vec::new(); let mut data_ref = data; @@ -285,6 +303,7 @@ fn decode_uvarint_list(data: &[u8], count: u64) -> (Vec, &[u8]) { (list, data_ref) } +/// Decodes EIP-2718 `TransactionType` formatted transactions in the batch into a [TxData] vector fn decode_tx_data(data: &[u8], tx_count: u64) -> (Vec, &[u8]) { let mut data_ref = data; let mut tx_datas = Vec::new(); @@ -351,28 +370,45 @@ fn decode_tx_data(data: &[u8], tx_count: u64) -> (Vec, &[u8]) { (tx_datas, data_ref) } +/// The transaction type - Legacy, EIP-2930, or EIP-1559 #[derive(Debug)] enum TxData { + /// A legacy transaction type Legacy { + /// Transaction value value: U256, + /// Transaction gas price gas_price: U256, + /// Transaction calldata data: Bytes, }, + /// An EIP-2930 transaction type Type1 { + /// Transaction value value: U256, + /// Transaction gas price gas_price: U256, + /// Transaction calldata data: Bytes, + /// Access list as specified in EIP-2930 access_list: AccessList, }, + /// An EIP-1559 transaction type Type2 { + /// Transaction value value: U256, + /// Max fee per gas as specified in EIP-1559 max_fee: U256, + /// Max priority fee as specified in EIP-1559 max_priority_fee: U256, + /// Transaction calldata data: Bytes, + /// Access list as specified in EIP-2930 access_list: AccessList, }, } +/// Decodes transaction `To` fields in the batch into an [Address] vector fn decode_tos(data: &[u8], count: u64) -> (Vec
<Address>, &[u8]) { let mut data_ref = data; let mut tos = Vec::new(); for _ in 0..count { let (to, d) = decode_address(data_ref); tos.push(to); data_ref = d; } @@ -385,12 +421,14 @@ fn decode_tos(data: &[u8], count: u64) -> (Vec<Address>
, &[u8]) { (tos, data_ref) } +/// Decodes arbitrary slice of bytes into an [Address] fn decode_address(data: &[u8]) -> (Address, &[u8]) { let (address_bytes, data) = take_data(data, 20); let address = Address::from_slice(address_bytes); (address, data) } +/// Decodes transaction `R` & `S` signature fields in the batch into a (U256, U256) vector fn decode_signatures(data: &[u8], tx_count: u64) -> (Vec<(U256, U256)>, &[u8]) { let mut sigs = Vec::new(); let mut data_ref = data; @@ -407,6 +445,7 @@ fn decode_signatures(data: &[u8], tx_count: u64) -> (Vec<(U256, U256)>, &[u8]) { (sigs, data_ref) } +/// Decodes a U256 from an arbitrary slice of bytes fn decode_u256(data: &[u8]) -> (U256, &[u8]) { let (bytes, data) = take_data(data, 32); let value = U256::from_big_endian(bytes); diff --git a/src/derive/state.rs b/src/derive/state.rs index c4ead277..4eb89dc9 100644 --- a/src/derive/state.rs +++ b/src/derive/state.rs @@ -12,17 +12,26 @@ use crate::{ l1::L1Info, }; +/// Represents the current derivation state. Consists of cached L1 & L2 blocks, and details of the current safe head & safe epoch. pub struct State { + /// Map of L1 blocks from the current L1 safe epoch - ``seq_window_size`` l1_info: BTreeMap, + /// Map of L1 block hashes from the current L1 safe epoch - ``seq_window_size`` l1_hashes: BTreeMap, + /// Map of L2 blocks from the current L2 safe head - (``max_seq_drift`` / ``blocktime``) l2_refs: BTreeMap, + /// The current safe head pub safe_head: BlockInfo, + /// The current safe epoch pub safe_epoch: Epoch, + /// The current epoch number. Same as the first L1 block number in this sequencing window. pub current_epoch_num: u64, + /// Global config config: Arc, } impl State { + /// Creates a new [State] and fetches and caches a range of L2 blocks. pub async fn new( finalized_head: BlockInfo, finalized_epoch: Epoch, @@ -42,24 +51,28 @@ impl State { } } + /// Returns a cached L1 block by block hash pub fn l1_info_by_hash(&self, hash: H256) -> Option<&L1Info> { self.l1_info.get(&hash) } + /// Returns a cached L1 block by block number pub fn l1_info_by_number(&self, num: u64) -> Option<&L1Info> { self.l1_hashes .get(&num) .and_then(|hash| self.l1_info.get(hash)) } - pub fn l2_info_by_timestamp(&self, timestmap: u64) -> Option<&(BlockInfo, Epoch)> { - let block_num = (timestmap - self.config.chain.l2_genesis.timestamp) + /// Returns a cached L2 block by block timestamp + pub fn l2_info_by_timestamp(&self, timestamp: u64) -> Option<&(BlockInfo, Epoch)> { + let block_num = (timestamp - self.config.chain.l2_genesis.timestamp) / self.config.chain.blocktime + self.config.chain.l2_genesis.number; self.l2_refs.get(&block_num) } + /// Returns an epoch from an L1 block hash pub fn epoch_by_hash(&self, hash: H256) -> Option { self.l1_info_by_hash(hash).map(|info| Epoch { number: info.block_info.number, @@ -68,6 +81,7 @@ impl State { }) } + /// Returns an epoch by number. Same as the first L1 block number in the epoch's sequencing window. pub fn epoch_by_number(&self, num: u64) -> Option { self.l1_info_by_number(num).map(|info| Epoch { number: info.block_info.number, @@ -76,6 +90,9 @@ impl State { }) } + /// Inserts data from the ``l1_info`` parameter into ``l1_hashes`` & ``l1_info`` maps. + /// + /// This also updates ``current_epoch_num`` to the block number of the given ``l1_info``. 
pub fn update_l1_info(&mut self, l1_info: L1Info) { self.current_epoch_num = l1_info.block_info.number; @@ -86,6 +103,11 @@ impl State { self.prune(); } + /// Resets the state and updates the safe head with the given parameters. + /// + /// ``current_epoch_num`` is set to 0. + /// + /// ``l1_info`` & ``l1_hashes`` mappings are cleared. pub fn purge(&mut self, safe_head: BlockInfo, safe_epoch: Epoch) { self.current_epoch_num = 0; self.l1_info.clear(); @@ -94,6 +116,9 @@ impl State { self.update_safe_head(safe_head, safe_epoch); } + /// Sets ``safe_head`` & ``safe_epoch`` to the given parameters. + /// + /// Also inserts these details into ``l2_refs``. pub fn update_safe_head(&mut self, safe_head: BlockInfo, safe_epoch: Epoch) { self.safe_head = safe_head; self.safe_epoch = safe_epoch; @@ -102,6 +127,9 @@ impl State { .insert(self.safe_head.number, (self.safe_head, self.safe_epoch)); } + /// Removes keys from ``l1_info`` & ``l1_hashes`` mappings if older than ``self.safe_epoch.number`` - ``seq_window_size``. + /// + /// Removes keys from the ``l2_refs`` mapping if older than ``self.safe_head.number`` - (``max_seq_drift`` / ``blocktime``) fn prune(&mut self) { let prune_until = self .safe_epoch @@ -130,6 +158,9 @@ impl State { } } +/// Returns the L2 blocks from the given ``head_num`` - (``max_seq_drift`` / ``blocktime``) to ``head_num``. +/// +/// If the lookback period is before the genesis block, it will return L2 blocks starting from genesis. async fn l2_refs( head_num: u64, provider: &Provider, diff --git a/src/driver/engine_driver.rs b/src/driver/engine_driver.rs index 41ac6ced..a58b44ec 100644 --- a/src/driver/engine_driver.rs +++ b/src/driver/engine_driver.rs @@ -14,6 +14,7 @@ use crate::{ engine::{Engine, EngineApi, ExecutionPayload, ForkchoiceState, PayloadAttributes, Status}, }; +/// The EngineDriver is responsible for initiating block production & validation via the [EngineApi] pub struct EngineDriver { /// The L2 execution engine engine: Arc, @@ -34,6 +35,7 @@ pub struct EngineDriver { } impl EngineDriver { + /// Initiates validation & production of a new L2 block from the given [PayloadAttributes] and updates the forkchoice pub async fn handle_attributes(&mut self, attributes: PayloadAttributes) -> Result<()> { let block: Option> = self.block_at(attributes.timestamp.as_u64()).await; @@ -49,6 +51,7 @@ impl EngineDriver { } } + /// Instructs the engine to create a block and updates the forkchoice, based on a payload received via p2p gossip. pub async fn handle_unsafe_payload(&mut self, payload: &ExecutionPayload) -> Result<()> { self.push_payload(payload.clone()).await?; self.unsafe_head = payload.into(); @@ -63,17 +66,20 @@ impl EngineDriver { Ok(()) } + /// Updates the [EngineDriver] finalized head & epoch pub fn update_finalized(&mut self, head: BlockInfo, epoch: Epoch) { self.finalized_head = head; self.finalized_epoch = epoch; } + /// Sets the [EngineDriver] unsafe & safe heads, and safe epoch to the current finalized head & epoch. pub fn reorg(&mut self) { self.unsafe_head = self.finalized_head; self.safe_head = self.finalized_head; self.safe_epoch = self.finalized_epoch; } + /// Sends a `ForkchoiceUpdated` message to check if the [Engine] is ready. 
pub async fn engine_ready(&self) -> bool { let forkchoice = self.create_forkchoice_state(); self.engine @@ -82,6 +88,11 @@ impl EngineDriver { .is_ok() } + /// Initiates validation & production of a new block: + /// - Sends the [PayloadAttributes] to the engine via `engine_forkchoiceUpdatedV2` (V3 post Ecotone) and retrieves the [ExecutionPayload] + /// - Executes the [ExecutionPayload] to create a block via `engine_newPayloadV2` (V3 post Ecotone) + /// - Updates the [EngineDriver] `safe_head`, `safe_epoch`, and `unsafe_head` + /// - Updates the forkchoice and sends this to the engine via `engine_forkchoiceUpdatedV2` (v3 post Ecotone) async fn process_attributes(&mut self, attributes: PayloadAttributes) -> Result<()> { let new_epoch = *attributes.epoch.as_ref().unwrap(); @@ -101,6 +112,7 @@ impl EngineDriver { Ok(()) } + /// Updates the forkchoice by sending `engine_forkchoiceUpdatedV2` (v3 post Ecotone) to the engine with no payload. async fn skip_attributes( &mut self, attributes: PayloadAttributes, @@ -114,6 +126,7 @@ impl EngineDriver { Ok(()) } + /// Sends [PayloadAttributes] via a `ForkChoiceUpdated` message to the [Engine] and returns the [ExecutionPayload] sent by the Execution Client. async fn build_payload(&self, attributes: PayloadAttributes) -> Result { let forkchoice = self.create_forkchoice_state(); @@ -133,6 +146,7 @@ impl EngineDriver { self.engine.get_payload(id).await } + /// Sends the given [ExecutionPayload] to the [Engine] via `NewPayload` async fn push_payload(&self, payload: ExecutionPayload) -> Result<()> { let status = self.engine.new_payload(payload).await?; if status.status != Status::Valid && status.status != Status::Accepted { @@ -142,6 +156,7 @@ impl EngineDriver { Ok(()) } + /// Sends a `ForkChoiceUpdated` message to the [Engine] with the current `Forkchoice State` and no payload. async fn update_forkchoice(&self) -> Result<()> { let forkchoice = self.create_forkchoice_state(); @@ -156,6 +171,9 @@ impl EngineDriver { Ok(()) } + /// Updates the current `safe_head` & `safe_epoch`. 
+ /// + /// Also updates the current `unsafe_head` to the given `new_head` if `reorg_unsafe` is `true`, or if the updated `safe_head` is newer than the current `unsafe_head` fn update_safe_head( &mut self, new_head: BlockInfo, @@ -174,6 +192,10 @@ impl EngineDriver { Ok(()) } + /// Creates a [ForkchoiceState]: + /// - `head_block` = `unsafe_head` + /// - `safe_block` = `safe_head` + /// - `finalized_block` = `finalized_head` fn create_forkchoice_state(&self) -> ForkchoiceState { ForkchoiceState { head_block_hash: self.unsafe_head.hash, @@ -182,6 +204,7 @@ impl EngineDriver { } } + /// Fetches the L2 block for a given timestamp from the L2 Execution Client async fn block_at(&self, timestamp: u64) -> Option> { let time_diff = timestamp as i64 - self.finalized_head.timestamp as i64; let blocks = time_diff / self.blocktime as i64; @@ -193,6 +216,7 @@ impl EngineDriver { } } +/// True if transactions in [PayloadAttributes] are not the same as those in a fetched L2 [Block] fn should_skip(block: &Block, attributes: &PayloadAttributes) -> Result { tracing::debug!( "comparing block at {} with attributes at {}", @@ -229,6 +253,7 @@ fn should_skip(block: &Block, attributes: &PayloadAttributes) -> Re } impl EngineDriver { + /// Creates a new [EngineDriver] and builds the [EngineApi] client pub fn new( finalized_head: BlockInfo, finalized_epoch: Epoch, diff --git a/src/driver/info.rs b/src/driver/info.rs index fe7d43c7..5577808c 100644 --- a/src/driver/info.rs +++ b/src/driver/info.rs @@ -4,19 +4,24 @@ use ethers::middleware::Middleware; use ethers::providers::{JsonRpcClient, Provider, ProviderError}; use ethers::types::{Block, BlockId, BlockNumber, Transaction}; +/// An asynchronous trait for fetching blocks along with their transactions. #[async_trait::async_trait] pub trait InnerProvider { + /// Retrieves a block and its transactions async fn get_block_with_txs( &self, block_id: BlockId, ) -> Result>, ProviderError>; } +/// Wrapper around a [Provider] pub struct HeadInfoFetcher<'a, P: JsonRpcClient> { + /// An ethers [Provider] implementing the [JsonRpcClient] trait inner: &'a Provider

<P>, } impl<'a, P: JsonRpcClient> From<&'a Provider<P>
> for HeadInfoFetcher<'a, P> { + /// Converts a [Provider] to a [HeadInfoFetcher] fn from(inner: &'a Provider<P>
) -> Self { Self { inner } } @@ -24,6 +29,7 @@ impl<'a, P: JsonRpcClient> From<&'a Provider<P>
> for HeadInfoFetcher<'a, P> { #[async_trait::async_trait] impl<'a, P: JsonRpcClient> InnerProvider for HeadInfoFetcher<'a, P> { + /// Fetches a block with transactions async fn get_block_with_txs( &self, block_id: BlockId, @@ -32,9 +38,11 @@ impl<'a, P: JsonRpcClient> InnerProvider for HeadInfoFetcher<'a, P> { } } +/// Provides a method to fetch the latest finalized block pub struct HeadInfoQuery {} impl HeadInfoQuery { + /// Fetches the latest finalized L2 block pub async fn get_head_info(p: &P, config: &Config) -> HeadInfo { p.get_block_with_txs(BlockId::Number(BlockNumber::Finalized)) .await diff --git a/src/driver/mod.rs b/src/driver/mod.rs index ac42f4af..e8ff1252 100644 --- a/src/driver/mod.rs +++ b/src/driver/mod.rs @@ -28,8 +28,13 @@ use crate::{ use self::engine_driver::EngineDriver; +/// A module to handle block production & validation mod engine_driver; + +/// A module to handle fetching blocks mod info; + +/// A module to handle conversions to a [HeadInfo] struct mod types; pub use types::*; @@ -52,9 +57,9 @@ pub struct Driver { chain_watcher: ChainWatcher, /// Channel to receive the shutdown signal from shutdown_recv: watch::Receiver, - /// Channel to receive unsafe block from + /// Channel to receive unsafe blocks from unsafe_block_recv: Receiver, - /// Channel to send unsafe signer updated to block handler + /// Channel to send unsafe signer updates to block handler unsafe_block_signer_sender: Sender
<Address>
, /// Networking service network_service: Option, @@ -63,6 +68,7 @@ pub struct Driver { } impl Driver { + /// Creates a new [Driver] from the given [Config] pub async fn from_config(config: Config, shutdown_recv: watch::Receiver) -> Result { let client = reqwest::ClientBuilder::new() .timeout(Duration::from_secs(5)) @@ -150,6 +156,7 @@ impl Driver { } } + /// Loops until the [EngineApi] is online and receives a response from the engine. async fn await_engine_ready(&self) { while !self.engine_driver.engine_ready().await { self.check_shutdown().await; @@ -157,7 +164,7 @@ impl Driver { } } - /// Attempts to advance the execution node forward using either L1 info our + /// Attempts to advance the execution node forward using either L1 info or /// blocks received on the p2p network. async fn advance(&mut self) -> Result<()> { self.advance_safe_head().await?; @@ -217,6 +224,7 @@ impl Driver { Ok(()) } + /// Collects unsafe blocks received via p2p gossip and updates the forkchoice with the first available unsafe block. async fn advance_unsafe_head(&mut self) -> Result<()> { while let Ok(payload) = self.unsafe_block_recv.try_recv() { self.future_unsafe_blocks.push(payload); @@ -241,6 +249,7 @@ impl Driver { Ok(()) } + /// Updates the [State] `safe_head` fn update_state_head(&self) -> Result<()> { let mut state = self .state @@ -305,6 +314,7 @@ impl Driver { Ok(()) } + /// Updates the current finalized L2 block in the [EngineDriver] based on their inclusion in finalized L1 blocks fn update_finalized(&mut self) { let new_finalized = self .unfinalized_blocks @@ -322,6 +332,7 @@ impl Driver { .retain(|(_, _, inclusion, _)| *inclusion > self.finalized_l1_block_number); } + /// Begins p2p networking if fully synced with no unfinalized blocks fn try_start_networking(&mut self) -> Result<()> { if self.synced() { if let Some(service) = self.network_service.take() { @@ -332,12 +343,14 @@ impl Driver { Ok(()) } + /// Updates Prometheus metrics fn update_metrics(&self) { metrics::FINALIZED_HEAD.set(self.engine_driver.finalized_head.number as i64); metrics::SAFE_HEAD.set(self.engine_driver.safe_head.number as i64); metrics::SYNCED.set(self.synced() as i64); } + /// True if there are no unfinalized blocks fn synced(&self) -> bool { !self.unfinalized_blocks.is_empty() } diff --git a/src/driver/types.rs b/src/driver/types.rs index 5eca6132..7cda6b92 100644 --- a/src/driver/types.rs +++ b/src/driver/types.rs @@ -18,6 +18,7 @@ pub struct HeadInfo { impl TryFrom> for HeadInfo { type Error = eyre::Report; + /// Returns `HeadInfo` consisting of the L2 block, the L1 epoch block it belongs to, and the L2 block's position in the epoch. fn try_from(value: Block) -> Result { let tx_calldata = value .transactions diff --git a/src/engine/api.rs b/src/engine/api.rs index 64998c59..b88c8ef1 100644 --- a/src/engine/api.rs +++ b/src/engine/api.rs @@ -206,6 +206,7 @@ pub struct EngineApiErrorPayload { #[async_trait::async_trait] impl Engine for EngineApi { + /// Sends an `engine_forkchoiceUpdatedV2` (V3 post Ecotone) message to the engine. async fn forkchoice_updated( &self, forkchoice_state: ForkchoiceState, @@ -221,12 +222,14 @@ impl Engine for EngineApi { Ok(res) } + /// Sends an `engine_newPayloadV2` (V3 post Ecotone) message to the engine. async fn new_payload(&self, execution_payload: ExecutionPayload) -> Result { let params = vec![serde_json::to_value(execution_payload)?]; let res = self.post(ENGINE_NEW_PAYLOAD_V2, params).await?; Ok(res) } + /// Sends an `engine_getPayloadV2` (V3 post Ecotone) message to the engine. 
async fn get_payload(&self, payload_id: PayloadId) -> Result { let encoded = format!("{:x}", payload_id); let padded = format!("0x{:0>16}", encoded); @@ -238,9 +241,11 @@ impl Engine for EngineApi { } } +/// Wrapper around an [ExecutionPayload] #[derive(Debug, Serialize, Deserialize, Default)] #[serde(rename_all = "camelCase")] struct GetPayloadResponse { + /// The execution payload returned by the engine via `engine_getPayloadV2` (`engine_getPayloadV3` post Ecotone) execution_payload: ExecutionPayload, } diff --git a/src/engine/auth.rs b/src/engine/auth.rs index 0c6d09dc..fed17bdd 100644 --- a/src/engine/auth.rs +++ b/src/engine/auth.rs @@ -39,7 +39,7 @@ impl JwtSecret { } } - /// Generates a random [`JwtSecret`][crate::auth::JwtSecret]. + /// Generates a random [`JwtSecret`] pub fn random() -> Self { let random_bytes: [u8; 32] = rand::thread_rng().gen(); let secret = hex::encode(random_bytes); diff --git a/src/engine/payload.rs b/src/engine/payload.rs index 52b75125..cc25bb59 100644 --- a/src/engine/payload.rs +++ b/src/engine/payload.rs @@ -47,6 +47,7 @@ pub struct ExecutionPayload { impl TryFrom> for ExecutionPayload { type Error = eyre::Report; + /// Converts a [Block] to an [ExecutionPayload] fn try_from(value: Block) -> Result { let encoded_txs = (*value .transactions @@ -82,7 +83,7 @@ impl TryFrom> for ExecutionPayload { /// ## PayloadAttributes /// /// L2 extended payload attributes for Optimism. -/// For more details, visit the [Optimism specs](https://github.com/ethereum-optimism/optimism/blob/develop/specs/exec-engine.md#extended-payloadattributesv1). +/// For more details, visit the [Optimism specs](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/exec-engine.md#extended-payloadattributesv1). #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)] #[serde(rename_all = "camelCase")] pub struct PayloadAttributes { diff --git a/src/engine/traits.rs b/src/engine/traits.rs index 8936ec87..3adc5525 100644 --- a/src/engine/traits.rs +++ b/src/engine/traits.rs @@ -10,30 +10,34 @@ use super::{ /// /// A set of methods that allow a consensus client to interact with an execution engine. /// This is a modified version of the [Ethereum Execution API Specs](https://github.com/ethereum/execution-apis), -/// as defined in the [Optimism Exec Engine Specs](https://github.com/ethereum-optimism/optimism/blob/develop/specs/exec-engine.md). +/// as defined in the [Optimism Exec Engine Specs](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/exec-engine.md). #[async_trait] pub trait Engine: Send + Sync + 'static { /// ## forkchoice_updated /// - /// Updates were made to [`engine_forkchoiceUpdatedV1`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#engine_forkchoiceupdatedv1) - /// for L2. This updates which L2 blocks the engine considers to be canonical ([ForkchoiceState] argument), + /// Updates were made to [`engine_forkchoiceUpdatedV2`](https://github.com/ethereum/execution-apis/blob/main/src/engine/shanghai.md#engine_forkchoiceupdatedv2) + /// for L2: an extended [PayloadAttributes] + /// This updates which L2 blocks the engine considers to be canonical ([ForkchoiceState] argument), /// and optionally initiates block production ([PayloadAttributes] argument). 
/// /// ### Specification /// - /// method: engine_forkchoiceUpdatedV1 + /// method: engine_forkchoiceUpdatedV2 /// params: /// - [ForkchoiceState] /// - [PayloadAttributes] + /// /// timeout: 8s + /// /// returns: /// - [ForkChoiceUpdate] + /// /// potential errors: /// - code and message set in case an exception happens while the validating payload, updating the forkchoice or initiating the payload build process. /// /// ### Reference /// - /// See more details in the [Optimism Specs](https://github.com/ethereum-optimism/optimism/blob/develop/specs/exec-engine.md#engine_forkchoiceupdatedv1). + /// See more details in the [Optimism Specs](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/exec-engine.md#engine_forkchoiceupdatedv1). async fn forkchoice_updated( &self, forkchoice_state: ForkchoiceState, @@ -42,44 +46,52 @@ pub trait Engine: Send + Sync + 'static { /// ## new_payload /// - /// No modifications to [`engine_newPayloadV1`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#engine_newpayloadv1) + /// No modifications to [`engine_newPayloadV2`](https://github.com/ethereum/execution-apis/blob/main/src/engine/shanghai.md#engine_newpayloadv2) /// were made for L2. Applies a L2 block to the engine state. /// /// ### Specification /// - /// method: engine_newPayloadV1 + /// method: engine_newPayloadV2 + /// /// params: /// - [ExecutionPayload] + /// /// timeout: 8s + /// /// returns: /// - [PayloadStatus] + /// /// potential errors: /// - code and message set in case an exception happens while processing the payload. /// /// ### Reference /// - /// See more details in the [Optimism Specs](https://github.com/ethereum-optimism/optimism/blob/develop/specs/exec-engine.md#engine_newPayloadv1). + /// See more details in the [Optimism Specs](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/exec-engine.md#engine_newPayloadv1). async fn new_payload(&self, execution_payload: ExecutionPayload) -> Result; /// ## get_payload /// - /// No modifications to [`engine_getPayloadV1`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#engine_getpayloadv1) - /// were made for L2. Retrieves a payload by ID, prepared by [engine_forkchoiceUpdatedV1](EngineApi::engine_forkchoiceUpdatedV1) + /// No modifications to [`engine_getPayloadV2`](https://github.com/ethereum/execution-apis/blob/main/src/engine/shanghai.md#engine_getpayloadv2) + /// were made for L2. Retrieves a payload by ID, prepared by [engine_forkchoiceUpdatedV2](super::EngineApi) /// when called with [PayloadAttributes]. /// /// ### Specification /// - /// method: engine_getPayloadV1 + /// method: engine_getPayloadV2 + /// /// params: /// - [PayloadId]: DATA, 8 Bytes - Identifier of the payload build process + /// /// timeout: 1s + /// /// returns: /// - [ExecutionPayload] + /// /// potential errors: /// - code and message set in case an exception happens while getting the payload. /// /// ### Reference /// - /// See more details in the [Optimism Specs](https://github.com/ethereum-optimism/optimism/blob/develop/specs/exec-engine.md#engine_getPayloadv1). + /// See more details in the [Optimism Specs](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/exec-engine.md#engine_getPayloadv1). 
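Taken together, the three methods above are driven in a fixed order when the rollup node builds a block. The sketch below assumes the attributes parameter is an `Option` and that the forkchoice response carries a `payload_id` field; both are assumptions of the sketch, not guarantees of the trait:

```rust
use eyre::Result;
use magi::engine::{Engine, ExecutionPayload, ForkchoiceState, PayloadAttributes};

/// A sketch of one block-building round trip over the Engine API.
/// The `payload_id` field access is an assumption of this example.
async fn build_block<E: Engine>(
    engine: &E,
    forkchoice: ForkchoiceState,
    attributes: PayloadAttributes,
) -> Result<ExecutionPayload> {
    // 1. Hand the attributes to the engine and ask it to start a build job.
    let update = engine.forkchoice_updated(forkchoice, Some(attributes)).await?;
    let id = update.payload_id.ok_or_else(|| eyre::eyre!("engine did not start a build"))?;

    // 2. Fetch the payload the engine produced for that job.
    let payload = engine.get_payload(id).await?;

    // 3. Ask the engine to execute/validate it; a real driver would then issue
    //    another forkchoice update pointing at the new block hash.
    engine.new_payload(payload.clone()).await?;
    Ok(payload)
}
```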
async fn get_payload(&self, payload_id: PayloadId) -> Result; } diff --git a/src/l1/mod.rs b/src/l1/mod.rs index 0fbd3134..66bc9cae 100644 --- a/src/l1/mod.rs +++ b/src/l1/mod.rs @@ -20,9 +20,11 @@ use crate::{ derive::stages::attributes::UserDeposited, }; +/// [H256] event signature for `ConfigUpdate`. static CONFIG_UPDATE_TOPIC: Lazy = Lazy::new(|| H256::from_slice(&keccak256("ConfigUpdate(uint256,uint8,bytes)"))); +/// [H256] event signature for `TransactionDeposited`. static TRANSACTION_DEPOSITED_TOPIC: Lazy = Lazy::new(|| { H256::from_slice(&keccak256( "TransactionDeposited(address,address,uint256,bytes)", @@ -30,7 +32,7 @@ static TRANSACTION_DEPOSITED_TOPIC: Lazy = Lazy::new(|| { }); /// Handles watching the L1 chain and monitoring for new blocks, deposits, -/// and batcher transactions. The monitoring loop is spawned in a seperate +/// and batcher transactions. The monitoring loop is spawned in a separate /// task and communication happens via the internal channels. When ChainWatcher /// is dropped, the monitoring task is automatically aborted. pub struct ChainWatcher { @@ -95,13 +97,13 @@ struct InnerWatcher { provider: Arc>>, /// Channel to send block updates block_update_sender: mpsc::Sender, - /// Most recent ingested block + /// The current L1 block to process current_block: u64, - /// Most recent block + /// Most recent L1 block head_block: u64, - /// Most recent finalized block + /// Most recent finalized L1 block finalized_block: u64, - /// List of blocks that have not been finalized yet + /// List of L1 blocks that have not been finalized yet unfinalized_blocks: Vec, /// Mapping from block number to user deposits. Past block deposits /// are removed as they are no longer needed @@ -112,6 +114,7 @@ struct InnerWatcher { system_config_update: (u64, Option), } +/// A type alias for a vector of bytes. Represents the calldata in a batcher transaction type BatcherTransactionData = Vec; impl Drop for ChainWatcher { @@ -123,8 +126,7 @@ impl Drop for ChainWatcher { } impl ChainWatcher { - /// Creates a new ChainWatcher and begins the monitoring task. - /// Errors if the rpc url in the config is invalid. + /// Creates a new [ChainWatcher] pub fn new(l1_start_block: u64, l2_start_block: u64, config: Arc) -> Result { Ok(Self { handle: None, @@ -135,7 +137,7 @@ impl ChainWatcher { }) } - /// Starts the chain watcher at the given block numbers + /// Starts the chain watcher at the current [ChainWatcher] start blocks pub fn start(&mut self) -> Result<()> { if let Some(handle) = self.handle.take() { handle.abort(); @@ -191,6 +193,7 @@ impl ChainWatcher { } impl InnerWatcher { + /// Creates a new [InnerWatcher] async fn new( config: Arc, block_update_sender: mpsc::Sender, @@ -246,6 +249,13 @@ impl InnerWatcher { } } + /// Handles processing of the next L1 block. + /// + /// - Updates the finalized L1 block if this has changed and notifies the block update channel. + /// - Updates the L1 head block. + /// - Checks for system config changes and updates the [SystemConfig] in [InnerWatcher] if necessary. + /// - Fetches the block + user deposited transactions + /// - Notifies the block update channel that either a new block, or a reorg was received. 
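The two `Lazy` statics above are plain keccak hashes of the Solidity event signatures, which is how the watcher recognises relevant receipts. A short sketch of matching a received log against the deposit topic, using `ethers` types:

```rust
use ethers::types::{Log, H256};
use ethers::utils::keccak256;

/// Mirrors the `TRANSACTION_DEPOSITED_TOPIC` static above: a log belongs to a
/// user deposit when its first topic is the keccak hash of the event signature.
fn is_deposit_log(log: &Log) -> bool {
    let topic = H256::from_slice(&keccak256(
        "TransactionDeposited(address,address,uint256,bytes)",
    ));
    log.topics.first() == Some(&topic)
}
```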
async fn try_ingest_block(&mut self) -> Result<()> { if self.current_block > self.finalized_block { let finalized_block = self.get_finalized().await?; @@ -310,6 +320,7 @@ impl InnerWatcher { Ok(()) } + /// Checks L1 event logs for emitted `ConfigUpdated` events and updates the [SystemConfig] in the [InnerWatcher] async fn update_system_config(&mut self) -> Result<()> { let (last_update_block, _) = self.system_config_update; @@ -364,6 +375,8 @@ impl InnerWatcher { Ok(()) } + /// True if there are 2 or more unfinalized blocks, and the parent hash + /// of the last unfinalized block is not the hash of the second last unfinalized block fn check_reorg(&self) -> bool { let len = self.unfinalized_blocks.len(); if len >= 2 { @@ -375,6 +388,8 @@ impl InnerWatcher { } } + /// Fetches the most recent finalized L1 block number. + /// If running in devnet mode this will fetch the latest block number. async fn get_finalized(&self) -> Result { let block_number = match self.config.devnet { false => BlockNumber::Finalized, @@ -391,6 +406,7 @@ impl InnerWatcher { .as_u64()) } + /// Fetches the most recent L1 block number async fn get_head(&self) -> Result { Ok(self .provider @@ -402,6 +418,7 @@ impl InnerWatcher { .as_u64()) } + /// Fetches a given L1 block async fn get_block(&self, block_num: u64) -> Result> { self.provider .get_block_with_txs(block_num) @@ -409,6 +426,12 @@ impl InnerWatcher { .ok_or(eyre::eyre!("block not found")) } + /// Returns all user deposited transactions in an L1 block. + /// + /// If the [InnerWatcher] has already stored deposits in the given block, it removes and returns these. + /// + /// Otherwise `TransactionDeposited` event logs are fetched from an L1 block range + /// of `block_num` to `block_num + 1000`, and stored in the `deposits` mapping. async fn get_deposits(&mut self, block_num: u64) -> Result> { match self.deposits.remove(&block_num) { Some(deposits) => Ok(deposits), @@ -446,6 +469,7 @@ impl InnerWatcher { } impl L1Info { + /// Creates a new [L1Info] instance. pub fn new( block: &Block, user_deposits: Vec, @@ -483,6 +507,7 @@ impl L1Info { } } +/// Filters block transactions and returns calldata in transactions sent from the batch sender to the batch inbox. fn create_batcher_transactions( block: &Block, batch_sender: Address, @@ -496,6 +521,7 @@ fn create_batcher_transactions( .collect() } +/// Creates the block update channel and begins a loop to ingest L1 blocks via [InnerWatcher::try_ingest_block()] fn start_watcher( l1_start_block: u64, l2_start_block: u64, @@ -522,16 +548,22 @@ fn start_watcher( Ok((handle, block_update_receiver)) } +/// Represents which config option has been updated. 
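The reorg detection described above is a parent-hash continuity check on the two newest unfinalized blocks. A standalone sketch (`BlockRef` is an illustrative stand-in for the watcher's own block info type):

```rust
use ethers::types::H256;

/// Stand-in for the block info the watcher keeps for unfinalized L1 blocks.
struct BlockRef {
    hash: H256,
    parent_hash: H256,
}

/// Mirrors the continuity check described above: a reorg is flagged when the
/// newest unfinalized block does not build on the one before it.
fn is_reorg(unfinalized: &[BlockRef]) -> bool {
    match unfinalized {
        [.., prev, last] => last.parent_hash != prev.hash,
        _ => false,
    }
}
```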
enum SystemConfigUpdate { + /// An update of the batcher address BatchSender(Address), + /// An update of the fee scalar and/or overhead Fees(U256, U256), + /// A update of the L2 block gas limit Gas(U256), + /// An update of the unsafe block signer UnsafeBlockSigner(Address), } impl TryFrom for SystemConfigUpdate { type Error = eyre::Report; + /// Parses the updated config value from an event [Log] and return a [SystemConfigUpdate] fn try_from(log: Log) -> Result { let version = log .topics @@ -598,12 +630,13 @@ impl TryFrom for SystemConfigUpdate { } } +/// Creates a retryable ethers RPC [Provider] fn generate_http_provider(url: &str) -> Arc>> { let client = reqwest::ClientBuilder::new() .timeout(Duration::from_secs(5)) .build() .unwrap(); - let http = Http::new_with_client(Url::parse(url).expect("ivnalid rpc url"), client); + let http = Http::new_with_client(Url::parse(url).expect("invalid rpc url"), client); let policy = Box::new(HttpRateLimitRetryPolicy); let client = RetryClient::new(http, policy, 100, 50); Arc::new(Provider::new(client)) diff --git a/src/lib.rs b/src/lib.rs index cb55ed71..c57938bd 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,3 +1,45 @@ +//! # Magi +//! +//! `Magi` is a Rust implementation of an OP stack rollup node, designed to serve as a replacement for `op-node`. It facilitates interaction with both the L1 (Layer 1) chain and the canonical L2 (Layer 2) chain, enabling efficient data ingestion, processing, and serving via an RPC interface. +//! +//! This crate is structured to provide functionality for running an OP stack rollup node, including configuration management, data derivation, and P2P network communication. +//! +//! ## Features +//! +//! - **L1 Chain Ingestion**: Processes and ingests data from the L1 chain to keep the rollup node synchronized. +//! - **Canonical L2 Chain Derivation**: Derives the canonical L2 chain state based on ingested L1 data. +//! - **L2 Engine API**: Interfaces with `op-geth` for L2 state execution and consensus. +//! - **Networking**: Manages peer-to-peer networking for P2P data dissemination and retrieval. +//! - **RPC Server**: Hosts an RPC server for querying rollup node data. +//! - **Configurable Sync Modes**: Supports different synchronization modes. +//! - **Telemetry and Logging**: Provides application telemetry and logging for monitoring and debugging. +//! +//! ## Modules +//! +//! - [`l1`]: Ingests and processes L1 chain data. +//! - [`common`]: Contains common types and functions used throughout the crate. +//! - [`config`]: Manages configuration settings for the node. +//! - [`mod@derive`]: Handles the derivation pipeline for the L2 chain. +//! - [`driver`]: Drives `op-geth` via the L2 Engine API. +//! - [`engine`]: Provides an implementation of the L2 Engine API. +//! - [`network`]: Manages peer-to-peer networking. +//! - [`telemetry`]: Handles application telemetry and logging. +//! - [`rpc`]: Implements the RPC server for external queries. +//! - [`runner`]: Manages the node's operation in various synchronization modes. +//! - [`version`]: Provides version information for the `magi` crate. +//! +//! ## Getting Started +//! +//! To start using `magi`, add it as a dependency in your `Cargo.toml`: +//! +//! ```toml +//! [dependencies] +//! magi = "0.1.0" +//! ``` +//! +//! Then, refer to the individual modules for specific functionality. +//! 
+#![warn(missing_docs)] /// A module for ingesting L1 chain data pub mod l1; diff --git a/src/network/handlers/block_handler.rs b/src/network/handlers/block_handler.rs index b1c45a5a..8be9bc6b 100644 --- a/src/network/handlers/block_handler.rs +++ b/src/network/handlers/block_handler.rs @@ -12,15 +12,22 @@ use crate::{common::RawTransaction, engine::ExecutionPayload}; use super::Handler; +/// Responsible for managing blocks received via p2p gossip pub struct BlockHandler { + /// Chain ID of the L2 blockchain. Used to filter out gossip messages intended for other blockchains. chain_id: u64, + /// A channel sender to forward new blocks to other modules block_sender: Sender, + /// A [watch::Receiver] to monitor changes to the unsafe block signer. unsafe_signer_recv: watch::Receiver
<Address>, + /// The libp2p topic for pre-Canyon/Shanghai blocks: `/optimism/{chain_id}/0/blocks` blocks_v1_topic: IdentTopic, + /// The libp2p topic for Canyon/Delta blocks: `/optimism/{chain_id}/1/blocks` blocks_v2_topic: IdentTopic, } impl Handler for BlockHandler { + /// Checks the validity of a block received via p2p gossip, and sends it to the block update channel if valid. fn handle(&self, msg: Message) -> MessageAcceptance { tracing::debug!("received block"); @@ -49,12 +56,14 @@ impl Handler for BlockHandler { } } + /// The gossip topics accepted for new blocks fn topics(&self) -> Vec<TopicHash> { vec![self.blocks_v1_topic.hash(), self.blocks_v2_topic.hash()] } } impl BlockHandler { + /// Creates a new [BlockHandler], returning it along with the receiving end of its block update channel pub fn new( chain_id: u64, unsafe_recv: watch::Receiver<Address>
, @@ -72,6 +81,9 @@ impl BlockHandler { (handler, recv) } + /// Determines if a block is valid. + /// + /// True if the block is less than 1 minute old, and correctly signed by the unsafe block signer. fn block_valid( &self, payload: &ExecutionPayload, @@ -95,6 +107,7 @@ impl BlockHandler { } } +/// Decodes a sequence of bytes to a tuple of ([ExecutionPayload], [Signature], [PayloadHash]) fn decode_block_msg(data: Vec) -> Result<(ExecutionPayload, Signature, PayloadHash)> where T: SimpleSerialize, @@ -115,15 +128,18 @@ where Ok((payload, signature, payload_hash)) } +/// Represents the Keccak256 hash of the block struct PayloadHash(H256); impl From<&[u8]> for PayloadHash { + /// Returns the Keccak256 hash of a sequence of bytes fn from(value: &[u8]) -> Self { Self(keccak256(value).into()) } } impl PayloadHash { + /// The expected message that should be signed by the unsafe block signer. fn signature_message(&self, chain_id: u64) -> H256 { let domain = H256::zero(); let chain_id = H256::from_low_u64_be(chain_id); @@ -140,25 +156,43 @@ impl PayloadHash { } } +/// A type alias for a vector of 32 bytes, representing a Bytes32 hash type Bytes32 = Vector; +/// A type alias for a vector of 20 bytes, representing an address type VecAddress = Vector; +/// A type alias for a byte list, representing a transaction type Transaction = List; +/// The pre Canyon/Shanghai [ExecutionPayload] - the withdrawals field should not exist #[derive(SimpleSerialize, Default)] struct ExecutionPayloadV1SSZ { + /// Block hash of the parent block pub parent_hash: Bytes32, + /// Fee recipient of the block. Set to the sequencer fee vault pub fee_recipient: VecAddress, + /// State root of the block pub state_root: Bytes32, + /// Receipts root of the block pub receipts_root: Bytes32, + /// Logs bloom of the block pub logs_bloom: Vector, + /// The block mix_digest pub prev_randao: Bytes32, + /// The block number pub block_number: u64, + /// The block gas limit pub gas_limit: u64, + /// Total gas used in the block pub gas_used: u64, + /// Timestamp of the block pub timestamp: u64, + /// Any extra data included in the block pub extra_data: List, + /// Base fee per gas of the block pub base_fee_per_gas: U256, + /// Hash of the block pub block_hash: Bytes32, + /// Transactions in the block pub transactions: List, } @@ -184,34 +218,57 @@ impl From for ExecutionPayload { } } +/// The Canyon/Shanghai [ExecutionPayload] - the withdrawals field should be an empty [List] #[derive(SimpleSerialize, Default)] struct ExecutionPayloadV2SSZ { + /// Block hash of the parent block pub parent_hash: Bytes32, + /// Fee recipient of the block. Set to the sequencer fee vault pub fee_recipient: VecAddress, + /// State root of the block pub state_root: Bytes32, + /// Receipts root of the block pub receipts_root: Bytes32, + /// Logs bloom of the block pub logs_bloom: Vector, + /// The block mix_digest pub prev_randao: Bytes32, + /// The block number pub block_number: u64, + /// The block gas limit pub gas_limit: u64, + /// Total gas used in the block pub gas_used: u64, + /// Timestamp of the block pub timestamp: u64, + /// Any extra data included in the block pub extra_data: List, + /// Base fee per gas of the block pub base_fee_per_gas: U256, + /// Hash of the block pub block_hash: Bytes32, + /// Transactions in the block pub transactions: List, + /// An empty list. This is unused and only exists for L1 compatibility. pub withdrawals: List, } +/// This represents an L1 validator Withdrawal, and is unused in OP stack rollups. 
+/// Exists only for L1 compatibility #[derive(SimpleSerialize, Default)] struct Withdrawal { + /// Index of the withdrawal index: u64, + /// Index of the validator validator_index: u64, + /// Account address that has withdrawn address: VecAddress, + /// The amount withdrawn amount: u64, } impl From for ExecutionPayload { + /// Converts an [ExecutionPayloadV2SSZ] received via p2p gossip into an [ExecutionPayload] used by the engine. fn from(value: ExecutionPayloadV2SSZ) -> Self { Self { parent_hash: convert_hash(value.parent_hash), @@ -233,22 +290,27 @@ impl From for ExecutionPayload { } } +/// Converts [Bytes32] into [H256] fn convert_hash(bytes: Bytes32) -> H256 { H256::from_slice(bytes.as_slice()) } +/// Converts [VecAddress] into [Address] fn convert_address(address: VecAddress) -> Address { Address::from_slice(address.as_slice()) } +/// Converts an [ssz_rs::Vector] of bytes into [Bytes] fn convert_byte_vector(vector: Vector) -> Bytes { Bytes::from(vector.to_vec()) } +/// Converts an [ssz_rs::List] of bytes into [Bytes] fn convert_byte_list(list: List) -> Bytes { Bytes::from(list.to_vec()) } +/// Converts a [U256] into [ethers::types::U64] fn convert_uint(value: U256) -> ethers::types::U64 { let bytes = value.to_bytes_le(); ethers::types::U256::from_little_endian(&bytes) @@ -256,6 +318,7 @@ fn convert_uint(value: U256) -> ethers::types::U64 { .into() } +/// Converts [ssz_rs::List] of [Transaction] into a vector of [RawTransaction] fn convert_tx_list(value: List) -> Vec { value.iter().map(|tx| RawTransaction(tx.to_vec())).collect() } diff --git a/src/network/handlers/mod.rs b/src/network/handlers/mod.rs index 3ec9cb7b..365f3989 100644 --- a/src/network/handlers/mod.rs +++ b/src/network/handlers/mod.rs @@ -1,8 +1,14 @@ use libp2p::gossipsub::{Message, MessageAcceptance, TopicHash}; +/// A module for managing incoming p2p gossip messages pub mod block_handler; +/// This trait defines the functionality required to process incoming messages +/// and determine their acceptance within the network. Implementors of this trait +/// can specify how messages are handled and which topics they are interested in. pub trait Handler: Send { + /// Manages validation and further processing of messages fn handle(&self, msg: Message) -> MessageAcceptance; + /// Specifies which topics the handler is interested in fn topics(&self) -> Vec; } diff --git a/src/network/mod.rs b/src/network/mod.rs index 0d86d727..8db07389 100644 --- a/src/network/mod.rs +++ b/src/network/mod.rs @@ -1,2 +1,4 @@ +/// A module for managing incoming p2p gossip messages pub mod handlers; +/// A module for managing the Discv5 discovery & libp2p services pub mod service; diff --git a/src/network/service/discovery.rs b/src/network/service/discovery.rs index 85c70d39..d035d292 100644 --- a/src/network/service/discovery.rs +++ b/src/network/service/discovery.rs @@ -14,6 +14,8 @@ use unsigned_varint::{decode, encode}; use super::types::{NetworkAddress, Peer}; +/// Starts the [Discv5] discovery service and continually tries to find new peers. +/// Returns a [Receiver] to receive [Peer] structs pub fn start(addr: NetworkAddress, chain_id: u64) -> Result> { let bootnodes = bootnodes(); let mut disc = create_disc(chain_id)?; @@ -51,6 +53,7 @@ pub fn start(addr: NetworkAddress, chain_id: u64) -> Result> { Ok(recv) } +/// Returns `true` if a node [Enr] contains an `opstack` key and is on the same network. 
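For reference, the `Handler` trait from `handlers/mod.rs` above can be implemented with very little code. A hypothetical no-op handler, assuming the `Vec` element type is `TopicHash` as in the module's imports:

```rust
use libp2p::gossipsub::{IdentTopic, Message, MessageAcceptance, TopicHash};
use magi::network::handlers::Handler;

/// A do-nothing handler that accepts everything on one topic. Real handlers,
/// like the BlockHandler above, validate the payload before accepting it.
struct LogHandler {
    topic: IdentTopic,
}

impl Handler for LogHandler {
    fn handle(&self, msg: Message) -> MessageAcceptance {
        tracing::debug!("received {} bytes on {:?}", msg.data.len(), msg.topic);
        MessageAcceptance::Accept
    }

    fn topics(&self) -> Vec<TopicHash> {
        vec![self.topic.hash()]
    }
}
```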
fn is_valid_node(node: &Enr, chain_id: u64) -> bool { node.get_raw_rlp("opstack") .map(|opstack| { @@ -61,6 +64,7 @@ fn is_valid_node(node: &Enr, chain_id: u64) -> bool { .unwrap_or_default() } +/// Generates an [Enr] and creates a [Discv5] service struct fn create_disc(chain_id: u64) -> Result { let opstack = OpStackEnrData { chain_id, @@ -77,15 +81,19 @@ fn create_disc(chain_id: u64) -> Result { Discv5::new(enr, key, config).map_err(|_| eyre::eyre!("could not create disc service")) } +/// The unique L2 network identifier #[derive(Debug)] struct OpStackEnrData { + /// Chain ID chain_id: u64, + /// The version. Always set to 0. version: u64, } impl TryFrom<&[u8]> for OpStackEnrData { type Error = eyre::Report; + /// Converts a slice of RLP encoded bytes to [OpStackEnrData] fn try_from(value: &[u8]) -> Result { let bytes: Vec = rlp::decode(value)?; let (chain_id, rest) = decode::u64(&bytes)?; @@ -96,6 +104,7 @@ impl TryFrom<&[u8]> for OpStackEnrData { } impl From for Vec { + /// Converts [OpStackEnrData] to a vector of bytes. fn from(value: OpStackEnrData) -> Vec { let mut chain_id_buf = encode::u128_buffer(); let chain_id_slice = encode::u128(value.chain_id as u128, &mut chain_id_buf); @@ -109,6 +118,7 @@ impl From for Vec { } } +/// Default bootnodes to use. Currently consists of 2 Base bootnodes & 1 Optimism bootnode. fn bootnodes() -> Vec> { let bootnodes = [ "enr:-J64QBbwPjPLZ6IOOToOLsSjtFUjjzN66qmBZdUexpO32Klrc458Q24kbty2PdRaLacHM5z-cZQr8mjeQu3pik6jPSOGAYYFIqBfgmlkgnY0gmlwhDaRWFWHb3BzdGFja4SzlAUAiXNlY3AyNTZrMaECmeSnJh7zjKrDSPoNMGXoopeDF4hhpj5I0OsQUUt4u8uDdGNwgiQGg3VkcIIkBg", diff --git a/src/network/service/mod.rs b/src/network/service/mod.rs index a0859539..f40fb397 100644 --- a/src/network/service/mod.rs +++ b/src/network/service/mod.rs @@ -14,17 +14,25 @@ use openssl::sha::sha256; use super::{handlers::Handler, service::types::NetworkAddress}; +/// A module to handle peer discovery mod discovery; +/// A module to handle commonly used types in the p2p system. mod types; +/// Responsible for management of the `Discv5` & `libp2p` services. pub struct Service { + /// Handles validation & processing of inbound messages handlers: Vec>, + /// The socket address that the service is listening on. 
addr: SocketAddr, + /// The chain ID of the network chain_id: u64, + /// A unique keypair to validate the node's identity keypair: Option, } impl Service { + /// Creates a new [Service] pub fn new(addr: SocketAddr, chain_id: u64) -> Self { Self { handlers: Vec::new(), @@ -34,16 +42,20 @@ impl Service { } } + /// Adds a handler to [Service] pub fn add_handler(mut self, handler: Box) -> Self { self.handlers.push(handler); self } + /// Sets the keypair for [Service] pub fn set_keypair(mut self, keypair: Keypair) -> Self { self.keypair = Some(keypair); self } + /// Starts the Discv5 peer discovery & libp2p services + /// and continually listens for new peers and messages to handle pub fn start(mut self) -> Result<()> { let addr = NetworkAddress::try_from(self.addr)?; let keypair = self.keypair.unwrap_or_else(Keypair::generate_secp256k1); @@ -81,6 +93,7 @@ impl Service { } } +/// Computes the message ID of a `gossipsub` message fn compute_message_id(msg: &Message) -> MessageId { let mut decoder = snap::raw::Decoder::new(); let id = match decoder.decompress_vec(&msg.data) { @@ -107,6 +120,7 @@ fn compute_message_id(msg: &Message) -> MessageId { MessageId(id) } +/// Creates the libp2p [Swarm] fn create_swarm(keypair: Keypair, handlers: &[Box]) -> Result> { let transport = tcp::tokio::Transport::new(tcp::Config::default()) .upgrade(libp2p::core::upgrade::Version::V1Lazy) @@ -122,14 +136,18 @@ fn create_swarm(keypair: Keypair, handlers: &[Box]) -> Result]) -> Result { let ping = ping::Behaviour::default(); @@ -173,13 +191,18 @@ impl Behaviour { } } +/// The type of message received enum Event { + /// Represents a [ping::Event] #[allow(dead_code)] Ping(ping::Event), + /// Represents a [gossipsub::Event] Gossipsub(gossipsub::Event), } impl Event { + /// Handles received gossipsub messages. Ping messages are ignored. + /// Reports back to [libp2p::gossipsub] to apply peer scoring and forward the message to other peers if accepted. fn handle(self, swarm: &mut Swarm, handlers: &[Box]) { if let Self::Gossipsub(gossipsub::Event::Message { propagation_source, @@ -203,12 +226,14 @@ impl Event { } impl From for Event { + /// Converts [ping::Event] to [Event] fn from(value: ping::Event) -> Self { Event::Ping(value) } } impl From for Event { + /// Converts [gossipsub::Event] to [Event] fn from(value: gossipsub::Event) -> Self { Event::Gossipsub(value) } diff --git a/src/network/service/types.rs b/src/network/service/types.rs index 921565c9..03dc0f5a 100644 --- a/src/network/service/types.rs +++ b/src/network/service/types.rs @@ -4,20 +4,26 @@ use discv5::enr::{CombinedKey, Enr}; use eyre::Result; use libp2p::{multiaddr::Protocol, Multiaddr}; +/// An [Ipv4Addr] and port. 
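A hedged sketch of how the pieces above might be wired together: a `BlockHandler` registered on a `Service` via the builder methods just documented. The port, chain id, and the expectation of running inside the node's tokio runtime are assumptions of the sketch:

```rust
use ethers::types::Address;
use magi::network::{handlers::block_handler::BlockHandler, service::Service};
use tokio::sync::watch;

/// Illustrative wiring of the gossip service; placeholder address and chain id.
fn start_p2p(chain_id: u64, unsafe_signer: Address) -> eyre::Result<()> {
    let addr = "0.0.0.0:9876".parse()?;

    // The handler watches the current unsafe block signer to validate blocks.
    let (_signer_tx, signer_rx) = watch::channel(unsafe_signer);
    let (handler, _unsafe_blocks) = BlockHandler::new(chain_id, signer_rx);

    // A fresh secp256k1 keypair is generated if none is supplied.
    Service::new(addr, chain_id)
        .add_handler(Box::new(handler))
        .start()
}
```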
#[derive(Debug, Clone, Copy)] pub struct NetworkAddress { + /// An [Ipv4Addr] pub ip: Ipv4Addr, + /// A port pub port: u16, } +/// A wrapper around a peer's [NetworkAddress] #[derive(Debug)] pub struct Peer { + /// The peer's [Ipv4Addr] and port pub addr: NetworkAddress, } impl TryFrom<&Enr> for NetworkAddress { type Error = eyre::Report; + /// Convert an [Enr] to a [NetworkAddress] fn try_from(value: &Enr) -> Result { let ip = value.ip4().ok_or(eyre::eyre!("missing ip"))?; let port = value.tcp4().ok_or(eyre::eyre!("missing port"))?; @@ -27,6 +33,7 @@ impl TryFrom<&Enr> for NetworkAddress { } impl From for Multiaddr { + /// Converts a [NetworkAddress] to a [Multiaddr] fn from(value: NetworkAddress) -> Self { let mut multiaddr = Multiaddr::empty(); multiaddr.push(Protocol::Ip4(value.ip)); @@ -37,6 +44,7 @@ impl From for Multiaddr { } impl From for SocketAddr { + /// Converts a [NetworkAddress] to a [SocketAddr] fn from(value: NetworkAddress) -> Self { SocketAddr::new(IpAddr::V4(value.ip), value.port) } @@ -45,6 +53,7 @@ impl From for SocketAddr { impl TryFrom for NetworkAddress { type Error = eyre::Report; + /// Converts a [SocketAddr] to a [NetworkAddress] fn try_from(value: SocketAddr) -> Result { let ip = match value.ip() { IpAddr::V4(ip) => ip, @@ -61,6 +70,7 @@ impl TryFrom for NetworkAddress { impl TryFrom<&Enr> for Peer { type Error = eyre::Report; + /// Converts an [Enr] to a [Peer] fn try_from(value: &Enr) -> Result { let addr = NetworkAddress::try_from(value)?; Ok(Peer { addr }) @@ -68,6 +78,7 @@ impl TryFrom<&Enr> for Peer { } impl From for Multiaddr { + /// Converts a [Peer] to a [Multiaddr] fn from(value: Peer) -> Self { value.addr.into() } diff --git a/src/rpc/mod.rs b/src/rpc/mod.rs index 733462c0..94e10c7a 100644 --- a/src/rpc/mod.rs +++ b/src/rpc/mod.rs @@ -21,26 +21,37 @@ use jsonrpsee::{ use serde::{Deserialize, Serialize}; +/// This trait defines a set of RPC methods that can be +/// queried by clients under the `optimism` namespace #[rpc(server, namespace = "optimism")] pub trait Rpc { + /// Returns the L2 output information for a given block. + /// See the [Optimism spec](https://specs.optimism.io/protocol/rollup-node.html?highlight=rpc#l2-output-rpc-method) for more details #[method(name = "outputAtBlock")] async fn output_at_block(&self, block_number: u64) -> Result; + /// Returns the rollup configuration options. #[method(name = "rollupConfig")] async fn rollup_config(&self) -> Result; + /// Returns details about the Magi version of the node. #[method(name = "version")] async fn version(&self) -> Result; } +/// The Magi RPC server which implements the same `optimism` namespace methods as `op-node` #[derive(Debug)] pub struct RpcServerImpl { + /// The Magi version of the node version: Version, + /// The Magi [Config] config: Arc, } #[async_trait] impl RpcServer for RpcServerImpl { + /// Returns the L2 output information for a given block. + /// See the [Optimism spec](https://specs.optimism.io/protocol/rollup-node.html?highlight=rpc#l2-output-rpc-method) for more details async fn output_at_block(&self, block_number: u64) -> Result { let l2_provider = convert_err(Provider::try_from(self.config.l2_rpc_url.clone()))?; @@ -77,21 +88,26 @@ impl RpcServer for RpcServerImpl { }) } + /// Returns the rollup configuration options. async fn rollup_config(&self) -> Result { let config = (*self.config).clone(); Ok(ExternalChainConfig::from(config.chain)) } + /// Returns details about the Magi version of the node. 
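The output root returned by `optimism_outputAtBlock` is, per the linked spec, a version-0 commitment `keccak256(version ++ state_root ++ withdrawal_storage_root ++ block_hash)`. A sketch of that construction with `ethers` types:

```rust
use ethers::types::H256;
use ethers::utils::keccak256;

/// Sketch of the version-0 output root commitment:
/// keccak256(version ++ state_root ++ withdrawal_storage_root ++ block_hash).
fn output_root_v0(state_root: H256, withdrawal_storage_root: H256, block_hash: H256) -> H256 {
    let version = H256::zero();
    let mut preimage = Vec::with_capacity(128);
    preimage.extend_from_slice(version.as_bytes());
    preimage.extend_from_slice(state_root.as_bytes());
    preimage.extend_from_slice(withdrawal_storage_root.as_bytes());
    preimage.extend_from_slice(block_hash.as_bytes());
    H256::from_slice(&keccak256(preimage))
}
```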
async fn version(&self) -> Result { Ok(self.version.to_string()) } } +/// Converts a generic error to a [jsonrpsee::core::error] if one exists fn convert_err(res: Result) -> Result { res.map_err(|err| Error::Custom(err.to_string())) } +/// Computes the L2 output root. +/// Refer to the [Optimism Spec](https://specs.optimism.io/protocol/proposals.html#l2-output-commitment-construction) for details fn compute_l2_output_root(block: Block, storage_root: H256) -> H256 { let version: H256 = Default::default(); let digest = keccak256( @@ -107,6 +123,7 @@ fn compute_l2_output_root(block: Block, storage_root: H256) -> H256 { H256::from_slice(&digest) } +/// Starts the Magi RPC server pub async fn run_server(config: Arc) -> Result { let port = config.rpc_port; let addr = config.rpc_addr.clone(); @@ -129,12 +146,17 @@ pub async fn run_server(config: Arc) -> Result { Ok(addr) } +/// The response for the `optimism_outputAtBlock` RPC method. #[derive(Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct OutputRootResponse { + /// The output root which serves as a commitment to the current state of the chain pub output_root: H256, + /// The output root version number, beginning with 0 pub version: H256, + /// The state root pub state_root: H256, + /// The 32 byte storage root of the `L2toL1MessagePasser` contract address pub withdrawal_storage_root: H256, } diff --git a/src/runner/mod.rs b/src/runner/mod.rs index 3d063321..e574e08e 100644 --- a/src/runner/mod.rs +++ b/src/runner/mod.rs @@ -16,16 +16,25 @@ use crate::{ engine::{Engine, EngineApi, ExecutionPayload, ForkchoiceState, Status}, }; +/// Temporary trusted/static peer used for checkpoint sync mode. +// TODO: use a list of whitelisted bootnodes instead const TRUSTED_PEER_ENODE: &str = "enode://e85ba0beec172b17f53b373b0ab72238754259aa39f1ae5290e3244e0120882f4cf95acd203661a27c8618b27ca014d4e193266cb3feae43655ed55358eedb06@3.86.143.120:30303?discport=21693"; +/// The main entrypoint for starting a Magi node. +/// Responsible for starting the syncing process. pub struct Runner { + /// The Magi [Config] config: Config, + /// The [SyncMode] - currently full & checkpoint sync are supported sync_mode: SyncMode, + /// The L2 block hash to begin syncing from checkpoint_hash: Option, + /// Receiver to listen for SIGINT signals shutdown_recv: Receiver, } impl Runner { + /// Creates a new [Runner] from a [Config] and registers the SIGINT signal handler. pub fn from_config(config: Config) -> Self { let (shutdown_sender, shutdown_recv) = channel(false); ctrlc::set_handler(move || { @@ -44,16 +53,19 @@ impl Runner { } } + /// Sets the [SyncMode] pub fn with_sync_mode(mut self, sync_mode: SyncMode) -> Self { self.sync_mode = sync_mode; self } + /// Sets the `checkpoint_hash` if running in checkpoint [SyncMode] pub fn with_checkpoint_hash(mut self, checkpoint_hash: Option) -> Self { self.checkpoint_hash = checkpoint_hash; self } + /// Begins the syncing process pub async fn run(self) -> Result<()> { match self.sync_mode { SyncMode::Fast => self.fast_sync().await, @@ -63,16 +75,21 @@ impl Runner { } } + /// Fast sync mode - currently unsupported pub async fn fast_sync(&self) -> Result<()> { tracing::error!("fast sync is not implemented yet"); unimplemented!(); } + /// Fast challenge mode - currently unsupported pub async fn challenge_sync(&self) -> Result<()> { tracing::error!("challenge sync is not implemented yet"); unimplemented!(); } + /// Full sync mode. 
+ /// Syncs via L1 block derivation from the latest finalized block the execution client has synced to. + /// Otherwise syncs from genesis pub async fn full_sync(&self) -> Result<()> { tracing::info!("starting full sync"); @@ -80,6 +97,10 @@ impl Runner { Ok(()) } + /// Checkpoint sync mode. + /// Syncs the execution client to a given checkpoint block, and then begins the normal derivation sync process via the [Driver] + /// + /// Note: the `admin` RPC method must be available on the execution client as checkpoint_sync relies on `admin_addPeer` pub async fn checkpoint_sync(&self) -> Result<()> { tracing::info!("starting checkpoint sync"); @@ -180,6 +201,7 @@ impl Runner { Ok(()) } + /// Creates and starts the [Driver] which handles the derivation sync process. async fn start_driver(&self) -> Result<()> { let mut driver = Driver::from_config(self.config.clone(), self.shutdown_recv.clone()).await?; @@ -192,6 +214,7 @@ impl Runner { Ok(()) } + /// Exits if a SIGINT signal is received fn check_shutdown(&self) -> Result<()> { if *self.shutdown_recv.borrow() { tracing::warn!("shutting down"); @@ -201,6 +224,7 @@ impl Runner { Ok(()) } + /// Returns `true` if the L2 block is the first in an epoch (sequence number 0) async fn is_epoch_boundary + Send + Sync>( block: T, checkpoint_sync_url: &Provider, diff --git a/src/telemetry/logging.rs b/src/telemetry/logging.rs index aa535736..a0fa5ed6 100644 --- a/src/telemetry/logging.rs +++ b/src/telemetry/logging.rs @@ -125,6 +125,7 @@ impl tracing::field::Visit for AnsiVisitor { /// An Ansi Term layer for tracing #[derive(Debug)] pub struct AnsiTermLayer { + /// Whether verbose tracing is enabled. Prints additional metadata if `true` verbose: bool, } diff --git a/src/telemetry/metrics.rs b/src/telemetry/metrics.rs index a3e850a4..5e29e32e 100644 --- a/src/telemetry/metrics.rs +++ b/src/telemetry/metrics.rs @@ -6,13 +6,17 @@ use prometheus_exporter::{ }; lazy_static! { + /// Tracks the block number of the most recent finalized head. pub static ref FINALIZED_HEAD: IntGauge = register_int_gauge!("finalized_head", "finalized head number").unwrap(); + /// Tracks the block number considered to be the safe head. pub static ref SAFE_HEAD: IntGauge = register_int_gauge!("safe_head", "safe head number").unwrap(); + /// Monitors if the node is fully synced pub static ref SYNCED: IntGauge = register_int_gauge!("synced", "synced flag").unwrap(); } +/// Starts the metrics server on port 9200 pub fn init() -> Result<()> { start("0.0.0.0:9200".parse().wrap_err("Could not parse address")?)?; Ok(()) diff --git a/src/telemetry/mod.rs b/src/telemetry/mod.rs index 9567dde8..d6482fc3 100644 --- a/src/telemetry/mod.rs +++ b/src/telemetry/mod.rs @@ -9,7 +9,7 @@ //! //! Logging is constructed using the [tracing](https://crates.io/crates/tracing) crate. //! The `tracing` crate is a framework for instrumenting Rust programs to collect -//! structured, event-based diagnostic information. You can use the [logging::init] function +//! structured, event-based diagnostic information. You can use the [crate::telemetry::init] function //! to initialize a global logger, passing in a boolean `verbose` parameter. This function //! will return an error if a logger has already been initialized. //! 
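A minimal usage sketch of the `Runner` builder documented above, assuming the `Config` has already been built and that `Config`/`SyncMode` live under `magi::config`:

```rust
use magi::config::{Config, SyncMode};
use magi::runner::Runner;

/// Illustrative only: drives a full sync with an already-built [Config].
/// The import paths for [Config] and [SyncMode] are assumed here.
async fn run_node(config: Config) -> eyre::Result<()> {
    Runner::from_config(config)
        .with_sync_mode(SyncMode::Full)
        .run()
        .await
}
```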
diff --git a/src/version/mod.rs b/src/version/mod.rs index 3678ca81..93b785a6 100644 --- a/src/version/mod.rs +++ b/src/version/mod.rs @@ -1,11 +1,16 @@ +/// Represents the Magi version #[derive(Debug)] pub struct Version { + /// The package name specified in `Cargo.toml` name: String, + /// The package version specified in `Cargo.toml` version: String, + /// `Dev` if compiled in debug mode. `Release` otherwise. meta: String, } impl Version { + /// Build and returns a [Version] struct pub fn build() -> Self { let meta = if cfg!(debug_assertions) { "dev" @@ -22,6 +27,7 @@ impl Version { } impl ToString for Version { + /// Formatted as: {name}{version}-{meta} fn to_string(&self) -> String { format!("{}{}-{}", self.name, self.version, self.meta) }
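A small usage sketch of `Version`, assuming the crate metadata from `Cargo.toml`:

```rust
use magi::version::Version;

fn main() {
    // Prints something like "magi0.1.0-dev" for a debug build, following the
    // {name}{version}-{meta} format described above.
    println!("{}", Version::build().to_string());
}
```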