From 5531be357978a89a5143813b2a3b9700d0aa09e4 Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Thu, 5 Oct 2023 19:07:37 +0200 Subject: [PATCH 1/8] ci: Uses PAT token for release please (#165) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ Uses PAT token for release please. ## Why ❔ To allow other workflows to be triggered by tags creation. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- .github/workflows/release-please.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml index 1194864aa80..266a4db8158 100644 --- a/.github/workflows/release-please.yml +++ b/.github/workflows/release-please.yml @@ -18,6 +18,7 @@ jobs: id: release uses: google-github-actions/release-please-action@v3 with: + token: ${{ secrets.RELEASE_TOKEN }} command: manifest config-file: .github/release-please/config.json manifest-file: .github/release-please/manifest.json From 0418be11faec444762f344266e9b0d1c3f238c33 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 10:13:41 +0200 Subject: [PATCH 2/8] chore(main): release core 15.1.1 (#161) :robot: I have created a release *beep* *boop* --- ## [15.1.1](https://github.com/matter-labs/zksync-era/compare/core-v15.1.0...core-v15.1.1) (2023-10-05) ### Bug Fixes * use gauge instead histogram for replication lag metric ([#159](https://github.com/matter-labs/zksync-era/issues/159)) ([0d952d4](https://github.com/matter-labs/zksync-era/commit/0d952d43a021c2fbf18920da3e7d770a6309d990)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). 
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Danil --- .github/release-please/manifest.json | 2 +- core/CHANGELOG.md | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index 99563351109..3cf890dfd6e 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,6 +1,6 @@ { "sdk/zksync-web3.js": "0.15.4", "sdk/zksync-rs": "0.4.0", - "core": "15.1.0", + "core": "15.1.1", "prover": "7.1.1" } diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 36060639819..a2bf915ae1b 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +## [15.1.1](https://github.com/matter-labs/zksync-era/compare/core-v15.1.0...core-v15.1.1) (2023-10-05) + + +### Bug Fixes + +* use gauge instead histogram for replication lag metric ([#159](https://github.com/matter-labs/zksync-era/issues/159)) ([0d952d4](https://github.com/matter-labs/zksync-era/commit/0d952d43a021c2fbf18920da3e7d770a6309d990)) + ## [15.1.0](https://github.com/matter-labs/zksync-era/compare/core-v15.0.2...core-v15.1.0) (2023-10-03) From f98c4fab0f10d190ceb2ae9bfa77929bf793a6ea Mon Sep 17 00:00:00 2001 From: Marcin M <128217157+mm-zk@users.noreply.github.com> Date: Fri, 6 Oct 2023 11:33:49 +0200 Subject: [PATCH 3/8] fix(vm): Make execution status and stop reason public (#169) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ * TracerExecutionStatus and TracerExecutionStopReason are part of the ExecutionEndTracer trait, but they were not publicly available. ## Why ❔ * This breaks external implementations of the ExecutionEndTracer --- core/lib/vm/src/lib.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/core/lib/vm/src/lib.rs b/core/lib/vm/src/lib.rs index 34c25f4addc..38e6982ce81 100644 --- a/core/lib/vm/src/lib.rs +++ b/core/lib/vm/src/lib.rs @@ -15,7 +15,10 @@ pub use errors::{ pub use tracers::{ call::CallTracer, - traits::{BoxedTracer, DynTracer, ExecutionEndTracer, ExecutionProcessing, VmTracer}, + traits::{ + BoxedTracer, DynTracer, ExecutionEndTracer, ExecutionProcessing, TracerExecutionStatus, + TracerExecutionStopReason, VmTracer, + }, utils::VmExecutionStopReason, validation::ViolatedValidationRule, StorageInvocations, ValidationError, ValidationTracer, ValidationTracerParams, From f94b8192c9a20259f692f77f87eb0dc9bc7e3418 Mon Sep 17 00:00:00 2001 From: Igor Borodin Date: Fri, 6 Oct 2023 12:23:47 +0200 Subject: [PATCH 4/8] fix: Add exec to replace shell inside entrypoint with the actual binary (#134) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ Replaces container shell with EN and local-env executables in corresponding Docker images to eg. properly handle termination signals. More details in original PR: https://github.com/matter-labs/zksync-era/pull/76 Courtesy of https://github.com/voron Thanks for the contribution, and sorry it took so long to review - we've been busy with FOSS'ing our repos. ## Why ❔ https://github.com/matter-labs/zksync-era/pull/76#issue-1849818996 ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. 
Co-authored-by: Alex Vorona Co-authored-by: Roman Brodetski --- docker/external-node/entrypoint.sh | 2 +- docker/local-node/entrypoint.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/external-node/entrypoint.sh b/docker/external-node/entrypoint.sh index a64390145ca..bf6e98616f3 100644 --- a/docker/external-node/entrypoint.sh +++ b/docker/external-node/entrypoint.sh @@ -5,4 +5,4 @@ set -e # Prepare the database if it's not ready. No-op if the DB is prepared. sqlx database setup # Run the external node. -zksync_external_node +exec zksync_external_node diff --git a/docker/local-node/entrypoint.sh b/docker/local-node/entrypoint.sh index 440dd3fa318..664cf4b3b6d 100755 --- a/docker/local-node/entrypoint.sh +++ b/docker/local-node/entrypoint.sh @@ -43,4 +43,4 @@ fi # start server source /etc/env/dev.env source /etc/env/.init.env -zksync_server +exec zksync_server From f14bf6851059a7add6677c89b3192e1b23cbf3c5 Mon Sep 17 00:00:00 2001 From: AnastasiiaVashchuk <72273339+AnastasiiaVashchuk@users.noreply.github.com> Date: Fri, 6 Oct 2023 15:18:24 +0300 Subject: [PATCH 5/8] feat: change chainId to u64 (#167) --- Cargo.lock | 5 +- core/bin/external_node/src/config/mod.rs | 15 +- .../system-constants-generator/src/utils.rs | 2 +- core/lib/basic_types/Cargo.toml | 1 + core/lib/basic_types/src/lib.rs | 162 ++++++++++++++++-- core/lib/config/src/configs/chain.rs | 6 +- core/lib/dal/src/blocks_web3_dal.rs | 4 +- .../lib/dal/src/models/storage_transaction.rs | 4 +- core/lib/dal/src/tests/mod.rs | 2 +- core/lib/dal/src/transactions_web3_dal.rs | 6 +- core/lib/state/src/in_memory.rs | 4 +- core/lib/test_account/src/lib.rs | 2 +- core/lib/types/src/api/mod.rs | 2 +- core/lib/types/src/l2/mod.rs | 8 +- core/lib/types/src/storage/mod.rs | 2 +- core/lib/types/src/transaction_request.rs | 79 +++++---- .../eip712_signature/typed_structure.rs | 2 +- .../src/tx/primitives/packed_eth_signature.rs | 11 +- core/lib/vm/src/tests/l1_tx_execution.rs | 2 +- core/lib/vm/src/tests/require_eip712.rs | 11 +- core/lib/vm/src/tests/tester/vm_tester.rs | 2 +- .../src/api_server/web3/namespaces/eth.rs | 2 +- .../src/api_server/web3/namespaces/net.rs | 2 +- .../zksync_core/src/api_server/web3/state.rs | 4 +- core/lib/zksync_core/src/genesis.rs | 30 +++- core/lib/zksync_core/src/lib.rs | 6 +- .../src/metadata_calculator/helpers.rs | 8 +- .../src/metadata_calculator/tests.rs | 4 +- .../batch_executor/tests/tester.rs | 4 +- .../src/state_keeper/io/tests/tester.rs | 4 +- core/lib/zksync_core/src/state_keeper/mod.rs | 3 +- .../zksync_core/src/state_keeper/tests/mod.rs | 4 +- .../src/state_keeper/tests/tester.rs | 4 +- core/multivm_deps/vm_1_3_2/src/test_utils.rs | 8 +- core/multivm_deps/vm_m5/src/test_utils.rs | 8 +- core/multivm_deps/vm_m6/src/test_utils.rs | 8 +- core/tests/loadnext/src/account_pool.rs | 4 +- core/tests/loadnext/src/config.rs | 6 +- core/tests/vm-benchmark/harness/src/lib.rs | 4 +- 39 files changed, 306 insertions(+), 139 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0b584dc9a2e..45d34bfcd8f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5645,9 +5645,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.97" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf3bf93142acad5821c99197022e170842cdbc1c30482b98750c688c640842a" +checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" dependencies = [ "itoa", "ryu", @@ -7640,6 +7640,7 @@ name = "zksync_basic_types" version = "0.1.0" dependencies = [ 
"serde", + "serde_json", "web3", ] diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 35b1e91bc08..66f4e54ff57 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -52,13 +52,14 @@ impl RemoteENConfig { .get_main_contract() .await .context("Failed to fetch L1 contract address")?; - let l2_chain_id = L2ChainId( + let l2_chain_id = L2ChainId::try_from( client .chain_id() .await .context("Failed to fetch L2 chain ID")? - .as_u64() as u16, - ); + .as_u64(), + ) + .unwrap(); let l1_chain_id = L1ChainId( client .l1_chain_id() @@ -396,14 +397,14 @@ impl ExternalNodeConfig { .await .context("Unable to check L1 chain ID through the configured L1 client")?; - let l2_chain_id: u16 = env_var("EN_L2_CHAIN_ID"); + let l2_chain_id: L2ChainId = env_var("EN_L2_CHAIN_ID"); let l1_chain_id: u64 = env_var("EN_L1_CHAIN_ID"); - if l2_chain_id != remote.l2_chain_id.0 { + if l2_chain_id != remote.l2_chain_id { anyhow::bail!( "Configured L2 chain id doesn't match the one from main node. Make sure your configuration is correct and you are corrected to the right main node. - Main node L2 chain id: {}. Local config value: {}", - remote.l2_chain_id.0, l2_chain_id + Main node L2 chain id: {:?}. Local config value: {:?}", + remote.l2_chain_id, l2_chain_id ); } if l1_chain_id != remote.l1_chain_id.0 { diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index afb00b5cda7..d55a73d4e8f 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -92,7 +92,7 @@ pub(super) fn get_l2_tx(contract_address: Address, signer: &H256, pubdata_price: gas_per_pubdata_limit: pubdata_price.into(), }, U256::from(0), - L2ChainId(270), + L2ChainId::from(270), signer, None, Default::default(), diff --git a/core/lib/basic_types/Cargo.toml b/core/lib/basic_types/Cargo.toml index e96dd0c0ce2..4e8d8af8c15 100644 --- a/core/lib/basic_types/Cargo.toml +++ b/core/lib/basic_types/Cargo.toml @@ -12,3 +12,4 @@ categories = ["cryptography"] [dependencies] web3 = { version= "0.19.0", default-features = false, features = ["http-rustls-tls", "test", "signing"] } serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" diff --git a/core/lib/basic_types/src/lib.rs b/core/lib/basic_types/src/lib.rs index a8f7cacbae5..3223dfddf59 100644 --- a/core/lib/basic_types/src/lib.rs +++ b/core/lib/basic_types/src/lib.rs @@ -7,7 +7,7 @@ mod macros; pub mod network; -use serde::{Deserialize, Serialize}; +use serde::{de, Deserialize, Deserializer, Serialize}; use std::convert::{Infallible, TryFrom, TryInto}; use std::fmt; use std::num::ParseIntError; @@ -76,6 +76,85 @@ impl TryFrom for AccountTreeId { } } +/// ChainId in the ZkSync network. +#[derive(Copy, Clone, Debug, Serialize, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct L2ChainId(u64); + +impl<'de> Deserialize<'de> for L2ChainId { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let s: String = Deserialize::deserialize(deserializer)?; + s.parse().map_err(de::Error::custom) + } +} + +impl FromStr for L2ChainId { + type Err = String; + + fn from_str(s: &str) -> Result { + // Parse the string as a U64 + // try to parse as decimal first + let number = match U64::from_dec_str(s) { + Ok(u) => u, + Err(_) => { + // try to parse as hex + s.parse::() + .map_err(|err| format!("Failed to parse L2ChainId: Err {err}"))? 
+ } + }; + + if number.as_u64() > L2ChainId::max().0 { + return Err(format!("Too big chain ID. MAX: {}", L2ChainId::max().0)); + } + Ok(L2ChainId(number.as_u64())) + } +} + +impl L2ChainId { + /// The maximum value of the L2 chain ID. + // 2^53 - 1 is a max safe integer in JS. In ethereum JS libs chain ID should be the safe integer. + // Next arithmetic operation: subtract 36 and divide by 2 comes from `v` calculation: + // v = 2*chainId + 36, that should be save integer as well. + const MAX: u64 = ((1 << 53) - 1 - 36) / 2; + + pub fn max() -> Self { + Self(Self::MAX) + } + + pub fn as_u64(&self) -> u64 { + self.0 + } +} + +impl Default for L2ChainId { + fn default() -> Self { + Self(270) + } +} + +impl TryFrom for L2ChainId { + type Error = String; + + fn try_from(val: u64) -> Result { + if val > L2ChainId::max().0 { + return Err(format!( + "Cannot convert given value {} into L2ChainId. It's greater than MAX: {},", + val, + L2ChainId::max().0, + )); + } + Ok(Self(val)) + } +} + +impl From for L2ChainId { + fn from(value: u32) -> Self { + Self(value as u64) + } +} + basic_type!( /// zkSync network block sequential index. MiniblockNumber, @@ -112,12 +191,6 @@ basic_type!( u64 ); -basic_type!( - /// ChainId in the ZkSync network. - L2ChainId, - u16 -); - #[allow(clippy::derivable_impls)] impl Default for MiniblockNumber { fn default() -> Self { @@ -139,15 +212,78 @@ impl Default for L1BlockNumber { } } -impl Default for L2ChainId { - fn default() -> Self { - Self(270) - } -} - #[allow(clippy::derivable_impls)] impl Default for PriorityOpId { fn default() -> Self { Self(0) } } + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::from_str; + + #[test] + fn test_from_str_valid_decimal() { + let input = "42"; + let result = L2ChainId::from_str(input); + assert_eq!(result.unwrap().as_u64(), 42); + } + + #[test] + fn test_from_str_valid_hexadecimal() { + let input = "0x2A"; + let result = L2ChainId::from_str(input); + assert_eq!(result.unwrap().as_u64(), 42); + } + + #[test] + fn test_from_str_too_big_chain_id() { + let input = "18446744073709551615"; // 2^64 - 1 + let result = L2ChainId::from_str(input); + assert_eq!( + result, + Err(format!("Too big chain ID. 
MAX: {}", L2ChainId::max().0)) + ); + } + + #[test] + fn test_from_str_invalid_input() { + let input = "invalid"; // Invalid input that cannot be parsed as a number + let result = L2ChainId::from_str(input); + + assert!(result.is_err()); + assert!(result + .unwrap_err() + .contains("Failed to parse L2ChainId: Err ")); + } + + #[test] + fn test_deserialize_valid_decimal() { + let input_json = "\"42\""; + + let result: Result = from_str(input_json); + assert_eq!(result.unwrap().as_u64(), 42); + } + + #[test] + fn test_deserialize_valid_hex() { + let input_json = "\"0x2A\""; + + let result: Result = from_str(input_json); + assert_eq!(result.unwrap().as_u64(), 42); + } + + #[test] + fn test_deserialize_invalid() { + let input_json = "\"invalid\""; + + let result: Result = from_str(input_json); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .to_string() + .contains("Failed to parse L2ChainId: Err Invalid character ")); + } +} diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index 3e680b435e4..afb92871694 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -5,7 +5,7 @@ use serde::Deserialize; use std::time::Duration; // Local uses use zksync_basic_types::network::Network; -use zksync_basic_types::{Address, H256}; +use zksync_basic_types::{Address, L2ChainId, H256}; use zksync_contracts::BaseSystemContractsHashes; use super::envy_load; @@ -47,7 +47,7 @@ pub struct NetworkConfig { pub zksync_network: String, /// ID of current zkSync network treated as ETH network ID. /// Used to distinguish zkSync from other Web3-capable networks. - pub zksync_network_id: u16, + pub zksync_network_id: L2ChainId, } impl NetworkConfig { @@ -202,7 +202,7 @@ mod tests { network: NetworkConfig { network: "localhost".parse().unwrap(), zksync_network: "localhost".to_string(), - zksync_network_id: 270, + zksync_network_id: L2ChainId::from(270), }, state_keeper: StateKeeperConfig { transaction_slots: 50, diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 301f6940d1a..03ec1c1930f 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -623,7 +623,7 @@ mod tests { for block_id in block_ids { let block = conn .blocks_web3_dal() - .get_block_by_web3_block_id(block_id, false, L2ChainId(270)) + .get_block_by_web3_block_id(block_id, false, L2ChainId::from(270)) .await; let block = block.unwrap().unwrap(); assert!(block.transactions.is_empty()); @@ -650,7 +650,7 @@ mod tests { for block_id in non_existing_block_ids { let block = conn .blocks_web3_dal() - .get_block_by_web3_block_id(block_id, false, L2ChainId(270)) + .get_block_by_web3_block_id(block_id, false, L2ChainId::from(270)) .await; assert!(block.unwrap().is_none()); diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index 738cf3356f3..554d33649f2 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -389,7 +389,7 @@ impl<'r> FromRow<'r, PgRow> for StorageApiTransaction { .unwrap_or_default() .map(U64::from), access_list: None, - chain_id: U256::from(0), + chain_id: 0, l1_batch_number: db_row .try_get::("l1_batch_number_tx") .ok() @@ -502,7 +502,7 @@ pub fn web3_transaction_select_sql() -> &'static str { pub fn extract_web3_transaction(db_row: PgRow, chain_id: L2ChainId) -> api::Transaction { let mut storage_api_tx = StorageApiTransaction::from_row(&db_row).unwrap(); - 
storage_api_tx.inner_api_transaction.chain_id = U256::from(chain_id.0); + storage_api_tx.inner_api_transaction.chain_id = chain_id.as_u64(); storage_api_tx.into() } diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index 3f39f98a45a..fecd33f4761 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -59,7 +59,7 @@ pub(crate) fn mock_l2_transaction() -> L2Tx { zksync_types::Nonce(0), fee, Default::default(), - L2ChainId(270), + L2ChainId::from(270), &H256::random(), None, Default::default(), diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index c9a6ee8bf76..8ad983a2218 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -417,7 +417,7 @@ mod tests { for transaction_id in transaction_ids { let web3_tx = conn .transactions_web3_dal() - .get_transaction(transaction_id, L2ChainId(270)) + .get_transaction(transaction_id, L2ChainId::from(270)) .await; let web3_tx = web3_tx.unwrap().unwrap(); assert_eq!(web3_tx.hash, tx_hash); @@ -431,7 +431,7 @@ mod tests { for transaction_id in transactions_with_bogus_index { let web3_tx = conn .transactions_web3_dal() - .get_transaction(transaction_id, L2ChainId(270)) + .get_transaction(transaction_id, L2ChainId::from(270)) .await; assert!(web3_tx.unwrap().is_none()); } @@ -448,7 +448,7 @@ mod tests { for transaction_id in transactions_with_bogus_block { let web3_tx = conn .transactions_web3_dal() - .get_transaction(transaction_id, L2ChainId(270)) + .get_transaction(transaction_id, L2ChainId::from(270)) .await; assert!(web3_tx.unwrap().is_none()); } diff --git a/core/lib/state/src/in_memory.rs b/core/lib/state/src/in_memory.rs index 3ae72a9f4e9..e44187e34d9 100644 --- a/core/lib/state/src/in_memory.rs +++ b/core/lib/state/src/in_memory.rs @@ -9,7 +9,7 @@ use zksync_types::{ use zksync_utils::u256_to_h256; /// Network ID we use by defailt for in memory storage. -pub const IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID: u16 = 270; +pub const IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID: u32 = 270; /// In-memory storage. #[derive(Debug, Default)] @@ -22,7 +22,7 @@ impl InMemoryStorage { /// Constructs a storage that contains system smart contracts. pub fn with_system_contracts(bytecode_hasher: impl Fn(&[u8]) -> H256) -> Self { Self::with_system_contracts_and_chain_id( - L2ChainId(IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID), + L2ChainId::from(IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID), bytecode_hasher, ) } diff --git a/core/lib/test_account/src/lib.rs b/core/lib/test_account/src/lib.rs index 9c94e3f49cf..509402b7b6b 100644 --- a/core/lib/test_account/src/lib.rs +++ b/core/lib/test_account/src/lib.rs @@ -77,7 +77,7 @@ impl Account { nonce, fee.unwrap_or_else(|| self.default_fee()), value, - L2ChainId(270), + L2ChainId::default(), &self.private_key, factory_deps, Default::default(), diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index c9b157c6629..6c65356081b 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -505,7 +505,7 @@ pub struct Transaction { pub max_priority_fee_per_gas: Option, /// Id of the current chain #[serde(rename = "chainId")] - pub chain_id: U256, + pub chain_id: u64, /// Number of the l1 batch this transaction was included within. 
#[serde( rename = "l1BatchNumber", diff --git a/core/lib/types/src/l2/mod.rs b/core/lib/types/src/l2/mod.rs index 973c5a30b10..b1ef8ca07a7 100644 --- a/core/lib/types/src/l2/mod.rs +++ b/core/lib/types/src/l2/mod.rs @@ -93,7 +93,7 @@ impl L2TxCommonData { self.input = Some(InputData { hash, data: input }) } - pub fn extract_chain_id(&self) -> Option { + pub fn extract_chain_id(&self) -> Option { let bytes = self.input_data()?; let chain_id = match bytes.first() { Some(x) if *x >= 0x80 => { @@ -226,7 +226,7 @@ impl L2Tx { pub fn get_rlp_bytes(&self, chain_id: L2ChainId) -> Bytes { let mut rlp_stream = RlpStream::new(); let tx: TransactionRequest = self.clone().into(); - tx.rlp(&mut rlp_stream, chain_id.0, None); + tx.rlp(&mut rlp_stream, chain_id.as_u64(), None); Bytes(rlp_stream.as_raw().to_vec()) } @@ -329,7 +329,7 @@ impl From for TransactionRequest { transaction_type: None, access_list: None, eip712_meta: None, - chain_id: tx.common_data.extract_chain_id().unwrap_or_default().into(), + chain_id: tx.common_data.extract_chain_id(), }; match tx_type as u8 { LEGACY_TX_TYPE => {} @@ -389,7 +389,7 @@ impl From for api::Transaction { Self { hash: tx.hash(), - chain_id: tx.common_data.extract_chain_id().unwrap_or_default().into(), + chain_id: tx.common_data.extract_chain_id().unwrap_or_default(), nonce: U256::from(tx.common_data.nonce.0), from: Some(tx.common_data.initiator_address), to: Some(tx.recipient_account()), diff --git a/core/lib/types/src/storage/mod.rs b/core/lib/types/src/storage/mod.rs index b1ed25dad97..bf790b58d3d 100644 --- a/core/lib/types/src/storage/mod.rs +++ b/core/lib/types/src/storage/mod.rs @@ -103,7 +103,7 @@ pub fn get_system_context_init_logs(chain_id: L2ChainId) -> Vec { vec![ StorageLog::new_write_log( get_system_context_key(SYSTEM_CONTEXT_CHAIN_ID_POSITION), - H256::from_low_u64_be(chain_id.0 as u64), + H256::from_low_u64_be(chain_id.as_u64()), ), StorageLog::new_write_log( get_system_context_key(SYSTEM_CONTEXT_BLOCK_GAS_LIMIT_POSITION), diff --git a/core/lib/types/src/transaction_request.rs b/core/lib/types/src/transaction_request.rs index 0516081434d..c9af634c3e4 100644 --- a/core/lib/types/src/transaction_request.rs +++ b/core/lib/types/src/transaction_request.rs @@ -168,7 +168,7 @@ pub enum SerializationTransactionError { #[error("invalid signature")] MalformedSignature, #[error("wrong chain id {}", .0.unwrap_or_default())] - WrongChainId(Option), + WrongChainId(Option), #[error("malformed paymaster params")] MalforedPaymasterParams, #[error("factory dependency #{0} is invalid: {1}")] @@ -233,7 +233,7 @@ pub struct TransactionRequest { pub eip712_meta: Option, /// Chain ID #[serde(default, skip_serializing_if = "Option::is_none")] - pub chain_id: Option, + pub chain_id: Option, } #[derive(Default, Serialize, Deserialize, Clone, PartialEq, Debug, Eq)] @@ -426,7 +426,7 @@ impl TransactionRequest { pub fn get_signed_bytes(&self, signature: &PackedEthSignature, chain_id: L2ChainId) -> Vec { let mut rlp = RlpStream::new(); - self.rlp(&mut rlp, *chain_id, Some(signature)); + self.rlp(&mut rlp, chain_id.as_u64(), Some(signature)); let mut data = rlp.out().to_vec(); if let Some(tx_type) = self.transaction_type { data.insert(0, tx_type.as_u64() as u8); @@ -438,7 +438,7 @@ impl TransactionRequest { self.transaction_type.is_none() || self.transaction_type == Some(LEGACY_TX_TYPE.into()) } - pub fn rlp(&self, rlp: &mut RlpStream, chain_id: u16, signature: Option<&PackedEthSignature>) { + pub fn rlp(&self, rlp: &mut RlpStream, chain_id: u64, signature: 
Option<&PackedEthSignature>) { rlp.begin_unbounded_list(); match self.transaction_type { @@ -553,7 +553,7 @@ impl TransactionRequest { pub fn from_bytes( bytes: &[u8], - chain_id: u16, + chain_id: L2ChainId, ) -> Result<(Self, H256), SerializationTransactionError> { let rlp; let mut tx = match bytes.first() { @@ -567,7 +567,7 @@ impl TransactionRequest { let v = rlp.val_at(6)?; let (_, tx_chain_id) = PackedEthSignature::unpack_v(v) .map_err(|_| SerializationTransactionError::MalformedSignature)?; - if tx_chain_id.is_some() && tx_chain_id != Some(chain_id) { + if tx_chain_id.is_some() && tx_chain_id != Some(chain_id.as_u64()) { return Err(SerializationTransactionError::WrongChainId(tx_chain_id)); } Self { @@ -592,7 +592,7 @@ impl TransactionRequest { } let tx_chain_id = rlp.val_at(0).ok(); - if tx_chain_id != Some(chain_id) { + if tx_chain_id != Some(chain_id.as_u64()) { return Err(SerializationTransactionError::WrongChainId(tx_chain_id)); } Self { @@ -613,7 +613,7 @@ impl TransactionRequest { )); } let tx_chain_id = rlp.val_at(10).ok(); - if tx_chain_id.is_some() && tx_chain_id != Some(chain_id) { + if tx_chain_id.is_some() && tx_chain_id != Some(chain_id.as_u64()) { return Err(SerializationTransactionError::WrongChainId(tx_chain_id)); } @@ -658,21 +658,20 @@ impl TransactionRequest { None => tx.recover_default_signer(default_signed_message).ok(), }; - let hash = - tx.get_tx_hash_with_signed_message(&default_signed_message, L2ChainId(chain_id))?; + let hash = tx.get_tx_hash_with_signed_message(&default_signed_message, chain_id)?; Ok((tx, hash)) } fn get_default_signed_message( &self, - chain_id: Option, + chain_id: Option, ) -> Result { if self.is_eip712_tx() { let tx_chain_id = chain_id.ok_or(SerializationTransactionError::WrongChainId(chain_id))?; Ok(PackedEthSignature::typed_data_to_signed_bytes( - &Eip712Domain::new(L2ChainId(tx_chain_id)), + &Eip712Domain::new(L2ChainId::try_from(tx_chain_id).unwrap()), self, )) } else { @@ -707,7 +706,7 @@ impl TransactionRequest { } pub fn get_tx_hash(&self, chain_id: L2ChainId) -> Result { - let default_signed_message = self.get_default_signed_message(Some(chain_id.0))?; + let default_signed_message = self.get_default_signed_message(Some(chain_id.as_u64()))?; self.get_tx_hash_with_signed_message(&default_signed_message, chain_id) } @@ -979,8 +978,11 @@ mod tests { access_list: None, }; let signed_tx = accounts.sign_transaction(tx.clone(), &key).await.unwrap(); - let (tx2, _) = - TransactionRequest::from_bytes(signed_tx.raw_transaction.0.as_slice(), 270).unwrap(); + let (tx2, _) = TransactionRequest::from_bytes( + signed_tx.raw_transaction.0.as_slice(), + L2ChainId::from(270), + ) + .unwrap(); assert_eq!(tx.gas, tx2.gas); assert_eq!(tx.gas_price.unwrap(), tx2.gas_price); assert_eq!(tx.nonce.unwrap(), tx2.nonce); @@ -1013,16 +1015,13 @@ mod tests { let mut rlp = RlpStream::new(); tx.rlp(&mut rlp, 270, Some(&signature)); let data = rlp.out().to_vec(); - let (tx2, _) = TransactionRequest::from_bytes(&data, 270).unwrap(); + let (tx2, _) = TransactionRequest::from_bytes(&data, L2ChainId::from(270)).unwrap(); assert_eq!(tx.gas, tx2.gas); assert_eq!(tx.gas_price, tx2.gas_price); assert_eq!(tx.nonce, tx2.nonce); assert_eq!(tx.input, tx2.input); assert_eq!(tx.value, tx2.value); - assert_eq!( - tx2.v.unwrap().as_u32() as u16, - signature.v_with_chain_id(270) - ); + assert_eq!(tx2.v.unwrap().as_u64(), signature.v_with_chain_id(270)); assert_eq!(tx2.s.unwrap(), signature.s().into()); assert_eq!(tx2.r.unwrap(), signature.r().into()); assert_eq!(address, 
tx2.from.unwrap()); @@ -1056,8 +1055,10 @@ mod tests { ..Default::default() }; - let msg = - PackedEthSignature::typed_data_to_signed_bytes(&Eip712Domain::new(L2ChainId(270)), &tx); + let msg = PackedEthSignature::typed_data_to_signed_bytes( + &Eip712Domain::new(L2ChainId::from(270)), + &tx, + ); let signature = PackedEthSignature::sign_raw(&private_key, &msg).unwrap(); let mut rlp = RlpStream::new(); @@ -1069,7 +1070,7 @@ mod tests { tx.r = Some(U256::from_big_endian(signature.r())); tx.s = Some(U256::from_big_endian(signature.s())); - let (tx2, _) = TransactionRequest::from_bytes(&data, 270).unwrap(); + let (tx2, _) = TransactionRequest::from_bytes(&data, L2ChainId::from(270)).unwrap(); assert_eq!(tx, tx2); } @@ -1098,14 +1099,15 @@ mod tests { chain_id: Some(270), ..Default::default() }; - let domain = Eip712Domain::new(L2ChainId(270)); + let domain = Eip712Domain::new(L2ChainId::from(270)); let signature = PackedEthSignature::sign_typed_data(&private_key, &domain, &transaction_request) .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId(270)); + let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(270)); - let (decoded_tx, _) = TransactionRequest::from_bytes(encoded_tx.as_slice(), 270).unwrap(); + let (decoded_tx, _) = + TransactionRequest::from_bytes(encoded_tx.as_slice(), L2ChainId::from(270)).unwrap(); let recovered_signer = decoded_tx.from.unwrap(); assert_eq!(address, recovered_signer); } @@ -1137,14 +1139,15 @@ mod tests { chain_id: Some(270), ..Default::default() }; - let domain = Eip712Domain::new(L2ChainId(270)); + let domain = Eip712Domain::new(L2ChainId::from(270)); let signature = PackedEthSignature::sign_typed_data(&private_key, &domain, &transaction_request) .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId(270)); + let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(270)); - let decoded_tx = TransactionRequest::from_bytes(encoded_tx.as_slice(), 272); + let decoded_tx = + TransactionRequest::from_bytes(encoded_tx.as_slice(), L2ChainId::from(272)); assert_eq!( decoded_tx, Err(SerializationTransactionError::WrongChainId(Some(270))) @@ -1184,7 +1187,8 @@ mod tests { let mut data = rlp.out().to_vec(); data.insert(0, EIP_1559_TX_TYPE); - let (decoded_tx, _) = TransactionRequest::from_bytes(data.as_slice(), 270).unwrap(); + let (decoded_tx, _) = + TransactionRequest::from_bytes(data.as_slice(), L2ChainId::from(270)).unwrap(); let recovered_signer = decoded_tx.from.unwrap(); assert_eq!(address, recovered_signer); } @@ -1221,7 +1225,7 @@ mod tests { let mut data = rlp.out().to_vec(); data.insert(0, EIP_1559_TX_TYPE); - let decoded_tx = TransactionRequest::from_bytes(data.as_slice(), 270); + let decoded_tx = TransactionRequest::from_bytes(data.as_slice(), L2ChainId::from(270)); assert_eq!( decoded_tx, Err(SerializationTransactionError::WrongChainId(Some(272))) @@ -1261,7 +1265,7 @@ mod tests { let mut data = rlp.out().to_vec(); data.insert(0, EIP_1559_TX_TYPE); - let res = TransactionRequest::from_bytes(data.as_slice(), 270); + let res = TransactionRequest::from_bytes(data.as_slice(), L2ChainId::from(270)); assert_eq!( res, Err(SerializationTransactionError::AccessListsNotSupported) @@ -1298,7 +1302,7 @@ mod tests { let mut data = rlp.out().to_vec(); data.insert(0, EIP_2930_TX_TYPE); - let res = TransactionRequest::from_bytes(data.as_slice(), 270); + let res = TransactionRequest::from_bytes(data.as_slice(), L2ChainId::from(270)); assert_eq!( res, 
Err(SerializationTransactionError::AccessListsNotSupported) @@ -1419,8 +1423,10 @@ mod tests { ..Default::default() }; - let msg = - PackedEthSignature::typed_data_to_signed_bytes(&Eip712Domain::new(L2ChainId(270)), &tx); + let msg = PackedEthSignature::typed_data_to_signed_bytes( + &Eip712Domain::new(L2ChainId::from(270)), + &tx, + ); let signature = PackedEthSignature::sign_raw(&private_key, &msg).unwrap(); let mut rlp = RlpStream::new(); @@ -1431,7 +1437,8 @@ mod tests { tx.v = Some(U64::from(signature.v())); tx.r = Some(U256::from_big_endian(signature.r())); tx.s = Some(U256::from_big_endian(signature.s())); - let request = TransactionRequest::from_bytes(data.as_slice(), 270).unwrap(); + let request = + TransactionRequest::from_bytes(data.as_slice(), L2ChainId::from(270)).unwrap(); assert!(matches!( L2Tx::from_request(request.0, random_tx_max_size), Err(SerializationTransactionError::OversizedData(_, _)) diff --git a/core/lib/types/src/tx/primitives/eip712_signature/typed_structure.rs b/core/lib/types/src/tx/primitives/eip712_signature/typed_structure.rs index 5ad48995a5c..999afbbe604 100644 --- a/core/lib/types/src/tx/primitives/eip712_signature/typed_structure.rs +++ b/core/lib/types/src/tx/primitives/eip712_signature/typed_structure.rs @@ -170,7 +170,7 @@ impl Eip712Domain { Self { name: Self::NAME.to_string(), version: Self::VERSION.to_string(), - chain_id: U256::from(*chain_id), + chain_id: U256::from(chain_id.as_u64()), } } } diff --git a/core/lib/types/src/tx/primitives/packed_eth_signature.rs b/core/lib/types/src/tx/primitives/packed_eth_signature.rs index 63f4911ea47..b249d151ef5 100644 --- a/core/lib/types/src/tx/primitives/packed_eth_signature.rs +++ b/core/lib/types/src/tx/primitives/packed_eth_signature.rs @@ -150,12 +150,10 @@ impl PackedEthSignature { pub fn v(&self) -> u8 { self.0.v() } - pub fn v_with_chain_id(&self, chain_id: u16) -> u16 { - self.0.v() as u16 + 35 + chain_id * 2 + pub fn v_with_chain_id(&self, chain_id: u64) -> u64 { + self.0.v() as u64 + 35 + chain_id * 2 } - pub fn unpack_v(v: u64) -> Result<(u8, Option), ParityCryptoError> { - use std::convert::TryInto; - + pub fn unpack_v(v: u64) -> Result<(u8, Option), ParityCryptoError> { if v == 27 { return Ok((0, None)); } else if v == 28 { @@ -163,9 +161,6 @@ impl PackedEthSignature { } else if v >= 35 { let chain_id = (v - 35) >> 1; let v = v - 35 - chain_id * 2; - let chain_id = chain_id - .try_into() - .map_err(|_| ParityCryptoError::Custom("Invalid chain_id".to_string()))?; if v == 0 { return Ok((0, Some(chain_id))); } else if v == 1 { diff --git a/core/lib/vm/src/tests/l1_tx_execution.rs b/core/lib/vm/src/tests/l1_tx_execution.rs index 5afe6af7918..a231d8aba0b 100644 --- a/core/lib/vm/src/tests/l1_tx_execution.rs +++ b/core/lib/vm/src/tests/l1_tx_execution.rs @@ -41,7 +41,7 @@ fn test_l1_tx_execution() { is_service: true, tx_number_in_block: 0, sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(L2ChainId(0)), + key: tx_data.tx_hash(L2ChainId::from(0)), value: u256_to_h256(U256::from(1u32)), }]; diff --git a/core/lib/vm/src/tests/require_eip712.rs b/core/lib/vm/src/tests/require_eip712.rs index d77e4d6a33a..4c2515ae2ef 100644 --- a/core/lib/vm/src/tests/require_eip712.rs +++ b/core/lib/vm/src/tests/require_eip712.rs @@ -52,7 +52,7 @@ async fn test_require_eip712() { assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - let chain_id: u16 = 270; + let chain_id: u32 = 270; // First, let's set the owners of the AA account to the private_address. 
// (so that messages signed by private_address, are authorized to act on behalf of the AA account). @@ -94,7 +94,7 @@ async fn test_require_eip712() { }; let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, 270).unwrap(); + let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); l2_tx.set_input(aa_tx, hash); @@ -134,15 +134,16 @@ async fn test_require_eip712() { let transaction_request: TransactionRequest = tx_712.into(); - let domain = Eip712Domain::new(L2ChainId(chain_id)); + let domain = Eip712Domain::new(L2ChainId::from(chain_id)); let signature = private_account .get_pk_signer() .sign_typed_data(&domain, &transaction_request) .await .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId(chain_id)); + let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(chain_id)); - let (aa_txn_request, aa_hash) = TransactionRequest::from_bytes(&encoded_tx, chain_id).unwrap(); + let (aa_txn_request, aa_hash) = + TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); l2_tx.set_input(encoded_tx, aa_hash); diff --git a/core/lib/vm/src/tests/tester/vm_tester.rs b/core/lib/vm/src/tests/tester/vm_tester.rs index 19450244120..07dbf89a8eb 100644 --- a/core/lib/vm/src/tests/tester/vm_tester.rs +++ b/core/lib/vm/src/tests/tester/vm_tester.rs @@ -142,7 +142,7 @@ impl VmTesterBuilder { gas_limit: BLOCK_GAS_LIMIT, execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId(270), + chain_id: L2ChainId::from(270), }, deployer: None, rich_accounts: vec![], diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs index 89434de7911..b68cfc247be 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs @@ -369,7 +369,7 @@ impl EthNamespace { #[tracing::instrument(skip(self))] pub fn chain_id_impl(&self) -> U64 { - self.state.api_config.l2_chain_id.0.into() + self.state.api_config.l2_chain_id.as_u64().into() } #[tracing::instrument(skip(self))] diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/net.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/net.rs index b31279ab693..88a732505ab 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/net.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/net.rs @@ -11,7 +11,7 @@ impl NetNamespace { } pub fn version_impl(&self) -> String { - self.zksync_network_id.to_string() + self.zksync_network_id.as_u64().to_string() } pub fn peer_count_impl(&self) -> U256 { diff --git a/core/lib/zksync_core/src/api_server/web3/state.rs b/core/lib/zksync_core/src/api_server/web3/state.rs index 8ea44db4a63..6ed90ec1d3c 100644 --- a/core/lib/zksync_core/src/api_server/web3/state.rs +++ b/core/lib/zksync_core/src/api_server/web3/state.rs @@ -65,7 +65,7 @@ impl InternalApiConfig { ) -> Self { Self { l1_chain_id: eth_config.network.chain_id(), - l2_chain_id: L2ChainId(eth_config.zksync_network_id), + l2_chain_id: eth_config.zksync_network_id, max_tx_size: web3_config.max_tx_size, estimate_gas_scale_factor: web3_config.estimate_gas_scale_factor, estimate_gas_acceptable_overestimation: web3_config @@ 
-195,7 +195,7 @@ impl Clone for RpcState { impl RpcState { pub fn parse_transaction_bytes(&self, bytes: &[u8]) -> Result<(L2Tx, H256), Web3Error> { let chain_id = self.api_config.l2_chain_id; - let (tx_request, hash) = api::TransactionRequest::from_bytes(bytes, chain_id.0)?; + let (tx_request, hash) = api::TransactionRequest::from_bytes(bytes, chain_id)?; Ok(( L2Tx::from_request(tx_request, self.api_config.max_tx_size)?, diff --git a/core/lib/zksync_core/src/genesis.rs b/core/lib/zksync_core/src/genesis.rs index f613b2b6a48..ccc9e949d2d 100644 --- a/core/lib/zksync_core/src/genesis.rs +++ b/core/lib/zksync_core/src/genesis.rs @@ -389,7 +389,7 @@ mod tests { first_l1_verifier_config: L1VerifierConfig::default(), first_verifier_address: Address::random(), }; - ensure_genesis_state(&mut conn, L2ChainId(270), ¶ms) + ensure_genesis_state(&mut conn, L2ChainId::from(270), ¶ms) .await .unwrap(); @@ -403,8 +403,34 @@ mod tests { assert_ne!(root_hash, H256::zero()); // Check that `ensure_genesis_state()` doesn't panic on repeated runs. - ensure_genesis_state(&mut conn, L2ChainId(270), ¶ms) + ensure_genesis_state(&mut conn, L2ChainId::from(270), ¶ms) .await .unwrap(); } + + #[db_test] + async fn running_genesis_with_big_chain_id(pool: ConnectionPool) { + let mut conn: StorageProcessor<'_> = pool.access_storage().await.unwrap(); + conn.blocks_dal().delete_genesis().await.unwrap(); + + let params = GenesisParams { + protocol_version: ProtocolVersionId::latest(), + first_validator: Address::random(), + base_system_contracts: BaseSystemContracts::load_from_disk(), + system_contracts: get_system_smart_contracts(), + first_l1_verifier_config: L1VerifierConfig::default(), + first_verifier_address: Address::random(), + }; + ensure_genesis_state(&mut conn, L2ChainId::max(), ¶ms) + .await + .unwrap(); + + assert!(!conn.blocks_dal().is_genesis_needed().await.unwrap()); + let metadata = conn + .blocks_dal() + .get_l1_batch_metadata(L1BatchNumber(0)) + .await; + let root_hash = metadata.unwrap().unwrap().metadata.root_hash; + assert_ne!(root_hash, H256::zero()); + } } diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index f40074d600c..028a746ced0 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -42,7 +42,7 @@ use zksync_types::{ proofs::AggregationRound, protocol_version::{L1VerifierConfig, VerifierParams}, system_contracts::get_system_smart_contracts, - Address, L2ChainId, PackedEthSignature, ProtocolVersionId, + Address, PackedEthSignature, ProtocolVersionId, }; use zksync_verification_key_server::get_cached_commitments; @@ -124,7 +124,7 @@ pub async fn genesis_init( genesis::ensure_genesis_state( &mut storage, - L2ChainId(network_config.zksync_network_id), + network_config.zksync_network_id, &genesis::GenesisParams { // We consider the operator to be the first validator for now. 
first_validator: operator_address, @@ -365,7 +365,7 @@ pub async fn initialize_components( let tx_sender_config = TxSenderConfig::new( &state_keeper_config, &api_config.web3_json_rpc, - L2ChainId(network_config.zksync_network_id), + network_config.zksync_network_id, ); let internal_api_config = InternalApiConfig::new( &network_config, diff --git a/core/lib/zksync_core/src/metadata_calculator/helpers.rs b/core/lib/zksync_core/src/metadata_calculator/helpers.rs index bd79b8866f4..0abcc30c644 100644 --- a/core/lib/zksync_core/src/metadata_calculator/helpers.rs +++ b/core/lib/zksync_core/src/metadata_calculator/helpers.rs @@ -366,7 +366,7 @@ mod tests { async fn loaded_logs_equivalence_basics(pool: ConnectionPool) { ensure_genesis_state( &mut pool.access_storage().await.unwrap(), - L2ChainId(270), + L2ChainId::from(270), &mock_genesis_params(), ) .await @@ -389,7 +389,7 @@ mod tests { #[db_test] async fn loaded_logs_equivalence_with_zero_no_op_logs(pool: ConnectionPool) { let mut storage = pool.access_storage().await.unwrap(); - ensure_genesis_state(&mut storage, L2ChainId(270), &mock_genesis_params()) + ensure_genesis_state(&mut storage, L2ChainId::from(270), &mock_genesis_params()) .await .unwrap(); @@ -467,7 +467,7 @@ mod tests { #[db_test] async fn loaded_logs_equivalence_with_non_zero_no_op_logs(pool: ConnectionPool) { let mut storage = pool.access_storage().await.unwrap(); - ensure_genesis_state(&mut storage, L2ChainId(270), &mock_genesis_params()) + ensure_genesis_state(&mut storage, L2ChainId::from(270), &mock_genesis_params()) .await .unwrap(); @@ -514,7 +514,7 @@ mod tests { #[db_test] async fn loaded_logs_equivalence_with_protective_reads(pool: ConnectionPool) { let mut storage = pool.access_storage().await.unwrap(); - ensure_genesis_state(&mut storage, L2ChainId(270), &mock_genesis_params()) + ensure_genesis_state(&mut storage, L2ChainId::from(270), &mock_genesis_params()) .await .unwrap(); diff --git a/core/lib/zksync_core/src/metadata_calculator/tests.rs b/core/lib/zksync_core/src/metadata_calculator/tests.rs index 00d34d7f870..e5e6e1f43ba 100644 --- a/core/lib/zksync_core/src/metadata_calculator/tests.rs +++ b/core/lib/zksync_core/src/metadata_calculator/tests.rs @@ -397,7 +397,7 @@ async fn setup_calculator_with_options( let mut storage = pool.access_storage().await.unwrap(); if storage.blocks_dal().is_genesis_needed().await.unwrap() { - let chain_id = L2ChainId(270); + let chain_id = L2ChainId::from(270); let protocol_version = ProtocolVersionId::latest(); let base_system_contracts = BaseSystemContracts::load_from_disk(); let system_contracts = get_system_smart_contracts(); @@ -650,7 +650,7 @@ async fn deduplication_works_as_expected(pool: ConnectionPool) { let first_verifier_address = Address::zero(); ensure_genesis_state( &mut storage, - L2ChainId(270), + L2ChainId::from(270), &GenesisParams { protocol_version, first_validator, diff --git a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs index 2fd2df20e6c..d41b0c98a82 100644 --- a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs @@ -28,7 +28,7 @@ use crate::state_keeper::{ }; const DEFAULT_GAS_PER_PUBDATA: u32 = 100; -const CHAIN_ID: L2ChainId = L2ChainId(270); +const CHAIN_ID: u32 = 270; /// Representation of configuration parameters used by the state keeper. /// Has sensible defaults for most tests, each of which can be overridden. 
@@ -144,7 +144,7 @@ impl Tester { create_genesis_l1_batch( &mut storage, self.fee_account, - CHAIN_ID, + L2ChainId::from(CHAIN_ID), ProtocolVersionId::latest(), &BASE_SYSTEM_CONTRACTS, &get_system_smart_contracts(), diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs index 5f1881afb3f..fb9ec33c54b 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs @@ -91,7 +91,7 @@ impl Tester { Duration::from_secs(1), l2_erc20_bridge_addr, BLOCK_GAS_LIMIT, - L2ChainId(270), + L2ChainId::from(270), ) .await; @@ -108,7 +108,7 @@ impl Tester { create_genesis_l1_batch( &mut storage, Address::repeat_byte(0x01), - L2ChainId(270), + L2ChainId::from(270), ProtocolVersionId::latest(), &self.base_system_contracts, &get_system_smart_contracts(), diff --git a/core/lib/zksync_core/src/state_keeper/mod.rs b/core/lib/zksync_core/src/state_keeper/mod.rs index 5ccae06a3f4..8eef5d6adbc 100644 --- a/core/lib/zksync_core/src/state_keeper/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/mod.rs @@ -8,7 +8,6 @@ use zksync_config::{ ContractsConfig, DBConfig, }; use zksync_dal::ConnectionPool; -use zksync_types::L2ChainId; mod batch_executor; pub(crate) mod extractors; @@ -71,7 +70,7 @@ where mempool_config.delay_interval(), contracts_config.l2_erc20_bridge_addr, state_keeper_config.validation_computational_gas_limit, - L2ChainId(network_config.zksync_network_id), + network_config.zksync_network_id, ) .await; diff --git a/core/lib/zksync_core/src/state_keeper/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/tests/mod.rs index 4f8f1fe364d..d269b1fea67 100644 --- a/core/lib/zksync_core/src/state_keeper/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/tests/mod.rs @@ -58,7 +58,7 @@ pub(super) fn default_system_env() -> SystemEnv { gas_limit: BLOCK_GAS_LIMIT, execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId(270), + chain_id: L2ChainId::from(270), } } @@ -147,7 +147,7 @@ pub(super) fn create_l2_transaction(fee_per_gas: u64, gas_per_pubdata: u32) -> L Nonce(0), fee, U256::zero(), - L2ChainId(271), + L2ChainId::from(271), &H256::repeat_byte(0x11), None, PaymasterParams::default(), diff --git a/core/lib/zksync_core/src/state_keeper/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/tests/tester.rs index b855ce54560..62bd4307b4e 100644 --- a/core/lib/zksync_core/src/state_keeper/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/tests/tester.rs @@ -286,7 +286,7 @@ pub(crate) fn pending_batch_data( gas_limit: BLOCK_GAS_LIMIT, execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId(270), + chain_id: L2ChainId::from(270), }, pending_miniblocks, } @@ -601,7 +601,7 @@ impl StateKeeperIO for TestIO { gas_limit: BLOCK_GAS_LIMIT, execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId(270), + chain_id: L2ChainId::from(270), }, L1BatchEnv { previous_batch_hash: Some(H256::zero()), diff --git a/core/multivm_deps/vm_1_3_2/src/test_utils.rs b/core/multivm_deps/vm_1_3_2/src/test_utils.rs index 2ebaebb4e37..5acefe94a4b 100644 --- a/core/multivm_deps/vm_1_3_2/src/test_utils.rs +++ b/core/multivm_deps/vm_1_3_2/src/test_utils.rs @@ -172,7 +172,7 @@ pub fn mock_loadnext_test_call( nonce, fee, Default::default(), - L2ChainId(270), + 
L2ChainId::from(270), ð_private_key, None, Default::default(), @@ -209,7 +209,7 @@ pub fn mock_loadnext_gas_burn_call( nonce, fee, Default::default(), - L2ChainId(270), + L2ChainId::from(270), ð_private_key, None, Default::default(), @@ -276,7 +276,7 @@ pub fn get_deploy_tx( nonce, fee, U256::zero(), - L2ChainId(270), + L2ChainId::from(270), &account_private_key, Some(factory_deps), Default::default(), @@ -303,7 +303,7 @@ pub fn get_error_tx( nonce, fee, U256::zero(), - L2ChainId(270), + L2ChainId::from(270), &account_private_key, Some(factory_deps), Default::default(), diff --git a/core/multivm_deps/vm_m5/src/test_utils.rs b/core/multivm_deps/vm_m5/src/test_utils.rs index 13cb91a5782..83ef7575805 100644 --- a/core/multivm_deps/vm_m5/src/test_utils.rs +++ b/core/multivm_deps/vm_m5/src/test_utils.rs @@ -171,7 +171,7 @@ pub fn mock_loadnext_test_call( nonce, fee, Default::default(), - L2ChainId(270), + L2ChainId::from(270), ð_private_key, None, Default::default(), @@ -208,7 +208,7 @@ pub fn mock_loadnext_gas_burn_call( nonce, fee, Default::default(), - L2ChainId(270), + L2ChainId::from(270), ð_private_key, None, Default::default(), @@ -275,7 +275,7 @@ pub fn get_deploy_tx( nonce, fee, U256::zero(), - L2ChainId(270), + L2ChainId::from(270), &account_private_key, Some(factory_deps), Default::default(), @@ -302,7 +302,7 @@ pub fn get_error_tx( nonce, fee, U256::zero(), - L2ChainId(270), + L2ChainId::from(270), &account_private_key, Some(factory_deps), Default::default(), diff --git a/core/multivm_deps/vm_m6/src/test_utils.rs b/core/multivm_deps/vm_m6/src/test_utils.rs index b196ed9e357..7d7b98685ef 100644 --- a/core/multivm_deps/vm_m6/src/test_utils.rs +++ b/core/multivm_deps/vm_m6/src/test_utils.rs @@ -171,7 +171,7 @@ pub fn mock_loadnext_test_call( nonce, fee, Default::default(), - L2ChainId(270), + L2ChainId::from(270), ð_private_key, None, Default::default(), @@ -208,7 +208,7 @@ pub fn mock_loadnext_gas_burn_call( nonce, fee, Default::default(), - L2ChainId(270), + L2ChainId::from(270), ð_private_key, None, Default::default(), @@ -275,7 +275,7 @@ pub fn get_deploy_tx( nonce, fee, U256::zero(), - L2ChainId(270), + L2ChainId::from(270), &account_private_key, Some(factory_deps), Default::default(), @@ -302,7 +302,7 @@ pub fn get_error_tx( nonce, fee, U256::zero(), - L2ChainId(270), + L2ChainId::from(270), &account_private_key, Some(factory_deps), Default::default(), diff --git a/core/tests/loadnext/src/account_pool.rs b/core/tests/loadnext/src/account_pool.rs index c19765d2bb3..556bee7f402 100644 --- a/core/tests/loadnext/src/account_pool.rs +++ b/core/tests/loadnext/src/account_pool.rs @@ -1,4 +1,4 @@ -use std::{collections::VecDeque, str::FromStr, sync::Arc, time::Duration}; +use std::{collections::VecDeque, convert::TryFrom, str::FromStr, sync::Arc, time::Duration}; use once_cell::sync::OnceCell; use rand::Rng; @@ -90,7 +90,7 @@ pub struct AccountPool { impl AccountPool { /// Generates all the required test accounts and prepares `Wallet` objects. pub async fn new(config: &LoadtestConfig) -> anyhow::Result { - let l2_chain_id = L2ChainId(config.l2_chain_id); + let l2_chain_id = L2ChainId::try_from(config.l2_chain_id).unwrap(); // Create a client for pinging the rpc. 
let client = HttpClientBuilder::default() .build(&config.l2_rpc_address) diff --git a/core/tests/loadnext/src/config.rs b/core/tests/loadnext/src/config.rs index 1c4b5ae7733..d62f4cdb63e 100644 --- a/core/tests/loadnext/src/config.rs +++ b/core/tests/loadnext/src/config.rs @@ -103,7 +103,7 @@ pub struct LoadtestConfig { /// Chain id of L2 node. #[serde(default = "default_l2_chain_id")] - pub l2_chain_id: u16, + pub l2_chain_id: u64, /// RPC address of L2 node. #[serde(default = "default_l2_rpc_address")] @@ -227,9 +227,9 @@ fn default_seed() -> Option { result } -fn default_l2_chain_id() -> u16 { +fn default_l2_chain_id() -> u64 { // 270 for rinkeby - let result = *L2ChainId::default(); + let result = L2ChainId::default().as_u64(); tracing::info!("Using default L2_CHAIN_ID: {result}"); result } diff --git a/core/tests/vm-benchmark/harness/src/lib.rs b/core/tests/vm-benchmark/harness/src/lib.rs index 55b4eb238d7..e439e1359fb 100644 --- a/core/tests/vm-benchmark/harness/src/lib.rs +++ b/core/tests/vm-benchmark/harness/src/lib.rs @@ -84,7 +84,7 @@ impl BenchmarkingVm { gas_limit: BLOCK_GAS_LIMIT, execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId(270), + chain_id: L2ChainId::from(270), }, Rc::new(RefCell::new(StorageView::new(&*STORAGE))), HistoryEnabled, @@ -120,7 +120,7 @@ pub fn get_deploy_tx(code: &[u8]) -> Transaction { gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), }, U256::zero(), - L2ChainId(270), + L2ChainId::from(270), &PRIVATE_KEY, Some(vec![code.to_vec()]), // maybe not needed? Default::default(), From 1e30d0ba8d243f41ad1e86e77d24848d64bd11e6 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 6 Oct 2023 17:05:49 +0300 Subject: [PATCH 6/8] feat(merkle tree): Provide Merkle proofs for tree entries and entry ranges (#119) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ - Enables the Merkle tree to provide proofs for entries. The procedure efficiently handles batched requests. - Allows to verify range proofs using a streaming approach. ## Why ❔ These are preparation steps for snapshot syncing. "Plain" Merkle tree proofs could be used in `eth_getProof` implementation. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--- core/lib/merkle_tree/src/domain.rs | 13 +- core/lib/merkle_tree/src/errors.rs | 29 ++ core/lib/merkle_tree/src/getters.rs | 130 +++++++- core/lib/merkle_tree/src/hasher/mod.rs | 272 ++++++++++++++++ .../src/{hasher.rs => hasher/nodes.rs} | 308 +----------------- core/lib/merkle_tree/src/hasher/proofs.rs | 219 +++++++++++++ core/lib/merkle_tree/src/lib.rs | 7 +- core/lib/merkle_tree/src/storage/mod.rs | 19 +- core/lib/merkle_tree/src/storage/patch.rs | 87 ++++- core/lib/merkle_tree/src/storage/proofs.rs | 59 +--- .../src/{types.rs => types/internal.rs} | 120 +------ core/lib/merkle_tree/src/types/mod.rs | 163 +++++++++ .../tests/integration/merkle_tree.rs | 181 +++++++++- 13 files changed, 1082 insertions(+), 525 deletions(-) create mode 100644 core/lib/merkle_tree/src/hasher/mod.rs rename core/lib/merkle_tree/src/{hasher.rs => hasher/nodes.rs} (56%) create mode 100644 core/lib/merkle_tree/src/hasher/proofs.rs rename core/lib/merkle_tree/src/{types.rs => types/internal.rs} (83%) create mode 100644 core/lib/merkle_tree/src/types/mod.rs diff --git a/core/lib/merkle_tree/src/domain.rs b/core/lib/merkle_tree/src/domain.rs index add622f9139..6b26bbd873f 100644 --- a/core/lib/merkle_tree/src/domain.rs +++ b/core/lib/merkle_tree/src/domain.rs @@ -4,7 +4,7 @@ use rayon::{ThreadPool, ThreadPoolBuilder}; use crate::{ storage::{MerkleTreeColumnFamily, PatchSet, Patched, RocksDBWrapper}, - types::{Key, LeafData, Root, TreeInstruction, TreeLogEntry, ValueHash, TREE_DEPTH}, + types::{Key, Root, TreeInstruction, TreeLogEntry, ValueHash, TREE_DEPTH}, BlockOutput, HashTree, MerkleTree, }; use zksync_crypto::hasher::blake2::Blake2Hasher; @@ -159,17 +159,6 @@ impl ZkSyncTree { }); } - /// Reads leaf nodes with the specified keys from the tree storage. The nodes - /// are returned in a `Vec` in the same order as requested. - pub fn read_leaves( - &self, - l1_batch_number: L1BatchNumber, - leaf_keys: &[Key], - ) -> Vec> { - let version = u64::from(l1_batch_number.0); - self.tree.read_leaves(version, leaf_keys) - } - /// Processes an iterator of storage logs comprising a single L1 batch. pub fn process_l1_batch(&mut self, storage_logs: &[StorageLog]) -> TreeMetadata { match self.mode { diff --git a/core/lib/merkle_tree/src/errors.rs b/core/lib/merkle_tree/src/errors.rs index b2aba5c1284..a30b0b98f5b 100644 --- a/core/lib/merkle_tree/src/errors.rs +++ b/core/lib/merkle_tree/src/errors.rs @@ -135,6 +135,35 @@ impl fmt::Display for DeserializeError { impl error::Error for DeserializeError {} +/// Error accessing a specific tree version. +#[derive(Debug)] +pub struct NoVersionError { + pub(crate) missing_version: u64, + pub(crate) version_count: u64, +} + +impl fmt::Display for NoVersionError { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + let &Self { + missing_version, + version_count, + } = self; + if missing_version >= version_count { + write!( + formatter, + "Version {missing_version} does not exist in Merkle tree; it has {version_count} versions" + ) + } else { + write!( + formatter, + "Version {missing_version} was pruned from Merkle tree" + ) + } + } +} + +impl error::Error for NoVersionError {} + #[cfg(test)] mod tests { use super::*; diff --git a/core/lib/merkle_tree/src/getters.rs b/core/lib/merkle_tree/src/getters.rs index 3c32b64b8d8..66b9c303c50 100644 --- a/core/lib/merkle_tree/src/getters.rs +++ b/core/lib/merkle_tree/src/getters.rs @@ -1,37 +1,135 @@ //! Getters for the Merkle tree. 
use crate::{ + hasher::HasherWithStats, storage::{LoadAncestorsResult, SortedKeys, WorkingPatchSet}, - types::{LeafData, Node}, - Database, Key, MerkleTree, + types::{Nibbles, Node, TreeEntry, TreeEntryWithProof}, + Database, Key, MerkleTree, NoVersionError, ValueHash, }; impl MerkleTree<'_, DB> where DB: Database, { - /// Reads leaf nodes with the specified keys from the tree storage. The nodes - /// are returned in a `Vec` in the same order as requested. - pub fn read_leaves(&self, version: u64, leaf_keys: &[Key]) -> Vec> { - let Some(root) = self.db.root(version) else { - return vec![None; leaf_keys.len()]; - }; + /// Reads entries with the specified keys from the tree. The entries are returned in the same order + /// as requested. + /// + /// # Errors + /// + /// Returns an error if the tree `version` is missing. + pub fn entries( + &self, + version: u64, + leaf_keys: &[Key], + ) -> Result, NoVersionError> { + self.load_and_transform_entries( + version, + leaf_keys, + |patch_set, leaf_key, longest_prefix| { + let node = patch_set.get(longest_prefix); + match node { + Some(Node::Leaf(leaf)) if &leaf.full_key == leaf_key => (*leaf).into(), + _ => TreeEntry::empty(), + } + }, + ) + } + + fn load_and_transform_entries( + &self, + version: u64, + leaf_keys: &[Key], + mut transform: impl FnMut(&mut WorkingPatchSet, &Key, &Nibbles) -> T, + ) -> Result, NoVersionError> { + let root = self.db.root(version).ok_or_else(|| { + let manifest = self.db.manifest().unwrap_or_default(); + NoVersionError { + missing_version: version, + version_count: manifest.version_count, + } + })?; let sorted_keys = SortedKeys::new(leaf_keys.iter().copied()); let mut patch_set = WorkingPatchSet::new(version, root); let LoadAncestorsResult { longest_prefixes, .. } = patch_set.load_ancestors(&sorted_keys, &self.db); - leaf_keys + Ok(leaf_keys .iter() .zip(&longest_prefixes) - .map(|(leaf_key, longest_prefix)| { - let node = patch_set.get(longest_prefix); - match node { - Some(Node::Leaf(leaf)) if &leaf.full_key == leaf_key => Some((*leaf).into()), - _ => None, + .map(|(leaf_key, longest_prefix)| transform(&mut patch_set, leaf_key, longest_prefix)) + .collect()) + } + + /// Reads entries together with Merkle proofs with the specified keys from the tree. The entries are returned + /// in the same order as requested. + /// + /// # Errors + /// + /// Returns an error if the tree `version` is missing. 
+ pub fn entries_with_proofs( + &self, + version: u64, + leaf_keys: &[Key], + ) -> Result, NoVersionError> { + let mut hasher = HasherWithStats::from(self.hasher); + self.load_and_transform_entries( + version, + leaf_keys, + |patch_set, &leaf_key, longest_prefix| { + let (leaf, merkle_path) = + patch_set.create_proof(&mut hasher, leaf_key, longest_prefix, 0); + let value_hash = leaf + .as_ref() + .map_or_else(ValueHash::zero, |leaf| leaf.value_hash); + TreeEntry { + value_hash, + leaf_index: leaf.map_or(0, |leaf| leaf.leaf_index), } - }) - .collect() + .with_merkle_path(merkle_path.into_inner()) + }, + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::PatchSet; + + #[test] + fn entries_in_empty_tree() { + let mut tree = MerkleTree::new(PatchSet::default()); + tree.extend(vec![]); + let missing_key = Key::from(123); + + let entries = tree.entries(0, &[missing_key]).unwrap(); + assert_eq!(entries.len(), 1); + assert!(entries[0].is_empty()); + + let entries = tree.entries_with_proofs(0, &[missing_key]).unwrap(); + assert_eq!(entries.len(), 1); + assert!(entries[0].base.is_empty()); + entries[0].verify(tree.hasher, missing_key, tree.hasher.empty_tree_hash()); + } + + #[test] + fn entries_in_single_node_tree() { + let mut tree = MerkleTree::new(PatchSet::default()); + let key = Key::from(987_654); + let output = tree.extend(vec![(key, ValueHash::repeat_byte(1))]); + let missing_key = Key::from(123); + + let entries = tree.entries(0, &[key, missing_key]).unwrap(); + assert_eq!(entries.len(), 2); + assert_eq!(entries[0].value_hash, ValueHash::repeat_byte(1)); + assert_eq!(entries[0].leaf_index, 1); + + let entries = tree.entries_with_proofs(0, &[key, missing_key]).unwrap(); + assert_eq!(entries.len(), 2); + assert!(!entries[0].base.is_empty()); + entries[0].verify(tree.hasher, key, output.root_hash); + assert!(entries[1].base.is_empty()); + entries[1].verify(tree.hasher, missing_key, output.root_hash); } } diff --git a/core/lib/merkle_tree/src/hasher/mod.rs b/core/lib/merkle_tree/src/hasher/mod.rs new file mode 100644 index 00000000000..cf64c5ec3ae --- /dev/null +++ b/core/lib/merkle_tree/src/hasher/mod.rs @@ -0,0 +1,272 @@ +//! Hashing operations on the Merkle tree. + +use once_cell::sync::Lazy; + +use std::{fmt, iter}; + +mod nodes; +mod proofs; + +pub(crate) use self::nodes::{InternalNodeCache, MerklePath}; +pub use self::proofs::TreeRangeDigest; +use crate::{ + metrics::HashingStats, + types::{Key, ValueHash, TREE_DEPTH}, +}; +use zksync_crypto::hasher::{blake2::Blake2Hasher, Hasher}; + +/// Tree hashing functionality. +pub trait HashTree: Send + Sync { + /// Returns the unique name of the hasher. This is used in Merkle tree tags to ensure + /// that the tree remains consistent. + fn name(&self) -> &'static str; + + /// Hashes a leaf node. + fn hash_leaf(&self, value_hash: &ValueHash, leaf_index: u64) -> ValueHash; + /// Compresses hashes in an intermediate node of a binary Merkle tree. + fn hash_branch(&self, lhs: &ValueHash, rhs: &ValueHash) -> ValueHash; + + /// Returns the hash of an empty subtree with the given depth. Implementations + /// are encouraged to cache the returned values. + fn empty_subtree_hash(&self, depth: usize) -> ValueHash; +} + +impl dyn HashTree + '_ { + pub(crate) fn empty_tree_hash(&self) -> ValueHash { + self.empty_subtree_hash(TREE_DEPTH) + } + + /// Extends the provided `path` to length `TREE_DEPTH`. 
+ fn extend_merkle_path<'a>( + &'a self, + path: &'a [ValueHash], + ) -> impl Iterator + 'a { + let empty_hash_count = TREE_DEPTH - path.len(); + let empty_hashes = (0..empty_hash_count).map(|depth| self.empty_subtree_hash(depth)); + empty_hashes.chain(path.iter().copied()) + } + + fn fold_merkle_path( + &self, + path: &[ValueHash], + key: Key, + value_hash: ValueHash, + leaf_index: u64, + ) -> ValueHash { + let mut hash = self.hash_leaf(&value_hash, leaf_index); + let full_path = self.extend_merkle_path(path); + for (depth, adjacent_hash) in full_path.enumerate() { + hash = if key.bit(depth) { + self.hash_branch(&adjacent_hash, &hash) + } else { + self.hash_branch(&hash, &adjacent_hash) + }; + } + hash + } + + pub(crate) fn with_stats<'a>(&'a self, stats: &'a HashingStats) -> HasherWithStats<'a> { + HasherWithStats { + shared_metrics: Some(stats), + ..HasherWithStats::from(self) + } + } +} + +impl fmt::Debug for dyn HashTree + '_ { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.debug_struct("HashTree").finish_non_exhaustive() + } +} + +/// No-op hasher that returns `H256::zero()` for all operations. +impl HashTree for () { + fn name(&self) -> &'static str { + "no_op256" + } + + fn hash_leaf(&self, _value_hash: &ValueHash, _leaf_index: u64) -> ValueHash { + ValueHash::zero() + } + + fn hash_branch(&self, _lhs: &ValueHash, _rhs: &ValueHash) -> ValueHash { + ValueHash::zero() + } + + fn empty_subtree_hash(&self, _depth: usize) -> ValueHash { + ValueHash::zero() + } +} + +impl HashTree for Blake2Hasher { + fn name(&self) -> &'static str { + "blake2s256" + } + + fn hash_leaf(&self, value_hash: &ValueHash, leaf_index: u64) -> ValueHash { + let mut bytes = [0_u8; 40]; + bytes[..8].copy_from_slice(&leaf_index.to_be_bytes()); + bytes[8..].copy_from_slice(value_hash.as_ref()); + self.hash_bytes(&bytes) + } + + /// Compresses the hashes of 2 children in a branch node. + fn hash_branch(&self, lhs: &ValueHash, rhs: &ValueHash) -> ValueHash { + self.compress(lhs, rhs) + } + + /// Returns the hash of an empty subtree with the given depth. + fn empty_subtree_hash(&self, depth: usize) -> ValueHash { + static EMPTY_TREE_HASHES: Lazy> = Lazy::new(compute_empty_tree_hashes); + EMPTY_TREE_HASHES[depth] + } +} + +fn compute_empty_tree_hashes() -> Vec { + let empty_leaf_hash = Blake2Hasher.hash_bytes(&[0_u8; 40]); + iter::successors(Some(empty_leaf_hash), |hash| { + Some(Blake2Hasher.hash_branch(hash, hash)) + }) + .take(TREE_DEPTH + 1) + .collect() +} + +/// Hasher that keeps track of hashing metrics. +/// +/// On drop, the metrics are merged into `shared_stats` (if present). Such roundabout handling +/// is motivated by efficiency; if atomics were to be used to track metrics (e.g., +/// via a wrapping `HashTree` implementation), this would tank performance because of contention. 
+#[derive(Debug)] +pub(crate) struct HasherWithStats<'a> { + inner: &'a dyn HashTree, + shared_metrics: Option<&'a HashingStats>, + local_hashed_bytes: u64, +} + +impl<'a> From<&'a dyn HashTree> for HasherWithStats<'a> { + fn from(inner: &'a dyn HashTree) -> Self { + Self { + inner, + shared_metrics: None, + local_hashed_bytes: 0, + } + } +} + +impl<'a> AsRef for HasherWithStats<'a> { + fn as_ref(&self) -> &(dyn HashTree + 'a) { + self.inner + } +} + +impl Drop for HasherWithStats<'_> { + fn drop(&mut self) { + if let Some(shared_stats) = self.shared_metrics { + shared_stats.add_hashed_bytes(self.local_hashed_bytes); + } + } +} + +impl HasherWithStats<'_> { + fn hash_leaf(&mut self, value_hash: &ValueHash, leaf_index: u64) -> ValueHash { + const HASHED_BYTES: u64 = 8 + ValueHash::len_bytes() as u64; + + self.local_hashed_bytes += HASHED_BYTES; + self.inner.hash_leaf(value_hash, leaf_index) + } + + fn hash_branch(&mut self, lhs: &ValueHash, rhs: &ValueHash) -> ValueHash { + const HASHED_BYTES: u64 = 2 * ValueHash::len_bytes() as u64; + + self.local_hashed_bytes += HASHED_BYTES; + self.inner.hash_branch(lhs, rhs) + } + + fn hash_optional_branch( + &mut self, + subtree_depth: usize, + lhs: Option, + rhs: Option, + ) -> Option { + match (lhs, rhs) { + (None, None) => None, + (Some(lhs), None) => { + let empty_hash = self.empty_subtree_hash(subtree_depth); + Some(self.hash_branch(&lhs, &empty_hash)) + } + (None, Some(rhs)) => { + let empty_hash = self.empty_subtree_hash(subtree_depth); + Some(self.hash_branch(&empty_hash, &rhs)) + } + (Some(lhs), Some(rhs)) => Some(self.hash_branch(&lhs, &rhs)), + } + } + + pub fn empty_subtree_hash(&self, depth: usize) -> ValueHash { + self.inner.empty_subtree_hash(depth) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::types::LeafNode; + use zksync_types::{AccountTreeId, Address, StorageKey, H256}; + + #[test] + fn empty_tree_hash_is_as_expected() { + const EXPECTED_HASH: ValueHash = H256([ + 152, 164, 142, 78, 209, 115, 97, 136, 56, 74, 232, 167, 157, 210, 28, 77, 102, 135, + 229, 253, 34, 202, 24, 20, 137, 6, 215, 135, 54, 192, 216, 106, + ]); + + let hasher: &dyn HashTree = &Blake2Hasher; + assert_eq!(hasher.empty_tree_hash(), EXPECTED_HASH); + } + + #[test] + fn leaf_is_hashed_as_expected() { + // Reference value taken from the previous implementation. 
+ const EXPECTED_HASH: ValueHash = H256([ + 127, 0, 166, 178, 238, 222, 150, 8, 87, 112, 60, 140, 185, 233, 111, 40, 185, 16, 230, + 105, 52, 18, 206, 164, 176, 6, 242, 66, 57, 182, 129, 224, + ]); + + let address: Address = "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2".parse().unwrap(); + let key = StorageKey::new(AccountTreeId::new(address), H256::zero()); + let key = key.hashed_key_u256(); + let leaf = LeafNode::new(key, H256([1; 32]), 1); + + let stats = HashingStats::default(); + let mut hasher = (&Blake2Hasher as &dyn HashTree).with_stats(&stats); + let leaf_hash = leaf.hash(&mut hasher, 0); + assert_eq!(leaf_hash, EXPECTED_HASH); + + drop(hasher); + assert!(stats.hashed_bytes.into_inner() > 100); + + let hasher: &dyn HashTree = &Blake2Hasher; + let folded_hash = hasher.fold_merkle_path(&[], key, H256([1; 32]), 1); + assert_eq!(folded_hash, EXPECTED_HASH); + } + + #[test] + fn folding_merkle_path() { + let address: Address = "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2".parse().unwrap(); + let key = StorageKey::new(AccountTreeId::new(address), H256::zero()); + let key = key.hashed_key_u256(); + let leaf = LeafNode::new(key, H256([1; 32]), 1); + + let mut hasher = (&Blake2Hasher as &dyn HashTree).into(); + let leaf_hash = leaf.hash(&mut hasher, 2); + assert!(key.bit(254) && !key.bit(255)); + let merkle_path = [H256([2; 32]), H256([3; 32])]; + let expected_hash = hasher.hash_branch(&merkle_path[0], &leaf_hash); + let expected_hash = hasher.hash_branch(&expected_hash, &merkle_path[1]); + + let folded_hash = hasher + .inner + .fold_merkle_path(&merkle_path, key, H256([1; 32]), 1); + assert_eq!(folded_hash, expected_hash); + } +} diff --git a/core/lib/merkle_tree/src/hasher.rs b/core/lib/merkle_tree/src/hasher/nodes.rs similarity index 56% rename from core/lib/merkle_tree/src/hasher.rs rename to core/lib/merkle_tree/src/hasher/nodes.rs index 5c06f2e3561..715e0c958fa 100644 --- a/core/lib/merkle_tree/src/hasher.rs +++ b/core/lib/merkle_tree/src/hasher/nodes.rs @@ -1,201 +1,11 @@ -//! Hashing operations on the Merkle tree. +//! Hash helpers for tree nodes. -use once_cell::sync::Lazy; - -use std::{fmt, iter, slice}; +use std::slice; use crate::{ - metrics::HashingStats, - types::{ - BlockOutputWithProofs, ChildRef, InternalNode, Key, LeafNode, Node, TreeInstruction, - TreeLogEntry, ValueHash, TREE_DEPTH, - }, + hasher::HasherWithStats, + types::{ChildRef, InternalNode, LeafNode, Node, ValueHash, TREE_DEPTH}, }; -use zksync_crypto::hasher::{blake2::Blake2Hasher, Hasher}; - -/// Tree hashing functionality. -pub trait HashTree: Send + Sync { - /// Returns the unique name of the hasher. This is used in Merkle tree tags to ensure - /// that the tree remains consistent. - fn name(&self) -> &'static str; - - /// Hashes a leaf node. - fn hash_leaf(&self, value_hash: &ValueHash, leaf_index: u64) -> ValueHash; - /// Compresses hashes in an intermediate node of a binary Merkle tree. - fn hash_branch(&self, lhs: &ValueHash, rhs: &ValueHash) -> ValueHash; - - /// Returns the hash of an empty subtree with the given depth. Implementations - /// are encouraged to cache the returned values. 
- fn empty_subtree_hash(&self, depth: usize) -> ValueHash; -} - -impl dyn HashTree + '_ { - pub(crate) fn empty_tree_hash(&self) -> ValueHash { - self.empty_subtree_hash(TREE_DEPTH) - } - - fn fold_merkle_path( - &self, - path: &[ValueHash], - key: Key, - value_hash: ValueHash, - leaf_index: u64, - ) -> ValueHash { - let mut hash = self.hash_leaf(&value_hash, leaf_index); - let empty_hash_count = TREE_DEPTH - path.len(); - let empty_hashes = (0..empty_hash_count).map(|depth| self.empty_subtree_hash(depth)); - let full_path = empty_hashes.chain(path.iter().copied()); - for (depth, adjacent_hash) in full_path.enumerate() { - hash = if key.bit(depth) { - self.hash_branch(&adjacent_hash, &hash) - } else { - self.hash_branch(&hash, &adjacent_hash) - }; - } - hash - } - - pub(crate) fn with_stats<'a>(&'a self, stats: &'a HashingStats) -> HasherWithStats<'a> { - HasherWithStats { - shared_metrics: Some(stats), - ..HasherWithStats::from(self) - } - } -} - -impl fmt::Debug for dyn HashTree + '_ { - fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.debug_struct("HashTree").finish_non_exhaustive() - } -} - -/// No-op hasher that returns `H256::zero()` for all operations. -impl HashTree for () { - fn name(&self) -> &'static str { - "no_op256" - } - - fn hash_leaf(&self, _value_hash: &ValueHash, _leaf_index: u64) -> ValueHash { - ValueHash::zero() - } - - fn hash_branch(&self, _lhs: &ValueHash, _rhs: &ValueHash) -> ValueHash { - ValueHash::zero() - } - - fn empty_subtree_hash(&self, _depth: usize) -> ValueHash { - ValueHash::zero() - } -} - -impl HashTree for Blake2Hasher { - fn name(&self) -> &'static str { - "blake2s256" - } - - fn hash_leaf(&self, value_hash: &ValueHash, leaf_index: u64) -> ValueHash { - let mut bytes = [0_u8; 40]; - bytes[..8].copy_from_slice(&leaf_index.to_be_bytes()); - bytes[8..].copy_from_slice(value_hash.as_ref()); - self.hash_bytes(&bytes) - } - - /// Compresses the hashes of 2 children in a branch node. - fn hash_branch(&self, lhs: &ValueHash, rhs: &ValueHash) -> ValueHash { - self.compress(lhs, rhs) - } - - /// Returns the hash of an empty subtree with the given depth. - fn empty_subtree_hash(&self, depth: usize) -> ValueHash { - static EMPTY_TREE_HASHES: Lazy> = Lazy::new(compute_empty_tree_hashes); - EMPTY_TREE_HASHES[depth] - } -} - -fn compute_empty_tree_hashes() -> Vec { - let empty_leaf_hash = Blake2Hasher.hash_bytes(&[0_u8; 40]); - iter::successors(Some(empty_leaf_hash), |hash| { - Some(Blake2Hasher.hash_branch(hash, hash)) - }) - .take(TREE_DEPTH + 1) - .collect() -} - -/// Hasher that keeps track of hashing metrics. -/// -/// On drop, the metrics are merged into `shared_stats` (if present). Such roundabout handling -/// is motivated by efficiency; if atomics were to be used to track metrics (e.g., -/// via a wrapping `HashTree` implementation), this would tank performance because of contention. 
-#[derive(Debug)] -pub(crate) struct HasherWithStats<'a> { - inner: &'a dyn HashTree, - shared_metrics: Option<&'a HashingStats>, - local_hashed_bytes: u64, -} - -impl<'a> From<&'a dyn HashTree> for HasherWithStats<'a> { - fn from(inner: &'a dyn HashTree) -> Self { - Self { - inner, - shared_metrics: None, - local_hashed_bytes: 0, - } - } -} - -impl<'a> AsRef for HasherWithStats<'a> { - fn as_ref(&self) -> &(dyn HashTree + 'a) { - self.inner - } -} - -impl Drop for HasherWithStats<'_> { - fn drop(&mut self) { - if let Some(shared_stats) = self.shared_metrics { - shared_stats.add_hashed_bytes(self.local_hashed_bytes); - } - } -} - -impl HasherWithStats<'_> { - fn hash_leaf(&mut self, value_hash: &ValueHash, leaf_index: u64) -> ValueHash { - const HASHED_BYTES: u64 = 8 + ValueHash::len_bytes() as u64; - - self.local_hashed_bytes += HASHED_BYTES; - self.inner.hash_leaf(value_hash, leaf_index) - } - - fn hash_branch(&mut self, lhs: &ValueHash, rhs: &ValueHash) -> ValueHash { - const HASHED_BYTES: u64 = 2 * ValueHash::len_bytes() as u64; - - self.local_hashed_bytes += HASHED_BYTES; - self.inner.hash_branch(lhs, rhs) - } - - fn hash_optional_branch( - &mut self, - subtree_depth: usize, - lhs: Option, - rhs: Option, - ) -> Option { - match (lhs, rhs) { - (None, None) => None, - (Some(lhs), None) => { - let empty_hash = self.empty_subtree_hash(subtree_depth); - Some(self.hash_branch(&lhs, &empty_hash)) - } - (None, Some(rhs)) => { - let empty_hash = self.empty_subtree_hash(subtree_depth); - Some(self.hash_branch(&empty_hash, &rhs)) - } - (Some(lhs), Some(rhs)) => Some(self.hash_branch(&lhs, &rhs)), - } - } - - pub fn empty_subtree_hash(&self, depth: usize) -> ValueHash { - self.inner.empty_subtree_hash(depth) - } -} impl LeafNode { pub(crate) fn hash(&self, hasher: &mut HasherWithStats<'_>, level: usize) -> ValueHash { @@ -446,116 +256,12 @@ impl Node { } } -impl BlockOutputWithProofs { - /// Verifies this output against the trusted old root hash of the tree and - /// the applied instructions. - /// - /// # Panics - /// - /// Panics if the proof doesn't verify. 
- pub fn verify_proofs( - &self, - hasher: &dyn HashTree, - old_root_hash: ValueHash, - instructions: &[(Key, TreeInstruction)], - ) { - assert_eq!(instructions.len(), self.logs.len()); - - let mut root_hash = old_root_hash; - for (op, &(key, instruction)) in self.logs.iter().zip(instructions) { - assert!(op.merkle_path.len() <= TREE_DEPTH); - if matches!(instruction, TreeInstruction::Read) { - assert_eq!(op.root_hash, root_hash); - assert!(op.base.is_read()); - } else { - assert!(!op.base.is_read()); - } - - let (prev_leaf_index, leaf_index, prev_value) = match op.base { - TreeLogEntry::Inserted { leaf_index } => (0, leaf_index, ValueHash::zero()), - TreeLogEntry::Updated { - leaf_index, - previous_value, - } => (leaf_index, leaf_index, previous_value), - - TreeLogEntry::Read { leaf_index, value } => (leaf_index, leaf_index, value), - TreeLogEntry::ReadMissingKey => (0, 0, ValueHash::zero()), - }; - - let prev_hash = - hasher.fold_merkle_path(&op.merkle_path, key, prev_value, prev_leaf_index); - assert_eq!(prev_hash, root_hash); - if let TreeInstruction::Write(value) = instruction { - let next_hash = hasher.fold_merkle_path(&op.merkle_path, key, value, leaf_index); - assert_eq!(next_hash, op.root_hash); - } - root_hash = op.root_hash; - } - } -} - #[cfg(test)] mod tests { use super::*; - use crate::types::ChildRef; - use zksync_types::{AccountTreeId, Address, StorageKey, H256}; - - #[test] - fn empty_tree_hash_is_as_expected() { - const EXPECTED_HASH: ValueHash = H256([ - 152, 164, 142, 78, 209, 115, 97, 136, 56, 74, 232, 167, 157, 210, 28, 77, 102, 135, - 229, 253, 34, 202, 24, 20, 137, 6, 215, 135, 54, 192, 216, 106, - ]); - - let hasher: &dyn HashTree = &Blake2Hasher; - assert_eq!(hasher.empty_tree_hash(), EXPECTED_HASH); - } - - #[test] - fn leaf_is_hashed_as_expected() { - // Reference value taken from the previous implementation. 
- const EXPECTED_HASH: ValueHash = H256([ - 127, 0, 166, 178, 238, 222, 150, 8, 87, 112, 60, 140, 185, 233, 111, 40, 185, 16, 230, - 105, 52, 18, 206, 164, 176, 6, 242, 66, 57, 182, 129, 224, - ]); - - let address: Address = "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2".parse().unwrap(); - let key = StorageKey::new(AccountTreeId::new(address), H256::zero()); - let key = key.hashed_key_u256(); - let leaf = LeafNode::new(key, H256([1; 32]), 1); - - let stats = HashingStats::default(); - let mut hasher = (&Blake2Hasher as &dyn HashTree).with_stats(&stats); - let leaf_hash = leaf.hash(&mut hasher, 0); - assert_eq!(leaf_hash, EXPECTED_HASH); - - drop(hasher); - assert!(stats.hashed_bytes.into_inner() > 100); - - let hasher: &dyn HashTree = &Blake2Hasher; - let folded_hash = hasher.fold_merkle_path(&[], key, H256([1; 32]), 1); - assert_eq!(folded_hash, EXPECTED_HASH); - } - - #[test] - fn folding_merkle_path() { - let address: Address = "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2".parse().unwrap(); - let key = StorageKey::new(AccountTreeId::new(address), H256::zero()); - let key = key.hashed_key_u256(); - let leaf = LeafNode::new(key, H256([1; 32]), 1); - - let mut hasher = (&Blake2Hasher as &dyn HashTree).into(); - let leaf_hash = leaf.hash(&mut hasher, 2); - assert!(key.bit(254) && !key.bit(255)); - let merkle_path = [H256([2; 32]), H256([3; 32])]; - let expected_hash = hasher.hash_branch(&merkle_path[0], &leaf_hash); - let expected_hash = hasher.hash_branch(&expected_hash, &merkle_path[1]); - - let folded_hash = hasher - .inner - .fold_merkle_path(&merkle_path, key, H256([1; 32]), 1); - assert_eq!(folded_hash, expected_hash); - } + use crate::hasher::HashTree; + use zksync_crypto::hasher::{blake2::Blake2Hasher, Hasher}; + use zksync_types::H256; fn test_internal_node_hashing(child_indexes: &[u8]) { println!("Testing indices: {child_indexes:?}"); diff --git a/core/lib/merkle_tree/src/hasher/proofs.rs b/core/lib/merkle_tree/src/hasher/proofs.rs new file mode 100644 index 00000000000..e496acb3f88 --- /dev/null +++ b/core/lib/merkle_tree/src/hasher/proofs.rs @@ -0,0 +1,219 @@ +//! Merkle proof-related hashing logic. + +use std::mem; + +use crate::{ + hasher::{HashTree, HasherWithStats}, + types::{ + BlockOutputWithProofs, Key, LeafNode, TreeEntry, TreeEntryWithProof, TreeInstruction, + TreeLogEntry, ValueHash, TREE_DEPTH, + }, + utils, +}; + +impl BlockOutputWithProofs { + /// Verifies this output against the trusted old root hash of the tree and + /// the applied instructions. + /// + /// # Panics + /// + /// Panics if the proof doesn't verify. 
+ pub fn verify_proofs( + &self, + hasher: &dyn HashTree, + old_root_hash: ValueHash, + instructions: &[(Key, TreeInstruction)], + ) { + assert_eq!(instructions.len(), self.logs.len()); + + let mut root_hash = old_root_hash; + for (op, &(key, instruction)) in self.logs.iter().zip(instructions) { + assert!(op.merkle_path.len() <= TREE_DEPTH); + if matches!(instruction, TreeInstruction::Read) { + assert_eq!(op.root_hash, root_hash); + assert!(op.base.is_read()); + } else { + assert!(!op.base.is_read()); + } + + let (prev_leaf_index, leaf_index, prev_value) = match op.base { + TreeLogEntry::Inserted { leaf_index } => (0, leaf_index, ValueHash::zero()), + TreeLogEntry::Updated { + leaf_index, + previous_value, + } => (leaf_index, leaf_index, previous_value), + + TreeLogEntry::Read { leaf_index, value } => (leaf_index, leaf_index, value), + TreeLogEntry::ReadMissingKey => (0, 0, ValueHash::zero()), + }; + + let prev_hash = + hasher.fold_merkle_path(&op.merkle_path, key, prev_value, prev_leaf_index); + assert_eq!(prev_hash, root_hash); + if let TreeInstruction::Write(value) = instruction { + let next_hash = hasher.fold_merkle_path(&op.merkle_path, key, value, leaf_index); + assert_eq!(next_hash, op.root_hash); + } + root_hash = op.root_hash; + } + } +} + +impl TreeEntryWithProof { + /// Verifies this proof. + /// + /// # Panics + /// + /// Panics if the proof doesn't verify. + pub fn verify(&self, hasher: &dyn HashTree, key: Key, trusted_root_hash: ValueHash) { + if self.base.leaf_index == 0 { + assert!( + self.base.value_hash.is_zero(), + "Invalid missing value specification: leaf index is zero, but value is non-default" + ); + } + let root_hash = hasher.fold_merkle_path( + &self.merkle_path, + key, + self.base.value_hash, + self.base.leaf_index, + ); + assert_eq!(root_hash, trusted_root_hash, "Root hash mismatch"); + } +} + +/// Range digest in a Merkle tree allowing to compute its root hash based on the provided entries. +/// +/// - The entries must be ordered by key. I.e., the first entry must have the numerically smallest key, +/// and the last entry must have the numerically greatest key among all provided entries. +/// - The first and the last entries must be provided together with a Merkle proof; other entries +/// do not need proofs. +/// - Any entry can be [empty](TreeEntry::is_empty()). I.e., there's no requirement to only +/// provide existing entries. +/// +/// This construction is useful for verifying *Merkle range proofs*. Such a proof proves that +/// a certain key range in the Merkle tree contains the specified entries and no other entries. +/// +/// # Implementation details +/// +/// A streaming approach is used. `TreeRange` occupies `O(1)` RAM w.r.t. the number of entries. +/// `TreeRange` consists of `TREE_DEPTH = 256` hashes and a constant amount of other data. +// +// We keep a *left contour* of hashes, i.e., known hashes to the left of the last processed key. +// Initially, the left contour is a filtered Merkle path for the start entry; we only take into +// account left hashes in the path (ones for which the corresponding start key bit is 1), and +// discard right hashes. +// +// When a `TreeRange` is updated, we find the first diverging bit between the last processed key +// and the new key. (This bit is always 0 in the last processed key and 1 in the new key.) +// +// ```text +// ... +// diverging_level: / \ +// ... 
| (only empty subtrees) | +// TREE_DEPTH: current_leaf next_leaf +// ``` +// +// We update the left contour by collapsing the last processed entry up to the diverging bit. +// When collapsing, we take advantage of the fact that all right hashes in the collapsed part +// of the Merkle path correspond to empty subtrees. We also clean all hashes in the left contour +// further down the tree; it's guaranteed that the next key will only have empty subtrees +// to the left of it until the diverging level. +// +// When we want to finalize a range, we update the left contour one final time, and then collapse +// the Merkle path for the final key all the way to the root hash. When doing this, we take +// right hashes from the provided path, and left hashes from the left contour (left hashes from +// the final entry Merkle path are discarded). +#[derive(Debug)] +pub struct TreeRangeDigest<'a> { + hasher: HasherWithStats<'a>, + current_leaf: LeafNode, + left_contour: Box<[ValueHash; TREE_DEPTH]>, +} + +impl<'a> TreeRangeDigest<'a> { + /// Starts a new Merkle tree range. + #[allow(clippy::missing_panics_doc)] // false positive + pub fn new(hasher: &'a dyn HashTree, start_key: Key, start_entry: &TreeEntryWithProof) -> Self { + let full_path = hasher.extend_merkle_path(&start_entry.merkle_path); + let left_contour = full_path.enumerate().map(|(depth, adjacent_hash)| { + if start_key.bit(depth) { + adjacent_hash // `adjacent_hash` is to the left of the `start_key`; take it + } else { + hasher.empty_subtree_hash(depth) + } + }); + let left_contour: Vec<_> = left_contour.collect(); + Self { + hasher: hasher.into(), + current_leaf: LeafNode::new( + start_key, + start_entry.base.value_hash, + start_entry.base.leaf_index, + ), + left_contour: left_contour.try_into().unwrap(), + // ^ `unwrap()` is safe by construction; `left_contour` will always have necessary length + } + } + + /// Updates this digest with a new entry. + /// + /// # Panics + /// + /// Panics if the provided `key` is not greater than the previous key provided to this digest. + pub fn update(&mut self, key: Key, entry: TreeEntry) { + assert!( + key > self.current_leaf.full_key, + "Keys provided to a digest must be monotonically increasing" + ); + + let diverging_level = utils::find_diverging_bit(self.current_leaf.full_key, key) + 1; + + // Hash the current leaf up to the `diverging_level`, taking current `left_contour` into account. + let mut hash = self + .hasher + .hash_leaf(&self.current_leaf.value_hash, self.current_leaf.leaf_index); + for depth in 0..(TREE_DEPTH - diverging_level) { + let empty_subtree_hash = self.hasher.empty_subtree_hash(depth); + // Replace the left contour value with the default one. + let left_hash = mem::replace(&mut self.left_contour[depth], empty_subtree_hash); + + hash = if self.current_leaf.full_key.bit(depth) { + self.hasher.hash_branch(&left_hash, &hash) + } else { + // We don't take right contour into account, since by construction (because we iterate + // over keys in ascending order) it's always empty. + self.hasher.hash_branch(&hash, &empty_subtree_hash) + }; + } + // Record the computed hash. + self.left_contour[TREE_DEPTH - diverging_level] = hash; + self.current_leaf = LeafNode::new(key, entry.value_hash, entry.leaf_index); + } + + /// Finalizes this digest and returns the root hash of the tree. + /// + /// # Panics + /// + /// Panics if the provided `final_key` is not greater than the previous key provided to this digest. 
+ pub fn finalize(mut self, final_key: Key, final_entry: &TreeEntryWithProof) -> ValueHash { + self.update(final_key, final_entry.base); + + let full_path = self + .hasher + .inner + .extend_merkle_path(&final_entry.merkle_path); + let zipped_paths = self.left_contour.into_iter().zip(full_path); + let mut hash = self + .hasher + .hash_leaf(&final_entry.base.value_hash, final_entry.base.leaf_index); + for (depth, (left, right)) in zipped_paths.enumerate() { + hash = if final_key.bit(depth) { + self.hasher.hash_branch(&left, &hash) + } else { + self.hasher.hash_branch(&hash, &right) + }; + } + hash + } +} diff --git a/core/lib/merkle_tree/src/lib.rs b/core/lib/merkle_tree/src/lib.rs index 8296e1598ff..a3344d1d670 100644 --- a/core/lib/merkle_tree/src/lib.rs +++ b/core/lib/merkle_tree/src/lib.rs @@ -64,15 +64,16 @@ pub mod unstable { } pub use crate::{ - hasher::HashTree, + errors::NoVersionError, + hasher::{HashTree, TreeRangeDigest}, pruning::{MerkleTreePruner, MerkleTreePrunerHandle}, storage::{ Database, MerkleTreeColumnFamily, PatchSet, Patched, PruneDatabase, PrunePatchSet, RocksDBWrapper, }, types::{ - BlockOutput, BlockOutputWithProofs, Key, TreeInstruction, TreeLogEntry, - TreeLogEntryWithProof, ValueHash, + BlockOutput, BlockOutputWithProofs, Key, TreeEntry, TreeEntryWithProof, TreeInstruction, + TreeLogEntry, TreeLogEntryWithProof, ValueHash, }, }; diff --git a/core/lib/merkle_tree/src/storage/mod.rs b/core/lib/merkle_tree/src/storage/mod.rs index 3c653d16176..a7553727467 100644 --- a/core/lib/merkle_tree/src/storage/mod.rs +++ b/core/lib/merkle_tree/src/storage/mod.rs @@ -92,23 +92,6 @@ impl TreeUpdater { longest_prefixes } - fn traverse(&self, key: Key, parent_nibbles: &Nibbles) -> TraverseOutcome { - for nibble_idx in parent_nibbles.nibble_count().. { - let nibbles = Nibbles::new(&key, nibble_idx); - match self.patch_set.get(&nibbles) { - Some(Node::Internal(_)) => { /* continue descent */ } - Some(Node::Leaf(leaf)) if leaf.full_key == key => { - return TraverseOutcome::LeafMatch(nibbles, *leaf); - } - Some(Node::Leaf(leaf)) => { - return TraverseOutcome::LeafMismatch(nibbles, *leaf); - } - None => return TraverseOutcome::MissingChild(nibbles), - } - } - unreachable!("We must have encountered a leaf or missing node when traversing"); - } - /// Inserts or updates a value hash for the specified `key`. This implementation /// is almost verbatim the algorithm described in the Jellyfish Merkle tree white paper. 
/// The algorithm from the paper is as follows: @@ -138,7 +121,7 @@ impl TreeUpdater { leaf_index_fn: impl FnOnce() -> u64, ) -> (TreeLogEntry, NewLeafData) { let version = self.patch_set.version(); - let traverse_outcome = self.traverse(key, parent_nibbles); + let traverse_outcome = self.patch_set.traverse(key, parent_nibbles); let (log, leaf_data) = match traverse_outcome { TraverseOutcome::LeafMatch(nibbles, mut leaf) => { let log = TreeLogEntry::update(leaf.value_hash, leaf.leaf_index); diff --git a/core/lib/merkle_tree/src/storage/patch.rs b/core/lib/merkle_tree/src/storage/patch.rs index 2cfe2c9375b..5f98f3a2a66 100644 --- a/core/lib/merkle_tree/src/storage/patch.rs +++ b/core/lib/merkle_tree/src/storage/patch.rs @@ -5,14 +5,14 @@ use rayon::prelude::*; use std::collections::{hash_map::Entry, HashMap}; use crate::{ - hasher::HashTree, + hasher::{HashTree, HasherWithStats, MerklePath}, metrics::HashingStats, - storage::{proofs::SUBTREE_COUNT, SortedKeys}, + storage::{proofs::SUBTREE_COUNT, SortedKeys, TraverseOutcome}, types::{ - ChildRef, InternalNode, Manifest, Nibbles, NibblesBytes, Node, NodeKey, Root, ValueHash, - KEY_SIZE, + ChildRef, InternalNode, Key, LeafNode, Manifest, Nibbles, NibblesBytes, Node, NodeKey, + Root, ValueHash, KEY_SIZE, }, - Database, + utils, Database, }; /// Raw set of database changes. @@ -512,6 +512,83 @@ impl WorkingPatchSet { db_reads, } } + + pub(super) fn traverse(&self, key: Key, parent_nibbles: &Nibbles) -> TraverseOutcome { + for nibble_idx in parent_nibbles.nibble_count().. { + let nibbles = Nibbles::new(&key, nibble_idx); + match self.get(&nibbles) { + Some(Node::Internal(_)) => { /* continue descent */ } + Some(Node::Leaf(leaf)) if leaf.full_key == key => { + return TraverseOutcome::LeafMatch(nibbles, *leaf); + } + Some(Node::Leaf(leaf)) => { + return TraverseOutcome::LeafMismatch(nibbles, *leaf); + } + None => return TraverseOutcome::MissingChild(nibbles), + } + } + unreachable!("We must have encountered a leaf or missing node when traversing"); + } + + /// Creates a Merkle proof for the specified `key`, which has given `parent_nibbles` + /// in this patch set. `root_nibble_count` specifies to which level the proof needs to be constructed. + pub(crate) fn create_proof( + &mut self, + hasher: &mut HasherWithStats<'_>, + key: Key, + parent_nibbles: &Nibbles, + root_nibble_count: usize, + ) -> (Option, MerklePath) { + let traverse_outcome = self.traverse(key, parent_nibbles); + let merkle_path = match traverse_outcome { + TraverseOutcome::MissingChild(_) | TraverseOutcome::LeafMatch(..) => None, + TraverseOutcome::LeafMismatch(nibbles, leaf) => { + // Find the level at which `leaf.full_key` and `key` diverge. + // Note the addition of 1; e.g., if the keys differ at 0th bit, they + // differ at level 1 of the tree. + let diverging_level = utils::find_diverging_bit(key, leaf.full_key) + 1; + let nibble_count = nibbles.nibble_count(); + debug_assert!(diverging_level > 4 * nibble_count); + let mut path = MerklePath::new(diverging_level); + // Find the hash of the existing `leaf` at the level, and include it + // as the first hash on the Merkle path. + let adjacent_hash = leaf.hash(hasher, diverging_level); + path.push(hasher, Some(adjacent_hash)); + // Fill the path with empty hashes until we've reached the leaf level. 
+ for _ in (4 * nibble_count + 1)..diverging_level { + path.push(hasher, None); + } + Some(path) + } + }; + + let mut nibbles = traverse_outcome.position(); + let leaf_level = nibbles.nibble_count() * 4; + debug_assert!(leaf_level >= root_nibble_count); + + let mut merkle_path = merkle_path.unwrap_or_else(|| MerklePath::new(leaf_level)); + while let Some((parent_nibbles, last_nibble)) = nibbles.split_last() { + if parent_nibbles.nibble_count() < root_nibble_count { + break; + } + + let parent = self.get_mut_without_updating(&parent_nibbles); + let Some(Node::Internal(parent)) = parent else { + unreachable!() + }; + let parent_level = parent_nibbles.nibble_count() * 4; + parent + .updater(hasher, parent_level, last_nibble) + .extend_merkle_path(&mut merkle_path); + nibbles = parent_nibbles; + } + + let leaf = match traverse_outcome { + TraverseOutcome::MissingChild(_) | TraverseOutcome::LeafMismatch(..) => None, + TraverseOutcome::LeafMatch(_, leaf) => Some(leaf), + }; + (leaf, merkle_path) + } } #[cfg(test)] diff --git a/core/lib/merkle_tree/src/storage/proofs.rs b/core/lib/merkle_tree/src/storage/proofs.rs index a854ee5e627..a9ad624225d 100644 --- a/core/lib/merkle_tree/src/storage/proofs.rs +++ b/core/lib/merkle_tree/src/storage/proofs.rs @@ -63,12 +63,12 @@ use rayon::prelude::*; use crate::{ hasher::{HasherWithStats, MerklePath}, metrics::{HashingStats, TreeUpdaterStats, BLOCK_TIMINGS, GENERAL_METRICS}, - storage::{Database, NewLeafData, PatchSet, SortedKeys, Storage, TraverseOutcome, TreeUpdater}, + storage::{Database, NewLeafData, PatchSet, SortedKeys, Storage, TreeUpdater}, types::{ BlockOutputWithProofs, InternalNode, Key, Nibbles, Node, TreeInstruction, TreeLogEntry, TreeLogEntryWithProof, ValueHash, }, - utils::{find_diverging_bit, increment_counter, merge_by_index}, + utils::{increment_counter, merge_by_index}, }; /// Number of subtrees used for parallel computations. @@ -179,61 +179,18 @@ impl TreeUpdater { key: Key, parent_nibbles: &Nibbles, ) -> (TreeLogEntry, MerklePath) { - let traverse_outcome = self.traverse(key, parent_nibbles); - let (operation, merkle_path) = match traverse_outcome { - TraverseOutcome::MissingChild(_) => (TreeLogEntry::ReadMissingKey, None), - TraverseOutcome::LeafMatch(_, leaf) => { - let log = TreeLogEntry::read(leaf.value_hash, leaf.leaf_index); - (log, None) - } - TraverseOutcome::LeafMismatch(nibbles, leaf) => { - // Find the level at which `leaf.full_key` and `key` diverge. - // Note the addition of 1; e.g., if the keys differ at 0th bit, they - // differ at level 1 of the tree. - let diverging_level = find_diverging_bit(key, leaf.full_key) + 1; - let nibble_count = nibbles.nibble_count(); - debug_assert!(diverging_level > 4 * nibble_count); - let mut path = MerklePath::new(diverging_level); - // Find the hash of the existing `leaf` at the level, and include it - // as the first hash on the Merkle path. - let adjacent_hash = leaf.hash(hasher, diverging_level); - path.push(hasher, Some(adjacent_hash)); - // Fill the path with empty hashes until we've reached the leaf level. 
- for _ in (4 * nibble_count + 1)..diverging_level { - path.push(hasher, None); - } - (TreeLogEntry::ReadMissingKey, Some(path)) - } - }; + let (leaf, merkle_path) = + self.patch_set + .create_proof(hasher, key, parent_nibbles, SUBTREE_ROOT_LEVEL / 4); + let operation = leaf.map_or(TreeLogEntry::ReadMissingKey, |leaf| { + TreeLogEntry::read(leaf.value_hash, leaf.leaf_index) + }); if matches!(operation, TreeLogEntry::ReadMissingKey) { self.metrics.missing_key_reads += 1; } else { self.metrics.key_reads += 1; } - - let mut nibbles = traverse_outcome.position(); - let leaf_level = nibbles.nibble_count() * 4; - debug_assert!(leaf_level >= SUBTREE_ROOT_LEVEL); - // ^ Because we've ensured an internal root node, all found positions have at least - // 1 nibble. - - let mut merkle_path = merkle_path.unwrap_or_else(|| MerklePath::new(leaf_level)); - while let Some((parent_nibbles, last_nibble)) = nibbles.split_last() { - if parent_nibbles.nibble_count() == 0 { - break; - } - - let parent = self.patch_set.get_mut_without_updating(&parent_nibbles); - let Some(Node::Internal(parent)) = parent else { - unreachable!() - }; - let parent_level = parent_nibbles.nibble_count() * 4; - parent - .updater(hasher, parent_level, last_nibble) - .extend_merkle_path(&mut merkle_path); - nibbles = parent_nibbles; - } (operation, merkle_path) } diff --git a/core/lib/merkle_tree/src/types.rs b/core/lib/merkle_tree/src/types/internal.rs similarity index 83% rename from core/lib/merkle_tree/src/types.rs rename to core/lib/merkle_tree/src/types/internal.rs index 7aa0c1d023e..86568da7f5d 100644 --- a/core/lib/merkle_tree/src/types.rs +++ b/core/lib/merkle_tree/src/types/internal.rs @@ -1,4 +1,6 @@ -//! Basic storage types. +//! Internal types, mostly related to Merkle tree nodes. Note that because of the public `Database` trait, +//! some of these types are declared as public and can be even exported using the `unstable` module. +//! Still, logically these types are private, so adding them to new public APIs etc. is a logical error. use std::{fmt, num::NonZeroU64}; @@ -16,15 +18,6 @@ pub(crate) const TREE_DEPTH: usize = KEY_SIZE * 8; /// Size of a hashed value in bytes. pub(crate) const HASH_SIZE: usize = 32; -/// Instruction to read or write a tree value at a certain key. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum TreeInstruction { - /// Read the current tree value. - Read, - /// Write the specified value. - Write(ValueHash), -} - /// Tags associated with a tree. #[derive(Debug, Clone)] #[cfg_attr(test, derive(PartialEq))] @@ -320,22 +313,6 @@ impl LeafNode { } } -/// Data of a leaf node of the tree. -#[derive(Debug, Clone, Copy)] -pub struct LeafData { - pub value_hash: ValueHash, - pub leaf_index: u64, -} - -impl From for LeafData { - fn from(leaf: LeafNode) -> Self { - Self { - value_hash: leaf.value_hash, - leaf_index: leaf.leaf_index, - } - } -} - /// Reference to a child in an [`InternalNode`]. #[derive(Debug, Clone, Copy)] #[cfg_attr(test, derive(PartialEq, Eq))] @@ -532,97 +509,6 @@ impl StaleNodeKey { } } -/// Output of inserting a block of entries into a Merkle tree. -#[derive(Debug, PartialEq, Eq)] -pub struct BlockOutput { - /// The new hash of the tree. - pub root_hash: ValueHash, - /// The number of leaves in the tree after the update. - pub leaf_count: u64, - /// Information about each insertion / update operation in the order of application. - pub logs: Vec, -} - -/// Information about an the effect of a [`TreeInstruction`]. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum TreeLogEntry { - /// A node was inserted into the tree. - Inserted { - /// Index of the inserted node. - leaf_index: u64, - }, - /// A node with the specified index was updated. - Updated { - /// Index of the updated node. - leaf_index: u64, - /// Hash of the previous value. - previous_value: ValueHash, - }, - /// A node was read from the tree. - Read { - /// Index of the read node. - leaf_index: u64, - /// Hash of the read value. - value: ValueHash, - }, - /// A missing key was read. - ReadMissingKey, -} - -impl TreeLogEntry { - pub(crate) fn insert(leaf_index: u64) -> Self { - Self::Inserted { leaf_index } - } - - pub(crate) fn update(previous_value: ValueHash, leaf_index: u64) -> Self { - Self::Updated { - leaf_index, - previous_value, - } - } - - pub(crate) fn read(value: ValueHash, leaf_index: u64) -> Self { - Self::Read { leaf_index, value } - } - - pub(crate) fn is_read(&self) -> bool { - matches!(self, Self::Read { .. } | Self::ReadMissingKey) - } -} - -/// Extended output of inserting a block of entries into a Merkle tree that contains -/// Merkle proofs for each operation. -#[derive(Debug)] -pub struct BlockOutputWithProofs { - /// Extended information about each insertion / update operation in the order of application. - pub logs: Vec, - /// The number of leaves in the tree after the update. - pub leaf_count: u64, -} - -impl BlockOutputWithProofs { - /// Returns the final root hash of the Merkle tree. - pub fn root_hash(&self) -> Option { - Some(self.logs.last()?.root_hash) - } -} - -/// [`TreeLogEntry`] together with its authenticity proof. -#[derive(Debug)] -pub struct TreeLogEntryWithProof
<P = Vec<ValueHash>
> { - /// Log entry about an atomic operation on the tree. - pub base: TreeLogEntry, - /// Merkle path to prove the log authenticity. The path consists of up to 256 hashes - /// ordered starting the bottommost level of the tree (one with leaves) and ending before - /// the root level. - /// - /// If the path is not full (contains <256 hashes), it means that the hashes at the beginning - /// corresponding to the empty subtrees are skipped. This allows compacting the proof ~10x. - pub merkle_path: P, - /// Root tree hash after the operation. - pub root_hash: ValueHash, -} - #[cfg(test)] mod tests { use super::*; diff --git a/core/lib/merkle_tree/src/types/mod.rs b/core/lib/merkle_tree/src/types/mod.rs new file mode 100644 index 00000000000..6988735ec02 --- /dev/null +++ b/core/lib/merkle_tree/src/types/mod.rs @@ -0,0 +1,163 @@ +//! Basic storage types. + +mod internal; + +pub(crate) use self::internal::{ + ChildRef, Nibbles, NibblesBytes, StaleNodeKey, TreeTags, HASH_SIZE, KEY_SIZE, TREE_DEPTH, +}; +pub use self::internal::{InternalNode, Key, LeafNode, Manifest, Node, NodeKey, Root, ValueHash}; + +/// Instruction to read or write a tree value at a certain key. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum TreeInstruction { + /// Read the current tree value. + Read, + /// Write the specified value. + Write(ValueHash), +} + +/// Entry in a Merkle tree associated with a key. +#[derive(Debug, Clone, Copy)] +pub struct TreeEntry { + /// Value associated with the key. + pub value_hash: ValueHash, + /// Enumeration index of the key. + pub leaf_index: u64, +} + +impl From for TreeEntry { + fn from(leaf: LeafNode) -> Self { + Self { + value_hash: leaf.value_hash, + leaf_index: leaf.leaf_index, + } + } +} + +impl TreeEntry { + pub(crate) fn empty() -> Self { + Self { + value_hash: ValueHash::zero(), + leaf_index: 0, + } + } + + /// Returns `true` iff this entry encodes lack of a value. + pub fn is_empty(&self) -> bool { + self.leaf_index == 0 && self.value_hash.is_zero() + } + + pub(crate) fn with_merkle_path(self, merkle_path: Vec) -> TreeEntryWithProof { + TreeEntryWithProof { + base: self, + merkle_path, + } + } +} + +/// Entry in a Merkle tree together with a proof of authenticity. +#[derive(Debug, Clone)] +pub struct TreeEntryWithProof { + /// Entry in a Merkle tree. + pub base: TreeEntry, + /// Proof of the value authenticity. + /// + /// If specified, a proof is the Merkle path consisting of up to 256 hashes + /// ordered starting the bottommost level of the tree (one with leaves) and ending before + /// the root level. + /// + /// If the path is not full (contains <256 hashes), it means that the hashes at the beginning + /// corresponding to the empty subtrees are skipped. This allows compacting the proof ~10x. + pub merkle_path: Vec, +} + +/// Output of inserting a block of entries into a Merkle tree. +#[derive(Debug, PartialEq, Eq)] +pub struct BlockOutput { + /// The new hash of the tree. + pub root_hash: ValueHash, + /// The number of leaves in the tree after the update. + pub leaf_count: u64, + /// Information about each insertion / update operation in the order of application. + pub logs: Vec, +} + +/// Information about an the effect of a [`TreeInstruction`]. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum TreeLogEntry { + /// A node was inserted into the tree. + Inserted { + /// Index of the inserted node. + leaf_index: u64, + }, + /// A node with the specified index was updated. + Updated { + /// Index of the updated node. 
+ leaf_index: u64, + /// Hash of the previous value. + previous_value: ValueHash, + }, + /// A node was read from the tree. + Read { + /// Index of the read node. + leaf_index: u64, + /// Hash of the read value. + value: ValueHash, + }, + /// A missing key was read. + ReadMissingKey, +} + +impl TreeLogEntry { + pub(crate) fn insert(leaf_index: u64) -> Self { + Self::Inserted { leaf_index } + } + + pub(crate) fn update(previous_value: ValueHash, leaf_index: u64) -> Self { + Self::Updated { + leaf_index, + previous_value, + } + } + + pub(crate) fn read(value: ValueHash, leaf_index: u64) -> Self { + Self::Read { leaf_index, value } + } + + pub(crate) fn is_read(&self) -> bool { + matches!(self, Self::Read { .. } | Self::ReadMissingKey) + } +} + +/// Extended output of inserting a block of entries into a Merkle tree that contains +/// Merkle proofs for each operation. +#[derive(Debug)] +pub struct BlockOutputWithProofs { + /// Extended information about each insertion / update operation in the order of application. + pub logs: Vec, + /// The number of leaves in the tree after the update. + pub leaf_count: u64, +} + +impl BlockOutputWithProofs { + /// Returns the final root hash of the Merkle tree. + pub fn root_hash(&self) -> Option { + Some(self.logs.last()?.root_hash) + } +} + +/// [`TreeLogEntry`] together with its authenticity proof. +#[derive(Debug)] +pub struct TreeLogEntryWithProof
<P = Vec<ValueHash>
> { + /// Log entry about an atomic operation on the tree. + pub base: TreeLogEntry, + /// Merkle path to prove log authenticity. The path consists of up to 256 hashes + /// ordered starting the bottommost level of the tree (one with leaves) and ending before + /// the root level. + /// + /// If the path is not full (contains <256 hashes), it means that the hashes at the beginning + /// corresponding to the empty subtrees are skipped. This allows compacting the proof ~10x. + pub merkle_path: P, + /// Root tree hash after the operation. + pub root_hash: ValueHash, +} diff --git a/core/lib/merkle_tree/tests/integration/merkle_tree.rs b/core/lib/merkle_tree/tests/integration/merkle_tree.rs index 96e36f34d1b..f94335390ee 100644 --- a/core/lib/merkle_tree/tests/integration/merkle_tree.rs +++ b/core/lib/merkle_tree/tests/integration/merkle_tree.rs @@ -3,9 +3,12 @@ use once_cell::sync::Lazy; use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; +use std::{cmp, mem}; + use zksync_crypto::hasher::{blake2::Blake2Hasher, Hasher}; use zksync_merkle_tree::{ Database, HashTree, MerkleTree, PatchSet, Patched, TreeInstruction, TreeLogEntry, + TreeRangeDigest, }; use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; @@ -91,7 +94,7 @@ fn root_hash_is_computed_correctly_on_empty_tree() { } #[test] -fn proofs_are_computed_correctly_on_empty_tree() { +fn output_proofs_are_computed_correctly_on_empty_tree() { const RNG_SEED: u64 = 123; let mut rng = StdRng::seed_from_u64(RNG_SEED); @@ -121,6 +124,50 @@ fn proofs_are_computed_correctly_on_empty_tree() { } } +#[test] +fn entry_proofs_are_computed_correctly_on_empty_tree() { + const RNG_SEED: u64 = 123; + + let mut rng = StdRng::seed_from_u64(RNG_SEED); + for kv_count in [1, 2, 3, 5, 8, 13, 21, 100] { + println!("Inserting {kv_count} key-value pairs"); + + let mut tree = MerkleTree::new(PatchSet::default()); + let kvs = generate_key_value_pairs(0..kv_count); + let expected_hash = compute_tree_hash(&kvs); + tree.extend(kvs.clone()); + + let existing_keys: Vec<_> = kvs.iter().map(|(key, _)| *key).collect(); + let entries = tree.entries_with_proofs(0, &existing_keys).unwrap(); + assert_eq!(entries.len(), existing_keys.len()); + for ((key, value), entry) in kvs.iter().zip(entries) { + entry.verify(&Blake2Hasher, *key, expected_hash); + assert_eq!(entry.base.value_hash, *value); + } + + // Test some keys adjacent to existing ones. 
+ let adjacent_keys = kvs.iter().flat_map(|(key, _)| { + [ + *key ^ (U256::one() << rng.gen_range(0..256)), + *key ^ (U256::one() << rng.gen_range(0..256)), + *key ^ (U256::one() << rng.gen_range(0..256)), + ] + }); + let random_keys = generate_key_value_pairs(kv_count..(kv_count * 2)) + .into_iter() + .map(|(key, _)| key); + let mut missing_keys: Vec<_> = adjacent_keys.chain(random_keys).collect(); + missing_keys.shuffle(&mut rng); + + let entries = tree.entries_with_proofs(0, &missing_keys).unwrap(); + assert_eq!(entries.len(), missing_keys.len()); + for (key, entry) in missing_keys.iter().zip(entries) { + assert!(entry.base.is_empty()); + entry.verify(&Blake2Hasher, *key, expected_hash); + } + } +} + #[test] fn proofs_are_computed_correctly_for_mixed_instructions() { const RNG_SEED: u64 = 123; @@ -206,7 +253,7 @@ fn root_hash_is_computed_correctly_with_intermediate_commits() { } #[test] -fn proofs_are_computed_correctly_with_intermediate_commits() { +fn output_proofs_are_computed_correctly_with_intermediate_commits() { let (kvs, expected_hash) = &*KVS_AND_HASH; for chunk_size in [3, 5, 10, 17, 28, 42] { println!("Inserting 100 key-value pairs in {chunk_size}-sized chunks"); @@ -223,6 +270,40 @@ fn proofs_are_computed_correctly_with_intermediate_commits() { } } +#[test] +fn entry_proofs_are_computed_correctly_with_intermediate_commits() { + let (kvs, _) = &*KVS_AND_HASH; + let all_keys: Vec<_> = kvs.iter().map(|(key, _)| *key).collect(); + for chunk_size in [10, 17, 28, 42] { + println!("Inserting 100 key-value pairs in {chunk_size}-sized chunks"); + + let mut tree = MerkleTree::new(PatchSet::default()); + let mut root_hashes = vec![]; + for chunk in kvs.chunks(chunk_size) { + let output = tree.extend(chunk.to_vec()); + root_hashes.push(output.root_hash); + + let version = root_hashes.len() - 1; + let entries = tree.entries_with_proofs(version as u64, &all_keys).unwrap(); + assert_eq!(entries.len(), all_keys.len()); + for (i, (key, entry)) in all_keys.iter().zip(entries).enumerate() { + assert_eq!(entry.base.is_empty(), i >= (version + 1) * chunk_size); + entry.verify(&Blake2Hasher, *key, output.root_hash); + } + } + + // Check all tree versions. 
+ for (version, root_hash) in root_hashes.into_iter().enumerate() { + let entries = tree.entries_with_proofs(version as u64, &all_keys).unwrap(); + assert_eq!(entries.len(), all_keys.len()); + for (i, (key, entry)) in all_keys.iter().zip(entries).enumerate() { + assert_eq!(entry.base.is_empty(), i >= (version + 1) * chunk_size); + entry.verify(&Blake2Hasher, *key, root_hash); + } + } + } +} + fn test_accumulated_commits(db: DB, chunk_size: usize) -> DB { let (kvs, expected_hash) = &*KVS_AND_HASH; let mut db = Patched::new(db); @@ -388,6 +469,13 @@ fn proofs_are_computed_correctly_with_key_updates() { let output = tree.extend_with_proofs(instructions.clone()); assert_eq!(output.root_hash(), Some(*expected_hash)); output.verify_proofs(&Blake2Hasher, root_hash, &instructions); + + let keys: Vec<_> = kvs.iter().map(|(key, _)| *key).collect(); + let proofs = tree.entries_with_proofs(1, &keys).unwrap(); + for ((key, value), proof) in kvs.iter().zip(proofs) { + assert_eq!(proof.base.value_hash, *value); + proof.verify(&Blake2Hasher, *key, *expected_hash); + } } } @@ -432,6 +520,95 @@ fn root_hash_equals_to_previous_implementation() { test_root_hash_equals_to_previous_implementation(&mut PatchSet::default()); } +fn test_range_proofs_simple(range_size: usize) { + let (kvs, expected_hash) = &*KVS_AND_HASH; + assert!(range_size >= 2 && range_size <= kvs.len()); + + let mut tree = MerkleTree::new(PatchSet::default()); + tree.extend(kvs.clone()); + + let mut sorted_keys: Vec<_> = kvs.iter().map(|(key, _)| *key).collect(); + sorted_keys.sort_unstable(); + + for start_idx in 0..(sorted_keys.len() - range_size) { + let key_range = &sorted_keys[start_idx..(start_idx + range_size)]; + let [first_key, other_keys @ .., last_key] = key_range else { + unreachable!(); + }; + + let mut proven_entries = tree + .entries_with_proofs(0, &[*first_key, *last_key]) + .unwrap(); + let last_entry = proven_entries.pop().unwrap(); + let first_entry = proven_entries.pop().unwrap(); + let other_entries = tree.entries(0, other_keys).unwrap(); + + let mut range = TreeRangeDigest::new(&Blake2Hasher, *first_key, &first_entry); + for (key, entry) in other_keys.iter().zip(other_entries) { + range.update(*key, entry); + } + let range_hash = range.finalize(*last_key, &last_entry); + assert_eq!(range_hash, *expected_hash); + } +} + +#[test] +fn range_proofs_with_multiple_existing_items() { + for range_size in [2, 3, 5, 10, 17, 28, 42] { + println!("Testing range proofs with {range_size} items"); + test_range_proofs_simple(range_size); + } +} + +#[test] +fn range_proofs_for_almost_full_range() { + for range_size in 95..=100 { + println!("Testing range proofs with {range_size} items"); + test_range_proofs_simple(range_size); + } +} + +#[test] +fn range_proofs_with_random_ranges() { + const ITER_COUNT: usize = 100; + const RNG_SEED: u64 = 321; + + let mut rng = StdRng::seed_from_u64(RNG_SEED); + let (kvs, expected_hash) = &*KVS_AND_HASH; + let mut tree = MerkleTree::new(PatchSet::default()); + tree.extend(kvs.clone()); + + for _ in 0..ITER_COUNT { + let mut start_key = U256([rng.gen(), rng.gen(), rng.gen(), rng.gen()]); + let mut end_key = U256([rng.gen(), rng.gen(), rng.gen(), rng.gen()]); + match start_key.cmp(&end_key) { + cmp::Ordering::Less => { /* ok */ } + cmp::Ordering::Equal => continue, + cmp::Ordering::Greater => mem::swap(&mut start_key, &mut end_key), + } + + // Find out keys falling into the range. 
+ let keys_in_range = kvs + .iter() + .filter_map(|&(key, _)| (key > start_key && key < end_key).then_some(key)); + let mut keys_in_range: Vec<_> = keys_in_range.collect(); + keys_in_range.sort_unstable(); + println!("Proving range with {} keys", keys_in_range.len()); + + let mut proven_entries = tree.entries_with_proofs(0, &[start_key, end_key]).unwrap(); + let last_entry = proven_entries.pop().unwrap(); + let first_entry = proven_entries.pop().unwrap(); + let other_entries = tree.entries(0, &keys_in_range).unwrap(); + + let mut range = TreeRangeDigest::new(&Blake2Hasher, start_key, &first_entry); + for (key, entry) in keys_in_range.iter().zip(other_entries) { + range.update(*key, entry); + } + let range_hash = range.finalize(end_key, &last_entry); + assert_eq!(range_hash, *expected_hash); + } +} + /// RocksDB-specific tests. mod rocksdb { use serde::{Deserialize, Serialize}; From 99b8aad24376d9beee0ff260a48fa52dad1b5c4e Mon Sep 17 00:00:00 2001 From: Igor Borodin Date: Sun, 8 Oct 2023 21:13:53 +0200 Subject: [PATCH 7/8] feat: Post FOSS Dockerfiles refactoring, take 1 (#166) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ - Prettifies Dockerfiles - Removes some unnecessary dependencies on running something outside of Docker build (there are still some, though) ## Why ❔ To make a simple `docker build` command work without any additional hacks ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --------- Co-authored-by: Yury Akudovich Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Danil Co-authored-by: Marcin M <128217157+mm-zk@users.noreply.github.com> --- .github/workflows/build-prover-template.yml | 21 ++---------- bin/ci_run | 14 +++++++- docker/circuit-synthesizer/Dockerfile | 9 +++-- docker/contract-verifier/Dockerfile | 11 ++++--- .../cross-external-nodes-checker/Dockerfile | 13 +++----- docker/external-node/Dockerfile | 16 +++++---- docker/local-node/Dockerfile | 2 -- docker/local-node/entrypoint.sh | 2 +- docker/proof-fri-compressor/Dockerfile | 8 ++--- docker/prover-fri-gateway/Dockerfile | 8 ++--- docker/prover-fri/Dockerfile | 8 ++--- docker/prover-gar/Dockerfile | 5 +-- docker/prover-gpu-fri-gar/Dockerfile | 3 +- docker/prover-gpu-fri/Dockerfile | 14 ++++---- docker/prover/Dockerfile | 33 +++++++++++++++---- docker/runner/Dockerfile | 5 --- docker/server-v2/Dockerfile | 12 ++++--- docker/witness-generator/Dockerfile | 8 ++--- docker/witness-vector-generator/Dockerfile | 8 ++--- etc/scripts/prepare_bellman_cuda.sh | 7 ---- etc/scripts/prepare_era_bellman_cuda.sh | 11 ------- infrastructure/zk/src/docker.ts | 18 +++++++--- 22 files changed, 112 insertions(+), 124 deletions(-) delete mode 100644 docker/runner/Dockerfile delete mode 100755 etc/scripts/prepare_bellman_cuda.sh delete mode 100755 etc/scripts/prepare_era_bellman_cuda.sh diff --git a/.github/workflows/build-prover-template.yml b/.github/workflows/build-prover-template.yml index b3a0c262503..a93a890622a 100644 --- a/.github/workflows/build-prover-template.yml +++ b/.github/workflows/build-prover-template.yml @@ -28,27 +28,14 @@ on: required: false jobs: - era-bellman-cuda: - name: Get era-bellman-cuda release URLs. 
- runs-on: [ubuntu-latest] - outputs: - source: ${{ steps.release.outputs.source }} - binary: ${{ steps.release.outputs.binary }} - steps: - - id: release - run: | - release=($(curl -v --silent https://api.github.com/repos/matter-labs/era-bellman-cuda/releases/tags/${{ inputs.ERA_BELLMAN_CUDA_RELEASE }} | jq -r ".name, .tarball_url, .assets[0].browser_download_url")) - echo "source=${release[1]}" >> "$GITHUB_OUTPUT" - echo "binary=${release[2]}" >> "$GITHUB_OUTPUT" - build-images: name: Build and Push Docker Images env: image_tag: ${{ inputs.image_tag }} IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }} RUNNER_COMPOSE_FILE: "docker-compose-runner-nightly.yml" + ERA_BELLMAN_CUDA_RELEASE: ${{ inputs.ERA_BELLMAN_CUDA_RELEASE }} runs-on: [matterlabs-ci-runner] - needs: [era-bellman-cuda] strategy: matrix: component: @@ -65,9 +52,6 @@ jobs: with: submodules: "recursive" - - name: Prepare bellman-cuda directory - run: etc/scripts/prepare_era_bellman_cuda.sh ${{ needs.era-bellman-cuda.outputs.source }} ${{ needs.era-bellman-cuda.outputs.binary }} - - name: setup-env run: | echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV @@ -113,7 +97,7 @@ jobs: "proof-fri-compressor" ) run: | - nightly_components=${{env.RUST_NIGHTLY_COMPONENTS}} + nightly_components=${{ env.RUST_NIGHTLY_COMPONENTS }} ci_run docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} ci_run gcloud auth configure-docker us-docker.pkg.dev,asia-docker.pkg.dev -q @@ -130,6 +114,7 @@ jobs: ci_run echo [workspace] > Cargo.toml ci_run echo members = [\"prover/${underscored_name}\"] >> Cargo.toml ci_run cp prover/Cargo.lock Cargo.lock + PASSED_ENV_VARS="ERA_BELLMAN_CUDA_RELEASE" \ ci_run zk docker $DOCKER_ACTION $COMPONENT else ci_run zk docker $DOCKER_ACTION $COMPONENT diff --git a/bin/ci_run b/bin/ci_run index 0f578106f46..d3fa5414b5f 100755 --- a/bin/ci_run +++ b/bin/ci_run @@ -3,4 +3,16 @@ # Runs the command from within CI docker-compose environment. cd $ZKSYNC_HOME compose_file="${RUNNER_COMPOSE_FILE:-docker-compose-runner.yml}" -docker-compose -f $compose_file exec -T zk $@ + +# Pass environment variables explicitly if specified +if [ ! -z "$PASSED_ENV_VARS" ]; then + env_vars_option="" + IFS=',' read -ra ADDR <<<"$PASSED_ENV_VARS" + for var in "${ADDR[@]}"; do + value=$(eval echo \$$var) + env_vars_option="${env_vars_option} -e ${var}=${value}" + done + docker-compose -f $compose_file exec -T $env_vars_option zk $@ +else + docker-compose -f $compose_file exec -T zk $@ +fi diff --git a/docker/circuit-synthesizer/Dockerfile b/docker/circuit-synthesizer/Dockerfile index 811e73a19ed..e64ada1d1a8 100644 --- a/docker/circuit-synthesizer/Dockerfile +++ b/docker/circuit-synthesizer/Dockerfile @@ -1,7 +1,3 @@ -# For using private GitHub dependencies, CI downdloads all crates and bellman-cuda dependency outside of the contatiner -# Not expected to work locally - -# syntax=docker/dockerfile:experimental FROM debian:bookworm-slim as builder ARG DEBIAN_FRONTEND=noninteractive @@ -21,10 +17,13 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ WORKDIR /usr/src/zksync COPY . . 
-RUN CARGO_HOME=./cargo cargo build --release +RUN cargo build --release FROM debian:bookworm-slim + RUN apt-get update && apt-get install -y curl openssl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* + COPY core/bin/verification_key_generator_and_server/data/ /core/bin/verification_key_generator_and_server/data/ COPY --from=builder /usr/src/zksync/target/release/zksync_circuit_synthesizer /usr/bin/ + ENTRYPOINT ["zksync_circuit_synthesizer"] diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 74976449ef4..69e3a0a7df0 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -1,13 +1,15 @@ -# syntax=docker/dockerfile:experimental +# Will work locally only after building contracts first + FROM rust:1.72-bookworm as builder + RUN apt-get update && apt-get install -y clang && rm -rf /var/lib/apt/lists/* WORKDIR /usr/src/zksync COPY . . -#Needed to get zkEVM package from github link with auth embedded -# Doesn't expected to work local -RUN CARGO_HOME=./cargo cargo build --release + +RUN cargo build --release FROM debian:bookworm-slim + RUN apt-get update && apt-get install -y curl libpq5 ca-certificates wget python3 && rm -rf /var/lib/apt/lists/* # install zksolc @@ -95,5 +97,6 @@ RUN mkdir -p /etc/vyper-bin/0.3.9 \ COPY --from=builder /usr/src/zksync/target/release/zksync_contract_verifier /usr/bin/ COPY etc/system-contracts/bootloader/build/artifacts/ /etc/system-contracts/bootloader/build/artifacts/ COPY etc/system-contracts/artifacts-zk /etc/system-contracts/artifacts-zk + # CMD tail -f /dev/null ENTRYPOINT ["zksync_contract_verifier"] diff --git a/docker/cross-external-nodes-checker/Dockerfile b/docker/cross-external-nodes-checker/Dockerfile index 4f9fc72b923..301f67e8b0a 100644 --- a/docker/cross-external-nodes-checker/Dockerfile +++ b/docker/cross-external-nodes-checker/Dockerfile @@ -1,21 +1,16 @@ -# For using private GitHub dependencies, CI downdloads all crates outside of the contatiner -# Not expected to work locally - -# BUILDING STAGE -# syntax=docker/dockerfile:experimental FROM rust:1.72-bookworm as builder + RUN apt-get update && apt-get install -y clang && rm -rf /var/lib/apt/lists/* + WORKDIR /usr/src/zksync COPY . . -RUN CARGO_HOME=./cargo cargo build --release +RUN cargo build --release -# RUNNING STAGE FROM debian:bookworm-slim + RUN apt-get update && apt-get install -y curl ca-certificates && rm -rf /var/lib/apt/lists/* -# Bring the below from the building stage to the final image. COPY --from=builder /usr/src/zksync/target/release/cross_external_nodes_checker /usr/bin -# Run the entrypoint script as the default command. ENTRYPOINT ["cross_external_nodes_checker"] diff --git a/docker/external-node/Dockerfile b/docker/external-node/Dockerfile index 58a341b14a3..60375216ba0 100644 --- a/docker/external-node/Dockerfile +++ b/docker/external-node/Dockerfile @@ -1,23 +1,23 @@ -# For using private GitHub dependencies, CI downdloads all crates outside of the contatiner -# Not expected to work locally +# Will work locally only after prior contracts build -# syntax=docker/dockerfile:experimental FROM rust:1.72-bookworm as builder + RUN apt-get update && apt-get install -y clang && rm -rf /var/lib/apt/lists/* + WORKDIR /usr/src/zksync COPY . . 
-RUN CARGO_HOME=./cargo cargo build --release -RUN CARGO_HOME=./cargo cargo install sqlx-cli --version 0.5.13 +RUN cargo build --release +RUN cargo install sqlx-cli --version 0.5.13 FROM debian:bookworm-slim + RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* COPY --from=builder /usr/src/zksync/target/release/zksync_external_node /usr/bin COPY --from=builder /usr/src/zksync/target/release/block_reverter /usr/bin -COPY --from=builder /usr/src/zksync/cargo/bin/sqlx /usr/bin +COPY --from=builder /usr/local/cargo/bin/sqlx /usr/bin COPY --from=builder /usr/src/zksync/docker/external-node/entrypoint.sh /usr/bin -RUN chmod +x /usr/bin/entrypoint.sh COPY etc/system-contracts/bootloader/build/artifacts/ /etc/system-contracts/bootloader/build/artifacts/ COPY etc/system-contracts/contracts/artifacts/ /etc/system-contracts/contracts/artifacts/ COPY etc/system-contracts/contracts/precompiles/artifacts/ /etc/system-contracts/contracts/precompiles/artifacts/ @@ -29,4 +29,6 @@ COPY etc/ERC20/ /etc/ERC20/ COPY etc/multivm_bootloaders/ /etc/multivm_bootloaders/ COPY core/lib/dal/migrations/ /migrations +RUN chmod +x /usr/bin/entrypoint.sh + ENTRYPOINT [ "sh", "/usr/bin/entrypoint.sh"] diff --git a/docker/local-node/Dockerfile b/docker/local-node/Dockerfile index e34dded0110..2826c4a1474 100644 --- a/docker/local-node/Dockerfile +++ b/docker/local-node/Dockerfile @@ -1,5 +1,3 @@ -# syntax=docker/dockerfile:experimental - # Image is always built from the server image to reuse the common parts # This image is expected to be built locally beforehand (implemented in the `zk` tool) FROM matterlabs/server-v2:latest2.0 diff --git a/docker/local-node/entrypoint.sh b/docker/local-node/entrypoint.sh index 664cf4b3b6d..e96674d6bdc 100755 --- a/docker/local-node/entrypoint.sh +++ b/docker/local-node/entrypoint.sh @@ -13,7 +13,7 @@ then echo "Initialing local environment" psql ${DATABASE_URL%/*} -c "create database ${DATABASE_URL##*/}" find /migrations -name "*up.sql" | sort | xargs printf -- ' -f %s' | xargs -t psql $DATABASE_URL - + cd /infrastructure/zk # Compile configs yarn start config compile diff --git a/docker/proof-fri-compressor/Dockerfile b/docker/proof-fri-compressor/Dockerfile index 7fd50a923f9..e60998fac70 100644 --- a/docker/proof-fri-compressor/Dockerfile +++ b/docker/proof-fri-compressor/Dockerfile @@ -1,7 +1,5 @@ -# For using private GitHub dependencies, CI downdloads all crates outside of the contatiner -# Not expected to work locally +# Will work locally only after prior universal setup key download -# syntax=docker/dockerfile:experimental FROM debian:bookworm-slim as builder ARG DEBIAN_FRONTEND=noninteractive @@ -21,9 +19,10 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ WORKDIR /usr/src/zksync COPY . . 
-RUN CARGO_HOME=./cargo cargo build --release +RUN cargo build --release FROM debian:bookworm-slim + RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* # copy VK required for proof wrapping @@ -33,4 +32,5 @@ COPY prover/vk_setup_data_generator_server_fri/data/ /prover/vk_setup_data_gener COPY setup_2\^26.key /setup_2\^26.key COPY --from=builder /usr/src/zksync/target/release/zksync_proof_fri_compressor /usr/bin/ + ENTRYPOINT ["zksync_proof_fri_compressor"] diff --git a/docker/prover-fri-gateway/Dockerfile b/docker/prover-fri-gateway/Dockerfile index 03341d20df6..6a7b27637ab 100644 --- a/docker/prover-fri-gateway/Dockerfile +++ b/docker/prover-fri-gateway/Dockerfile @@ -1,7 +1,3 @@ -# For using private GitHub dependencies, CI downdloads all crates outside of the contatiner -# Not expected to work locally - -# syntax=docker/dockerfile:experimental FROM debian:bookworm-slim as builder ARG DEBIAN_FRONTEND=noninteractive @@ -21,7 +17,7 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ WORKDIR /usr/src/zksync COPY . . -RUN CARGO_HOME=./cargo cargo build --release +RUN cargo build --release FROM debian:bookworm-slim RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* @@ -29,6 +25,6 @@ RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf / # copy VK required for proof wrapping COPY prover/vk_setup_data_generator_server_fri/data/ /prover/vk_setup_data_generator_server_fri/data/ - COPY --from=builder /usr/src/zksync/target/release/zksync_prover_fri_gateway /usr/bin/ + ENTRYPOINT ["zksync_prover_fri_gateway"] diff --git a/docker/prover-fri/Dockerfile b/docker/prover-fri/Dockerfile index 16677d128ee..1fda048ca33 100644 --- a/docker/prover-fri/Dockerfile +++ b/docker/prover-fri/Dockerfile @@ -1,7 +1,3 @@ -# For using private GitHub dependencies, CI downdloads all crates outside of the contatiner -# Not expected to work locally - -# syntax=docker/dockerfile:experimental FROM debian:bookworm-slim as builder ARG DEBIAN_FRONTEND=noninteractive @@ -21,7 +17,7 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ WORKDIR /usr/src/zksync COPY . . 
-RUN CARGO_HOME=./cargo cargo build --release +RUN cargo build --release FROM debian:bookworm-slim RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* @@ -29,6 +25,6 @@ RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf / # copy VK required for protocol version COPY prover/vk_setup_data_generator_server_fri/data/ /prover/vk_setup_data_generator_server_fri/data/ - COPY --from=builder /usr/src/zksync/target/release/zksync_prover_fri /usr/bin/ + ENTRYPOINT ["zksync_prover_fri"] diff --git a/docker/prover-gar/Dockerfile b/docker/prover-gar/Dockerfile index 974679cbebf..ced97d6d7e7 100644 --- a/docker/prover-gar/Dockerfile +++ b/docker/prover-gar/Dockerfile @@ -1,5 +1,6 @@ -# syntax=docker/dockerfile:1 -ARG PROVER_IMAGE +# Will work locally only after prior universal key download and Docker login to the private registry + +ARG PROVER_IMAGE=latest FROM us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-v2:2.0-$PROVER_IMAGE as prover FROM nvidia/cuda:11.8.0-runtime-ubuntu22.04 as app diff --git a/docker/prover-gpu-fri-gar/Dockerfile b/docker/prover-gpu-fri-gar/Dockerfile index 44577a79dc8..bd70be7ee4b 100644 --- a/docker/prover-gpu-fri-gar/Dockerfile +++ b/docker/prover-gpu-fri-gar/Dockerfile @@ -1,8 +1,7 @@ -# syntax=docker/dockerfile:1 ARG PROVER_IMAGE FROM us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-gpu-fri:2.0-$PROVER_IMAGE as prover -FROM nvidia/cuda:12.0.0-runtime-ubuntu22.04 as app +FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04 as app # HACK copying to root is the only way to make Docker layer caching work for these files for some reason COPY *.bin / diff --git a/docker/prover-gpu-fri/Dockerfile b/docker/prover-gpu-fri/Dockerfile index 8234dd2b6b0..5e37c089ed9 100644 --- a/docker/prover-gpu-fri/Dockerfile +++ b/docker/prover-gpu-fri/Dockerfile @@ -1,8 +1,4 @@ -# For using private GitHub dependencies, CI downdloads all crates outside of the contatiner -# Not expected to work locally - -# syntax=docker/dockerfile:experimental -FROM nvidia/cuda:12.0.0-devel-ubuntu22.04 as builder +FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 as builder ARG DEBIAN_FRONTEND=noninteractive @@ -14,13 +10,13 @@ ENV RUSTUP_HOME=/usr/local/rustup \ CARGO_HOME=/usr/local/cargo \ PATH=/usr/local/cargo/bin:$PATH -ENV CUDAARCHS=75 +# Building for Nvidia L4 +ENV CUDAARCHS=89 RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ rustup install nightly-2023-07-21 && \ rustup default nightly-2023-07-21 -# Setup cmake RUN curl -Lo cmake-3.24.2-linux-x86_64.sh https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh && \ chmod +x cmake-3.24.2-linux-x86_64.sh && \ ./cmake-3.24.2-linux-x86_64.sh --skip-license --prefix=/usr/local @@ -28,13 +24,15 @@ RUN curl -Lo cmake-3.24.2-linux-x86_64.sh https://github.com/Kitware/CMake/relea WORKDIR /usr/src/zksync COPY . . 
-RUN CARGO_HOME=./cargo cargo build --release --features "gpu" +RUN cargo build --release --features "gpu" FROM nvidia/cuda:12.0.0-devel-ubuntu22.04 + RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* # copy finalization hints required for assembly generation COPY prover/vk_setup_data_generator_server_fri/data/ /prover/vk_setup_data_generator_server_fri/data/ COPY --from=builder /usr/src/zksync/target/release/zksync_prover_fri /usr/bin/ + ENTRYPOINT ["zksync_prover_fri"] diff --git a/docker/prover/Dockerfile b/docker/prover/Dockerfile index 74ce3c12c10..25e0d8d8e0d 100644 --- a/docker/prover/Dockerfile +++ b/docker/prover/Dockerfile @@ -1,12 +1,12 @@ -# For using private GitHub dependencies, CI downdloads all crates and bellman-cuda dependency outside of the contatiner -# Not expected to work locally +# Will work locally only after prior contracts build and universal setup key download -# syntax=docker/dockerfile:experimental FROM nvidia/cuda:11.8.0-devel-ubuntu22.04 as builder ARG DEBIAN_FRONTEND=noninteractive -RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ \ +ARG ERA_BELLMAN_CUDA_RELEASE=latest + +RUN apt-get update && apt-get install -y curl jq clang openssl libssl-dev gcc g++ \ pkg-config build-essential libclang-dev && \ rm -rf /var/lib/apt/lists/* @@ -19,11 +19,31 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ rustup default nightly-2023-07-21 WORKDIR /usr/src/zksync -COPY . . + +ENV GITHUB_OWNER=matter-labs +ENV GITHUB_REPO=era-bellman-cuda + +RUN set -e; \ + if [ "$ERA_BELLMAN_CUDA_RELEASE" = "latest" ]; then \ + latest_release_data=$(curl --silent "https://api.github.com/repos/${GITHUB_OWNER}/${GITHUB_REPO}/releases"); \ + latest_tag=$(echo "$latest_release_data" | jq -r '.[0].tag_name'); \ + source_url="https://github.com/${GITHUB_OWNER}/${GITHUB_REPO}/archive/refs/tags/${latest_tag}.tar.gz"; \ + binary_url="https://github.com/${GITHUB_OWNER}/${GITHUB_REPO}/releases/download/${latest_tag}/bellman-cuda.tar.gz"; \ + else \ + source_url="https://github.com/${GITHUB_OWNER}/${GITHUB_REPO}/archive/refs/tags/${ERA_BELLMAN_CUDA_RELEASE}.tar.gz"; \ + binary_url="https://github.com/${GITHUB_OWNER}/${GITHUB_REPO}/releases/download/${ERA_BELLMAN_CUDA_RELEASE}/bellman-cuda.tar.gz"; \ + fi; \ + curl --silent --location "$source_url" --output bellman-cuda-source.tar.gz; \ + curl --silent --location "$binary_url" --output bellman-cuda.tar.gz; \ + mkdir -p bellman-cuda; \ + tar xvfz bellman-cuda.tar.gz -C ./bellman-cuda; \ + tar xvfz bellman-cuda-source.tar.gz -C ./bellman-cuda --strip-components=1 ENV BELLMAN_CUDA_DIR=/usr/src/zksync/bellman-cuda -RUN CARGO_HOME=./cargo cargo build --release +COPY . . 
+ +RUN cargo build --release FROM nvidia/cuda:11.8.0-runtime-ubuntu22.04 as runner @@ -40,4 +60,5 @@ COPY setup_2\^26.key /etc/ COPY core/bin/verification_key_generator_and_server/data/ /core/bin/verification_key_generator_and_server/data/ COPY --from=builder /usr/src/zksync/target/release/zksync_prover /usr/bin/ + ENTRYPOINT ["zksync_prover"] diff --git a/docker/runner/Dockerfile b/docker/runner/Dockerfile deleted file mode 100644 index bec7c1e015f..00000000000 --- a/docker/runner/Dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -FROM tcardonne/github-runner -FROM docker:dind -RUN apk update -RUN apk add py-pip python3-dev libffi-dev openssl-dev gcc libc-dev make -RUN pip install docker-compose diff --git a/docker/server-v2/Dockerfile b/docker/server-v2/Dockerfile index abd92a76320..074e2e306ec 100644 --- a/docker/server-v2/Dockerfile +++ b/docker/server-v2/Dockerfile @@ -1,22 +1,24 @@ -# For using private GitHub dependencies, CI downdloads all crates outside of the contatiner -# Not expected to work locally +# Will work locally only after prior contracts build -# syntax=docker/dockerfile:experimental FROM rust:1.72-bookworm as builder + RUN apt-get update && apt-get install -y linux-libc-dev liburing-dev clang && \ - # ^ We need a newer version of `linux-libc-dev` from backports than the one installed by default rm -rf /var/lib/apt/lists/* + WORKDIR /usr/src/zksync COPY . . -RUN CARGO_HOME=./cargo cargo build --release --features=rocksdb/io-uring +RUN cargo build --release --features=rocksdb/io-uring FROM debian:bookworm-slim + RUN apt-get update && apt-get install -y curl libpq5 liburing-dev ca-certificates && \ rm -rf /var/lib/apt/lists/* + EXPOSE 3000 EXPOSE 3031 EXPOSE 3030 + COPY --from=builder /usr/src/zksync/target/release/zksync_server /usr/bin COPY --from=builder /usr/src/zksync/target/release/block_reverter /usr/bin COPY --from=builder /usr/src/zksync/target/release/merkle_tree_consistency_checker /usr/bin diff --git a/docker/witness-generator/Dockerfile b/docker/witness-generator/Dockerfile index a917a73669f..984d8520313 100644 --- a/docker/witness-generator/Dockerfile +++ b/docker/witness-generator/Dockerfile @@ -1,7 +1,3 @@ -# For using private GitHub dependencies, CI downdloads all crates outside of the contatiner -# Not expected to work locally - -# syntax=docker/dockerfile:experimental FROM debian:bookworm-slim as builder ARG DEBIAN_FRONTEND=noninteractive @@ -21,12 +17,14 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ WORKDIR /usr/src/zksync COPY . . -RUN CARGO_HOME=./cargo cargo build --release +RUN cargo build --release FROM debian:bookworm-slim + RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* COPY prover/vk_setup_data_generator_server_fri/data/ /prover/vk_setup_data_generator_server_fri/data/ COPY --from=builder /usr/src/zksync/target/release/zksync_witness_generator /usr/bin/ + ENTRYPOINT ["zksync_witness_generator"] diff --git a/docker/witness-vector-generator/Dockerfile b/docker/witness-vector-generator/Dockerfile index 4b26646801c..b7e1c320cfb 100644 --- a/docker/witness-vector-generator/Dockerfile +++ b/docker/witness-vector-generator/Dockerfile @@ -1,7 +1,3 @@ -# For using private GitHub dependencies, CI downdloads all crates outside of the contatiner -# Not expected to work locally - -# syntax=docker/dockerfile:experimental FROM debian:bookworm-slim as builder ARG DEBIAN_FRONTEND=noninteractive @@ -21,13 +17,15 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ WORKDIR /usr/src/zksync COPY . . 
-RUN CARGO_HOME=./cargo cargo build --release +RUN cargo build --release FROM debian:bookworm-slim + RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* # copy finalization hints required for witness vector generation COPY prover/vk_setup_data_generator_server_fri/data/ /prover/vk_setup_data_generator_server_fri/data/ COPY --from=builder /usr/src/zksync/target/release/zksync_witness_vector_generator /usr/bin/ + ENTRYPOINT ["zksync_witness_vector_generator"] diff --git a/etc/scripts/prepare_bellman_cuda.sh b/etc/scripts/prepare_bellman_cuda.sh deleted file mode 100755 index db0ba745bba..00000000000 --- a/etc/scripts/prepare_bellman_cuda.sh +++ /dev/null @@ -1,7 +0,0 @@ -echo "preparing bellman cuda directory" -gh release -R github.com/matter-labs/bellman-cuda download "$1" -gh release -R github.com/matter-labs/bellman-cuda download "$1" -A tar.gz -mkdir -p bellman-cuda -tar xvf bellman-cuda.tar.gz -C ./bellman-cuda -tar xvf bellman-cuda-"$1".tar.gz -mv bellman-cuda-"$1"/* ./bellman-cuda/ diff --git a/etc/scripts/prepare_era_bellman_cuda.sh b/etc/scripts/prepare_era_bellman_cuda.sh deleted file mode 100755 index 270c326217b..00000000000 --- a/etc/scripts/prepare_era_bellman_cuda.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -set -e - -source="$1" -binary="$2" - -curl --silent --location "${source}" --output bellman-cuda-source.tar.gz -curl --silent --location "${binary}" --output bellman-cuda.tar.gz -mkdir -p bellman-cuda -tar xvfz bellman-cuda.tar.gz -C ./bellman-cuda -tar xvfz bellman-cuda-source.tar.gz -C ./bellman-cuda --strip-components=1 diff --git a/infrastructure/zk/src/docker.ts b/infrastructure/zk/src/docker.ts index 64c23828d1f..17e93117b96 100644 --- a/infrastructure/zk/src/docker.ts +++ b/infrastructure/zk/src/docker.ts @@ -86,22 +86,30 @@ function defaultTagList(image: string, imageTagSha: string, imageTagShaTS: strin } async function _build(image: string, tagList: string[]) { - if (image == 'server-v2' || image == 'external-node' || image == 'prover') { + if (image === 'server-v2' || image === 'external-node' || image === 'prover') { await contract.build(); } const tagsToBuild = tagList.map((tag) => `-t matterlabs/${image}:${tag}`).join(' '); - // generate list of tags for image - we want 3 tags (latest, SHA, SHA+TimeStamp) for listed components and only "latest" for everything else - await utils.spawn(`CARGO_HOME=./cargo cargo fetch`); + // Conditionally add build argument if image is prover-v2 + let buildArgs = ''; + if (image === 'prover-v2') { + const eraBellmanCudaRelease = process.env.ERA_BELLMAN_CUDA_RELEASE; + buildArgs = `--build-arg ERA_BELLMAN_CUDA_RELEASE=${eraBellmanCudaRelease}`; + } // HACK // For prover-v2 which is not a prover, but should be built from the prover dockerfile. So here we go. const imagePath = image == 'prover-v2' ? 'prover' : image; - // build image with needed tags - await utils.spawn(`DOCKER_BUILDKIT=1 docker build ${tagsToBuild} -f ./docker/${imagePath}/Dockerfile .`); + const buildCommand = + `DOCKER_BUILDKIT=1 docker build ${tagsToBuild}` + + (buildArgs ? 
` ${buildArgs}` : '') + + ` -f ./docker/${imagePath}/Dockerfile .`; + + await utils.spawn(buildCommand); } async function _push(image: string, tagList: string[], publishPublic: boolean = false) { From fa7165002884e7137b623feec3721cbbe3332a40 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Mon, 9 Oct 2023 12:13:12 +0300 Subject: [PATCH 8/8] fix(db): drop constraint prover_jobs_fri_l1_batch_number_fkey (#173) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ `prover_jobs_fri_l1_batch_number_fkey` is dropped. ## Why ❔ `prover_jobs_fri` is filled only in prover DB, while `l1_batches` -- only in core DB, so we can't have the constraint. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- ...009073918_drop-prover_jobs_fri_l1_batch_number_fkey.down.sql | 2 ++ ...31009073918_drop-prover_jobs_fri_l1_batch_number_fkey.up.sql | 1 + 2 files changed, 3 insertions(+) create mode 100644 core/lib/dal/migrations/20231009073918_drop-prover_jobs_fri_l1_batch_number_fkey.down.sql create mode 100644 core/lib/dal/migrations/20231009073918_drop-prover_jobs_fri_l1_batch_number_fkey.up.sql diff --git a/core/lib/dal/migrations/20231009073918_drop-prover_jobs_fri_l1_batch_number_fkey.down.sql b/core/lib/dal/migrations/20231009073918_drop-prover_jobs_fri_l1_batch_number_fkey.down.sql new file mode 100644 index 00000000000..ca6f0f84302 --- /dev/null +++ b/core/lib/dal/migrations/20231009073918_drop-prover_jobs_fri_l1_batch_number_fkey.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE prover_jobs_fri ADD CONSTRAINT prover_jobs_fri_l1_batch_number_fkey + FOREIGN KEY (l1_batch_number) REFERENCES l1_batches (number); diff --git a/core/lib/dal/migrations/20231009073918_drop-prover_jobs_fri_l1_batch_number_fkey.up.sql b/core/lib/dal/migrations/20231009073918_drop-prover_jobs_fri_l1_batch_number_fkey.up.sql new file mode 100644 index 00000000000..2835b15b64f --- /dev/null +++ b/core/lib/dal/migrations/20231009073918_drop-prover_jobs_fri_l1_batch_number_fkey.up.sql @@ -0,0 +1 @@ +ALTER TABLE prover_jobs_fri DROP CONSTRAINT IF EXISTS prover_jobs_fri_l1_batch_number_fkey;
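A quick way to sanity-check the `up` migration is to ask Postgres whether the constraint is still registered. The sketch below is not part of the patch; it assumes `sqlx` (with the Postgres driver), `tokio`, and `anyhow` as dependencies, and a `DATABASE_URL` pointing at the prover database:

```rust
use sqlx::{Connection, PgConnection};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Connect to the prover database, which owns `prover_jobs_fri`.
    let url = std::env::var("DATABASE_URL")?;
    let mut conn = PgConnection::connect(&url).await?;

    // `pg_constraint` lists every constraint Postgres knows about; after the `up`
    // migration the foreign key on `prover_jobs_fri.l1_batch_number` should be gone.
    let fk_exists: bool = sqlx::query_scalar(
        "SELECT EXISTS (
            SELECT 1 FROM pg_constraint
            WHERE conname = 'prover_jobs_fri_l1_batch_number_fkey'
        )",
    )
    .fetch_one(&mut conn)
    .await?;

    assert!(!fk_exists, "the foreign key should be dropped by the migration");
    Ok(())
}
```

Applying the `down` migration should flip the check back, since it re-creates the constraint against `l1_batches (number)`.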