diff --git a/core/lib/dal/.sqlx/query-6171f2d631f69dba52cd913742a46bd1b7b9269375f11f050099cb6d3c1427aa.json b/core/lib/dal/.sqlx/query-6171f2d631f69dba52cd913742a46bd1b7b9269375f11f050099cb6d3c1427aa.json new file mode 100644 index 00000000000..84ff845b0d0 --- /dev/null +++ b/core/lib/dal/.sqlx/query-6171f2d631f69dba52cd913742a46bd1b7b9269375f11f050099cb6d3c1427aa.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block AS tx_index_in_block,\n call_trace\n FROM\n call_traces\n INNER JOIN transactions ON tx_hash = transactions.hash\n WHERE\n transactions.miniblock_number = $1\n ORDER BY\n transactions.index_in_block\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "tx_hash", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "tx_index_in_block", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "call_trace", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + true, + false + ] + }, + "hash": "6171f2d631f69dba52cd913742a46bd1b7b9269375f11f050099cb6d3c1427aa" +} diff --git a/core/lib/dal/.sqlx/query-b98e3790de305017c8fa5fba4c0c783b3710ee47f88edce1b17c2b8fa21dadd3.json b/core/lib/dal/.sqlx/query-b98e3790de305017c8fa5fba4c0c783b3710ee47f88edce1b17c2b8fa21dadd3.json deleted file mode 100644 index 81981683e89..00000000000 --- a/core/lib/dal/.sqlx/query-b98e3790de305017c8fa5fba4c0c783b3710ee47f88edce1b17c2b8fa21dadd3.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version\n FROM\n transactions\n INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number\n WHERE\n transactions.hash = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "protocol_version", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [ - "Bytea" - ] - }, - "nullable": [ - true - ] - }, - "hash": "b98e3790de305017c8fa5fba4c0c783b3710ee47f88edce1b17c2b8fa21dadd3" -} diff --git a/core/lib/dal/.sqlx/query-bdfd7e9d4462ac9cf6f91fced84355e6aec05ba4af297a03169e3122a67ae53e.json b/core/lib/dal/.sqlx/query-bdfd7e9d4462ac9cf6f91fced84355e6aec05ba4af297a03169e3122a67ae53e.json new file mode 100644 index 00000000000..0b1f56ef9f3 --- /dev/null +++ b/core/lib/dal/.sqlx/query-bdfd7e9d4462ac9cf6f91fced84355e6aec05ba4af297a03169e3122a67ae53e.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n protocol_version,\n index_in_block\n FROM\n transactions\n INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number\n WHERE\n transactions.hash = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "index_in_block", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Bytea" + ] + }, + "nullable": [ + true, + true + ] + }, + "hash": "bdfd7e9d4462ac9cf6f91fced84355e6aec05ba4af297a03169e3122a67ae53e" +} diff --git a/core/lib/dal/.sqlx/query-c37432fabd092fa235fc70e11430fb28594859564a0f888eae748ad1f9fcede5.json b/core/lib/dal/.sqlx/query-c37432fabd092fa235fc70e11430fb28594859564a0f888eae748ad1f9fcede5.json deleted file mode 100644 index 906cd108140..00000000000 --- a/core/lib/dal/.sqlx/query-c37432fabd092fa235fc70e11430fb28594859564a0f888eae748ad1f9fcede5.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n call_trace\n FROM\n call_traces\n INNER JOIN transactions ON tx_hash = transactions.hash\n WHERE\n 
transactions.miniblock_number = $1\n ORDER BY\n transactions.index_in_block\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "call_trace", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "c37432fabd092fa235fc70e11430fb28594859564a0f888eae748ad1f9fcede5" -} diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 36a4acc0a6d..3d17a919a07 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -527,7 +527,7 @@ impl BlocksWeb3Dal<'_, '_> { pub async fn get_traces_for_l2_block( &mut self, block_number: L2BlockNumber, - ) -> DalResult> { + ) -> DalResult> { let protocol_version = sqlx::query!( r#" SELECT @@ -554,6 +554,8 @@ impl BlocksWeb3Dal<'_, '_> { CallTrace, r#" SELECT + transactions.hash AS tx_hash, + transactions.index_in_block AS tx_index_in_block, call_trace FROM call_traces @@ -570,7 +572,11 @@ impl BlocksWeb3Dal<'_, '_> { .fetch_all(self.storage) .await? .into_iter() - .map(|call_trace| call_trace.into_call(protocol_version)) + .map(|call_trace| { + let hash = H256::from_slice(&call_trace.tx_hash); + let index = call_trace.tx_index_in_block.unwrap_or_default() as usize; + (call_trace.into_call(protocol_version), hash, index) + }) .collect()) } @@ -1084,8 +1090,9 @@ mod tests { .await .unwrap(); assert_eq!(traces.len(), 2); - for (trace, tx_result) in traces.iter().zip(&tx_results) { + for ((trace, hash, _index), tx_result) in traces.iter().zip(&tx_results) { let expected_trace = tx_result.call_trace().unwrap(); + assert_eq!(&tx_result.hash, hash); assert_eq!(*trace, expected_trace); } } diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index 9f67e9025e0..bb219ee1d61 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -561,32 +561,38 @@ impl StorageApiTransaction { #[derive(Debug, Clone, sqlx::FromRow)] pub(crate) struct CallTrace { pub call_trace: Vec, + pub tx_hash: Vec, + pub tx_index_in_block: Option, } impl CallTrace { pub(crate) fn into_call(self, protocol_version: ProtocolVersionId) -> Call { - if protocol_version.is_pre_1_5_0() { - if let Ok(legacy_call_trace) = bincode::deserialize::(&self.call_trace) { - legacy_call_trace.into() - } else { - let legacy_mixed_call_trace = - bincode::deserialize::(&self.call_trace) - .expect("Failed to deserialize call trace"); - legacy_mixed_call_trace.into() - } - } else { - bincode::deserialize(&self.call_trace).unwrap() - } + parse_call_trace(&self.call_trace, protocol_version) } +} - pub(crate) fn from_call(call: Call, protocol_version: ProtocolVersionId) -> Self { - let call_trace = if protocol_version.is_pre_1_5_0() { - bincode::serialize(&LegacyCall::try_from(call).unwrap()) +pub(crate) fn parse_call_trace(call_trace: &[u8], protocol_version: ProtocolVersionId) -> Call { + if protocol_version.is_pre_1_5_0() { + if let Ok(legacy_call_trace) = bincode::deserialize::(call_trace) { + legacy_call_trace.into() } else { - bincode::serialize(&call) + let legacy_mixed_call_trace = bincode::deserialize::(call_trace) + .expect("Failed to deserialize call trace"); + legacy_mixed_call_trace.into() } - .unwrap(); + } else { + bincode::deserialize(call_trace).unwrap() + } +} - Self { call_trace } +pub(crate) fn serialize_call_into_bytes( + call: Call, + protocol_version: ProtocolVersionId, +) -> Vec { + if protocol_version.is_pre_1_5_0() { + 
bincode::serialize(&LegacyCall::try_from(call).unwrap()) + } else { + bincode::serialize(&call) } + .unwrap() } diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index 0a72289b48a..408837d699e 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -20,7 +20,9 @@ use zksync_vm_interface::{ }; use crate::{ - models::storage_transaction::{CallTrace, StorageTransaction}, + models::storage_transaction::{ + parse_call_trace, serialize_call_into_bytes, StorageTransaction, + }, Core, CoreDal, }; @@ -521,8 +523,7 @@ impl TransactionsDal<'_, '_> { let mut bytea_call_traces = Vec::with_capacity(transactions.len()); for tx_res in transactions { if let Some(call_trace) = tx_res.call_trace() { - bytea_call_traces - .push(CallTrace::from_call(call_trace, protocol_version).call_trace); + bytea_call_traces.push(serialize_call_into_bytes(call_trace, protocol_version)); call_traces_tx_hashes.push(tx_res.hash.as_bytes()); } } @@ -2112,11 +2113,12 @@ impl TransactionsDal<'_, '_> { Ok(data) } - pub async fn get_call_trace(&mut self, tx_hash: H256) -> DalResult> { + pub async fn get_call_trace(&mut self, tx_hash: H256) -> DalResult> { let row = sqlx::query!( r#" SELECT - protocol_version + protocol_version, + index_in_block FROM transactions INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number @@ -2139,8 +2141,7 @@ impl TransactionsDal<'_, '_> { .map(|v| (v as u16).try_into().unwrap()) .unwrap_or_else(ProtocolVersionId::last_potentially_undefined); - Ok(sqlx::query_as!( - CallTrace, + Ok(sqlx::query!( r#" SELECT call_trace @@ -2155,7 +2156,12 @@ impl TransactionsDal<'_, '_> { .with_arg("tx_hash", &tx_hash) .fetch_optional(self.storage) .await? - .map(|call_trace| call_trace.into_call(protocol_version))) + .map(|call_trace| { + ( + parse_call_trace(&call_trace.call_trace, protocol_version), + row.index_in_block.unwrap_or_default() as usize, + ) + })) } pub(crate) async fn get_tx_by_hash(&mut self, hash: H256) -> DalResult> { @@ -2227,7 +2233,7 @@ mod tests { .await .unwrap(); - let call_trace = conn + let (call_trace, _) = conn .transactions_dal() .get_call_trace(tx_hash) .await diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index f648204ca55..432b6c309c1 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -13,7 +13,10 @@ use zksync_contracts::BaseSystemContractsHashes; pub use crate::transaction_request::{ Eip712Meta, SerializationTransactionError, TransactionRequest, }; -use crate::{protocol_version::L1VerifierConfig, Address, L2BlockNumber, ProtocolVersionId}; +use crate::{ + debug_flat_call::DebugCallFlat, protocol_version::L1VerifierConfig, Address, L2BlockNumber, + ProtocolVersionId, +}; pub mod en; pub mod state_override; @@ -602,6 +605,7 @@ pub struct ResultDebugCall { } #[derive(Debug, Default, Serialize, Deserialize, Clone, PartialEq)] +#[serde(rename_all = "camelCase")] pub enum DebugCallType { #[default] Call, @@ -701,19 +705,20 @@ impl ProtocolVersion { } } -#[derive(Debug, Serialize, Deserialize, Clone)] +#[derive(Debug, Serialize, Deserialize, Clone, Copy)] #[serde(rename_all = "camelCase")] pub enum SupportedTracers { CallTracer, + FlatCallTracer, } -#[derive(Debug, Serialize, Deserialize, Clone, Default)] +#[derive(Debug, Serialize, Deserialize, Clone, Default, Copy)] #[serde(rename_all = "camelCase")] pub struct CallTracerConfig { pub only_top_call: bool, } -#[derive(Debug, Serialize, Deserialize, Clone)] +#[derive(Debug, Serialize, 
Deserialize, Clone, Copy)] #[serde(rename_all = "camelCase")] pub struct TracerConfig { pub tracer: SupportedTracers, @@ -721,6 +726,17 @@ pub struct TracerConfig { pub tracer_config: CallTracerConfig, } +impl Default for TracerConfig { + fn default() -> Self { + TracerConfig { + tracer: SupportedTracers::CallTracer, + tracer_config: CallTracerConfig { + only_top_call: false, + }, + } + } +} + #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum BlockStatus { @@ -728,6 +744,62 @@ pub enum BlockStatus { Verified, } +/// Result tracers need to have a nested result field for compatibility. So we have two different +/// structs 1 for blocks tracing and one for txs and call tracing +#[derive(Debug, Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum CallTracerBlockResult { + CallTrace(Vec), + FlatCallTrace(Vec), +} + +impl CallTracerBlockResult { + pub fn unwrap_flatten(self) -> Vec { + match self { + Self::CallTrace(_) => { + panic!("Result is a FlatCallTrace") + } + Self::FlatCallTrace(a) => a, + } + } + + pub fn unwrap_default(self) -> Vec { + match self { + Self::CallTrace(a) => a, + Self::FlatCallTrace(_) => { + panic!("Result is a CallTrace") + } + } + } +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum CallTracerResult { + CallTrace(DebugCall), + FlatCallTrace(Vec), +} + +impl CallTracerResult { + pub fn unwrap_flat(self) -> Vec { + match self { + Self::CallTrace(_) => { + panic!("Result is a FlatCallTrace") + } + Self::FlatCallTrace(a) => a, + } + } + + pub fn unwrap_default(self) -> DebugCall { + match self { + Self::CallTrace(a) => a, + Self::FlatCallTrace(_) => { + panic!("Result is a CallTrace") + } + } + } +} + #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct BlockDetailsBase { diff --git a/core/lib/types/src/debug_flat_call.rs b/core/lib/types/src/debug_flat_call.rs index b5c0d79c857..89a008b5fb5 100644 --- a/core/lib/types/src/debug_flat_call.rs +++ b/core/lib/types/src/debug_flat_call.rs @@ -1,26 +1,24 @@ use serde::{Deserialize, Serialize}; use zksync_basic_types::{web3::Bytes, U256}; -use crate::{ - api::{DebugCall, DebugCallType, ResultDebugCall}, - Address, -}; +use crate::{api::DebugCallType, Address, H256}; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct DebugCallFlat { pub action: Action, - pub result: CallResult, + pub result: Option, pub subtraces: usize, - pub traceaddress: Vec, - pub error: Option, - pub revert_reason: Option, + pub trace_address: Vec, + pub transaction_position: usize, + pub transaction_hash: H256, + pub r#type: DebugCallType, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Action { - pub r#type: DebugCallType, + pub call_type: DebugCallType, pub from: Address, pub to: Address, pub gas: U256, @@ -34,222 +32,3 @@ pub struct CallResult { pub output: Bytes, pub gas_used: U256, } - -pub fn flatten_debug_calls(calls: Vec) -> Vec { - let mut flattened_calls = Vec::new(); - for (index, result_debug_call) in calls.into_iter().enumerate() { - let mut trace_address = vec![index]; // Initialize the trace addressees with the index of the top-level call - flatten_call_recursive( - &result_debug_call.result, - &mut flattened_calls, - &mut trace_address, - ); - } - flattened_calls -} - -fn flatten_call_recursive( - call: &DebugCall, - flattened_calls: &mut Vec, - trace_address: &mut Vec, -) { - let flat_call = DebugCallFlat { 
- action: Action { - r#type: call.r#type.clone(), - from: call.from, - to: call.to, - gas: call.gas, - value: call.value, - input: call.input.clone(), - }, - result: CallResult { - output: call.output.clone(), - gas_used: call.gas_used, - }, - subtraces: call.calls.len(), - traceaddress: trace_address.clone(), // Clone the current trace address - error: call.error.clone(), - revert_reason: call.revert_reason.clone(), - }; - flattened_calls.push(flat_call); - - // Process nested calls - for (index, nested_call) in call.calls.iter().enumerate() { - trace_address.push(index); // Update trace addressees for the nested call - flatten_call_recursive(nested_call, flattened_calls, trace_address); - trace_address.pop(); // Reset trace addressees after processing the nested call (prevent to keep filling the vector) - } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::{ - api::{DebugCall, DebugCallType, ResultDebugCall}, - Address, BOOTLOADER_ADDRESS, - }; - - #[test] - fn test_flatten_debug_call() { - let result_debug_trace: Vec = [1, 1] - .map(|_| ResultDebugCall { - result: new_testing_debug_call(), - }) - .into(); - - let debug_call_flat = flatten_debug_calls(result_debug_trace); - let expected_debug_call_flat = expected_flat_trace(); - assert_eq!(debug_call_flat, expected_debug_call_flat); - } - - fn new_testing_debug_call() -> DebugCall { - DebugCall { - r#type: DebugCallType::Call, - from: Address::zero(), - to: BOOTLOADER_ADDRESS, - gas: 1000.into(), - gas_used: 1000.into(), - value: 0.into(), - output: vec![].into(), - input: vec![].into(), - error: None, - revert_reason: None, - calls: new_testing_trace(), - } - } - - fn new_testing_trace() -> Vec { - let first_call_trace = DebugCall { - from: Address::zero(), - to: Address::zero(), - gas: 100.into(), - gas_used: 42.into(), - ..DebugCall::default() - }; - let second_call_trace = DebugCall { - from: Address::zero(), - to: Address::zero(), - value: 123.into(), - gas: 58.into(), - gas_used: 10.into(), - input: Bytes(b"input".to_vec()), - output: Bytes(b"output".to_vec()), - ..DebugCall::default() - }; - [first_call_trace, second_call_trace].into() - } - - fn expected_flat_trace() -> Vec { - [ - DebugCallFlat { - action: Action { - r#type: DebugCallType::Call, - from: Address::zero(), - to: BOOTLOADER_ADDRESS, - gas: 1000.into(), - value: 0.into(), - input: vec![].into(), - }, - result: CallResult { - output: vec![].into(), - gas_used: 1000.into(), - }, - subtraces: 2, - traceaddress: [0].into(), - error: None, - revert_reason: None, - }, - DebugCallFlat { - action: Action { - r#type: DebugCallType::Call, - from: Address::zero(), - to: Address::zero(), - gas: 100.into(), - value: 0.into(), - input: vec![].into(), - }, - result: CallResult { - output: vec![].into(), - gas_used: 42.into(), - }, - subtraces: 0, - traceaddress: [0, 0].into(), - error: None, - revert_reason: None, - }, - DebugCallFlat { - action: Action { - r#type: DebugCallType::Call, - from: Address::zero(), - to: Address::zero(), - gas: 58.into(), - value: 123.into(), - input: b"input".to_vec().into(), - }, - result: CallResult { - output: b"output".to_vec().into(), - gas_used: 10.into(), - }, - subtraces: 0, - traceaddress: [0, 1].into(), - error: None, - revert_reason: None, - }, - DebugCallFlat { - action: Action { - r#type: DebugCallType::Call, - from: Address::zero(), - to: BOOTLOADER_ADDRESS, - gas: 1000.into(), - value: 0.into(), - input: vec![].into(), - }, - result: CallResult { - output: vec![].into(), - gas_used: 1000.into(), - }, - subtraces: 2, - 
traceaddress: [1].into(), - error: None, - revert_reason: None, - }, - DebugCallFlat { - action: Action { - r#type: DebugCallType::Call, - from: Address::zero(), - to: Address::zero(), - gas: 100.into(), - value: 0.into(), - input: vec![].into(), - }, - result: CallResult { - output: vec![].into(), - gas_used: 42.into(), - }, - subtraces: 0, - traceaddress: [1, 0].into(), - error: None, - revert_reason: None, - }, - DebugCallFlat { - action: Action { - r#type: DebugCallType::Call, - from: Address::zero(), - to: Address::zero(), - gas: 58.into(), - value: 123.into(), - input: b"input".to_vec().into(), - }, - result: CallResult { - output: b"output".to_vec().into(), - gas_used: 10.into(), - }, - subtraces: 0, - traceaddress: [1, 1].into(), - error: None, - revert_reason: None, - }, - ] - .into() - } -} diff --git a/core/lib/web3_decl/src/namespaces/debug.rs b/core/lib/web3_decl/src/namespaces/debug.rs index 1fbe3237104..8ca5622e95d 100644 --- a/core/lib/web3_decl/src/namespaces/debug.rs +++ b/core/lib/web3_decl/src/namespaces/debug.rs @@ -2,8 +2,7 @@ use jsonrpsee::core::RpcResult; use jsonrpsee::proc_macros::rpc; use zksync_types::{ - api::{BlockId, BlockNumber, DebugCall, ResultDebugCall, TracerConfig}, - debug_flat_call::DebugCallFlat, + api::{BlockId, BlockNumber, CallTracerBlockResult, CallTracerResult, TracerConfig}, transaction_request::CallRequest, }; @@ -26,21 +25,14 @@ pub trait DebugNamespace { &self, block: BlockNumber, options: Option, - ) -> RpcResult>; - - #[method(name = "traceBlockByNumber.callFlatTracer")] - async fn trace_block_by_number_flat( - &self, - block: BlockNumber, - options: Option, - ) -> RpcResult>; + ) -> RpcResult; #[method(name = "traceBlockByHash")] async fn trace_block_by_hash( &self, hash: H256, options: Option, - ) -> RpcResult>; + ) -> RpcResult; #[method(name = "traceCall")] async fn trace_call( @@ -48,12 +40,12 @@ pub trait DebugNamespace { request: CallRequest, block: Option, options: Option, - ) -> RpcResult; + ) -> RpcResult; #[method(name = "traceTransaction")] async fn trace_transaction( &self, tx_hash: H256, options: Option, - ) -> RpcResult>; + ) -> RpcResult>; } diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/debug.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/debug.rs index 726beae2cc9..50981a2b284 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/debug.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/debug.rs @@ -1,6 +1,5 @@ use zksync_types::{ - api::{BlockId, BlockNumber, DebugCall, ResultDebugCall, TracerConfig}, - debug_flat_call::DebugCallFlat, + api::{BlockId, BlockNumber, CallTracerBlockResult, CallTracerResult, TracerConfig}, transaction_request::CallRequest, H256, }; @@ -17,27 +16,17 @@ impl DebugNamespaceServer for DebugNamespace { &self, block: BlockNumber, options: Option, - ) -> RpcResult> { + ) -> RpcResult { self.debug_trace_block_impl(BlockId::Number(block), options) .await .map_err(|err| self.current_method().map_err(err)) } - async fn trace_block_by_number_flat( - &self, - block: BlockNumber, - options: Option, - ) -> RpcResult> { - self.debug_trace_block_flat_impl(BlockId::Number(block), options) - .await - .map_err(|err| self.current_method().map_err(err)) - } - async fn trace_block_by_hash( &self, hash: H256, options: Option, - ) -> RpcResult> { + ) -> RpcResult { self.debug_trace_block_impl(BlockId::Hash(hash), options) .await .map_err(|err| self.current_method().map_err(err)) @@ -48,7 +37,7 @@ impl DebugNamespaceServer for DebugNamespace 
{ request: CallRequest, block: Option, options: Option, - ) -> RpcResult { + ) -> RpcResult { self.debug_trace_call_impl(request, block, options) .await .map_err(|err| self.current_method().map_err(err)) @@ -58,7 +47,7 @@ impl DebugNamespaceServer for DebugNamespace { &self, tx_hash: H256, options: Option, - ) -> RpcResult> { + ) -> RpcResult> { self.debug_trace_transaction_impl(tx_hash, options) .await .map_err(|err| self.current_method().map_err(err)) diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs index 2c6c70f6faa..68c7951cee4 100644 --- a/core/node/api_server/src/web3/namespaces/debug.rs +++ b/core/node/api_server/src/web3/namespaces/debug.rs @@ -3,8 +3,11 @@ use zksync_dal::{CoreDal, DalError}; use zksync_multivm::interface::{Call, CallType, ExecutionResult, OneshotTracingParams}; use zksync_system_constants::MAX_ENCODED_TX_SIZE; use zksync_types::{ - api::{BlockId, BlockNumber, DebugCall, DebugCallType, ResultDebugCall, TracerConfig}, - debug_flat_call::{flatten_debug_calls, DebugCallFlat}, + api::{ + BlockId, BlockNumber, CallTracerBlockResult, CallTracerResult, DebugCall, DebugCallType, + ResultDebugCall, SupportedTracers, TracerConfig, + }, + debug_flat_call::{Action, CallResult, DebugCallFlat}, fee_model::BatchFeeInput, l2::L2Tx, transaction_request::CallRequest, @@ -42,13 +45,39 @@ impl DebugNamespace { }) } - pub(crate) fn map_call(call: Call, only_top_call: bool) -> DebugCall { + pub(crate) fn map_call( + call: Call, + index: usize, + transaction_hash: H256, + tracer_option: TracerConfig, + ) -> CallTracerResult { + match tracer_option.tracer { + SupportedTracers::CallTracer => CallTracerResult::CallTrace(Self::map_default_call( + call, + tracer_option.tracer_config.only_top_call, + )), + SupportedTracers::FlatCallTracer => { + let mut calls = vec![]; + let mut traces = vec![index]; + Self::flatten_call( + call, + &mut calls, + &mut traces, + tracer_option.tracer_config.only_top_call, + index, + transaction_hash, + ); + CallTracerResult::FlatCallTrace(calls) + } + } + } + pub(crate) fn map_default_call(call: Call, only_top_call: bool) -> DebugCall { let calls = if only_top_call { vec![] } else { call.calls .into_iter() - .map(|call| Self::map_call(call, false)) + .map(|call| Self::map_default_call(call, false)) .collect() }; let debug_type = match call.r#type { @@ -71,6 +100,63 @@ impl DebugNamespace { } } + fn flatten_call( + call: Call, + calls: &mut Vec, + trace_address: &mut Vec, + only_top_call: bool, + transaction_position: usize, + transaction_hash: H256, + ) { + let subtraces = call.calls.len(); + let debug_type = match call.r#type { + CallType::Call(_) => DebugCallType::Call, + CallType::Create => DebugCallType::Create, + CallType::NearCall => unreachable!("We have to filter our near calls before"), + }; + + let result = if call.error.is_none() { + Some(CallResult { + output: web3::Bytes::from(call.output), + gas_used: U256::from(call.gas_used), + }) + } else { + None + }; + + calls.push(DebugCallFlat { + action: Action { + call_type: debug_type, + from: call.from, + to: call.to, + gas: U256::from(call.gas), + value: call.value, + input: web3::Bytes::from(call.input), + }, + result, + subtraces, + trace_address: trace_address.clone(), // Clone the current trace address + transaction_position, + transaction_hash, + r#type: DebugCallType::Call, + }); + + if !only_top_call { + for (number, call) in call.calls.into_iter().enumerate() { + trace_address.push(number); + Self::flatten_call( + call, + calls, + 
trace_address, + false, + transaction_position, + transaction_hash, + ); + trace_address.pop(); + } + } + } + pub(crate) fn current_method(&self) -> &MethodTracer { &self.state.current_method } @@ -79,16 +165,13 @@ impl DebugNamespace { &self, block_id: BlockId, options: Option, - ) -> Result, Web3Error> { + ) -> Result { self.current_method().set_block_id(block_id); if matches!(block_id, BlockId::Number(BlockNumber::Pending)) { // See `EthNamespace::get_block_impl()` for an explanation why this check is needed. - return Ok(vec![]); + return Ok(CallTracerBlockResult::CallTrace(vec![])); } - let only_top_call = options - .map(|options| options.tracer_config.only_top_call) - .unwrap_or(false); let mut connection = self.state.acquire_connection().await?; let block_number = self.state.resolve_block(&mut connection, block_id).await?; self.current_method() @@ -99,41 +182,55 @@ impl DebugNamespace { .get_traces_for_l2_block(block_number) .await .map_err(DalError::generalize)?; - let call_trace = call_traces - .into_iter() - .map(|call_trace| { - let result = Self::map_call(call_trace, only_top_call); - ResultDebugCall { result } - }) - .collect(); - Ok(call_trace) - } - pub async fn debug_trace_block_flat_impl( - &self, - block_id: BlockId, - options: Option, - ) -> Result, Web3Error> { - let call_trace = self.debug_trace_block_impl(block_id, options).await?; - let call_trace_flat = flatten_debug_calls(call_trace); - Ok(call_trace_flat) + let options = options.unwrap_or_default(); + let result = match options.tracer { + SupportedTracers::CallTracer => CallTracerBlockResult::CallTrace( + call_traces + .into_iter() + .map(|(call, _, _)| ResultDebugCall { + result: Self::map_default_call(call, options.tracer_config.only_top_call), + }) + .collect(), + ), + SupportedTracers::FlatCallTracer => { + let mut flat_calls = vec![]; + for (call, tx_hash, tx_index) in call_traces { + let mut traces = vec![tx_index]; + Self::flatten_call( + call, + &mut flat_calls, + &mut traces, + options.tracer_config.only_top_call, + tx_index, + tx_hash, + ); + } + CallTracerBlockResult::FlatCallTrace(flat_calls) + } + }; + Ok(result) } pub async fn debug_trace_transaction_impl( &self, tx_hash: H256, options: Option, - ) -> Result, Web3Error> { - let only_top_call = options - .map(|options| options.tracer_config.only_top_call) - .unwrap_or(false); + ) -> Result, Web3Error> { let mut connection = self.state.acquire_connection().await?; let call_trace = connection .transactions_dal() .get_call_trace(tx_hash) .await .map_err(DalError::generalize)?; - Ok(call_trace.map(|call_trace| Self::map_call(call_trace, only_top_call))) + Ok(call_trace.map(|(call_trace, index_in_block)| { + Self::map_call( + call_trace, + index_in_block, + tx_hash, + options.unwrap_or_default(), + ) + })) } pub async fn debug_trace_call_impl( @@ -141,13 +238,11 @@ impl DebugNamespace { mut request: CallRequest, block_id: Option, options: Option, - ) -> Result { + ) -> Result { let block_id = block_id.unwrap_or(BlockId::Number(BlockNumber::Pending)); self.current_method().set_block_id(block_id); - let only_top_call = options - .map(|options| options.tracer_config.only_top_call) - .unwrap_or(false); + let options = options.unwrap_or_default(); let mut connection = self.state.acquire_connection().await?; let block_args = self @@ -182,7 +277,7 @@ impl DebugNamespace { // We don't need properly trace if we only need top call let tracing_params = OneshotTracingParams { - trace_calls: !only_top_call, + trace_calls: !options.tracer_config.only_top_call, }; let 
connection = self.state.acquire_connection().await?; @@ -212,7 +307,8 @@ impl DebugNamespace { )) } }; - + // It's a call request, it's safe to keep it zero + let hash = H256::zero(); let call = Call::new_high_level( call.common_data.fee.gas_limit.as_u64(), result.vm.statistics.gas_used, @@ -222,6 +318,6 @@ impl DebugNamespace { revert_reason, result.call_traces, ); - Ok(Self::map_call(call, false)) + Ok(Self::map_call(call, 0, hash, options)) } } diff --git a/core/node/api_server/src/web3/tests/debug.rs b/core/node/api_server/src/web3/tests/debug.rs index 76496b42cad..7711570c3c5 100644 --- a/core/node/api_server/src/web3/tests/debug.rs +++ b/core/node/api_server/src/web3/tests/debug.rs @@ -1,7 +1,10 @@ //! Tests for the `debug` Web3 namespace. use zksync_multivm::interface::{Call, TransactionExecutionResult}; -use zksync_types::BOOTLOADER_ADDRESS; +use zksync_types::{ + api::{CallTracerConfig, SupportedTracers, TracerConfig}, + BOOTLOADER_ADDRESS, +}; use zksync_web3_decl::{ client::{DynClient, L2}, namespaces::DebugNamespaceClient, @@ -58,18 +61,19 @@ impl HttpTest for TraceBlockTest { let block_traces = match block_id { api::BlockId::Number(number) => client.trace_block_by_number(number, None).await?, api::BlockId::Hash(hash) => client.trace_block_by_hash(hash, None).await?, - }; + } + .unwrap_default(); assert_eq!(block_traces.len(), tx_results.len()); // equals to the number of transactions in the block for (trace, tx_result) in block_traces.iter().zip(&tx_results) { - let api::ResultDebugCall { result } = trace; + let result = &trace.result; assert_eq!(result.from, Address::zero()); assert_eq!(result.to, BOOTLOADER_ADDRESS); assert_eq!(result.gas, tx_result.transaction.gas_limit()); let expected_calls: Vec<_> = tx_result .call_traces .iter() - .map(|call| DebugNamespace::map_call(call.clone(), false)) + .map(|call| DebugNamespace::map_default_call(call.clone(), false)) .collect(); assert_eq!(result.calls, expected_calls); } @@ -122,7 +126,18 @@ impl HttpTest for TraceBlockFlatTest { for block_id in block_ids { if let api::BlockId::Number(number) = block_id { - let block_traces = client.trace_block_by_number_flat(number, None).await?; + let block_traces = client + .trace_block_by_number( + number, + Some(TracerConfig { + tracer: SupportedTracers::FlatCallTracer, + tracer_config: CallTracerConfig { + only_top_call: false, + }, + }), + ) + .await? + .unwrap_flatten(); // A transaction with 2 nested calls will convert into 3 Flattened calls. 
// Also in this test, all tx have the same # of nested calls @@ -133,10 +148,10 @@ impl HttpTest for TraceBlockFlatTest { // First tx has 2 nested calls, thus 2 sub-traces assert_eq!(block_traces[0].subtraces, 2); - assert_eq!(block_traces[0].traceaddress, [0]); + assert_eq!(block_traces[0].trace_address, [0]); // Second flat-call (fist nested call) do not have nested calls assert_eq!(block_traces[1].subtraces, 0); - assert_eq!(block_traces[1].traceaddress, [0, 0]); + assert_eq!(block_traces[1].trace_address, [0, 0]); let top_level_call_indexes = [0, 3, 6]; let top_level_traces = top_level_call_indexes @@ -157,7 +172,15 @@ impl HttpTest for TraceBlockFlatTest { let missing_block_number = api::BlockNumber::from(*self.0 + 100); let error = client - .trace_block_by_number_flat(missing_block_number, None) + .trace_block_by_number( + missing_block_number, + Some(TracerConfig { + tracer: SupportedTracers::FlatCallTracer, + tracer_config: CallTracerConfig { + only_top_call: false, + }, + }), + ) .await .unwrap_err(); if let ClientError::Call(error) = error { @@ -198,13 +221,14 @@ impl HttpTest for TraceTransactionTest { let expected_calls: Vec<_> = tx_results[0] .call_traces .iter() - .map(|call| DebugNamespace::map_call(call.clone(), false)) + .map(|call| DebugNamespace::map_default_call(call.clone(), false)) .collect(); let result = client .trace_transaction(tx_results[0].hash, None) .await? - .context("no transaction traces")?; + .context("no transaction traces")? + .unwrap_default(); assert_eq!(result.from, Address::zero()); assert_eq!(result.to, BOOTLOADER_ADDRESS); assert_eq!(result.gas, tx_results[0].transaction.gas_limit()); diff --git a/core/node/api_server/src/web3/tests/vm.rs b/core/node/api_server/src/web3/tests/vm.rs index 9bdcf115930..b8f74370303 100644 --- a/core/node/api_server/src/web3/tests/vm.rs +++ b/core/node/api_server/src/web3/tests/vm.rs @@ -488,12 +488,16 @@ impl HttpTest for TraceCallTest { self.fee_input.expect_default(Self::FEE_SCALE); let call_request = CallTest::call_request(b"pending"); - let call_result = client.trace_call(call_request.clone(), None, None).await?; + let call_result = client + .trace_call(call_request.clone(), None, None) + .await? + .unwrap_default(); Self::assert_debug_call(&call_request, &call_result); let pending_block_number = api::BlockId::Number(api::BlockNumber::Pending); let call_result = client .trace_call(call_request.clone(), Some(pending_block_number), None) - .await?; + .await? + .unwrap_default(); Self::assert_debug_call(&call_request, &call_result); let latest_block_numbers = [api::BlockNumber::Latest, 1.into()]; @@ -506,7 +510,8 @@ impl HttpTest for TraceCallTest { Some(api::BlockId::Number(number)), None, ) - .await?; + .await? + .unwrap_default(); Self::assert_debug_call(&call_request, &call_result); } @@ -557,12 +562,16 @@ impl HttpTest for TraceCallTestAfterSnapshotRecovery { ) -> anyhow::Result<()> { self.fee_input.expect_default(TraceCallTest::FEE_SCALE); let call_request = CallTest::call_request(b"pending"); - let call_result = client.trace_call(call_request.clone(), None, None).await?; + let call_result = client + .trace_call(call_request.clone(), None, None) + .await? + .unwrap_default(); TraceCallTest::assert_debug_call(&call_request, &call_result); let pending_block_number = api::BlockId::Number(api::BlockNumber::Pending); let call_result = client .trace_call(call_request.clone(), Some(pending_block_number), None) - .await?; + .await? 
+ .unwrap_default(); TraceCallTest::assert_debug_call(&call_request, &call_result); let first_local_l2_block = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; @@ -584,7 +593,8 @@ impl HttpTest for TraceCallTestAfterSnapshotRecovery { let number = api::BlockId::Number(number); let call_result = client .trace_call(call_request.clone(), Some(number), None) - .await?; + .await? + .unwrap_default(); TraceCallTest::assert_debug_call(&call_request, &call_result); } Ok(()) diff --git a/core/tests/ts-integration/tests/api/debug.test.ts b/core/tests/ts-integration/tests/api/debug.test.ts index 054aa57cf64..2af18c8438b 100644 --- a/core/tests/ts-integration/tests/api/debug.test.ts +++ b/core/tests/ts-integration/tests/api/debug.test.ts @@ -50,7 +50,7 @@ describe('Debug methods', () => { output: '0x', revertReason: 'Error function_selector = 0x, data = 0x', to: BOOTLOADER_FORMAL_ADDRESS, - type: 'Call', + type: 'call', value: expect.any(String), calls: expect.any(Array) }; @@ -75,7 +75,7 @@ describe('Debug methods', () => { input: expect.any(String), output: '0x', to: BOOTLOADER_FORMAL_ADDRESS, - type: 'Call', + type: 'call', value: expect.any(String), calls: expect.any(Array) // We intentionally skip `error` and `revertReason` fields: the block may contain failing txs @@ -99,7 +99,7 @@ describe('Debug methods', () => { output: '0x', revertReason: null, to: BOOTLOADER_FORMAL_ADDRESS, - type: 'Call', + type: 'call', value: '0x0', calls: expect.any(Array) }; diff --git a/prover/crates/bin/prover_fri/src/utils.rs b/prover/crates/bin/prover_fri/src/utils.rs index 181dc857c36..86e6568f8e4 100644 --- a/prover/crates/bin/prover_fri/src/utils.rs +++ b/prover/crates/bin/prover_fri/src/utils.rs @@ -200,7 +200,7 @@ mod tests { round: AggregationRound::BasicCircuits, }; - let result = get_setup_data_key(key.clone()); + let result = get_setup_data_key(key); // Check if the key has remained same assert_eq!(key, result); diff --git a/prover/crates/lib/prover_dal/.sqlx/query-1297f0977132185d6bd4501f490f1cdac8b194f09926c133985479c533a651f2.json b/prover/crates/lib/prover_dal/.sqlx/query-1297f0977132185d6bd4501f490f1cdac8b194f09926c133985479c533a651f2.json new file mode 100644 index 00000000000..c99572bcc8e --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-1297f0977132185d6bd4501f490f1cdac8b194f09926c133985479c533a651f2.json @@ -0,0 +1,18 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE prover_jobs_fri\n SET\n status = $1\n WHERE\n l1_batch_number = $2\n AND sequence_number = $3\n AND aggregation_round = $4\n AND circuit_id = $5\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int8", + "Int4", + "Int2", + "Int2" + ] + }, + "nullable": [] + }, + "hash": "1297f0977132185d6bd4501f490f1cdac8b194f09926c133985479c533a651f2" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-1926cf432237684de2383179a6d0d001cdf5bc7ba988b742571ec90a938434e3.json b/prover/crates/lib/prover_dal/.sqlx/query-1926cf432237684de2383179a6d0d001cdf5bc7ba988b742571ec90a938434e3.json deleted file mode 100644 index 4015a22ff3f..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-1926cf432237684de2383179a6d0d001cdf5bc7ba988b742571ec90a938434e3.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "UPDATE leaf_aggregation_witness_jobs_fri \n SET status = $1, attempts = $2\n WHERE l1_batch_number = $3\n AND circuit_id = $4", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Text", - "Int2", - "Int8", - "Int2" - ] - }, - "nullable": [] - }, - "hash": 
"1926cf432237684de2383179a6d0d001cdf5bc7ba988b742571ec90a938434e3" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-97adb49780c9edde6a3cfda09dadbd694e1781e013247d090a280a1f894de464.json b/prover/crates/lib/prover_dal/.sqlx/query-29f7a564a8373f7e44840e8e9e7d0cd5c6b1122c35d7ffdbbba30327ca3fb5a8.json similarity index 53% rename from prover/crates/lib/prover_dal/.sqlx/query-97adb49780c9edde6a3cfda09dadbd694e1781e013247d090a280a1f894de464.json rename to prover/crates/lib/prover_dal/.sqlx/query-29f7a564a8373f7e44840e8e9e7d0cd5c6b1122c35d7ffdbbba30327ca3fb5a8.json index ce9e492a7d4..05163dcfa2e 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-97adb49780c9edde6a3cfda09dadbd694e1781e013247d090a280a1f894de464.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-29f7a564a8373f7e44840e8e9e7d0cd5c6b1122c35d7ffdbbba30327ca3fb5a8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version AS \"protocol_version!\",\n protocol_version_patch AS \"protocol_version_patch!\",\n COUNT(*) FILTER (WHERE status = 'queued') as queued,\n COUNT(*) FILTER (WHERE status = 'in_progress') as in_progress\n FROM\n prover_jobs_fri\n WHERE\n status IN ('queued', 'in_progress')\n AND protocol_version IS NOT NULL\n GROUP BY\n protocol_version,\n protocol_version_patch\n ", + "query": "\n SELECT\n protocol_version AS \"protocol_version!\",\n protocol_version_patch AS \"protocol_version_patch!\",\n COUNT(*) FILTER (\n WHERE\n status = 'queued'\n ) AS queued,\n COUNT(*) FILTER (\n WHERE\n status = 'in_progress'\n ) AS in_progress\n FROM\n prover_jobs_fri\n WHERE\n status IN ('queued', 'in_progress')\n AND protocol_version IS NOT NULL\n GROUP BY\n protocol_version,\n protocol_version_patch\n ", "describe": { "columns": [ { @@ -34,5 +34,5 @@ null ] }, - "hash": "97adb49780c9edde6a3cfda09dadbd694e1781e013247d090a280a1f894de464" + "hash": "29f7a564a8373f7e44840e8e9e7d0cd5c6b1122c35d7ffdbbba30327ca3fb5a8" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-2d1461e068e43fd16714610b383cf8c93630d529ec96e67aac078f18196f61a5.json b/prover/crates/lib/prover_dal/.sqlx/query-2d1461e068e43fd16714610b383cf8c93630d529ec96e67aac078f18196f61a5.json new file mode 100644 index 00000000000..50d121213fb --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-2d1461e068e43fd16714610b383cf8c93630d529ec96e67aac078f18196f61a5.json @@ -0,0 +1,19 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE prover_jobs_fri\n SET\n status = $1,\n attempts = $2\n WHERE\n l1_batch_number = $3\n AND sequence_number = $4\n AND aggregation_round = $5\n AND circuit_id = $6\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int2", + "Int8", + "Int4", + "Int2", + "Int2" + ] + }, + "nullable": [] + }, + "hash": "2d1461e068e43fd16714610b383cf8c93630d529ec96e67aac078f18196f61a5" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-75c1affbca0901edd5d0e2f12ef4d935674a5aff2f34421d753b4d1a9dea5b12.json b/prover/crates/lib/prover_dal/.sqlx/query-35a76415cb746d03da31481edc65adefab0bf3abf6853a6d36123c8adcaf813b.json similarity index 70% rename from prover/crates/lib/prover_dal/.sqlx/query-75c1affbca0901edd5d0e2f12ef4d935674a5aff2f34421d753b4d1a9dea5b12.json rename to prover/crates/lib/prover_dal/.sqlx/query-35a76415cb746d03da31481edc65adefab0bf3abf6853a6d36123c8adcaf813b.json index 14463ecbe42..bf8db798e7d 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-75c1affbca0901edd5d0e2f12ef4d935674a5aff2f34421d753b4d1a9dea5b12.json +++ 
b/prover/crates/lib/prover_dal/.sqlx/query-35a76415cb746d03da31481edc65adefab0bf3abf6853a6d36123c8adcaf813b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE scheduler_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (status = 'in_progress' OR status = 'failed')\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", + "query": "\n UPDATE scheduler_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -43,5 +43,5 @@ true ] }, - "hash": "75c1affbca0901edd5d0e2f12ef4d935674a5aff2f34421d753b4d1a9dea5b12" + "hash": "35a76415cb746d03da31481edc65adefab0bf3abf6853a6d36123c8adcaf813b" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-548414f8148740c991c345e5fd46ea738d209eb07e7a6bcbdb33e25b3347a08c.json b/prover/crates/lib/prover_dal/.sqlx/query-3727d5614d2fe2a4d96f880eb72cd48c95ca5b4520dde415a2b5ff32ece47c86.json similarity index 70% rename from prover/crates/lib/prover_dal/.sqlx/query-548414f8148740c991c345e5fd46ea738d209eb07e7a6bcbdb33e25b3347a08c.json rename to prover/crates/lib/prover_dal/.sqlx/query-3727d5614d2fe2a4d96f880eb72cd48c95ca5b4520dde415a2b5ff32ece47c86.json index 8f5b046b974..d7eb6a32b42 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-548414f8148740c991c345e5fd46ea738d209eb07e7a6bcbdb33e25b3347a08c.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-3727d5614d2fe2a4d96f880eb72cd48c95ca5b4520dde415a2b5ff32ece47c86.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (status = 'in_progress' OR status = 'failed')\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", + "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -43,5 +43,5 @@ true ] }, - "hash": "548414f8148740c991c345e5fd46ea738d209eb07e7a6bcbdb33e25b3347a08c" + "hash": "3727d5614d2fe2a4d96f880eb72cd48c95ca5b4520dde415a2b5ff32ece47c86" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-c19fc4c8e4b3a3ef4f9c0f4c22ed68c598eada8e60938a8e4b5cd32b53f5a574.json b/prover/crates/lib/prover_dal/.sqlx/query-37ad15f54f4a6f4f79c71a857f3a8d4cc59246dda91b19526e73f27a17c8e3da.json similarity index 69% rename from prover/crates/lib/prover_dal/.sqlx/query-c19fc4c8e4b3a3ef4f9c0f4c22ed68c598eada8e60938a8e4b5cd32b53f5a574.json rename to prover/crates/lib/prover_dal/.sqlx/query-37ad15f54f4a6f4f79c71a857f3a8d4cc59246dda91b19526e73f27a17c8e3da.json index 3c4c8d7a29f..c97fe7f4042 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-c19fc4c8e4b3a3ef4f9c0f4c22ed68c598eada8e60938a8e4b5cd32b53f5a574.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-37ad15f54f4a6f4f79c71a857f3a8d4cc59246dda91b19526e73f27a17c8e3da.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE recursion_tip_witness_jobs_fri\n SET\n status = 'queued',\n 
updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (status = 'in_progress' OR status = 'failed')\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", + "query": "\n UPDATE recursion_tip_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -43,5 +43,5 @@ true ] }, - "hash": "c19fc4c8e4b3a3ef4f9c0f4c22ed68c598eada8e60938a8e4b5cd32b53f5a574" + "hash": "37ad15f54f4a6f4f79c71a857f3a8d4cc59246dda91b19526e73f27a17c8e3da" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-39f60c638d445c5dbf23e01fd89a468057599be1e6c6c96a947c33df53a68224.json b/prover/crates/lib/prover_dal/.sqlx/query-39f60c638d445c5dbf23e01fd89a468057599be1e6c6c96a947c33df53a68224.json deleted file mode 100644 index 5cec4d7d7d0..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-39f60c638d445c5dbf23e01fd89a468057599be1e6c6c96a947c33df53a68224.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n recursion_tip_witness_jobs_fri (\n l1_batch_number,\n status,\n number_of_final_node_jobs,\n created_at,\n updated_at\n )\n VALUES\n ($1, 'waiting_for_proofs',1, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET status = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Text" - ] - }, - "nullable": [] - }, - "hash": "39f60c638d445c5dbf23e01fd89a468057599be1e6c6c96a947c33df53a68224" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-3a9ffd4d88f2cfac22835aac2512e61157bf58aec70903623afc9da24d46a336.json b/prover/crates/lib/prover_dal/.sqlx/query-3a9ffd4d88f2cfac22835aac2512e61157bf58aec70903623afc9da24d46a336.json deleted file mode 100644 index 063ae8fc90a..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-3a9ffd4d88f2cfac22835aac2512e61157bf58aec70903623afc9da24d46a336.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n node_aggregation_witness_jobs_fri (\n l1_batch_number,\n circuit_id,\n status,\n created_at,\n updated_at\n )\n VALUES\n ($1, $2, 'waiting_for_proofs', NOW(), NOW())\n ON CONFLICT (l1_batch_number, circuit_id, depth) DO\n UPDATE\n SET status = $3\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int2", - "Text" - ] - }, - "nullable": [] - }, - "hash": "3a9ffd4d88f2cfac22835aac2512e61157bf58aec70903623afc9da24d46a336" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-3bb8fbd9e83703887e0a3c196031b931c0d8dbc6835dfac20107ea7412ce9fbb.json b/prover/crates/lib/prover_dal/.sqlx/query-3bb8fbd9e83703887e0a3c196031b931c0d8dbc6835dfac20107ea7412ce9fbb.json deleted file mode 100644 index 69390508415..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-3bb8fbd9e83703887e0a3c196031b931c0d8dbc6835dfac20107ea7412ce9fbb.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n proof_compression_jobs_fri (\n l1_batch_number,\n status,\n created_at,\n updated_at\n )\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET status = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Text" - ] - }, - "nullable": [] - }, - "hash": "3bb8fbd9e83703887e0a3c196031b931c0d8dbc6835dfac20107ea7412ce9fbb" -} diff 
--git a/prover/crates/lib/prover_dal/.sqlx/query-434f7cb51a7d22948cd26e962679a67936d572f8046d3a1c7a4f100ff209d81d.json b/prover/crates/lib/prover_dal/.sqlx/query-434f7cb51a7d22948cd26e962679a67936d572f8046d3a1c7a4f100ff209d81d.json deleted file mode 100644 index 7615523f92f..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-434f7cb51a7d22948cd26e962679a67936d572f8046d3a1c7a4f100ff209d81d.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "UPDATE prover_jobs_fri SET status = $1\n WHERE l1_batch_number = $2\n AND sequence_number = $3\n AND aggregation_round = $4\n AND circuit_id = $5", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Text", - "Int8", - "Int4", - "Int2", - "Int2" - ] - }, - "nullable": [] - }, - "hash": "434f7cb51a7d22948cd26e962679a67936d572f8046d3a1c7a4f100ff209d81d" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-73266a8526c6adc315900e2e95441976a264759c4060c1a38e466ee2052fc17d.json b/prover/crates/lib/prover_dal/.sqlx/query-73266a8526c6adc315900e2e95441976a264759c4060c1a38e466ee2052fc17d.json new file mode 100644 index 00000000000..f8b141a8dac --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-73266a8526c6adc315900e2e95441976a264759c4060c1a38e466ee2052fc17d.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n recursion_tip_witness_jobs_fri (l1_batch_number, status, number_of_final_node_jobs, created_at, updated_at)\n VALUES\n ($1, 'waiting_for_proofs', 1, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n status = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + }, + "nullable": [] + }, + "hash": "73266a8526c6adc315900e2e95441976a264759c4060c1a38e466ee2052fc17d" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-9730c8225ff2cf3111185e81f602a4a98ec63eb942c73ce4448d0957346047cd.json b/prover/crates/lib/prover_dal/.sqlx/query-9730c8225ff2cf3111185e81f602a4a98ec63eb942c73ce4448d0957346047cd.json new file mode 100644 index 00000000000..d23ed8d9fc8 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-9730c8225ff2cf3111185e81f602a4a98ec63eb942c73ce4448d0957346047cd.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET\n status = $1,\n attempts = $2\n WHERE\n l1_batch_number = $3\n AND circuit_id = $4\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int2", + "Int8", + "Int2" + ] + }, + "nullable": [] + }, + "hash": "9730c8225ff2cf3111185e81f602a4a98ec63eb942c73ce4448d0957346047cd" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-a817f0fec85388b3e2510ce259208a01b63ae4aa03c983c3a52c802d585e5a80.json b/prover/crates/lib/prover_dal/.sqlx/query-a817f0fec85388b3e2510ce259208a01b63ae4aa03c983c3a52c802d585e5a80.json new file mode 100644 index 00000000000..93532150f7f --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-a817f0fec85388b3e2510ce259208a01b63ae4aa03c983c3a52c802d585e5a80.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n proof_compression_jobs_fri (l1_batch_number, status, created_at, updated_at)\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n status = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + }, + "nullable": [] + }, + "hash": "a817f0fec85388b3e2510ce259208a01b63ae4aa03c983c3a52c802d585e5a80" +} diff --git 
a/prover/crates/lib/prover_dal/.sqlx/query-aabcfa9005b8e1d84cfa083a47a700302981be0afef31a8864613484f8521f9e.json b/prover/crates/lib/prover_dal/.sqlx/query-aabcfa9005b8e1d84cfa083a47a700302981be0afef31a8864613484f8521f9e.json deleted file mode 100644 index 3d60050c92e..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-aabcfa9005b8e1d84cfa083a47a700302981be0afef31a8864613484f8521f9e.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "UPDATE prover_jobs_fri \n SET status = $1, attempts = $2\n WHERE l1_batch_number = $3\n AND sequence_number =$4\n AND aggregation_round = $5\n AND circuit_id = $6", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Text", - "Int2", - "Int8", - "Int4", - "Int2", - "Int2" - ] - }, - "nullable": [] - }, - "hash": "aabcfa9005b8e1d84cfa083a47a700302981be0afef31a8864613484f8521f9e" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-c8daa62b3835c15fafb3f83deabb5a4672ad50a9de92c84d939ac4c69842e355.json b/prover/crates/lib/prover_dal/.sqlx/query-c8daa62b3835c15fafb3f83deabb5a4672ad50a9de92c84d939ac4c69842e355.json new file mode 100644 index 00000000000..cadc931fa1c --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-c8daa62b3835c15fafb3f83deabb5a4672ad50a9de92c84d939ac4c69842e355.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n node_aggregation_witness_jobs_fri (l1_batch_number, circuit_id, status, created_at, updated_at)\n VALUES\n ($1, $2, 'waiting_for_proofs', NOW(), NOW())\n ON CONFLICT (l1_batch_number, circuit_id, depth) DO\n UPDATE\n SET\n status = $3\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int2", + "Text" + ] + }, + "nullable": [] + }, + "hash": "c8daa62b3835c15fafb3f83deabb5a4672ad50a9de92c84d939ac4c69842e355" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-63cf7038e6c48af8ed9afc7d6ea07edd87cb16a79c13e7d4291d99736e51d3b9.json b/prover/crates/lib/prover_dal/.sqlx/query-e875dcbbdaed6998dbea45d4eab5d005d8760c4809b7aef902155196873da66e.json similarity index 82% rename from prover/crates/lib/prover_dal/.sqlx/query-63cf7038e6c48af8ed9afc7d6ea07edd87cb16a79c13e7d4291d99736e51d3b9.json rename to prover/crates/lib/prover_dal/.sqlx/query-e875dcbbdaed6998dbea45d4eab5d005d8760c4809b7aef902155196873da66e.json index 208b23d939f..4ee9278fe42 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-63cf7038e6c48af8ed9afc7d6ea07edd87cb16a79c13e7d4291d99736e51d3b9.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-e875dcbbdaed6998dbea45d4eab5d005d8760c4809b7aef902155196873da66e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n scheduler_witness_jobs_fri (\n l1_batch_number,\n scheduler_partial_input_blob_url,\n status,\n created_at,\n updated_at\n )\n VALUES\n ($1, '', 'waiting_for_proofs', NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET status = $2\n ", + "query": "\n INSERT INTO\n scheduler_witness_jobs_fri (\n l1_batch_number,\n scheduler_partial_input_blob_url,\n status,\n created_at,\n updated_at\n )\n VALUES\n ($1, '', 'waiting_for_proofs', NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n status = $2\n ", "describe": { "columns": [], "parameters": { @@ -11,5 +11,5 @@ }, "nullable": [] }, - "hash": "63cf7038e6c48af8ed9afc7d6ea07edd87cb16a79c13e7d4291d99736e51d3b9" + "hash": "e875dcbbdaed6998dbea45d4eab5d005d8760c4809b7aef902155196873da66e" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-0eac6f7b2d799059328584029b437891598dc79b5ed11258b2c90c3f282929ad.json 
b/prover/crates/lib/prover_dal/.sqlx/query-eec29cbff034818f4fb5ec1e6ad38e1010d7389457b3c97e9b238a3a0291a54e.json similarity index 78% rename from prover/crates/lib/prover_dal/.sqlx/query-0eac6f7b2d799059328584029b437891598dc79b5ed11258b2c90c3f282929ad.json rename to prover/crates/lib/prover_dal/.sqlx/query-eec29cbff034818f4fb5ec1e6ad38e1010d7389457b3c97e9b238a3a0291a54e.json index 61518273b4d..f8e92b1ad66 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-0eac6f7b2d799059328584029b437891598dc79b5ed11258b2c90c3f282929ad.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-eec29cbff034818f4fb5ec1e6ad38e1010d7389457b3c97e9b238a3a0291a54e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n leaf_aggregation_witness_jobs_fri (\n l1_batch_number,\n circuit_id,\n status,\n number_of_basic_circuits,\n created_at,\n updated_at\n )\n VALUES\n ($1, $2, 'waiting_for_proofs', 2, NOW(), NOW())\n ON CONFLICT (l1_batch_number, circuit_id) DO\n UPDATE\n SET status = $3\n ", + "query": "\n INSERT INTO\n leaf_aggregation_witness_jobs_fri (\n l1_batch_number,\n circuit_id,\n status,\n number_of_basic_circuits,\n created_at,\n updated_at\n )\n VALUES\n ($1, $2, 'waiting_for_proofs', 2, NOW(), NOW())\n ON CONFLICT (l1_batch_number, circuit_id) DO\n UPDATE\n SET\n status = $3\n ", "describe": { "columns": [], "parameters": { @@ -12,5 +12,5 @@ }, "nullable": [] }, - "hash": "0eac6f7b2d799059328584029b437891598dc79b5ed11258b2c90c3f282929ad" + "hash": "eec29cbff034818f4fb5ec1e6ad38e1010d7389457b3c97e9b238a3a0291a54e" } diff --git a/prover/crates/lib/prover_dal/src/cli_test_dal.rs b/prover/crates/lib/prover_dal/src/cli_test_dal.rs index 19fe0e4f57b..d0841820337 100644 --- a/prover/crates/lib/prover_dal/src/cli_test_dal.rs +++ b/prover/crates/lib/prover_dal/src/cli_test_dal.rs @@ -21,11 +21,16 @@ impl CliTestDal<'_, '_> { sequence_number: usize, ) { sqlx::query!( - "UPDATE prover_jobs_fri SET status = $1 - WHERE l1_batch_number = $2 + r#" + UPDATE prover_jobs_fri + SET + status = $1 + WHERE + l1_batch_number = $2 AND sequence_number = $3 AND aggregation_round = $4 - AND circuit_id = $5", + AND circuit_id = $5 + "#, status.to_string(), batch_number.0 as i64, sequence_number as i64, @@ -44,7 +49,7 @@ impl CliTestDal<'_, '_> { circuit_id: u8, ) { sqlx::query!( - " + r#" INSERT INTO leaf_aggregation_witness_jobs_fri ( l1_batch_number, @@ -58,8 +63,9 @@ impl CliTestDal<'_, '_> { ($1, $2, 'waiting_for_proofs', 2, NOW(), NOW()) ON CONFLICT (l1_batch_number, circuit_id) DO UPDATE - SET status = $3 - ", + SET + status = $3 + "#, batch_number.0 as i64, circuit_id as i16, status.to_string() @@ -76,21 +82,16 @@ impl CliTestDal<'_, '_> { circuit_id: u8, ) { sqlx::query!( - " + r#" INSERT INTO - node_aggregation_witness_jobs_fri ( - l1_batch_number, - circuit_id, - status, - created_at, - updated_at - ) + node_aggregation_witness_jobs_fri (l1_batch_number, circuit_id, status, created_at, updated_at) VALUES ($1, $2, 'waiting_for_proofs', NOW(), NOW()) ON CONFLICT (l1_batch_number, circuit_id, depth) DO UPDATE - SET status = $3 - ", + SET + status = $3 + "#, batch_number.0 as i64, circuit_id as i16, status.to_string(), @@ -102,21 +103,16 @@ impl CliTestDal<'_, '_> { pub async fn insert_rt_job(&mut self, status: WitnessJobStatus, batch_number: L1BatchNumber) { sqlx::query!( - " + r#" INSERT INTO - recursion_tip_witness_jobs_fri ( - l1_batch_number, - status, - number_of_final_node_jobs, - created_at, - updated_at - ) + recursion_tip_witness_jobs_fri (l1_batch_number, status, number_of_final_node_jobs, 
created_at, updated_at) VALUES - ($1, 'waiting_for_proofs',1, NOW(), NOW()) + ($1, 'waiting_for_proofs', 1, NOW(), NOW()) ON CONFLICT (l1_batch_number) DO UPDATE - SET status = $2 - ", + SET + status = $2 + "#, batch_number.0 as i64, status.to_string(), ) @@ -131,7 +127,7 @@ impl CliTestDal<'_, '_> { batch_number: L1BatchNumber, ) { sqlx::query!( - " + r#" INSERT INTO scheduler_witness_jobs_fri ( l1_batch_number, @@ -144,8 +140,9 @@ impl CliTestDal<'_, '_> { ($1, '', 'waiting_for_proofs', NOW(), NOW()) ON CONFLICT (l1_batch_number) DO UPDATE - SET status = $2 - ", + SET + status = $2 + "#, batch_number.0 as i64, status.to_string(), ) @@ -160,20 +157,16 @@ impl CliTestDal<'_, '_> { batch_number: L1BatchNumber, ) { sqlx::query!( - " + r#" INSERT INTO - proof_compression_jobs_fri ( - l1_batch_number, - status, - created_at, - updated_at - ) + proof_compression_jobs_fri (l1_batch_number, status, created_at, updated_at) VALUES ($1, $2, NOW(), NOW()) ON CONFLICT (l1_batch_number) DO UPDATE - SET status = $2 - ", + SET + status = $2 + "#, batch_number.0 as i64, status.to_string(), ) @@ -192,12 +185,17 @@ impl CliTestDal<'_, '_> { sequence_number: usize, ) { sqlx::query!( - "UPDATE prover_jobs_fri - SET status = $1, attempts = $2 - WHERE l1_batch_number = $3 - AND sequence_number =$4 + r#" + UPDATE prover_jobs_fri + SET + status = $1, + attempts = $2 + WHERE + l1_batch_number = $3 + AND sequence_number = $4 AND aggregation_round = $5 - AND circuit_id = $6", + AND circuit_id = $6 + "#, status.to_string(), attempts as i64, batch_number.0 as i64, @@ -218,10 +216,15 @@ impl CliTestDal<'_, '_> { batch_number: L1BatchNumber, ) { sqlx::query!( - "UPDATE leaf_aggregation_witness_jobs_fri - SET status = $1, attempts = $2 - WHERE l1_batch_number = $3 - AND circuit_id = $4", + r#" + UPDATE leaf_aggregation_witness_jobs_fri + SET + status = $1, + attempts = $2 + WHERE + l1_batch_number = $3 + AND circuit_id = $4 + "#, status.to_string(), attempts as i64, batch_number.0 as i64, diff --git a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs index 1a3b8de0ce4..71d0c11728b 100644 --- a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs @@ -528,8 +528,14 @@ impl FriProverDal<'_, '_> { SELECT protocol_version AS "protocol_version!", protocol_version_patch AS "protocol_version_patch!", - COUNT(*) FILTER (WHERE status = 'queued') as queued, - COUNT(*) FILTER (WHERE status = 'in_progress') as in_progress + COUNT(*) FILTER ( + WHERE + status = 'queued' + ) AS queued, + COUNT(*) FILTER ( + WHERE + status = 'in_progress' + ) AS in_progress FROM prover_jobs_fri WHERE diff --git a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs index 66e34f7f8e7..c7ba0f60ef3 100644 --- a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs @@ -1719,7 +1719,10 @@ impl FriWitnessGeneratorDal<'_, '_> { WHERE l1_batch_number = $1 AND attempts >= $2 - AND (status = 'in_progress' OR status = 'failed') + AND ( + status = 'in_progress' + OR status = 'failed' + ) RETURNING l1_batch_number, status, @@ -1786,7 +1789,10 @@ impl FriWitnessGeneratorDal<'_, '_> { WHERE l1_batch_number = $1 AND attempts >= $2 - AND (status = 'in_progress' OR status = 'failed') + AND ( + status = 'in_progress' + OR status = 'failed' + ) RETURNING l1_batch_number, status, @@ -1827,7 +1833,10 @@ impl 
FriWitnessGeneratorDal<'_, '_> { WHERE l1_batch_number = $1 AND attempts >= $2 - AND (status = 'in_progress' OR status = 'failed') + AND ( + status = 'in_progress' + OR status = 'failed' + ) RETURNING l1_batch_number, status, diff --git a/yarn.lock b/yarn.lock index b70e64f148a..3c764c7c7b7 100644 --- a/yarn.lock +++ b/yarn.lock @@ -6903,7 +6903,7 @@ jest-each@^29.7.0: jest-util "^29.7.0" pretty-format "^29.7.0" -jest-environment-node@^29.7.0: +jest-environment-node@^29.0.3, jest-environment-node@^29.7.0: version "29.7.0" resolved "https://registry.yarnpkg.com/jest-environment-node/-/jest-environment-node-29.7.0.tgz#0b93e111dda8ec120bc8300e6d1fb9576e164376" integrity sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==
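
A minimal, hypothetical usage sketch (not part of the patch) of the flat call tracer introduced above, mirroring the updated TraceBlockFlatTest: the dedicated debug_traceBlockByNumber.callFlatTracer method is removed, so flat traces are now requested by passing SupportedTracers::FlatCallTracer inside TracerConfig. The client value, the anyhow crate, and the async runtime are assumed; any L2 JSON-RPC client implementing DebugNamespaceClient from zksync_web3_decl would do.

use zksync_types::api::{BlockNumber, CallTracerConfig, SupportedTracers, TracerConfig};
use zksync_web3_decl::namespaces::DebugNamespaceClient;

// Sketch: request flat call traces for the latest block and print where each
// entry sits in its transaction's call tree.
async fn print_flat_traces(client: &(impl DebugNamespaceClient + Sync)) -> anyhow::Result<()> {
    let traces = client
        .trace_block_by_number(
            BlockNumber::Latest,
            Some(TracerConfig {
                tracer: SupportedTracers::FlatCallTracer,
                tracer_config: CallTracerConfig {
                    only_top_call: false,
                },
            }),
        )
        .await?
        // Panics if the server answered with the nested `CallTrace` variant instead.
        .unwrap_flatten();

    for trace in traces {
        println!(
            "tx {:?} (position {}) -> trace_address {:?}, subtraces {}",
            trace.transaction_hash,
            trace.transaction_position,
            trace.trace_address,
            trace.subtraces
        );
    }
    Ok(())
}

As in the updated test, a transaction with two nested calls yields three flat entries whose trace_address values are [i], [i, 0] and [i, 1], where i is the transaction's index_in_block now carried through get_traces_for_l2_block.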