From 15c9cf631f536baf6ed68d91e097298136225aad Mon Sep 17 00:00:00 2001 From: Marko Atanasievski Date: Mon, 1 Jul 2024 20:26:45 +0200 Subject: [PATCH 1/6] fix: docker build for worker and leader (#329) * fix: docker build for worker and leader * fix: missing env and cargo lock search path * add: build and push leader and worker docker images to ghcr.io * feat: add docker build test * fix: change image names * fix: remove debug build and push * fix: add eof line * fix: comment * fix: build --- .github/workflows/docker_build.yml | 35 +++++++++++ .github/workflows/docker_build_push.yml | 83 +++++++++++++++++++++++++ leader.Dockerfile | 60 ++++++++++++++++++ worker.Dockerfile | 40 ++++++++++++ zero_bin/leader.Dockerfile | 43 ------------- zero_bin/worker.Dockerfile | 35 ----------- 6 files changed, 218 insertions(+), 78 deletions(-) create mode 100644 .github/workflows/docker_build.yml create mode 100644 .github/workflows/docker_build_push.yml create mode 100644 leader.Dockerfile create mode 100644 worker.Dockerfile delete mode 100644 zero_bin/leader.Dockerfile delete mode 100644 zero_bin/worker.Dockerfile diff --git a/.github/workflows/docker_build.yml b/.github/workflows/docker_build.yml new file mode 100644 index 000000000..8d74a2fa4 --- /dev/null +++ b/.github/workflows/docker_build.yml @@ -0,0 +1,35 @@ +name: Docker Build & Run + +on: + push: + branches: [develop, main] + pull_request: + branches: + - "**" + workflow_dispatch: + branches: + - "**" + +jobs: + docker: + name: Build and run leader and worker docker images for regression check + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Build leader docker container + run: | + docker build --progress plain -t leader:${{ github.ref_name }} -f leader.Dockerfile . + + - name: Run leader docker container + run: | + docker run --rm leader:${{ github.ref_name }} --help + + - name: Build worker docker container + run: | + docker build --progress plain -t worker:${{ github.ref_name }} -f worker.Dockerfile . 
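Each build step is paired with a `--help` run (the worker's follows below) as a cheap regression check that the binary links and starts. The same check can be reproduced locally; a sketch, with illustrative image tags:

    docker build --progress plain -t leader:local -f leader.Dockerfile .
    docker run --rm leader:local --help
    docker build --progress plain -t worker:local -f worker.Dockerfile .
    docker run --rm worker:local --help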
+ + - name: Run worker docker container + run: | + docker run --rm worker:${{ github.ref_name }} --help diff --git a/.github/workflows/docker_build_push.yml b/.github/workflows/docker_build_push.yml new file mode 100644 index 000000000..112ea158e --- /dev/null +++ b/.github/workflows/docker_build_push.yml @@ -0,0 +1,83 @@ +name: Docker Build & Push + +on: + push: + branches: [develop, main] + release: + types: [created] + +env: + REGISTRY: ghcr.io + IMAGE_NAME_LEADER: ${{ github.repository }}-leader + IMAGE_NAME_WORKER: ${{ github.repository }}-worker + +jobs: + docker: + name: Build and push leader and worker docker images to GitHub Container Registry + runs-on: ubuntu-latest + permissions: + packages: write + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata (tags, labels) for Leader Docker + id: meta_leader + uses: docker/metadata-action@v5 + with: + images: | + name=${{ env.REGISTRY }}/${{ env.IMAGE_NAME_LEADER }} + tags: | + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + + - name: Push to GitHub Container Registry - Leader + uses: docker/build-push-action@v3 + with: + context: . + file: ./leader.Dockerfile + push: true + # platforms: linux/amd64,linux/arm64 + tags: ${{ steps.meta_leader.outputs.tags }} + labels: ${{ steps.meta_leader.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Extract metadata (tags, labels) for Worker Docker + id: meta_worker + uses: docker/metadata-action@v5 + with: + images: | + name=${{ env.REGISTRY }}/${{ env.IMAGE_NAME_WORKER }} + tags: | + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + + - name: Push to GitHub Container Registry - Worker + uses: docker/build-push-action@v3 + with: + context: . + file: ./worker.Dockerfile + push: true + # platforms: linux/amd64,linux/arm64 + tags: ${{ steps.meta_worker.outputs.tags }} + labels: ${{ steps.meta_worker.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/leader.Dockerfile b/leader.Dockerfile new file mode 100644 index 000000000..ef69a430f --- /dev/null +++ b/leader.Dockerfile @@ -0,0 +1,60 @@ +FROM rustlang/rust:nightly-bullseye-slim as builder + +RUN apt-get update && apt-get install -y libjemalloc2 libjemalloc-dev make libssl-dev pkg-config + +RUN mkdir -p zero_bin +COPY Cargo.toml . +# Cleanup all workspace members and add selected crates again +RUN sed -i '/members =/{:a;N;/]/!ba};//d' Cargo.toml +RUN sed -i 's#\[workspace\]#\[workspace\]\nmembers = \["zero_bin\/leader", "zero_bin\/prover", "zero_bin\/rpc", "zero_bin\/common", \ + "zero_bin\/ops"\, "evm_arithmetization", "trace_decoder", "mpt_trie", "proc_macro", "compat"\]#' Cargo.toml +COPY Cargo.lock . 
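The two `sed` commands above first blank out the workspace `members` array, then splice back only the crates the leader image needs, so the build context can omit the rest of the workspace. After the rewrite, the Cargo.toml copied into the image is expected to begin roughly like this (a sketch of the generated section, not a file in the repository):

    [workspace]
    members = ["zero_bin/leader", "zero_bin/prover", "zero_bin/rpc", "zero_bin/common",
               "zero_bin/ops", "evm_arithmetization", "trace_decoder", "mpt_trie",
               "proc_macro", "compat"]

worker.Dockerfile below applies the same trick with a shorter member list.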
+COPY ./rust-toolchain.toml ./ +RUN cat ./Cargo.toml +COPY ./.env ./.env + +COPY proof_gen proof_gen +COPY mpt_trie mpt_trie +COPY proc_macro proc_macro +COPY compat compat +COPY trace_decoder trace_decoder +COPY evm_arithmetization evm_arithmetization +COPY zero_bin/common zero_bin/common +COPY zero_bin/ops zero_bin/ops +COPY zero_bin/rpc zero_bin/rpc +COPY zero_bin/prover zero_bin/prover +COPY zero_bin/leader zero_bin/leader + + +RUN \ + touch zero_bin/common/src/lib.rs && \ + touch zero_bin/ops/src/lib.rs && \ + touch zero_bin/leader/src/main.rs && \ + touch zero_bin/rpc/src/lib.rs && \ + touch zero_bin/prover/src/lib.rs && \ + touch evm_arithmetization/src/lib.rs && \ + touch trace_decoder/src/lib.rs && \ + touch mpt_trie/src/lib.rs && \ + touch proc_macro/src/lib.rs && \ + touch compat/src/lib.rs + +# Disable the lld linker for now, as it's causing issues with the linkme package. +# https://github.com/rust-lang/rust/pull/124129 +# https://github.com/dtolnay/linkme/pull/88 +ENV RUSTFLAGS='-C target-cpu=native -Zlinker-features=-lld' + +RUN cargo build --release --bin leader +RUN cargo build --release --bin rpc + + +FROM debian:bullseye-slim +RUN apt-get update && apt-get install -y ca-certificates libjemalloc2 +COPY --from=builder ./target/release/leader /usr/local/bin/leader +COPY --from=builder ./target/release/rpc /usr/local/bin/rpc +COPY --from=builder ./.env /.env + +# Workaround for the issue with the Cargo.lock search path +# Related to issue https://github.com/0xPolygonZero/zk_evm/issues/311 +RUN mkdir -p zero_bin/leader + +ENTRYPOINT ["/usr/local/bin/leader"] diff --git a/worker.Dockerfile b/worker.Dockerfile new file mode 100644 index 000000000..865fb7bb6 --- /dev/null +++ b/worker.Dockerfile @@ -0,0 +1,40 @@ +FROM rustlang/rust:nightly-bullseye-slim as builder + +RUN apt-get update && apt-get install -y libjemalloc2 libjemalloc-dev make libssl-dev pkg-config + +RUN mkdir -p zero_bin +COPY Cargo.toml . +# Cleanup all workspace members and add selected crates again +RUN sed -i '/members =/{:a;N;/]/!ba};//d' Cargo.toml +RUN sed -i 's#\[workspace\]#\[workspace\]\nmembers = \["zero_bin\/worker", "zero_bin\/common", "zero_bin\/ops"\, "evm_arithmetization", "mpt_trie", "proc_macro"\]#' Cargo.toml +COPY Cargo.lock . +COPY ./rust-toolchain.toml ./ + +COPY proof_gen proof_gen +COPY mpt_trie mpt_trie +COPY evm_arithmetization evm_arithmetization +COPY proc_macro proc_macro +COPY zero_bin/common zero_bin/common +COPY zero_bin/ops zero_bin/ops +COPY zero_bin/worker zero_bin/worker + +RUN \ + touch zero_bin/common/src/lib.rs && \ + touch zero_bin/ops/src/lib.rs && \ + touch zero_bin/worker/src/main.rs && \ + touch evm_arithmetization/src/lib.rs && \ + touch mpt_trie/src/lib.rs && \ + touch proc_macro/src/lib.rs + +# Disable the lld linker for now, as it's causing issues with the linkme package. 
+# https://github.com/rust-lang/rust/pull/124129 +# https://github.com/dtolnay/linkme/pull/88 +ENV RUSTFLAGS='-C target-cpu=native -Zlinker-features=-lld' + +RUN cargo build --release --bin worker + +FROM debian:bullseye-slim +RUN apt-get update && apt-get install -y ca-certificates libjemalloc2 +COPY --from=builder ./target/release/worker /usr/local/bin/worker +ENTRYPOINT ["/usr/local/bin/worker"] + diff --git a/zero_bin/leader.Dockerfile b/zero_bin/leader.Dockerfile deleted file mode 100644 index 7e0eb6e8c..000000000 --- a/zero_bin/leader.Dockerfile +++ /dev/null @@ -1,43 +0,0 @@ -FROM rustlang/rust:nightly-bullseye-slim as builder - -RUN apt-get update && apt-get install -y libjemalloc2 libjemalloc-dev make libssl-dev pkg-config - -RUN \ - mkdir -p ops/src && touch ops/src/lib.rs && \ - mkdir -p common/src && touch common/src/lib.rs && \ - mkdir -p rpc/src && touch rpc/src/lib.rs && \ - mkdir -p prover/src && touch prover/src/lib.rs && \ - mkdir -p leader/src && echo "fn main() {println!(\"YO!\");}" > leader/src/main.rs - -COPY Cargo.toml . -RUN sed -i "2s/.*/members = [\"ops\", \"leader\", \"common\", \"rpc\", \"prover\"]/" Cargo.toml -COPY Cargo.lock . - -COPY ops/Cargo.toml ./ops/Cargo.toml -COPY common/Cargo.toml ./common/Cargo.toml -COPY rpc/Cargo.toml ./rpc/Cargo.toml -COPY prover/Cargo.toml ./prover/Cargo.toml -COPY leader/Cargo.toml ./leader/Cargo.toml - -COPY ./rust-toolchain.toml ./ - -RUN cargo build --release --bin leader - -COPY ops ./ops -COPY common ./common -COPY rpc ./rpc -COPY prover ./prover -COPY leader ./leader -RUN \ - touch ops/src/lib.rs && \ - touch common/src/lib.rs && \ - touch rpc/src/lib.rs && \ - touch prover/src/lib.rs && \ - touch leader/src/main.rs - -RUN cargo build --release --bin leader - -FROM debian:bullseye-slim -RUN apt-get update && apt-get install -y ca-certificates libjemalloc2 -COPY --from=builder ./target/release/leader /usr/local/bin/leader -CMD ["leader"] diff --git a/zero_bin/worker.Dockerfile b/zero_bin/worker.Dockerfile deleted file mode 100644 index 39036aa2c..000000000 --- a/zero_bin/worker.Dockerfile +++ /dev/null @@ -1,35 +0,0 @@ -FROM rustlang/rust:nightly-bullseye-slim as builder - -RUN apt-get update && apt-get install -y libjemalloc2 libjemalloc-dev make libssl-dev - -RUN \ - mkdir -p common/src && touch common/src/lib.rs && \ - mkdir -p ops/src && touch ops/src/lib.rs && \ - mkdir -p worker/src && echo "fn main() {println!(\"YO!\");}" > worker/src/main.rs - -COPY Cargo.toml . -RUN sed -i "2s/.*/members = [\"common\", \"ops\", \"worker\"]/" Cargo.toml -COPY Cargo.lock . 
- -COPY common/Cargo.toml ./common/Cargo.toml -COPY ops/Cargo.toml ./ops/Cargo.toml -COPY worker/Cargo.toml ./worker/Cargo.toml - -COPY ./rust-toolchain.toml ./ - -RUN cargo build --release --bin worker - -COPY common ./common -COPY ops ./ops -COPY worker ./worker -RUN \ - touch common/src/lib.rs && \ - touch ops/src/lib.rs && \ - touch worker/src/main.rs - -RUN cargo build --release --bin worker - -FROM debian:bullseye-slim -RUN apt-get update && apt-get install -y ca-certificates libjemalloc2 -COPY --from=builder ./target/release/worker /usr/local/bin/worker -CMD ["worker"] From 7a4a9bb16bb4d47d571225117d1c0c4c0bca0b50 Mon Sep 17 00:00:00 2001 From: frisitano <35734660+frisitano@users.noreply.github.com> Date: Tue, 2 Jul 2024 19:44:00 +0800 Subject: [PATCH 2/6] parse embedded short nodes (#345) --- mpt_trie/src/builder.rs | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/mpt_trie/src/builder.rs b/mpt_trie/src/builder.rs index 8ab39ca9d..1e0e06bd3 100644 --- a/mpt_trie/src/builder.rs +++ b/mpt_trie/src/builder.rs @@ -5,6 +5,7 @@ use std::sync::Arc; use ethereum_types::H256; use keccak_hash::keccak; +use rlp::{Prototype, Rlp}; use super::{ nibbles::Nibbles, @@ -48,9 +49,22 @@ impl PartialTrieBuilder { /// Inserts variants of extension and leaf nodes into the builder. pub fn insert_short_node_variants_from_proof(&mut self, proof: Vec>) { for node in proof { - let bytes = rlp::decode_list::>(&node); - match bytes.len() { - 2 => self.insert_short_node_variants(bytes), + let rlp = Rlp::new(&node); + match rlp.prototype().expect("rlp data is valid") { + Prototype::List(2) => { + self.insert_short_node_variants(rlp.as_list().expect("valid list")) + } + Prototype::List(17) => { + for i in 0..16 { + if let Ok(entry) = rlp.at(i) { + if let Prototype::List(2) = entry.prototype().expect("valid list") { + self.insert_short_node_variants( + entry.as_list().expect("valid list"), + ) + } + } + } + } _ => continue, } } From a591b6f22c5cd40c519fb74cb92f71606f272953 Mon Sep 17 00:00:00 2001 From: Gio <102917377+gio256@users.noreply.github.com> Date: Tue, 2 Jul 2024 06:26:30 -0600 Subject: [PATCH 3/6] Add `LogicColumnsView` struct for `LogicStark` (#347) * Add LogicColumnsView struct * Add generate-evaluate test for logic stark * Add comments to gen-eval test in logic stark --- evm_arithmetization/src/logic.rs | 234 ++++++++++++++++++++----------- 1 file changed, 151 insertions(+), 83 deletions(-) diff --git a/evm_arithmetization/src/logic.rs b/evm_arithmetization/src/logic.rs index c5f952465..d411a4482 100644 --- a/evm_arithmetization/src/logic.rs +++ b/evm_arithmetization/src/logic.rs @@ -1,3 +1,4 @@ +use core::borrow::Borrow; use core::marker::PhantomData; use ethereum_types::U256; @@ -18,7 +19,7 @@ use starky::stark::Stark; use starky::util::trace_rows_to_poly_values; use crate::all_stark::EvmStarkFrame; -use crate::logic::columns::NUM_COLUMNS; +use crate::logic::columns::{LogicColumnsView, LOGIC_COL_MAP, NUM_COLUMNS}; use crate::util::{limb_from_bits_le, limb_from_bits_le_recursive}; /// Total number of bits per input/output. @@ -32,37 +33,50 @@ const PACKED_LEN: usize = ceil_div_usize(VAL_BITS, PACKED_LIMB_BITS); /// `LogicStark` columns. pub(crate) mod columns { - use core::cmp::min; - use core::ops::Range; - - use super::{PACKED_LEN, PACKED_LIMB_BITS, VAL_BITS}; - - /// 1 if this is an AND operation, 0 otherwise. - pub(crate) const IS_AND: usize = 0; - /// 1 if this is an OR operation, 0 otherwise. 
-    pub(crate) const IS_OR: usize = IS_AND + 1;
-    /// 1 if this is a XOR operation, 0 otherwise.
-    pub(crate) const IS_XOR: usize = IS_OR + 1;
-    /// First input, decomposed into bits.
-    pub(crate) const INPUT0: Range<usize> = (IS_XOR + 1)..(IS_XOR + 1) + VAL_BITS;
-    /// Second input, decomposed into bits.
-    pub(crate) const INPUT1: Range<usize> = INPUT0.end..INPUT0.end + VAL_BITS;
-    /// The result is packed in limbs of `PACKED_LIMB_BITS` bits.
-    pub(crate) const RESULT: Range<usize> = INPUT1.end..INPUT1.end + PACKED_LEN;
-
-    /// Returns the column range for each 32 bit chunk in the input.
-    pub(crate) fn limb_bit_cols_for_input(
-        input_bits: Range<usize>,
-    ) -> impl Iterator<Item = Range<usize>> {
-        (0..PACKED_LEN).map(move |i| {
-            let start = input_bits.start + i * PACKED_LIMB_BITS;
-            let end = min(start + PACKED_LIMB_BITS, input_bits.end);
-            start..end
-        })
+    use core::mem::transmute;
+
+    use zk_evm_proc_macro::{Columns, DerefColumns};
+
+    use super::{PACKED_LEN, VAL_BITS};
+    use crate::util::indices_arr;
+
+    /// Flag columns for the operation to perform.
+    #[repr(C)]
+    #[derive(DerefColumns, Clone, Copy, Debug, Eq, PartialEq)]
+    pub(crate) struct OpsColumnsView<T> {
+        /// 1 if this is an AND operation, 0 otherwise.
+        pub is_and: T,
+        /// 1 if this is an OR operation, 0 otherwise.
+        pub is_or: T,
+        /// 1 if this is a XOR operation, 0 otherwise.
+        pub is_xor: T,
     }
 
-    /// Number of columns in `LogicStark`.
-    pub(crate) const NUM_COLUMNS: usize = RESULT.end;
+    /// Columns for the `LogicStark`.
+    #[repr(C)]
+    #[derive(Columns, Clone, Copy, Debug, Eq, PartialEq)]
+    pub(crate) struct LogicColumnsView<T> {
+        /// The operation to perform.
+        pub op: OpsColumnsView<T>,
+        /// First input, decomposed into bits.
+        pub input0: [T; VAL_BITS],
+        /// Second input, decomposed into bits.
+        pub input1: [T; VAL_BITS],
+        /// The result is packed in limbs of `PACKED_LIMB_BITS` bits.
+        pub result: [T; PACKED_LEN],
+    }
+
+    /// Total number of columns in `LogicStark`.
+    /// `u8` is guaranteed to have a `size_of` of 1.
+    pub(crate) const NUM_COLUMNS: usize = core::mem::size_of::<LogicColumnsView<u8>>();
+
+    /// Mapping between [0..NUM_COLUMNS-1] and the logic columns.
+    pub(crate) const LOGIC_COL_MAP: LogicColumnsView<usize> = make_col_map();
+
+    const fn make_col_map() -> LogicColumnsView<usize> {
+        let indices_arr = indices_arr::<NUM_COLUMNS>();
+        unsafe { transmute::<[usize; NUM_COLUMNS], LogicColumnsView<usize>>(indices_arr) }
+    }
 }
 
 /// Creates the vector of `Columns` corresponding to the opcode, the two inputs
@@ -73,23 +87,29 @@ pub(crate) fn ctl_data<F: Field>() -> Vec<Column<F>> {
     // will enforce that the reconstructed opcode value from the
     // opcode bits matches.
     let mut res = vec![Column::linear_combination([
-        (columns::IS_AND, F::from_canonical_u8(0x16)),
-        (columns::IS_OR, F::from_canonical_u8(0x17)),
-        (columns::IS_XOR, F::from_canonical_u8(0x18)),
+        (LOGIC_COL_MAP.op.is_and, F::from_canonical_u8(0x16)),
+        (LOGIC_COL_MAP.op.is_or, F::from_canonical_u8(0x17)),
+        (LOGIC_COL_MAP.op.is_xor, F::from_canonical_u8(0x18)),
     ])];
-    res.extend(columns::limb_bit_cols_for_input(columns::INPUT0).map(Column::le_bits));
-    res.extend(columns::limb_bit_cols_for_input(columns::INPUT1).map(Column::le_bits));
-    res.extend(columns::RESULT.map(Column::single));
+    res.extend(
+        LOGIC_COL_MAP
+            .input0
+            .chunks(PACKED_LIMB_BITS)
+            .map(Column::le_bits),
+    );
+    res.extend(
+        LOGIC_COL_MAP
+            .input1
+            .chunks(PACKED_LIMB_BITS)
+            .map(Column::le_bits),
+    );
+    res.extend(LOGIC_COL_MAP.result.map(Column::single));
     res
 }
 
 /// CTL filter for logic operations.
 pub(crate) fn ctl_filter<F: Field>() -> Filter<F> {
-    Filter::new_simple(Column::sum([
-        columns::IS_AND,
-        columns::IS_OR,
-        columns::IS_XOR,
-    ]))
+    Filter::new_simple(Column::sum(*LOGIC_COL_MAP.op))
 }
 
 /// Structure representing the Logic STARK, which computes all logic operations.
@@ -151,18 +171,18 @@ impl Operation {
         } = self;
         let mut row = [F::ZERO; NUM_COLUMNS];
         row[match operator {
-            Op::And => columns::IS_AND,
-            Op::Or => columns::IS_OR,
-            Op::Xor => columns::IS_XOR,
+            Op::And => LOGIC_COL_MAP.op.is_and,
+            Op::Or => LOGIC_COL_MAP.op.is_or,
+            Op::Xor => LOGIC_COL_MAP.op.is_xor,
         }] = F::ONE;
         for i in 0..256 {
-            row[columns::INPUT0.start + i] = F::from_bool(input0.bit(i));
-            row[columns::INPUT1.start + i] = F::from_bool(input1.bit(i));
+            row[LOGIC_COL_MAP.input0[i]] = F::from_bool(input0.bit(i));
+            row[LOGIC_COL_MAP.input1[i]] = F::from_bool(input1.bit(i));
         }
         let result_limbs: &[u64] = result.as_ref();
         for (i, &limb) in result_limbs.iter().enumerate() {
-            row[columns::RESULT.start + 2 * i] = F::from_canonical_u32(limb as u32);
-            row[columns::RESULT.start + 2 * i + 1] = F::from_canonical_u32((limb >> 32) as u32);
+            row[LOGIC_COL_MAP.result[2 * i]] = F::from_canonical_u32(limb as u32);
+            row[LOGIC_COL_MAP.result[2 * i + 1]] = F::from_canonical_u32((limb >> 32) as u32);
         }
         row
     }
@@ -233,11 +253,12 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for LogicStark<F
         FE: FieldExtension<D2, BaseField = F>,
         P: PackedField<Scalar = FE>,
     {
-        let lv = vars.get_local_values();
+        let lv: &[P; NUM_COLUMNS] = vars.get_local_values().try_into().unwrap();
+        let lv: &LogicColumnsView<P>
= lv.borrow(); - let is_and = lv[columns::IS_AND]; - let is_or = lv[columns::IS_OR]; - let is_xor = lv[columns::IS_XOR]; + let is_and = lv.op.is_and; + let is_or = lv.op.is_or; + let is_xor = lv.op.is_xor; // Flags must be boolean. for &flag in &[is_and, is_or, is_xor] { @@ -256,31 +277,27 @@ impl, const D: usize> Stark for LogicStark, const D: usize> Stark for LogicStark, ) { - let lv = vars.get_local_values(); + let lv: &[ExtensionTarget; NUM_COLUMNS] = vars.get_local_values().try_into().unwrap(); + let lv: &LogicColumnsView> = lv.borrow(); - let is_and = lv[columns::IS_AND]; - let is_or = lv[columns::IS_OR]; - let is_xor = lv[columns::IS_XOR]; + let is_and = lv.op.is_and; + let is_or = lv.op.is_or; + let is_xor = lv.op.is_xor; // Flags must be boolean. for &flag in &[is_and, is_or, is_xor] { @@ -318,28 +336,25 @@ impl, const D: usize> Stark for LogicStark, const D: usize> Stark for LogicStark, const D: usize> Stark for LogicStark(stark) } + + #[test] + fn test_generate_eval_consistency() { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + type S = LogicStark; + + let mut rng = ChaCha8Rng::seed_from_u64(0x6feb51b7ec230f25); + const N_ITERS: usize = 1000; + + for _ in 0..N_ITERS { + for op in [Op::And, Op::Or, Op::Xor] { + // Generate a trace row from an operation on random values. + let operation = Operation::new(op, U256(rng.gen()), U256(rng.gen())); + let expected = operation.result; + let row = operation.into_row::(); + let lv = EvmStarkFrame::from_values(&row, &[F::ZERO; NUM_COLUMNS], &[]); + + let stark = S::default(); + let mut constraint_consumer = ConstraintConsumer::new( + vec![GoldilocksField(2), GoldilocksField(3), GoldilocksField(5)], + F::ONE, + F::ONE, + F::ONE, + ); + + // Evaluate constraints. + stark.eval_packed_generic(&lv, &mut constraint_consumer); + for acc in constraint_consumer.accumulators() { + assert_eq!(acc, F::ZERO); + } + + // Split each expected U256 limb into two. + let expected_limbs = expected.as_ref().iter().flat_map(|&limb| { + [ + F::from_canonical_u32(limb as u32), + F::from_canonical_u32((limb >> 32) as u32), + ] + }); + + // Check that the result limbs match the expected limbs. + assert!(expected_limbs + .zip_eq(&row[LOGIC_COL_MAP.result[0]..]) + .all(|(x, &y)| x == y)); + } + } + } } From 653b7e4ea42cbf77d6b723dd51a2bb41f14919eb Mon Sep 17 00:00:00 2001 From: Robin Salen <30937548+Nashtare@users.noreply.github.com> Date: Wed, 3 Jul 2024 00:58:08 +0900 Subject: [PATCH 4/6] fix: properly log final result when due (#352) --- zero_bin/leader/src/client.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/zero_bin/leader/src/client.rs b/zero_bin/leader/src/client.rs index e3be064bc..932e195cc 100644 --- a/zero_bin/leader/src/client.rs +++ b/zero_bin/leader/src/client.rs @@ -46,12 +46,6 @@ pub(crate) async fn client_main( ) .await?; - if cfg!(feature = "test_only") { - info!("All proof witnesses have been generated successfully."); - } else { - info!("All proofs have been generated successfully."); - } - // If `keep_intermediate_proofs` is not set we only keep the last block // proof from the interval. It contains all the necessary information to // verify the whole sequence. 
@@ -66,6 +60,12 @@ pub(crate) async fn client_main( runtime.close().await?; let proved_blocks = proved_blocks?; + if cfg!(feature = "test_only") { + info!("All proof witnesses have been generated successfully."); + } else { + info!("All proofs have been generated successfully."); + } + if params.keep_intermediate_proofs { if params.proof_output_dir.is_some() { // All proof files (including intermediary) are written to disk and kept From 043ab683bb745ef6e414a68fdfcdd1cbe5946ac9 Mon Sep 17 00:00:00 2001 From: Robin Salen <30937548+Nashtare@users.noreply.github.com> Date: Sat, 6 Jul 2024 05:00:19 +0900 Subject: [PATCH 5/6] fix: Check valid range for s and add test (#363) --- .../kernel/asm/curve/secp256k1/ecrecover.asm | 4 ++++ .../asm/transactions/common_decoding.asm | 8 ++++++++ .../src/cpu/kernel/constants/mod.rs | 7 ++++++- .../transaction_parsing/parse_type_0_txn.rs | 19 +++++++++++++++++++ 4 files changed, 37 insertions(+), 1 deletion(-) diff --git a/evm_arithmetization/src/cpu/kernel/asm/curve/secp256k1/ecrecover.asm b/evm_arithmetization/src/cpu/kernel/asm/curve/secp256k1/ecrecover.asm index c11031004..63faacdeb 100644 --- a/evm_arithmetization/src/cpu/kernel/asm/curve/secp256k1/ecrecover.asm +++ b/evm_arithmetization/src/cpu/kernel/asm/curve/secp256k1/ecrecover.asm @@ -175,6 +175,10 @@ pubkey_to_addr: PUSH @SECP_SCALAR %endmacro +%macro secp_scalar_half + PUSH @SECP_SCALAR_HALF +%endmacro + // Return u256::MAX which is used to indicate the input was invalid. %macro ecrecover_invalid_input // stack: retdest diff --git a/evm_arithmetization/src/cpu/kernel/asm/transactions/common_decoding.asm b/evm_arithmetization/src/cpu/kernel/asm/transactions/common_decoding.asm index 223e0a62e..d89293998 100644 --- a/evm_arithmetization/src/cpu/kernel/asm/transactions/common_decoding.asm +++ b/evm_arithmetization/src/cpu/kernel/asm/transactions/common_decoding.asm @@ -153,6 +153,14 @@ // stack: rlp_addr %decode_rlp_scalar %stack (rlp_addr, s) -> (s, rlp_addr) + + // EIP-2: Check that s is within valid range. + DUP1 + %secp_scalar_half + // stack: ceil(N/2), s, s, rlp_addr + %assert_gt + + // stack: s, rlp_addr %mstore_txn_field(@TXN_FIELD_S) // stack: rlp_addr %endmacro diff --git a/evm_arithmetization/src/cpu/kernel/constants/mod.rs b/evm_arithmetization/src/cpu/kernel/constants/mod.rs index f61686559..19da9a804 100644 --- a/evm_arithmetization/src/cpu/kernel/constants/mod.rs +++ b/evm_arithmetization/src/cpu/kernel/constants/mod.rs @@ -129,7 +129,7 @@ const HASH_CONSTANTS: [(&str, [u8; 32]); 2] = [ ), ]; -const EC_CONSTANTS: [(&str, [u8; 32]); 24] = [ +const EC_CONSTANTS: [(&str, [u8; 32]); 25] = [ ( "U256_MAX", hex!("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), @@ -204,6 +204,11 @@ const EC_CONSTANTS: [(&str, [u8; 32]); 24] = [ "SECP_SCALAR", hex!("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141"), ), + ( + "SECP_SCALAR_HALF", + // Corresponds to `ceil(SECP_SCALAR / 2)`. 
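    // Quick check of the arithmetic behind the hex literal below: N = SECP_SCALAR
    // is odd, so ceil(N / 2) = (N + 1) / 2, and doubling the constant gives back
    // N + 1:
    //   N         = 0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141
    //   (N + 1)/2 = 0x7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a1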
+ hex!("7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a1"), + ), ( "SECP_GLV_BETA", hex!("7ae96a2b657c07106e64479eac3434e99cf0497512f58995c1396c28719501ee"), diff --git a/evm_arithmetization/src/cpu/kernel/tests/transaction_parsing/parse_type_0_txn.rs b/evm_arithmetization/src/cpu/kernel/tests/transaction_parsing/parse_type_0_txn.rs index 17693a17b..fa51c3f9f 100644 --- a/evm_arithmetization/src/cpu/kernel/tests/transaction_parsing/parse_type_0_txn.rs +++ b/evm_arithmetization/src/cpu/kernel/tests/transaction_parsing/parse_type_0_txn.rs @@ -68,3 +68,22 @@ fn process_type_0_txn() -> Result<()> { Ok(()) } + +#[test] +fn process_type_0_txn_invalid_sig() -> Result<()> { + let process_type_0_txn = KERNEL.global_labels["process_type_0_txn"]; + let process_normalized_txn = KERNEL.global_labels["process_normalized_txn"]; + + let retaddr = 0xDEADBEEFu32.into(); + let mut interpreter: Interpreter = Interpreter::new(process_type_0_txn, vec![retaddr]); + + // Same transaction as `process_type_0_txn()`, with the exception that the `s` + // component in the signature is flipped (i.e. `s' = N - s`, where `N` is the + // order of the SECP256k1 prime subgroup). + interpreter.extend_memory_segment_bytes(Segment::RlpRaw, hex!("f861050a8255f0940000000000000000000000000000000000000000648242421ca07c5c61ed975ebd286f6b027b8c504842e50a47d318e1e801719dd744fe93e6c6a0e184aee64a822ab1e8a00d0faa36e0c408f99e2ca41c87ec8b557e9be8f0949f").to_vec()); + + let result = interpreter.run(); + assert!(result.is_err()); + + Ok(()) +} From 341c322d989a25b30511a7b7120bf2849fa3d0f1 Mon Sep 17 00:00:00 2001 From: Marko Atanasievski Date: Mon, 8 Jul 2024 11:54:54 +0200 Subject: [PATCH 6/6] feat: add caching for `get_block` (#346) * feat: cached provider * fix: optimize hash fetching * fix: rename provider * fix: optimize * fix: comment * fix: clippy --- Cargo.lock | 1 + Cargo.toml | 1 + mpt_trie/src/trie_hashing.rs | 1 + zero_bin/leader/src/client.rs | 12 ++-- zero_bin/rpc/Cargo.toml | 1 + zero_bin/rpc/src/jerigon.rs | 12 ++-- zero_bin/rpc/src/lib.rs | 48 ++++++++++------ zero_bin/rpc/src/main.rs | 9 ++- zero_bin/rpc/src/native/mod.rs | 21 +++---- zero_bin/rpc/src/native/state.rs | 17 +++--- zero_bin/rpc/src/native/txn.rs | 2 +- zero_bin/rpc/src/provider.rs | 94 ++++++++++++++++++++++++++++++++ 12 files changed, 173 insertions(+), 46 deletions(-) create mode 100644 zero_bin/rpc/src/provider.rs diff --git a/Cargo.lock b/Cargo.lock index dd96f5146..a2b0f7e02 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4069,6 +4069,7 @@ dependencies = [ "compat", "evm_arithmetization", "futures", + "lru", "mpt_trie", "primitive-types 0.12.2", "prover", diff --git a/Cargo.toml b/Cargo.toml index aff00a946..b64e627fb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -101,6 +101,7 @@ tracing = "0.1" tracing-subscriber = { version = "0.3", features = ["env-filter"] } uint = "0.9.5" url = "2.5.2" +lru = "0.12.3" # zero-bin related dependencies ops = { path = "zero_bin/ops" } diff --git a/mpt_trie/src/trie_hashing.rs b/mpt_trie/src/trie_hashing.rs index 9502ebe1c..60890aae2 100644 --- a/mpt_trie/src/trie_hashing.rs +++ b/mpt_trie/src/trie_hashing.rs @@ -121,6 +121,7 @@ mod tests { const NUM_INSERTS_FOR_ETH_TRIE_CRATE_MASSIVE_TEST: usize = 1000; const NODES_PER_BRANCH_FOR_HASH_REPLACEMENT_TEST: usize = 200; + #[allow(dead_code)] #[derive(Copy, Clone, Debug)] struct U256Rlpable(U256); diff --git a/zero_bin/leader/src/client.rs b/zero_bin/leader/src/client.rs index 932e195cc..555bc74aa 100644 --- a/zero_bin/leader/src/client.rs +++ 
b/zero_bin/leader/src/client.rs @@ -34,12 +34,14 @@ pub(crate) async fn client_main( block_interval: BlockInterval, mut params: ProofParams, ) -> Result<()> { + let cached_provider = rpc::provider::CachedProvider::new(build_http_retry_provider( + rpc_params.rpc_url.clone(), + rpc_params.backoff, + rpc_params.max_retries, + )); + let prover_input = rpc::prover_input( - &build_http_retry_provider( - rpc_params.rpc_url, - rpc_params.backoff, - rpc_params.max_retries, - ), + &cached_provider, block_interval, params.checkpoint_block_number.into(), rpc_params.rpc_type, diff --git a/zero_bin/rpc/Cargo.toml b/zero_bin/rpc/Cargo.toml index 0cf1a2d6c..0bb5a2df3 100644 --- a/zero_bin/rpc/Cargo.toml +++ b/zero_bin/rpc/Cargo.toml @@ -23,6 +23,7 @@ futures = { workspace = true } url = { workspace = true } __compat_primitive_types = { workspace = true } tower = { workspace = true, features = ["retry"] } +lru = { workspace = true } # Local dependencies compat = { workspace = true } diff --git a/zero_bin/rpc/src/jerigon.rs b/zero_bin/rpc/src/jerigon.rs index d1d29ed00..6a04c331e 100644 --- a/zero_bin/rpc/src/jerigon.rs +++ b/zero_bin/rpc/src/jerigon.rs @@ -9,6 +9,7 @@ use trace_decoder::trace_protocol::{ }; use super::fetch_other_block_data; +use crate::provider::CachedProvider; /// Transaction traces retrieved from Erigon zeroTracer. #[derive(Debug, Deserialize)] @@ -23,7 +24,7 @@ pub struct ZeroTxResult { pub struct ZeroBlockWitness(TrieCompact); pub async fn block_prover_input( - provider: ProviderT, + cached_provider: &CachedProvider, target_block_id: BlockId, checkpoint_state_trie_root: B256, ) -> anyhow::Result @@ -32,7 +33,8 @@ where TransportT: Transport + Clone, { // Grab trace information - let tx_results = provider + let tx_results = cached_provider + .as_provider() .raw_request::<_, Vec>( "debug_traceBlockByNumber".into(), (target_block_id, json!({"tracer": "zeroTracer"})), @@ -40,12 +42,14 @@ where .await?; // Grab block witness info (packed as combined trie pre-images) - let block_witness = provider + let block_witness = cached_provider + .as_provider() .raw_request::<_, ZeroBlockWitness>("eth_getWitness".into(), vec![target_block_id]) .await?; let other_data = - fetch_other_block_data(provider, target_block_id, checkpoint_state_trie_root).await?; + fetch_other_block_data(cached_provider, target_block_id, checkpoint_state_trie_root) + .await?; // Assemble Ok(BlockProverInput { diff --git a/zero_bin/rpc/src/lib.rs b/zero_bin/rpc/src/lib.rs index 130b791e1..4f3646a45 100644 --- a/zero_bin/rpc/src/lib.rs +++ b/zero_bin/rpc/src/lib.rs @@ -15,8 +15,11 @@ use zero_bin_common::block_interval::BlockInterval; pub mod jerigon; pub mod native; +pub mod provider; pub mod retry; +use crate::provider::CachedProvider; + const PREVIOUS_HASHES_COUNT: usize = 256; /// The RPC type. @@ -28,7 +31,7 @@ pub enum RpcType { /// Obtain the prover input for a given block interval pub async fn prover_input( - provider: &ProviderT, + cached_provider: &CachedProvider, block_interval: BlockInterval, checkpoint_block_id: BlockId, rpc_type: RpcType, @@ -38,10 +41,9 @@ where TransportT: Transport + Clone, { // Grab interval checkpoint block state trie - let checkpoint_state_trie_root = provider + let checkpoint_state_trie_root = cached_provider .get_block(checkpoint_block_id, BlockTransactionsKind::Hashes) .await? - .context("block does not exist")? 
.header .state_root; @@ -52,10 +54,12 @@ where let block_id = BlockId::Number(BlockNumberOrTag::Number(block_num)); let block_prover_input = match rpc_type { RpcType::Jerigon => { - jerigon::block_prover_input(&provider, block_id, checkpoint_state_trie_root).await? + jerigon::block_prover_input(cached_provider, block_id, checkpoint_state_trie_root) + .await? } RpcType::Native => { - native::block_prover_input(&provider, block_id, checkpoint_state_trie_root).await? + native::block_prover_input(cached_provider, block_id, checkpoint_state_trie_root) + .await? } }; @@ -68,7 +72,7 @@ where /// Fetches other block data async fn fetch_other_block_data( - provider: ProviderT, + cached_provider: &CachedProvider, target_block_id: BlockId, checkpoint_state_trie_root: B256, ) -> anyhow::Result @@ -76,27 +80,35 @@ where ProviderT: Provider, TransportT: Transport + Clone, { - let target_block = provider + let target_block = cached_provider .get_block(target_block_id, BlockTransactionsKind::Hashes) - .await? - .context("target block does not exist")?; + .await?; let target_block_number = target_block .header .number .context("target block is missing field `number`")?; - let chain_id = provider.get_chain_id().await?; + let chain_id = cached_provider.as_provider().get_chain_id().await?; + + // For one block, we will fetch 128 previous blocks to get hashes instead of + // 256. But for two consecutive blocks (odd and even) we would fetch 256 + // previous blocks in total. To overcome this, we add an offset so that we + // always start fetching from an odd index and eventually skip the additional + // block for an even `target_block_number`. + let odd_offset: i128 = target_block_number as i128 % 2; let previous_block_numbers = - std::iter::successors(Some(target_block_number as i128 - 1), |&it| Some(it - 1)) - .take(PREVIOUS_HASHES_COUNT) - .filter(|i| *i >= 0) - .collect::>(); + std::iter::successors(Some(target_block_number as i128 - 1 + odd_offset), |&it| { + Some(it - 1) + }) + .take(PREVIOUS_HASHES_COUNT) + .filter(|i| *i >= 0) + .collect::>(); let concurrency = previous_block_numbers.len(); let collected_hashes = futures::stream::iter( previous_block_numbers .chunks(2) // we get hash for previous and current block with one request .map(|block_numbers| { - let provider = &provider; + let cached_provider = &cached_provider; let block_num = &block_numbers[0]; let previos_block_num = if block_numbers.len() > 1 { Some(block_numbers[1]) @@ -105,11 +117,10 @@ where None }; async move { - let block = provider + let block = cached_provider .get_block((*block_num as u64).into(), BlockTransactionsKind::Hashes) .await - .context("couldn't get block")? 
- .context("no such block")?; + .context("couldn't get block")?; anyhow::Ok([ (block.header.hash, Some(*block_num)), (Some(block.header.parent_hash), previos_block_num), @@ -126,6 +137,7 @@ where collected_hashes .into_iter() .flatten() + .skip(odd_offset as usize) .for_each(|(hash, block_num)| { if let (Some(hash), Some(block_num)) = (hash, block_num) { // Most recent previous block hash is expected at the end of the array diff --git a/zero_bin/rpc/src/main.rs b/zero_bin/rpc/src/main.rs index 8e26498a6..197e387de 100644 --- a/zero_bin/rpc/src/main.rs +++ b/zero_bin/rpc/src/main.rs @@ -2,6 +2,7 @@ use std::io; use alloy::rpc::types::eth::BlockId; use clap::{Parser, ValueHint}; +use rpc::provider::CachedProvider; use rpc::{retry::build_http_retry_provider, RpcType}; use tracing_subscriber::{prelude::*, EnvFilter}; use url::Url; @@ -53,9 +54,15 @@ impl Cli { checkpoint_block_number.unwrap_or((start_block - 1).into()); let block_interval = BlockInterval::Range(start_block..end_block + 1); + let cached_provider = CachedProvider::new(build_http_retry_provider( + rpc_url.clone(), + backoff, + max_retries, + )); + // Retrieve prover input from the Erigon node let prover_input = rpc::prover_input( - &build_http_retry_provider(rpc_url, backoff, max_retries), + &cached_provider, block_interval, checkpoint_block_number, rpc_type, diff --git a/zero_bin/rpc/src/native/mod.rs b/zero_bin/rpc/src/native/mod.rs index 75de3d5de..7d0af2de4 100644 --- a/zero_bin/rpc/src/native/mod.rs +++ b/zero_bin/rpc/src/native/mod.rs @@ -6,11 +6,12 @@ use alloy::{ rpc::types::eth::{BlockId, BlockTransactionsKind}, transports::Transport, }; -use anyhow::Context as _; use futures::try_join; use prover::BlockProverInput; use trace_decoder::trace_protocol::BlockTrace; +use crate::provider::CachedProvider; + mod state; mod txn; @@ -18,7 +19,7 @@ type CodeDb = HashMap<__compat_primitive_types::H256, Vec>; /// Fetches the prover input for the given BlockId. pub async fn block_prover_input( - provider: &ProviderT, + provider: &CachedProvider, block_number: BlockId, checkpoint_state_trie_root: B256, ) -> anyhow::Result @@ -27,8 +28,8 @@ where TransportT: Transport + Clone, { let (block_trace, other_data) = try_join!( - process_block_trace(&provider, block_number), - crate::fetch_other_block_data(&provider, block_number, checkpoint_state_trie_root,) + process_block_trace(provider, block_number), + crate::fetch_other_block_data(provider, block_number, checkpoint_state_trie_root,) )?; Ok(BlockProverInput { @@ -39,20 +40,20 @@ where /// Processes the block with the given block number and returns the block trace. async fn process_block_trace( - provider: &ProviderT, + cached_provider: &CachedProvider, block_number: BlockId, ) -> anyhow::Result where ProviderT: Provider, TransportT: Transport + Clone, { - let block = provider + let block = cached_provider .get_block(block_number, BlockTransactionsKind::Full) - .await? 
- .context("target block does not exist")?; + .await?; - let (code_db, txn_info) = txn::process_transactions(&block, provider).await?; - let trie_pre_images = state::process_state_witness(provider, block, &txn_info).await?; + let (code_db, txn_info) = + txn::process_transactions(&block, cached_provider.as_provider()).await?; + let trie_pre_images = state::process_state_witness(cached_provider, block, &txn_info).await?; Ok(BlockTrace { txn_info, diff --git a/zero_bin/rpc/src/native/state.rs b/zero_bin/rpc/src/native/state.rs index d69b48cb9..331647c82 100644 --- a/zero_bin/rpc/src/native/state.rs +++ b/zero_bin/rpc/src/native/state.rs @@ -14,11 +14,12 @@ use trace_decoder::trace_protocol::{ SeparateTriePreImages, TrieDirect, TxnInfo, }; +use crate::provider::CachedProvider; use crate::Compat; /// Processes the state witness for the given block. pub async fn process_state_witness( - provider: &ProviderT, + cached_provider: &CachedProvider, block: Block, txn_infos: &[TxnInfo], ) -> anyhow::Result @@ -32,15 +33,15 @@ where .header .number .context("Block number not returned with block")?; - let prev_state_root = provider + let prev_state_root = cached_provider .get_block((block_number - 1).into(), BlockTransactionsKind::Hashes) .await? - .context("Failed to get previous block")? .header .state_root; let (state, storage_proofs) = - generate_state_witness(prev_state_root, state_access, provider, block_number).await?; + generate_state_witness(prev_state_root, state_access, cached_provider, block_number) + .await?; Ok(BlockTraceTriePreImages::Separate(SeparateTriePreImages { state: SeparateTriePreImage::Direct(TrieDirect(state.build())), @@ -97,7 +98,7 @@ pub fn process_states_access( async fn generate_state_witness( prev_state_root: B256, accounts_state: HashMap>, - provider: &ProviderT, + cached_provider: &CachedProvider, block_number: u64, ) -> anyhow::Result<( PartialTrieBuilder, @@ -111,7 +112,7 @@ where let mut storage_proofs = HashMap::>::new(); let (account_proofs, next_account_proofs) = - fetch_proof_data(accounts_state, provider, block_number).await?; + fetch_proof_data(accounts_state, cached_provider, block_number).await?; // Insert account proofs for (address, proof) in account_proofs.into_iter() { @@ -146,7 +147,7 @@ where /// Fetches the proof data for the given accounts and associated storage keys. async fn fetch_proof_data( accounts_state: HashMap>, - provider: &ProviderT, + provider: &CachedProvider, block_number: u64, ) -> anyhow::Result<( Vec<(Address, EIP1186AccountProofResponse)>, @@ -161,6 +162,7 @@ where .into_iter() .map(|(address, keys)| async move { let proof = provider + .as_provider() .get_proof(address, keys.into_iter().collect()) .block_id((block_number - 1).into()) .await @@ -173,6 +175,7 @@ where .into_iter() .map(|(address, keys)| async move { let proof = provider + .as_provider() .get_proof(address, keys.into_iter().collect()) .block_id(block_number.into()) .await diff --git a/zero_bin/rpc/src/native/txn.rs b/zero_bin/rpc/src/native/txn.rs index 9e1e8721a..40af45523 100644 --- a/zero_bin/rpc/src/native/txn.rs +++ b/zero_bin/rpc/src/native/txn.rs @@ -40,7 +40,7 @@ where .as_transactions() .context("No transactions in block")? 
         .iter()
-        .map(|tx| super::txn::process_transaction(provider, tx))
+        .map(|tx| process_transaction(provider, tx))
         .collect::<FuturesOrdered<_>>()
         .try_fold(
             (HashMap::new(), Vec::new()),
diff --git a/zero_bin/rpc/src/provider.rs b/zero_bin/rpc/src/provider.rs
new file mode 100644
index 000000000..fc782ff43
--- /dev/null
+++ b/zero_bin/rpc/src/provider.rs
@@ -0,0 +1,94 @@
+use std::sync::Arc;
+
+use alloy::primitives::BlockHash;
+use alloy::rpc::types::{Block, BlockId, BlockTransactionsKind};
+use alloy::{providers::Provider, transports::Transport};
+use anyhow::Context;
+use tokio::sync::Mutex;
+
+const CACHE_SIZE: usize = 1024;
+
+/// Wrapper around alloy provider to cache blocks and other
+/// frequently used data.
+pub struct CachedProvider<ProviderT, TransportT> {
+    provider: ProviderT,
+    blocks_by_number: Arc<Mutex<lru::LruCache<u64, Block>>>,
+    blocks_by_hash: Arc<Mutex<lru::LruCache<BlockHash, u64>>>,
+    _phantom: std::marker::PhantomData<TransportT>,
+}
+
+impl<ProviderT, TransportT> CachedProvider<ProviderT, TransportT>
+where
+    ProviderT: Provider<TransportT>,
+    TransportT: Transport + Clone,
+{
+    pub fn new(provider: ProviderT) -> Self {
+        Self {
+            provider,
+            blocks_by_number: Arc::new(Mutex::new(lru::LruCache::new(
+                std::num::NonZero::new(CACHE_SIZE).unwrap(),
+            ))),
+            blocks_by_hash: Arc::new(Mutex::new(lru::LruCache::new(
+                std::num::NonZero::new(CACHE_SIZE).unwrap(),
+            ))),
+            _phantom: std::marker::PhantomData,
+        }
+    }
+
+    pub fn as_mut_provider(&mut self) -> &mut ProviderT {
+        &mut self.provider
+    }
+
+    pub fn as_provider(&self) -> &ProviderT {
+        &self.provider
+    }
+
+    /// Retrieves block by number or hash, caching it if it's not already
+    /// cached.
+    pub async fn get_block(
+        &self,
+        id: BlockId,
+        kind: BlockTransactionsKind,
+    ) -> anyhow::Result<Block> {
+        let cached_block = match id {
+            BlockId::Hash(hash) => {
+                let block_num = self
+                    .blocks_by_hash
+                    .lock()
+                    .await
+                    .get(&hash.block_hash)
+                    .copied();
+                if let Some(block_num) = block_num {
+                    self.blocks_by_number.lock().await.get(&block_num).cloned()
+                } else {
+                    None
+                }
+            }
+            BlockId::Number(alloy::rpc::types::BlockNumberOrTag::Number(number)) => {
+                self.blocks_by_number.lock().await.get(&number).cloned()
+            }
+            _ => None,
+        };
+
+        if let Some(block) = cached_block {
+            Ok(block)
+        } else {
+            let block = self
+                .provider
+                .get_block(id, kind)
+                .await?
+                .context(format!("target block {:?} does not exist", id))?;
+
+            if let Some(block_num) = block.header.number {
+                self.blocks_by_number
+                    .lock()
+                    .await
+                    .put(block_num, block.clone());
+                if let Some(hash) = block.header.hash {
+                    self.blocks_by_hash.lock().await.put(hash, block_num);
+                }
+            }
+            Ok(block)
+        }
+    }
+}
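Taken together, the final patch routes every block fetch through `CachedProvider`, so repeated `get_block` calls for the same block id are answered from the LRU maps instead of hitting the node again. A condensed usage sketch assembled from the `main.rs` and `client.rs` call sites above; the function name, block number, and the backoff/retry literals are illustrative values, not taken from the patch:

    use alloy::providers::Provider;
    use alloy::rpc::types::eth::BlockTransactionsKind;
    use rpc::provider::CachedProvider;
    use rpc::retry::build_http_retry_provider;
    use url::Url;

    async fn head_state_root(rpc_url: Url) -> anyhow::Result<()> {
        // Wrap the retrying HTTP provider once; all later fetches share its caches.
        let cached = CachedProvider::new(build_http_retry_provider(rpc_url, 0, 2));

        // The first call reaches the node; repeating it for the same number (or
        // for the same block by hash) is answered from the LRU cache.
        let block = cached
            .get_block(42u64.into(), BlockTransactionsKind::Hashes)
            .await?;
        println!("state root: {}", block.header.state_root);

        // Anything the cache does not cover still goes through the inner provider.
        let chain_id = cached.as_provider().get_chain_id().await?;
        println!("chain id: {chain_id}");
        Ok(())
    }

Note the cache layout: blocks are stored by number, with a hash-to-number side map, so a lookup by hash first resolves the number and then fetches the cached block.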