diff --git a/jolt-core/benches/commit.rs b/jolt-core/benches/commit.rs index a3fc63d26..03a6b3834 100644 --- a/jolt-core/benches/commit.rs +++ b/jolt-core/benches/commit.rs @@ -5,6 +5,7 @@ use jolt_core::poly::commitment::commitment_scheme::{BatchType, CommitShape, Com use jolt_core::poly::commitment::hyperkzg::HyperKZG; use jolt_core::poly::commitment::kzg::CommitMode; use jolt_core::poly::commitment::zeromorph::Zeromorph; +use jolt_core::utils::transcript::{KeccakTranscript, Transcript}; use rand_chacha::ChaCha20Rng; use rand_core::{RngCore, SeedableRng}; @@ -12,7 +13,7 @@ const SRS_SIZE: usize = 1 << 10; // Sets up the benchmark by generating leaves and computing known products // and allows configuring the percentage of ones in the leaves -fn setup_bench( +fn setup_bench( num_layers: usize, layer_size: usize, percentage_ones: u32, @@ -24,8 +25,9 @@ fn setup_bench( Vec, ) where - PCS: CommitmentScheme, + PCS: CommitmentScheme, F: JoltField, + ProofTranscript: Transcript, { assert!( percentage_ones <= 100, @@ -60,7 +62,7 @@ where (leaves, setup, known_products) } -fn benchmark_commit( +fn benchmark_commit( c: &mut Criterion, name: &str, num_layer: usize, @@ -68,10 +70,12 @@ fn benchmark_commit( threshold: u32, batch_type: BatchType, ) where - PCS: CommitmentScheme, // Generic over PCS implementing CommitmentScheme for field F - F: JoltField, // Generic over a field F + PCS: CommitmentScheme, // Generic over PCS implementing CommitmentScheme for field F + F: JoltField, // Generic over a field F + ProofTranscript: Transcript, { - let (leaves, setup, _) = setup_bench::(num_layer, layer_size, threshold); + let (leaves, setup, _) = + setup_bench::(num_layer, layer_size, threshold); let leaves = leaves .iter() .map(|layer| layer.as_slice()) @@ -97,7 +101,7 @@ fn main() { let num_layers = 50; let layer_size = 1 << 10; // Zeromorph - benchmark_commit::, Fr>( + benchmark_commit::, Fr, KeccakTranscript>( &mut criterion, "Zeromorph", num_layers, @@ -105,7 +109,7 @@ fn main() { 90, BatchType::Big, ); - benchmark_commit::, Fr>( + benchmark_commit::, Fr, KeccakTranscript>( &mut criterion, "HyperKZG", num_layers, @@ -113,7 +117,7 @@ fn main() { 90, BatchType::GrandProduct, ); - benchmark_commit::, Fr>( + benchmark_commit::, Fr, KeccakTranscript>( &mut criterion, "HyperKZG", num_layers, diff --git a/jolt-core/benches/grand_product.rs b/jolt-core/benches/grand_product.rs index cc5128a14..d6c6a2949 100644 --- a/jolt-core/benches/grand_product.rs +++ b/jolt-core/benches/grand_product.rs @@ -10,7 +10,7 @@ use jolt_core::subprotocols::grand_product::{ use jolt_core::subprotocols::grand_product_quarks::{ QuarkGrandProduct, QuarkGrandProductConfig, QuarkHybridLayerDepth, }; -use jolt_core::utils::transcript::ProofTranscript; +use jolt_core::utils::transcript::{KeccakTranscript, Transcript}; use rand_chacha::ChaCha20Rng; use rand_core::{RngCore, SeedableRng}; @@ -26,7 +26,7 @@ struct BenchConfig { // Sets up the benchmark by generating leaves and computing known products // and allows configuring the percentage of ones in the leaves -fn setup_bench( +fn setup_bench( num_batches: usize, layer_size: usize, threshold: u32, @@ -38,8 +38,9 @@ fn setup_bench( Vec, ) where - PCS: CommitmentScheme, + PCS: CommitmentScheme, F: JoltField, + ProofTranscript: Transcript, { assert!( threshold <= 100, @@ -74,17 +75,21 @@ where (leaves, setup, known_products) } -fn benchmark_prove( +fn benchmark_prove( c: &mut Criterion, config: BenchConfig, grand_products_config: G::Config, ) where - PCS: CommitmentScheme, + PCS: 
CommitmentScheme, F: JoltField, - G: BatchedGrandProduct>>, + G: BatchedGrandProduct>>, + ProofTranscript: Transcript, { - let (leaves, setup, _) = - setup_bench::(config.num_layers, config.layer_size, config.percentage_ones); + let (leaves, setup, _) = setup_bench::( + config.num_layers, + config.layer_size, + config.percentage_ones, + ); let mut grand_product = G::construct_with_config(leaves, grand_products_config); @@ -97,9 +102,9 @@ fn benchmark_prove( b.iter(|| { // Prove the grand product let mut transcript = ProofTranscript::new(b"test_transcript"); - let mut prover_accumulator: ProverOpeningAccumulator = + let mut prover_accumulator: ProverOpeningAccumulator = ProverOpeningAccumulator::new(); - let _proof: BatchedGrandProductProof = grand_product + let _proof: BatchedGrandProductProof = grand_product .prove_grand_product( Some(&mut prover_accumulator), &mut transcript, @@ -111,21 +116,26 @@ fn benchmark_prove( ); } -fn benchmark_verify( +fn benchmark_verify( c: &mut Criterion, config: BenchConfig, grand_products_config: G::Config, ) where - PCS: CommitmentScheme, + PCS: CommitmentScheme, F: JoltField, - G: BatchedGrandProduct>>, + G: BatchedGrandProduct>>, + ProofTranscript: Transcript, { - let (leaves, setup, known_products) = - setup_bench::(config.num_layers, config.layer_size, config.percentage_ones); + let (leaves, setup, known_products) = setup_bench::( + config.num_layers, + config.layer_size, + config.percentage_ones, + ); let mut transcript = ProofTranscript::new(b"test_transcript"); let mut grand_product = G::construct_with_config(leaves, grand_products_config); - let mut prover_accumulator: ProverOpeningAccumulator = ProverOpeningAccumulator::new(); + let mut prover_accumulator: ProverOpeningAccumulator = + ProverOpeningAccumulator::new(); let (proof, r_prover) = grand_product.prove_grand_product( Some(&mut prover_accumulator), &mut transcript, @@ -141,7 +151,7 @@ fn benchmark_verify( b.iter(|| { // Verify the grand product transcript = ProofTranscript::new(b"test_transcript"); - let mut verifier_accumulator: VerifierOpeningAccumulator = + let mut verifier_accumulator: VerifierOpeningAccumulator = VerifierOpeningAccumulator::new(); let (_, r_verifier) = QuarkGrandProduct::verify_grand_product( &proof, @@ -157,17 +167,18 @@ fn benchmark_verify( ); } -fn benchmark_prove_and_verify( +fn benchmark_prove_and_verify( c: &mut Criterion, config: BenchConfig, grand_product_config: G::Config, ) where - PCS: CommitmentScheme, + PCS: CommitmentScheme, F: JoltField, - G: BatchedGrandProduct>>, + G: BatchedGrandProduct>>, + ProofTranscript: Transcript, { - benchmark_prove::(c, config, grand_product_config); - benchmark_verify::(c, config, grand_product_config); + benchmark_prove::(c, config, grand_product_config); + benchmark_verify::(c, config, grand_product_config); } fn main() { @@ -184,15 +195,21 @@ fn main() { }; // Hybrid config.name = "HyperKZG Hybrid"; - benchmark_prove_and_verify::, Fr, QuarkGrandProduct>( - &mut c, - config, - QuarkGrandProductConfig::default(), - ); + benchmark_prove_and_verify::< + HyperKZG, + Fr, + QuarkGrandProduct, + KeccakTranscript, + >(&mut c, config, QuarkGrandProductConfig::default()); // Hybrid min config.name = "HyperKZG Hybrid Min Crossover"; - benchmark_prove_and_verify::, Fr, QuarkGrandProduct>( + benchmark_prove_and_verify::< + HyperKZG, + Fr, + QuarkGrandProduct, + KeccakTranscript, + >( &mut c, config, QuarkGrandProductConfig { @@ -200,7 +217,12 @@ fn main() { }, ); config.name = "HyperKZG Hybrid Min Crossover"; - 
benchmark_prove_and_verify::, Fr, QuarkGrandProduct>( + benchmark_prove_and_verify::< + HyperKZG, + Fr, + QuarkGrandProduct, + KeccakTranscript, + >( &mut c, BenchConfig { percentage_ones: 10, @@ -213,7 +235,12 @@ fn main() { // Hybrid max config.name = "HyperKZG Hybrid Max Crossover"; - benchmark_prove_and_verify::, Fr, QuarkGrandProduct>( + benchmark_prove_and_verify::< + HyperKZG, + Fr, + QuarkGrandProduct, + KeccakTranscript, + >( &mut c, config, QuarkGrandProductConfig { @@ -223,11 +250,19 @@ fn main() { // GKR config.name = "HyperKZG GKR"; - benchmark_prove_and_verify::, Fr, BatchedDenseGrandProduct>( + benchmark_prove_and_verify::< + HyperKZG, + Fr, + BatchedDenseGrandProduct, + KeccakTranscript, + >( &mut c, config, - as BatchedGrandProduct>>::Config::default( - ), + as BatchedGrandProduct< + Fr, + HyperKZG, + KeccakTranscript, + >>::Config::default(), ); c.final_summary(); diff --git a/jolt-core/src/benches/bench.rs b/jolt-core/src/benches/bench.rs index 3043bcda2..f4f6da289 100644 --- a/jolt-core/src/benches/bench.rs +++ b/jolt-core/src/benches/bench.rs @@ -6,6 +6,7 @@ use crate::poly::commitment::commitment_scheme::CommitmentScheme; use crate::poly::commitment::hyperkzg::HyperKZG; use crate::poly::commitment::hyrax::HyraxScheme; use crate::poly::commitment::zeromorph::Zeromorph; +use crate::utils::transcript::{KeccakTranscript, Transcript}; use ark_bn254::{Bn254, Fr, G1Projective}; use serde::Serialize; @@ -34,52 +35,71 @@ pub fn benchmarks( ) -> Vec<(tracing::Span, Box)> { match pcs_type { PCSType::Hyrax => match bench_type { - BenchType::Sha2 => sha2::>(), - BenchType::Sha3 => sha3::>(), - BenchType::Sha2Chain => sha2chain::>(), - BenchType::Fibonacci => fibonacci::>(), + BenchType::Sha2 => { + sha2::, KeccakTranscript>() + } + BenchType::Sha3 => { + sha3::, KeccakTranscript>() + } + BenchType::Sha2Chain => { + sha2chain::, KeccakTranscript>() + } + BenchType::Fibonacci => { + fibonacci::, KeccakTranscript>() + } _ => panic!("BenchType does not have a mapping"), }, PCSType::Zeromorph => match bench_type { - BenchType::Sha2 => sha2::>(), - BenchType::Sha3 => sha3::>(), - BenchType::Sha2Chain => sha2chain::>(), - BenchType::Fibonacci => fibonacci::>(), + BenchType::Sha2 => sha2::, KeccakTranscript>(), + BenchType::Sha3 => sha3::, KeccakTranscript>(), + BenchType::Sha2Chain => { + sha2chain::, KeccakTranscript>() + } + BenchType::Fibonacci => { + fibonacci::, KeccakTranscript>() + } _ => panic!("BenchType does not have a mapping"), }, PCSType::HyperKZG => match bench_type { - BenchType::Sha2 => sha2::>(), - BenchType::Sha3 => sha3::>(), - BenchType::Sha2Chain => sha2chain::>(), - BenchType::Fibonacci => fibonacci::>(), + BenchType::Sha2 => sha2::, KeccakTranscript>(), + BenchType::Sha3 => sha3::, KeccakTranscript>(), + BenchType::Sha2Chain => { + sha2chain::, KeccakTranscript>() + } + BenchType::Fibonacci => { + fibonacci::, KeccakTranscript>() + } _ => panic!("BenchType does not have a mapping"), }, _ => panic!("PCS Type does not have a mapping"), } } -fn fibonacci() -> Vec<(tracing::Span, Box)> +fn fibonacci() -> Vec<(tracing::Span, Box)> where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, + ProofTranscript: Transcript, { - prove_example::("fibonacci-guest", &9u32) + prove_example::("fibonacci-guest", &9u32) } -fn sha2() -> Vec<(tracing::Span, Box)> +fn sha2() -> Vec<(tracing::Span, Box)> where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, + ProofTranscript: Transcript, { - prove_example::, PCS, F>("sha2-guest", &vec![5u8; 2048]) + 
prove_example::, PCS, F, ProofTranscript>("sha2-guest", &vec![5u8; 2048]) } -fn sha3() -> Vec<(tracing::Span, Box)> +fn sha3() -> Vec<(tracing::Span, Box)> where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, + ProofTranscript: Transcript, { - prove_example::, PCS, F>("sha3-guest", &vec![5u8; 2048]) + prove_example::, PCS, F, ProofTranscript>("sha3-guest", &vec![5u8; 2048]) } #[allow(dead_code)] @@ -93,13 +113,14 @@ fn serialize_and_print_size(name: &str, item: &impl ark_serialize::CanonicalSeri println!("{:<30} : {:.3} MB", name, file_size_mb); } -fn prove_example( +fn prove_example( example_name: &str, input: &T, ) -> Vec<(tracing::Span, Box)> where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, + ProofTranscript: Transcript, { let mut tasks = Vec::new(); let mut program = host::Program::new(example_name); @@ -109,11 +130,15 @@ where let (bytecode, memory_init) = program.decode(); let (io_device, trace) = program.trace(); - let preprocessing: crate::jolt::vm::JoltPreprocessing = + let preprocessing: crate::jolt::vm::JoltPreprocessing = RV32IJoltVM::preprocess(bytecode.clone(), memory_init, 1 << 20, 1 << 20, 1 << 22); let (jolt_proof, jolt_commitments, _) = - >::prove(io_device, trace, preprocessing.clone()); + >::prove( + io_device, + trace, + preprocessing.clone(), + ); println!("Proof sizing:"); serialize_and_print_size("jolt_commitments", &jolt_commitments); @@ -146,10 +171,11 @@ where tasks } -fn sha2chain() -> Vec<(tracing::Span, Box)> +fn sha2chain() -> Vec<(tracing::Span, Box)> where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, + ProofTranscript: Transcript, { let mut tasks = Vec::new(); let mut program = host::Program::new("sha2-chain-guest"); @@ -160,11 +186,15 @@ where let (bytecode, memory_init) = program.decode(); let (io_device, trace) = program.trace(); - let preprocessing: crate::jolt::vm::JoltPreprocessing = + let preprocessing: crate::jolt::vm::JoltPreprocessing = RV32IJoltVM::preprocess(bytecode.clone(), memory_init, 1 << 20, 1 << 20, 1 << 22); let (jolt_proof, jolt_commitments, _) = - >::prove(io_device, trace, preprocessing.clone()); + >::prove( + io_device, + trace, + preprocessing.clone(), + ); let verification_result = RV32IJoltVM::verify(preprocessing, jolt_proof, jolt_commitments, None); assert!( diff --git a/jolt-core/src/jolt/vm/bytecode.rs b/jolt-core/src/jolt/vm/bytecode.rs index 68469cd42..48844513c 100644 --- a/jolt-core/src/jolt/vm/bytecode.rs +++ b/jolt-core/src/jolt/vm/bytecode.rs @@ -20,13 +20,13 @@ use common::to_ram_address; use rayon::prelude::*; +use super::{JoltPolynomials, JoltTraceStep}; +use crate::utils::transcript::Transcript; use crate::{ lasso::memory_checking::{MemoryCheckingProof, MemoryCheckingProver, MemoryCheckingVerifier}, poly::{dense_mlpoly::DensePolynomial, identity_poly::IdentityPolynomial}, }; -use super::{JoltPolynomials, JoltTraceStep}; - #[derive(Default, CanonicalSerialize, CanonicalDeserialize)] pub struct BytecodeStuff { /// Read/write addresses for offline memory-checking. @@ -59,7 +59,8 @@ pub type BytecodeOpenings = BytecodeStuff; /// Note –– PCS: CommitmentScheme bound is not enforced. /// See issue #112792 . 
/// Adding #![feature(lazy_type_alias)] to the crate attributes seem to break -pub type BytecodeCommitments = BytecodeStuff; +pub type BytecodeCommitments, ProofTranscript: Transcript> = + BytecodeStuff; impl Initializable> for BytecodeStuff @@ -92,8 +93,8 @@ impl StructuredPolynomialData } } -pub type BytecodeProof = - MemoryCheckingProof, NoExogenousOpenings>; +pub type BytecodeProof = + MemoryCheckingProof, NoExogenousOpenings, ProofTranscript>; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct BytecodeRow { @@ -290,7 +291,12 @@ impl BytecodePreprocessing { } } -impl> BytecodeProof { +impl BytecodeProof +where + F: JoltField, + PCS: CommitmentScheme, + ProofTranscript: Transcript, +{ #[tracing::instrument(skip_all, name = "BytecodePolynomials::new")] pub fn generate_witness( preprocessing: &BytecodePreprocessing, @@ -469,14 +475,16 @@ impl> BytecodeProof { } } -impl MemoryCheckingProver for BytecodeProof +impl MemoryCheckingProver + for BytecodeProof where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, + ProofTranscript: Transcript, { type Polynomials = BytecodePolynomials; type Openings = BytecodeOpenings; - type Commitments = BytecodeCommitments; + type Commitments = BytecodeCommitments; type Preprocessing = BytecodePreprocessing; // [virtual_address, elf_address, opcode, rd, rs1, rs2, imm, t] @@ -596,10 +604,12 @@ where } } -impl MemoryCheckingVerifier for BytecodeProof +impl MemoryCheckingVerifier + for BytecodeProof where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, + ProofTranscript: Transcript, { fn compute_verifier_openings( openings: &mut BytecodeOpenings, @@ -695,6 +705,7 @@ mod tests { use crate::{jolt::vm::rv32i_vm::RV32I, poly::commitment::hyrax::HyraxScheme}; use super::*; + use crate::utils::transcript::KeccakTranscript; use ark_bn254::{Fr, G1Projective}; use common::{ constants::MEMORY_OPS_PER_INSTRUCTION, @@ -757,21 +768,26 @@ mod tests { ]; let preprocessing = BytecodePreprocessing::preprocess(program.clone()); - let polys: BytecodePolynomials = - BytecodeProof::>::generate_witness::( - &preprocessing, - &mut trace, - ); + let polys: BytecodePolynomials = BytecodeProof::< + Fr, + HyraxScheme, + KeccakTranscript, + >::generate_witness::( + &preprocessing, &mut trace + ); let (gamma, tau) = (&Fr::from(100), &Fr::from(35)); - let (read_write_leaves, init_final_leaves) = - BytecodeProof::>::compute_leaves( - &preprocessing, - &polys, - &JoltPolynomials::default(), - gamma, - tau, - ); + let (read_write_leaves, init_final_leaves) = BytecodeProof::< + Fr, + HyraxScheme, + KeccakTranscript, + >::compute_leaves( + &preprocessing, + &polys, + &JoltPolynomials::default(), + gamma, + tau, + ); let init_leaves = &init_final_leaves[0]; let read_leaves = &read_write_leaves[0]; let write_leaves = &read_write_leaves[1]; @@ -798,7 +814,9 @@ mod tests { BytecodeRow::new(to_ram_address(2), 8u64, 8u64, 8u64, 8u64, 8u64), BytecodeRow::new(to_ram_address(5), 0u64, 0u64, 0u64, 0u64, 0u64), // no_op: shouldn't exist in pgoram ]; - BytecodeProof::>::validate_bytecode(&program, &trace); + BytecodeProof::, KeccakTranscript>::validate_bytecode( + &program, &trace, + ); } #[test] @@ -814,6 +832,8 @@ mod tests { BytecodeRow::new(to_ram_address(3), 16u64, 16u64, 16u64, 16u64, 16u64), BytecodeRow::new(to_ram_address(2), 8u64, 8u64, 8u64, 8u64, 8u64), ]; - BytecodeProof::>::validate_bytecode(&program, &trace); + BytecodeProof::, KeccakTranscript>::validate_bytecode( + &program, &trace, + ); } } diff --git 
a/jolt-core/src/jolt/vm/instruction_lookups.rs b/jolt-core/src/jolt/vm/instruction_lookups.rs index e62910f27..bd96fe475 100644 --- a/jolt-core/src/jolt/vm/instruction_lookups.rs +++ b/jolt-core/src/jolt/vm/instruction_lookups.rs @@ -16,6 +16,7 @@ use crate::lasso::memory_checking::{ }; use crate::poly::commitment::commitment_scheme::{BatchType, CommitShape, CommitmentScheme}; use crate::utils::mul_0_1_optimized; +use crate::utils::transcript::Transcript; use crate::{ lasso::memory_checking::{MemoryCheckingProof, MemoryCheckingProver, MemoryCheckingVerifier}, poly::{ @@ -25,11 +26,7 @@ use crate::{ unipoly::{CompressedUniPoly, UniPoly}, }, subprotocols::sumcheck::SumcheckInstanceProof, - utils::{ - errors::ProofVerifyError, - math::Math, - transcript::{AppendToTranscript, ProofTranscript}, - }, + utils::{errors::ProofVerifyError, math::Math, transcript::AppendToTranscript}, }; use super::{JoltCommitments, JoltPolynomials, JoltTraceStep}; @@ -79,8 +76,10 @@ pub type InstructionLookupOpenings = InstructionLookupStuff; /// See issue #112792 . /// Adding #![feature(lazy_type_alias)] to the crate attributes seem to break /// `alloy_sol_types`. -pub type InstructionLookupCommitments = - InstructionLookupStuff; +pub type InstructionLookupCommitments< + PCS: CommitmentScheme, + ProofTranscript: Transcript, +> = InstructionLookupStuff; impl Initializable> for InstructionLookupStuff @@ -154,19 +153,21 @@ where lookup_outputs_opening: F, } -impl MemoryCheckingProver - for InstructionLookupsProof +impl + MemoryCheckingProver + for InstructionLookupsProof where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, InstructionSet: JoltInstructionSet, Subtables: JoltSubtableSet, + ProofTranscript: Transcript, { - type ReadWriteGrandProduct = ToggledBatchedGrandProduct; + type ReadWriteGrandProduct = ToggledBatchedGrandProduct; type Polynomials = InstructionLookupPolynomials; type Openings = InstructionLookupOpenings; - type Commitments = InstructionLookupCommitments; + type Commitments = InstructionLookupCommitments; type Preprocessing = InstructionLookupsPreprocessing; @@ -188,8 +189,8 @@ where gamma: &F, tau: &F, ) -> ( - >::Leaves, - >::Leaves, + >::Leaves, + >::Leaves, ) { let gamma_squared = gamma.square(); let num_lookups = polynomials.dim[0].len(); @@ -362,17 +363,19 @@ where b"Instruction lookups check" } - type InitFinalGrandProduct = crate::subprotocols::grand_product::BatchedDenseGrandProduct; + type InitFinalGrandProduct = + crate::subprotocols::grand_product::BatchedDenseGrandProduct; } -impl - MemoryCheckingVerifier - for InstructionLookupsProof +impl + MemoryCheckingVerifier + for InstructionLookupsProof where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, InstructionSet: JoltInstructionSet, Subtables: JoltSubtableSet, + ProofTranscript: Transcript, { fn compute_verifier_openings( openings: &mut Self::Openings, @@ -459,24 +462,33 @@ pub struct InstructionLookupsProof< PCS, InstructionSet, Subtables, + ProofTranscript, > where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, Subtables: JoltSubtableSet, InstructionSet: JoltInstructionSet, + ProofTranscript: Transcript, { _instructions: PhantomData, _subtables: PhantomData, - primary_sumcheck: PrimarySumcheck, - memory_checking: MemoryCheckingProof, NoExogenousOpenings>, + primary_sumcheck: PrimarySumcheck, + memory_checking: MemoryCheckingProof< + F, + PCS, + InstructionLookupOpenings, + NoExogenousOpenings, + ProofTranscript, + >, } #[derive(CanonicalSerialize, CanonicalDeserialize)] -pub struct 
PrimarySumcheck { - sumcheck_proof: SumcheckInstanceProof, +pub struct PrimarySumcheck { + sumcheck_proof: SumcheckInstanceProof, num_rounds: usize, openings: PrimarySumcheckOpenings, // opening_proof: PCS::BatchedProof, + _marker: PhantomData, } #[derive(Clone)] @@ -560,13 +572,14 @@ impl InstructionLookupsPreprocessing { } } -impl - InstructionLookupsProof +impl + InstructionLookupsProof where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, InstructionSet: JoltInstructionSet, Subtables: JoltSubtableSet, + ProofTranscript: Transcript, { const NUM_SUBTABLES: usize = Subtables::COUNT; const NUM_INSTRUCTIONS: usize = InstructionSet::COUNT; @@ -576,10 +589,11 @@ where generators: &PCS::Setup, polynomials: &'a JoltPolynomials, preprocessing: &InstructionLookupsPreprocessing, - opening_accumulator: &mut ProverOpeningAccumulator, + opening_accumulator: &mut ProverOpeningAccumulator, transcript: &mut ProofTranscript, - ) -> InstructionLookupsProof { - transcript.append_protocol_name(Self::protocol_name()); + ) -> InstructionLookupsProof { + let protocol_name = Self::protocol_name(); + transcript.append_message(protocol_name); let trace_length = polynomials.instruction_lookups.dim[0].len(); let r_eq = transcript.challenge_vector(trace_length.log_2()); @@ -635,6 +649,7 @@ where sumcheck_proof: primary_sumcheck_proof, num_rounds, openings: sumcheck_openings, + _marker: PhantomData, }; let memory_checking = Self::prove_memory_checking( @@ -657,12 +672,13 @@ where pub fn verify( preprocessing: &InstructionLookupsPreprocessing, pcs_setup: &PCS::Setup, - proof: InstructionLookupsProof, - commitments: &JoltCommitments, - opening_accumulator: &mut VerifierOpeningAccumulator, + proof: InstructionLookupsProof, + commitments: &JoltCommitments, + opening_accumulator: &mut VerifierOpeningAccumulator, transcript: &mut ProofTranscript, ) -> Result<(), ProofVerifyError> { - transcript.append_protocol_name(Self::protocol_name()); + let protocol_name = Self::protocol_name(); + transcript.append_message(protocol_name); let r_eq = transcript.challenge_vector(proof.primary_sumcheck.num_rounds); @@ -850,7 +866,13 @@ where lookup_outputs_poly: &mut DensePolynomial, degree: usize, transcript: &mut ProofTranscript, - ) -> (SumcheckInstanceProof, Vec, Vec, Vec, F) { + ) -> ( + SumcheckInstanceProof, + Vec, + Vec, + Vec, + F, + ) { // Check all polys are the same size let poly_len = eq_poly.len(); memory_polys diff --git a/jolt-core/src/jolt/vm/mod.rs b/jolt-core/src/jolt/vm/mod.rs index 4d6c7497a..2216ecf5c 100644 --- a/jolt-core/src/jolt/vm/mod.rs +++ b/jolt-core/src/jolt/vm/mod.rs @@ -11,6 +11,7 @@ use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use common::constants::RAM_START_ADDRESS; use common::rv_trace::NUM_CIRCUIT_FLAGS; use serde::{Deserialize, Serialize}; +use std::marker::PhantomData; use strum::EnumCount; use timestamp_range_check::TimestampRangeCheckStuff; @@ -31,7 +32,7 @@ use crate::poly::dense_mlpoly::DensePolynomial; use crate::r1cs::inputs::{ConstraintInput, R1CSPolynomials, R1CSProof, R1CSStuff}; use crate::utils::errors::ProofVerifyError; use crate::utils::thread::drop_in_background_thread; -use crate::utils::transcript::{AppendToTranscript, ProofTranscript}; +use crate::utils::transcript::{AppendToTranscript, Transcript}; use common::{ constants::MEMORY_OPS_PER_INSTRUCTION, rv_trace::{ELFInstruction, JoltDevice, MemoryOp}, @@ -49,10 +50,11 @@ use self::read_write_memory::{ use super::instruction::JoltInstructionSet; #[derive(Clone)] -pub struct JoltPreprocessing +pub 
struct JoltPreprocessing where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, + ProofTranscript: Transcript, { pub generators: PCS::Setup, pub instruction_lookups: InstructionLookupsPreprocessing, @@ -68,9 +70,13 @@ pub struct JoltTraceStep { pub circuit_flags: [bool; NUM_CIRCUIT_FLAGS], } -pub struct ProverDebugInfo { +pub struct ProverDebugInfo +where + F: JoltField, + ProofTranscript: Transcript, +{ pub(crate) transcript: ProofTranscript, - pub(crate) opening_accumulator: ProverOpeningAccumulator, + pub(crate) opening_accumulator: ProverOpeningAccumulator, } impl JoltTraceStep { @@ -99,21 +105,31 @@ impl JoltTraceStep { } #[derive(CanonicalSerialize, CanonicalDeserialize)] -pub struct JoltProof -where +pub struct JoltProof< + const C: usize, + const M: usize, + I, + F, + PCS, + InstructionSet, + Subtables, + ProofTranscript, +> where I: ConstraintInput, F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, InstructionSet: JoltInstructionSet, Subtables: JoltSubtableSet, + ProofTranscript: Transcript, { pub trace_length: usize, pub program_io: JoltDevice, - pub bytecode: BytecodeProof, - pub read_write_memory: ReadWriteMemoryProof, - pub instruction_lookups: InstructionLookupsProof, - pub r1cs: UniformSpartanProof, - pub opening_proof: ReducedOpeningProof, + pub bytecode: BytecodeProof, + pub read_write_memory: ReadWriteMemoryProof, + pub instruction_lookups: + InstructionLookupsProof, + pub r1cs: UniformSpartanProof, + pub opening_proof: ReducedOpeningProof, } #[derive(Default, CanonicalSerialize, CanonicalDeserialize)] @@ -182,15 +198,17 @@ pub type JoltPolynomials = JoltStuff>; /// See issue #112792 . /// Adding #![feature(lazy_type_alias)] to the crate attributes seem to break /// `alloy_sol_types`. -pub type JoltCommitments = JoltStuff; +pub type JoltCommitments, ProofTranscript: Transcript> = + JoltStuff; impl< const C: usize, T: CanonicalSerialize + CanonicalDeserialize + Default + Sync, - PCS: CommitmentScheme, - > Initializable> for JoltStuff + PCS: CommitmentScheme, + ProofTranscript: Transcript, + > Initializable> for JoltStuff { - fn initialize(preprocessing: &JoltPreprocessing) -> Self { + fn initialize(preprocessing: &JoltPreprocessing) -> Self { Self { bytecode: BytecodeStuff::initialize(&preprocessing.bytecode), read_write_memory: ReadWriteMemoryStuff::initialize(&preprocessing.read_write_memory), @@ -207,11 +225,15 @@ impl< impl JoltPolynomials { #[tracing::instrument(skip_all, name = "JoltPolynomials::commit")] - pub fn commit>( + pub fn commit( &self, - preprocessing: &JoltPreprocessing, - ) -> JoltCommitments { - let mut commitments = JoltCommitments::::initialize(preprocessing); + preprocessing: &JoltPreprocessing, + ) -> JoltCommitments + where + PCS: CommitmentScheme, + ProofTranscript: Transcript, + { + let mut commitments = JoltCommitments::::initialize(preprocessing); let trace_polys = self.read_write_values(); let trace_comitments = @@ -241,7 +263,12 @@ impl JoltPolynomials { } } -pub trait Jolt, const C: usize, const M: usize> { +pub trait Jolt +where + F: JoltField, + PCS: CommitmentScheme, + ProofTranscript: Transcript, +{ type InstructionSet: JoltInstructionSet; type Subtables: JoltSubtableSet; type Constraints: R1CSConstraints; @@ -253,15 +280,17 @@ pub trait Jolt, const C: usize, c max_bytecode_size: usize, max_memory_address: usize, max_trace_length: usize, - ) -> JoltPreprocessing { - let bytecode_commitment_shapes = - BytecodeProof::::commit_shapes(max_bytecode_size, max_trace_length); + ) -> JoltPreprocessing { + let 
bytecode_commitment_shapes = BytecodeProof::::commit_shapes( + max_bytecode_size, + max_trace_length, + ); let ram_commitment_shapes = ReadWriteMemoryPolynomials::::commitment_shapes( max_memory_address, max_trace_length, ); let timestamp_range_check_commitment_shapes = - TimestampValidityProof::::commitment_shapes(max_trace_length); + TimestampValidityProof::::commitment_shapes(max_trace_length); let instruction_lookups_commitment_shapes = InstructionLookupsProof::< C, @@ -270,6 +299,7 @@ pub trait Jolt, const C: usize, c PCS, Self::InstructionSet, Self::Subtables, + ProofTranscript, >::commitment_shapes(max_trace_length); let instruction_lookups_preprocessing = InstructionLookupsPreprocessing::preprocess::< @@ -316,7 +346,7 @@ pub trait Jolt, const C: usize, c fn prove( program_io: JoltDevice, mut trace: Vec>, - preprocessing: JoltPreprocessing, + preprocessing: JoltPreprocessing, ) -> ( JoltProof< C, @@ -326,9 +356,10 @@ pub trait Jolt, const C: usize, c PCS, Self::InstructionSet, Self::Subtables, + ProofTranscript, >, - JoltCommitments, - Option>, + JoltCommitments, + Option>, ) { let trace_length = trace.len(); let padded_trace_length = trace_length.next_power_of_two(); @@ -339,16 +370,16 @@ pub trait Jolt, const C: usize, c let mut transcript = ProofTranscript::new(b"Jolt transcript"); Self::fiat_shamir_preamble(&mut transcript, &program_io, trace_length); - let instruction_polynomials = InstructionLookupsProof::< - C, - M, - F, - PCS, - Self::InstructionSet, - Self::Subtables, - >::generate_witness( - &preprocessing.instruction_lookups, &trace - ); + let instruction_polynomials = + InstructionLookupsProof::< + C, + M, + F, + PCS, + Self::InstructionSet, + Self::Subtables, + ProofTranscript, + >::generate_witness(&preprocessing.instruction_lookups, &trace); let load_store_flags = &instruction_polynomials.instruction_flags[5..10]; let (memory_polynomials, read_timestamps) = ReadWriteMemoryPolynomials::generate_witness( @@ -359,8 +390,17 @@ pub trait Jolt, const C: usize, c ); let (bytecode_polynomials, range_check_polys) = rayon::join( - || BytecodeProof::::generate_witness(&preprocessing.bytecode, &mut trace), - || TimestampValidityProof::::generate_witness(&read_timestamps), + || { + BytecodeProof::::generate_witness( + &preprocessing.bytecode, + &mut trace, + ) + }, + || { + TimestampValidityProof::::generate_witness( + &read_timestamps, + ) + }, ); let r1cs_builder = Self::Constraints::construct_constraints( @@ -371,6 +411,7 @@ pub trait Jolt, const C: usize, c C, >::Inputs, F, + ProofTranscript, >::setup(&r1cs_builder, padded_trace_length); let r1cs_polynomials = R1CSPolynomials::new::< @@ -390,7 +431,7 @@ pub trait Jolt, const C: usize, c r1cs_builder.compute_aux(&mut jolt_polynomials); - let jolt_commitments = jolt_polynomials.commit::(&preprocessing); + let jolt_commitments = jolt_polynomials.commit::(&preprocessing); transcript.append_scalar(&spartan_key.vk_digest); @@ -403,7 +444,8 @@ pub trait Jolt, const C: usize, c .iter() .for_each(|value| value.append_to_transcript(&mut transcript)); - let mut opening_accumulator: ProverOpeningAccumulator = ProverOpeningAccumulator::new(); + let mut opening_accumulator: ProverOpeningAccumulator = + ProverOpeningAccumulator::new(); let bytecode_proof = BytecodeProof::prove_memory_checking( &preprocessing.generators, @@ -435,6 +477,7 @@ pub trait Jolt, const C: usize, c C, >::Inputs, F, + ProofTranscript, >::prove::( &r1cs_builder, &spartan_key, @@ -472,7 +515,7 @@ pub trait Jolt, const C: usize, c #[tracing::instrument(skip_all)] fn 
verify( - mut preprocessing: JoltPreprocessing, + mut preprocessing: JoltPreprocessing, proof: JoltProof< C, M, @@ -481,12 +524,13 @@ pub trait Jolt, const C: usize, c PCS, Self::InstructionSet, Self::Subtables, + ProofTranscript, >, - commitments: JoltCommitments, - _debug_info: Option>, + commitments: JoltCommitments, + _debug_info: Option>, ) -> Result<(), ProofVerifyError> { let mut transcript = ProofTranscript::new(b"Jolt transcript"); - let mut opening_accumulator: VerifierOpeningAccumulator = + let mut opening_accumulator: VerifierOpeningAccumulator = VerifierOpeningAccumulator::new(); #[cfg(test)] @@ -502,12 +546,16 @@ pub trait Jolt, const C: usize, c let memory_start = RAM_START_ADDRESS - proof.program_io.memory_layout.ram_witness_offset; let r1cs_builder = Self::Constraints::construct_constraints(padded_trace_length, memory_start); - let spartan_key = spartan::UniformSpartanProof::setup(&r1cs_builder, padded_trace_length); + let spartan_key = spartan::UniformSpartanProof::::setup( + &r1cs_builder, + padded_trace_length, + ); transcript.append_scalar(&spartan_key.vk_digest); let r1cs_proof = R1CSProof { key: spartan_key, proof: proof.r1cs, + _marker: PhantomData, }; commitments @@ -566,9 +614,17 @@ pub trait Jolt, const C: usize, c fn verify_instruction_lookups<'a>( preprocessing: &InstructionLookupsPreprocessing, generators: &PCS::Setup, - proof: InstructionLookupsProof, - commitments: &'a JoltCommitments, - opening_accumulator: &mut VerifierOpeningAccumulator, + proof: InstructionLookupsProof< + C, + M, + F, + PCS, + Self::InstructionSet, + Self::Subtables, + ProofTranscript, + >, + commitments: &'a JoltCommitments, + opening_accumulator: &mut VerifierOpeningAccumulator, transcript: &mut ProofTranscript, ) -> Result<(), ProofVerifyError> { InstructionLookupsProof::verify( @@ -585,9 +641,9 @@ pub trait Jolt, const C: usize, c fn verify_bytecode<'a>( preprocessing: &BytecodePreprocessing, generators: &PCS::Setup, - proof: BytecodeProof, - commitments: &'a JoltCommitments, - opening_accumulator: &mut VerifierOpeningAccumulator, + proof: BytecodeProof, + commitments: &'a JoltCommitments, + opening_accumulator: &mut VerifierOpeningAccumulator, transcript: &mut ProofTranscript, ) -> Result<(), ProofVerifyError> { BytecodeProof::verify_memory_checking( @@ -605,10 +661,10 @@ pub trait Jolt, const C: usize, c fn verify_memory<'a>( preprocessing: &mut ReadWriteMemoryPreprocessing, generators: &PCS::Setup, - proof: ReadWriteMemoryProof, - commitment: &'a JoltCommitments, + proof: ReadWriteMemoryProof, + commitment: &'a JoltCommitments, program_io: JoltDevice, - opening_accumulator: &mut VerifierOpeningAccumulator, + opening_accumulator: &mut VerifierOpeningAccumulator, transcript: &mut ProofTranscript, ) -> Result<(), ProofVerifyError> { assert!(program_io.inputs.len() <= program_io.memory_layout.max_input_size as usize); @@ -627,9 +683,14 @@ pub trait Jolt, const C: usize, c #[tracing::instrument(skip_all)] fn verify_r1cs<'a>( - proof: R1CSProof>::Inputs, F>, - commitments: &'a JoltCommitments, - opening_accumulator: &mut VerifierOpeningAccumulator, + proof: R1CSProof< + C, + >::Inputs, + F, + ProofTranscript, + >, + commitments: &'a JoltCommitments, + opening_accumulator: &mut VerifierOpeningAccumulator, transcript: &mut ProofTranscript, ) -> Result<(), ProofVerifyError> { proof diff --git a/jolt-core/src/jolt/vm/read_write_memory.rs b/jolt-core/src/jolt/vm/read_write_memory.rs index 7cc31ddc8..20fb33cf6 100644 --- a/jolt-core/src/jolt/vm/read_write_memory.rs +++ 
b/jolt-core/src/jolt/vm/read_write_memory.rs @@ -14,6 +14,7 @@ use std::marker::PhantomData; use std::sync::{Arc, Mutex}; use crate::poly::commitment::commitment_scheme::{BatchType, CommitShape, CommitmentScheme}; +use crate::utils::transcript::Transcript; use crate::{ lasso::memory_checking::{ MemoryCheckingProof, MemoryCheckingProver, MemoryCheckingVerifier, MultisetHashes, @@ -22,7 +23,7 @@ use crate::{ dense_mlpoly::DensePolynomial, eq_poly::EqPolynomial, identity_poly::IdentityPolynomial, }, subprotocols::sumcheck::SumcheckInstanceProof, - utils::{errors::ProofVerifyError, math::Math, mul_0_optimized, transcript::ProofTranscript}, + utils::{errors::ProofVerifyError, math::Math, mul_0_optimized}, }; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use common::constants::{ @@ -179,7 +180,10 @@ pub type ReadWriteMemoryOpenings = ReadWriteMemoryStuff; /// See issue #112792 . /// Adding #![feature(lazy_type_alias)] to the crate attributes seem to break /// `alloy_sol_types`. -pub type ReadWriteMemoryCommitments = ReadWriteMemoryStuff; +pub type ReadWriteMemoryCommitments< + PCS: CommitmentScheme, + ProofTranscript: Transcript, +> = ReadWriteMemoryStuff; impl Initializable for ReadWriteMemoryStuff @@ -866,14 +870,16 @@ impl ReadWriteMemoryPolynomials { } } -impl MemoryCheckingProver for ReadWriteMemoryProof +impl MemoryCheckingProver + for ReadWriteMemoryProof where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, + ProofTranscript: Transcript, { type Polynomials = ReadWriteMemoryPolynomials; type Openings = ReadWriteMemoryOpenings; - type Commitments = ReadWriteMemoryCommitments; + type Commitments = ReadWriteMemoryCommitments; type Preprocessing = ReadWriteMemoryPreprocessing; type ExogenousOpenings = RegisterAddressOpenings; @@ -1038,10 +1044,12 @@ where } } -impl MemoryCheckingVerifier for ReadWriteMemoryProof +impl MemoryCheckingVerifier + for ReadWriteMemoryProof where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, + ProofTranscript: Transcript, { fn compute_verifier_openings( openings: &mut Self::Openings, @@ -1156,28 +1164,30 @@ where } #[derive(CanonicalSerialize, CanonicalDeserialize)] -pub struct OutputSumcheckProof +pub struct OutputSumcheckProof where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, + ProofTranscript: Transcript, { - _pcs: PhantomData, + _pcs: PhantomData<(PCS, ProofTranscript)>, num_rounds: usize, /// Sumcheck proof that v_final is equal to the program outputs at the relevant indices. 
- sumcheck_proof: SumcheckInstanceProof, + sumcheck_proof: SumcheckInstanceProof, /// Opening of v_final at the random point chosen over the course of sumcheck opening: F, } -impl OutputSumcheckProof +impl OutputSumcheckProof where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, + ProofTranscript: Transcript, { fn prove_outputs( polynomials: &ReadWriteMemoryPolynomials, program_io: &JoltDevice, - opening_accumulator: &mut ProverOpeningAccumulator, + opening_accumulator: &mut ProverOpeningAccumulator, transcript: &mut ProofTranscript, ) -> Self { let memory_size = polynomials.v_final.len(); @@ -1242,7 +1252,7 @@ where let output_check_fn = |vals: &[F]| -> F { vals[0] * vals[1] * (vals[2] - vals[3]) }; let (sumcheck_proof, r_sumcheck, sumcheck_openings) = - SumcheckInstanceProof::::prove_arbitrary::<_>( + SumcheckInstanceProof::::prove_arbitrary::<_>( &F::zero(), num_rounds, &mut sumcheck_polys, @@ -1270,8 +1280,8 @@ where fn verify( proof: &Self, preprocessing: &ReadWriteMemoryPreprocessing, - commitment: &ReadWriteMemoryCommitments, - opening_accumulator: &mut VerifierOpeningAccumulator, + commitment: &ReadWriteMemoryCommitments, + opening_accumulator: &mut VerifierOpeningAccumulator, transcript: &mut ProofTranscript, ) -> Result<(), ProofVerifyError> { let r_eq = transcript.challenge_vector(proof.num_rounds); @@ -1367,21 +1377,28 @@ where } #[derive(CanonicalSerialize, CanonicalDeserialize)] -pub struct ReadWriteMemoryProof +pub struct ReadWriteMemoryProof where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, + ProofTranscript: Transcript, { - pub memory_checking_proof: - MemoryCheckingProof, RegisterAddressOpenings>, - pub timestamp_validity_proof: TimestampValidityProof, - pub output_proof: OutputSumcheckProof, + pub memory_checking_proof: MemoryCheckingProof< + F, + PCS, + ReadWriteMemoryOpenings, + RegisterAddressOpenings, + ProofTranscript, + >, + pub timestamp_validity_proof: TimestampValidityProof, + pub output_proof: OutputSumcheckProof, } -impl ReadWriteMemoryProof +impl ReadWriteMemoryProof where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, + ProofTranscript: Transcript, { #[tracing::instrument(skip_all, name = "ReadWriteMemoryProof::prove")] pub fn prove<'a>( @@ -1389,7 +1406,7 @@ where preprocessing: &ReadWriteMemoryPreprocessing, polynomials: &'a JoltPolynomials, program_io: &JoltDevice, - opening_accumulator: &mut ProverOpeningAccumulator, + opening_accumulator: &mut ProverOpeningAccumulator, transcript: &mut ProofTranscript, ) -> Self { let memory_checking_proof = ReadWriteMemoryProof::prove_memory_checking( @@ -1427,8 +1444,8 @@ where mut self, generators: &PCS::Setup, preprocessing: &ReadWriteMemoryPreprocessing, - commitments: &JoltCommitments, - opening_accumulator: &mut VerifierOpeningAccumulator, + commitments: &JoltCommitments, + opening_accumulator: &mut VerifierOpeningAccumulator, transcript: &mut ProofTranscript, ) -> Result<(), ProofVerifyError> { ReadWriteMemoryProof::verify_memory_checking( diff --git a/jolt-core/src/jolt/vm/rv32i_vm.rs b/jolt-core/src/jolt/vm/rv32i_vm.rs index 13f8d77b4..ba09b6787 100644 --- a/jolt-core/src/jolt/vm/rv32i_vm.rs +++ b/jolt-core/src/jolt/vm/rv32i_vm.rs @@ -173,18 +173,21 @@ pub enum RV32IJoltVM {} pub const C: usize = 4; pub const M: usize = 1 << 16; -impl Jolt for RV32IJoltVM +impl Jolt for RV32IJoltVM where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, + ProofTranscript: Transcript, { type InstructionSet = RV32I; type Subtables = RV32ISubtables; type 
Constraints = JoltRV32IMConstraints; } -pub type RV32IJoltProof = JoltProof>; +pub type RV32IJoltProof = + JoltProof, ProofTranscript>; +use crate::utils::transcript::{KeccakTranscript, Transcript}; use eyre::Result; use std::fs::File; use std::io::Cursor; @@ -225,11 +228,12 @@ pub trait Serializable: CanonicalSerialize + CanonicalDeserialize + Sized { } } -pub type PCS = HyperKZG; +pub type ProofTranscript = KeccakTranscript; +pub type PCS = HyperKZG; #[derive(CanonicalSerialize, CanonicalDeserialize)] pub struct JoltHyperKZGProof { - pub proof: RV32IJoltProof, - pub commitments: JoltCommitments, + pub proof: RV32IJoltProof, + pub commitments: JoltCommitments, } impl Serializable for JoltHyperKZGProof {} @@ -251,6 +255,7 @@ mod tests { use crate::poly::commitment::hyrax::HyraxScheme; use crate::poly::commitment::mock::MockCommitScheme; use crate::poly::commitment::zeromorph::Zeromorph; + use crate::utils::transcript::{KeccakTranscript, Transcript}; use std::sync::Mutex; use strum::{EnumCount, IntoEnumIterator}; @@ -260,31 +265,46 @@ mod tests { static ref SHA3_FILE_LOCK: Mutex<()> = Mutex::new(()); } - fn test_instruction_set_subtables() { + fn test_instruction_set_subtables() + where + PCS: CommitmentScheme, + ProofTranscript: Transcript, + { let mut subtable_set: HashSet<_> = HashSet::new(); - for instruction in >::InstructionSet::iter() { + for instruction in + >::InstructionSet::iter() + { for (subtable, _) in instruction.subtables::(C, M) { // panics if subtable cannot be cast to enum variant - let _ = - >::Subtables::from(subtable.subtable_id()); + let _ = >::Subtables::from( + subtable.subtable_id(), + ); subtable_set.insert(subtable.subtable_id()); } } assert_eq!( subtable_set.len(), - >::Subtables::COUNT, + >::Subtables::COUNT, "Unused enum variants in Subtables" ); } #[test] fn instruction_set_subtables() { - test_instruction_set_subtables::>(); - test_instruction_set_subtables::>(); - test_instruction_set_subtables::>(); + test_instruction_set_subtables::< + HyraxScheme, + KeccakTranscript, + >(); + test_instruction_set_subtables::, KeccakTranscript>(); + test_instruction_set_subtables::, KeccakTranscript>(); } - fn fib_e2e>() { + fn fib_e2e() + where + F: JoltField, + PCS: CommitmentScheme, + ProofTranscript: Transcript, + { let artifact_guard = FIB_FILE_LOCK.lock().unwrap(); let mut program = host::Program::new("fibonacci-guest"); program.set_input(&9u32); @@ -295,7 +315,11 @@ mod tests { let preprocessing = RV32IJoltVM::preprocess(bytecode.clone(), memory_init, 1 << 20, 1 << 20, 1 << 20); let (proof, commitments, debug_info) = - >::prove(io_device, trace, preprocessing.clone()); + >::prove( + io_device, + trace, + preprocessing.clone(), + ); let verification_result = RV32IJoltVM::verify(preprocessing, proof, commitments, debug_info); assert!( @@ -307,23 +331,27 @@ mod tests { #[test] fn fib_e2e_mock() { - fib_e2e::>(); + fib_e2e::, KeccakTranscript>(); } #[ignore = "Opening proof reduction for Hyrax doesn't work right now"] #[test] fn fib_e2e_hyrax() { - fib_e2e::>(); + fib_e2e::< + ark_bn254::Fr, + HyraxScheme, + KeccakTranscript, + >(); } #[test] fn fib_e2e_zeromorph() { - fib_e2e::>(); + fib_e2e::, KeccakTranscript>(); } #[test] fn fib_e2e_hyperkzg() { - fib_e2e::>(); + fib_e2e::, KeccakTranscript>(); } // TODO(sragss): Finish Binius. 
@@ -346,11 +374,13 @@ mod tests { let preprocessing = RV32IJoltVM::preprocess(bytecode.clone(), memory_init, 1 << 20, 1 << 20, 1 << 20); let (jolt_proof, jolt_commitments, debug_info) = - , C, M>>::prove( - io_device, - trace, - preprocessing.clone(), - ); + , + C, + M, + KeccakTranscript, + >>::prove(io_device, trace, preprocessing.clone()); let verification_result = RV32IJoltVM::verify(preprocessing, jolt_proof, jolt_commitments, debug_info); assert!( @@ -374,11 +404,13 @@ mod tests { let preprocessing = RV32IJoltVM::preprocess(bytecode.clone(), memory_init, 1 << 20, 1 << 20, 1 << 20); let (jolt_proof, jolt_commitments, debug_info) = - , C, M>>::prove( - io_device, - trace, - preprocessing.clone(), - ); + , + C, + M, + KeccakTranscript, + >>::prove(io_device, trace, preprocessing.clone()); let verification_result = RV32IJoltVM::verify(preprocessing, jolt_proof, jolt_commitments, debug_info); @@ -401,12 +433,15 @@ mod tests { let preprocessing = RV32IJoltVM::preprocess(bytecode.clone(), memory_init, 1 << 20, 1 << 20, 1 << 20); - let (jolt_proof, jolt_commitments, debug_info) = - , C, M>>::prove( - io_device, - trace, - preprocessing.clone(), - ); + let (jolt_proof, jolt_commitments, debug_info) = , + C, + M, + KeccakTranscript, + >>::prove( + io_device, trace, preprocessing.clone() + ); let verification_result = RV32IJoltVM::verify(preprocessing, jolt_proof, jolt_commitments, debug_info); @@ -429,12 +464,15 @@ mod tests { let preprocessing = RV32IJoltVM::preprocess(bytecode.clone(), memory_init, 1 << 20, 1 << 20, 1 << 20); - let (jolt_proof, jolt_commitments, debug_info) = - , C, M>>::prove( - io_device, - trace, - preprocessing.clone(), - ); + let (jolt_proof, jolt_commitments, debug_info) = , + C, + M, + KeccakTranscript, + >>::prove( + io_device, trace, preprocessing.clone() + ); let verification_result = RV32IJoltVM::verify(preprocessing, jolt_proof, jolt_commitments, debug_info); @@ -459,12 +497,15 @@ mod tests { let preprocessing = RV32IJoltVM::preprocess(bytecode.clone(), memory_init, 1 << 20, 1 << 20, 1 << 20); - let (proof, commitments, debug_info) = - , C, M>>::prove( - io_device, - trace, - preprocessing.clone(), - ); + let (proof, commitments, debug_info) = , + C, + M, + KeccakTranscript, + >>::prove( + io_device, trace, preprocessing.clone() + ); let verification_result = RV32IJoltVM::verify(preprocessing, proof, commitments, debug_info); assert!( diff --git a/jolt-core/src/jolt/vm/timestamp_range_check.rs b/jolt-core/src/jolt/vm/timestamp_range_check.rs index 3f9a497ce..037ee536d 100644 --- a/jolt-core/src/jolt/vm/timestamp_range_check.rs +++ b/jolt-core/src/jolt/vm/timestamp_range_check.rs @@ -17,6 +17,7 @@ use std::collections::HashSet; use std::iter::zip; use crate::poly::commitment::commitment_scheme::{BatchType, CommitShape, CommitmentScheme}; +use crate::utils::transcript::Transcript; use crate::{ lasso::memory_checking::{ MemoryCheckingProof, MemoryCheckingProver, MemoryCheckingVerifier, MultisetHashes, @@ -25,7 +26,7 @@ use crate::{ poly::{ dense_mlpoly::DensePolynomial, eq_poly::EqPolynomial, identity_poly::IdentityPolynomial, }, - utils::{errors::ProofVerifyError, mul_0_1_optimized, transcript::ProofTranscript}, + utils::{errors::ProofVerifyError, mul_0_1_optimized}, }; use super::{JoltCommitments, JoltPolynomials, JoltStuff}; @@ -83,8 +84,10 @@ pub type TimestampRangeCheckOpenings = TimestampRangeCheckStuff /// See issue #112792 . /// Adding #![feature(lazy_type_alias)] to the crate attributes seem to break /// `alloy_sol_types`. 
-pub type TimestampRangeCheckCommitments = - TimestampRangeCheckStuff; +pub type TimestampRangeCheckCommitments< + PCS: CommitmentScheme, + ProofTranscript: Transcript, +> = TimestampRangeCheckStuff; impl Initializable for TimestampRangeCheckStuff @@ -112,7 +115,12 @@ impl ExogenousOpenings for ReadTimestampOpenings { } } -impl> TimestampValidityProof { +impl TimestampValidityProof +where + F: JoltField, + PCS: CommitmentScheme, + ProofTranscript: Transcript, +{ #[tracing::instrument(skip_all, name = "TimestampRangeCheckWitness::new")] pub fn generate_witness( read_timestamps: &[Vec; MEMORY_OPS_PER_INSTRUCTION], @@ -230,14 +238,16 @@ impl> TimestampValidityProof MemoryCheckingProver for TimestampValidityProof +impl MemoryCheckingProver + for TimestampValidityProof where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, + ProofTranscript: Transcript, { type Polynomials = TimestampRangeCheckPolynomials; type Openings = TimestampRangeCheckOpenings; - type Commitments = TimestampRangeCheckCommitments; + type Commitments = TimestampRangeCheckCommitments; type ExogenousOpenings = ReadTimestampOpenings; // Init/final grand products are batched together with read/write grand products @@ -248,9 +258,9 @@ where _: &NoPreprocessing, _: &Self::Polynomials, _: &JoltPolynomials, - _: &mut ProverOpeningAccumulator, + _: &mut ProverOpeningAccumulator, _: &mut ProofTranscript, - ) -> MemoryCheckingProof { + ) -> MemoryCheckingProof { unimplemented!("Use TimestampValidityProof::prove instead"); } @@ -432,10 +442,12 @@ where } } -impl MemoryCheckingVerifier for TimestampValidityProof +impl MemoryCheckingVerifier + for TimestampValidityProof where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, + ProofTranscript: Transcript, { fn compute_verifier_openings(_: &mut Self::Openings, _: &NoPreprocessing, _: &[F], _: &[F]) { unimplemented!("") @@ -444,10 +456,16 @@ where fn verify_memory_checking( _: &NoPreprocessing, _: &PCS::Setup, - mut _proof: MemoryCheckingProof, + mut _proof: MemoryCheckingProof< + F, + PCS, + Self::Openings, + Self::ExogenousOpenings, + ProofTranscript, + >, _commitments: &Self::Commitments, - _: &JoltCommitments, - _opening_accumulator: &mut VerifierOpeningAccumulator, + _: &JoltCommitments, + _opening_accumulator: &mut VerifierOpeningAccumulator, _transcript: &mut ProofTranscript, ) -> Result<(), ProofVerifyError> { unimplemented!("Use TimestampValidityProof::verify instead"); @@ -536,8 +554,11 @@ where } pub struct NoopGrandProduct; -impl> BatchedGrandProduct - for NoopGrandProduct +impl BatchedGrandProduct for NoopGrandProduct +where + F: JoltField, + PCS: CommitmentScheme, + ProofTranscript: Transcript, { type Leaves = (); type Config = (); @@ -555,22 +576,24 @@ impl> BatchedGrandProduct unimplemented!("init/final grand products are batched with read/write grand products"); } - fn layers(&'_ mut self) -> impl Iterator> { + fn layers( + &'_ mut self, + ) -> impl Iterator> { std::iter::empty() // Needed to compile } fn prove_grand_product( &mut self, - _opening_accumulator: Option<&mut ProverOpeningAccumulator>, + _opening_accumulator: Option<&mut ProverOpeningAccumulator>, _transcript: &mut ProofTranscript, _setup: Option<&PCS::Setup>, - ) -> (BatchedGrandProductProof, Vec) { + ) -> (BatchedGrandProductProof, Vec) { unimplemented!("init/final grand products are batched with read/write grand products") } fn verify_grand_product( - _proof: &BatchedGrandProductProof, + _proof: &BatchedGrandProductProof, _claims: &Vec, - _opening_accumulator: Option<&mut 
VerifierOpeningAccumulator>, + _opening_accumulator: Option<&mut VerifierOpeningAccumulator>, _transcript: &mut ProofTranscript, _setup: Option<&PCS::Setup>, ) -> (Vec, Vec) { @@ -579,28 +602,30 @@ impl> BatchedGrandProduct } #[derive(CanonicalSerialize, CanonicalDeserialize)] -pub struct TimestampValidityProof +pub struct TimestampValidityProof where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, + ProofTranscript: Transcript, { multiset_hashes: MultisetHashes, openings: TimestampRangeCheckOpenings, exogenous_openings: ReadTimestampOpenings, - batched_grand_product: BatchedGrandProductProof, + batched_grand_product: BatchedGrandProductProof, } -impl TimestampValidityProof +impl TimestampValidityProof where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, + ProofTranscript: Transcript, { #[tracing::instrument(skip_all, name = "TimestampValidityProof::prove")] pub fn prove<'a>( generators: &PCS::Setup, polynomials: &'a TimestampRangeCheckPolynomials, jolt_polynomials: &'a JoltPolynomials, - opening_accumulator: &mut ProverOpeningAccumulator, + opening_accumulator: &mut ProverOpeningAccumulator, transcript: &mut ProofTranscript, ) -> Self { let (batched_grand_product, multiset_hashes, r_grand_product) = @@ -659,17 +684,22 @@ where fn prove_grand_products( polynomials: &TimestampRangeCheckPolynomials, jolt_polynomials: &JoltPolynomials, - opening_accumulator: &mut ProverOpeningAccumulator, + opening_accumulator: &mut ProverOpeningAccumulator, transcript: &mut ProofTranscript, setup: &PCS::Setup, - ) -> (BatchedGrandProductProof, MultisetHashes, Vec) { + ) -> ( + BatchedGrandProductProof, + MultisetHashes, + Vec, + ) { // Fiat-Shamir randomness for multiset hashes let gamma: F = transcript.challenge_scalar(); let tau: F = transcript.challenge_scalar(); - transcript.append_protocol_name(Self::protocol_name()); + let protocol_name = Self::protocol_name(); + transcript.append_message(protocol_name); - let (leaves, _) = TimestampValidityProof::::compute_leaves( + let (leaves, _) = TimestampValidityProof::::compute_leaves( &NoPreprocessing, polynomials, jolt_polynomials, @@ -678,18 +708,27 @@ where ); let mut batched_circuit = - as BatchedGrandProduct>::construct(leaves); + as BatchedGrandProduct< + F, + PCS, + ProofTranscript, + >>::construct(leaves); let hashes: Vec = - as BatchedGrandProduct>::claims(&batched_circuit); + as BatchedGrandProduct< + F, + PCS, + ProofTranscript, + >>::claims(&batched_circuit); let (read_write_hashes, init_final_hashes) = hashes.split_at(4 * MEMORY_OPS_PER_INSTRUCTION); - let multiset_hashes = TimestampValidityProof::::uninterleave_hashes( - &NoPreprocessing, - read_write_hashes.to_vec(), - init_final_hashes.to_vec(), - ); - TimestampValidityProof::::check_multiset_equality( + let multiset_hashes = + TimestampValidityProof::::uninterleave_hashes( + &NoPreprocessing, + read_write_hashes.to_vec(), + init_final_hashes.to_vec(), + ); + TimestampValidityProof::::check_multiset_equality( &NoPreprocessing, &multiset_hashes, ); @@ -706,25 +745,26 @@ where pub fn verify( &mut self, generators: &PCS::Setup, - commitments: &JoltCommitments, - opening_accumulator: &mut VerifierOpeningAccumulator, + commitments: &JoltCommitments, + opening_accumulator: &mut VerifierOpeningAccumulator, transcript: &mut ProofTranscript, ) -> Result<(), ProofVerifyError> { // Fiat-Shamir randomness for multiset hashes let gamma: F = transcript.challenge_scalar(); let tau: F = transcript.challenge_scalar(); - transcript.append_protocol_name(Self::protocol_name()); + 
let protocol_name = Self::protocol_name(); + transcript.append_message(protocol_name); // Multiset equality checks - TimestampValidityProof::::check_multiset_equality( + TimestampValidityProof::::check_multiset_equality( &NoPreprocessing, &self.multiset_hashes, ); self.multiset_hashes.append_to_transcript(transcript); let (read_write_hashes, init_final_hashes) = - TimestampValidityProof::::interleave_hashes( + TimestampValidityProof::::interleave_hashes( &NoPreprocessing, &self.multiset_hashes, ); @@ -758,37 +798,45 @@ where self.openings.identity = Some(IdentityPolynomial::new(r_grand_product.len()).evaluate(&r_grand_product)); - let read_hashes: Vec<_> = TimestampValidityProof::::read_tuples( + let read_hashes: Vec<_> = TimestampValidityProof::::read_tuples( &NoPreprocessing, &self.openings, &self.exogenous_openings, ) .iter() - .map(|tuple| TimestampValidityProof::::fingerprint(tuple, &gamma, &tau)) + .map(|tuple| { + TimestampValidityProof::::fingerprint(tuple, &gamma, &tau) + }) .collect(); - let write_hashes: Vec<_> = TimestampValidityProof::::write_tuples( + let write_hashes: Vec<_> = TimestampValidityProof::::write_tuples( &NoPreprocessing, &self.openings, &self.exogenous_openings, ) .iter() - .map(|tuple| TimestampValidityProof::::fingerprint(tuple, &gamma, &tau)) + .map(|tuple| { + TimestampValidityProof::::fingerprint(tuple, &gamma, &tau) + }) .collect(); - let init_hashes: Vec<_> = TimestampValidityProof::::init_tuples( + let init_hashes: Vec<_> = TimestampValidityProof::::init_tuples( &NoPreprocessing, &self.openings, &self.exogenous_openings, ) .iter() - .map(|tuple| TimestampValidityProof::::fingerprint(tuple, &gamma, &tau)) + .map(|tuple| { + TimestampValidityProof::::fingerprint(tuple, &gamma, &tau) + }) .collect(); - let final_hashes: Vec<_> = TimestampValidityProof::::final_tuples( + let final_hashes: Vec<_> = TimestampValidityProof::::final_tuples( &NoPreprocessing, &self.openings, &self.exogenous_openings, ) .iter() - .map(|tuple| TimestampValidityProof::::fingerprint(tuple, &gamma, &tau)) + .map(|tuple| { + TimestampValidityProof::::fingerprint(tuple, &gamma, &tau) + }) .collect(); assert_eq!( @@ -805,7 +853,10 @@ where final_hashes, }; let (read_write_hashes, init_final_hashes) = - TimestampValidityProof::::interleave_hashes(&NoPreprocessing, &multiset_hashes); + TimestampValidityProof::::interleave_hashes( + &NoPreprocessing, + &multiset_hashes, + ); for (claim, fingerprint) in zip(read_write_claims, read_write_hashes) { assert_eq!(*claim, fingerprint); diff --git a/jolt-core/src/lasso/memory_checking.rs b/jolt-core/src/lasso/memory_checking.rs index 634c97200..cfc7e8ab8 100644 --- a/jolt-core/src/lasso/memory_checking.rs +++ b/jolt-core/src/lasso/memory_checking.rs @@ -7,7 +7,7 @@ use crate::poly::eq_poly::EqPolynomial; use crate::poly::opening_proof::{ProverOpeningAccumulator, VerifierOpeningAccumulator}; use crate::utils::errors::ProofVerifyError; use crate::utils::thread::drop_in_background_thread; -use crate::utils::transcript::ProofTranscript; +use crate::utils::transcript::Transcript; use crate::{ poly::commitment::commitment_scheme::CommitmentScheme, subprotocols::grand_product::{ @@ -34,7 +34,10 @@ pub struct MultisetHashes { } impl MultisetHashes { - pub fn append_to_transcript(&self, transcript: &mut ProofTranscript) { + pub fn append_to_transcript( + &self, + transcript: &mut ProofTranscript, + ) { transcript.append_scalars(&self.read_hashes); transcript.append_scalars(&self.write_hashes); transcript.append_scalars(&self.init_hashes); @@ -43,21 +46,22 @@ 
impl MultisetHashes { } #[derive(CanonicalSerialize, CanonicalDeserialize)] -pub struct MemoryCheckingProof +pub struct MemoryCheckingProof where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, Openings: StructuredPolynomialData + Sync + CanonicalSerialize + CanonicalDeserialize, OtherOpenings: ExogenousOpenings + Sync, + ProofTranscript: Transcript, { /// Read/write/init/final multiset hashes for each memory pub multiset_hashes: MultisetHashes, /// The read and write grand products for every memory has the same size, /// so they can be batched. - pub read_write_grand_product: BatchedGrandProductProof, + pub read_write_grand_product: BatchedGrandProductProof, /// The init and final grand products for every memory has the same size, /// so they can be batched. - pub init_final_grand_product: BatchedGrandProductProof, + pub init_final_grand_product: BatchedGrandProductProof, /// The openings associated with the grand products. pub openings: Openings, pub exogenous_openings: OtherOpenings, @@ -198,16 +202,17 @@ pub trait Initializable: StructuredPolynomialData + Default // Empty struct to represent that no preprocessing data is used. pub struct NoPreprocessing; -pub trait MemoryCheckingProver +pub trait MemoryCheckingProver where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, + ProofTranscript: Transcript, Self: Sync, { - type ReadWriteGrandProduct: BatchedGrandProduct + Send + 'static = - BatchedDenseGrandProduct; - type InitFinalGrandProduct: BatchedGrandProduct + Send + 'static = - BatchedDenseGrandProduct; + type ReadWriteGrandProduct: BatchedGrandProduct + Send + 'static = + BatchedDenseGrandProduct; + type InitFinalGrandProduct: BatchedGrandProduct + Send + 'static = + BatchedDenseGrandProduct; type Polynomials: StructuredPolynomialData>; type Openings: StructuredPolynomialData + Sync + Initializable; @@ -226,9 +231,9 @@ where preprocessing: &Self::Preprocessing, polynomials: &Self::Polynomials, jolt_polynomials: &JoltPolynomials, - opening_accumulator: &mut ProverOpeningAccumulator, + opening_accumulator: &mut ProverOpeningAccumulator, transcript: &mut ProofTranscript, - ) -> MemoryCheckingProof { + ) -> MemoryCheckingProof { let ( read_write_grand_product, init_final_grand_product, @@ -269,12 +274,12 @@ where preprocessing: &Self::Preprocessing, polynomials: &Self::Polynomials, jolt_polynomials: &JoltPolynomials, - opening_accumulator: &mut ProverOpeningAccumulator, + opening_accumulator: &mut ProverOpeningAccumulator, transcript: &mut ProofTranscript, pcs_setup: &PCS::Setup, ) -> ( - BatchedGrandProductProof, - BatchedGrandProductProof, + BatchedGrandProductProof, + BatchedGrandProductProof, MultisetHashes, Vec, Vec, @@ -283,7 +288,8 @@ where let gamma: F = transcript.challenge_scalar(); let tau: F = transcript.challenge_scalar(); - transcript.append_protocol_name(Self::protocol_name()); + let protocol_name = Self::protocol_name(); + transcript.append_message(protocol_name); let (read_write_leaves, init_final_leaves) = Self::compute_leaves(preprocessing, polynomials, jolt_polynomials, &gamma, &tau); @@ -322,7 +328,7 @@ where fn compute_openings( preprocessing: &Self::Preprocessing, - opening_accumulator: &mut ProverOpeningAccumulator, + opening_accumulator: &mut ProverOpeningAccumulator, polynomials: &Self::Polynomials, jolt_polynomials: &JoltPolynomials, r_read_write: &[F], @@ -389,7 +395,11 @@ where fn read_write_grand_product( _preprocessing: &Self::Preprocessing, _polynomials: &Self::Polynomials, - read_write_leaves: >::Leaves, + 
read_write_leaves: >::Leaves, ) -> (Self::ReadWriteGrandProduct, Vec) { let batched_circuit = Self::ReadWriteGrandProduct::construct(read_write_leaves); let claims = batched_circuit.claims(); @@ -402,7 +412,11 @@ where fn init_final_grand_product( _preprocessing: &Self::Preprocessing, _polynomials: &Self::Polynomials, - init_final_leaves: >::Leaves, + init_final_leaves: >::Leaves, ) -> (Self::InitFinalGrandProduct, Vec) { let batched_circuit = Self::InitFinalGrandProduct::construct(init_final_leaves); let claims = batched_circuit.claims(); @@ -489,8 +503,8 @@ where gamma: &F, tau: &F, ) -> ( - >::Leaves, - >::Leaves, + >::Leaves, + >::Leaves, ); /// Computes the Reed-Solomon fingerprint (parametrized by `gamma` and `tau`) of the given memory `tuple`. @@ -501,26 +515,35 @@ where fn protocol_name() -> &'static [u8]; } -pub trait MemoryCheckingVerifier: MemoryCheckingProver +pub trait MemoryCheckingVerifier: + MemoryCheckingProver where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, + ProofTranscript: Transcript, { /// Verifies a memory checking proof, given its associated polynomial `commitment`. fn verify_memory_checking( preprocessing: &Self::Preprocessing, pcs_setup: &PCS::Setup, - mut proof: MemoryCheckingProof, + mut proof: MemoryCheckingProof< + F, + PCS, + Self::Openings, + Self::ExogenousOpenings, + ProofTranscript, + >, commitments: &Self::Commitments, - jolt_commitments: &JoltCommitments, - opening_accumulator: &mut VerifierOpeningAccumulator, + jolt_commitments: &JoltCommitments, + opening_accumulator: &mut VerifierOpeningAccumulator, transcript: &mut ProofTranscript, ) -> Result<(), ProofVerifyError> { // Fiat-Shamir randomness for multiset hashes let gamma: F = transcript.challenge_scalar(); let tau: F = transcript.challenge_scalar(); - transcript.append_protocol_name(Self::protocol_name()); + let protocol_name = Self::protocol_name(); + transcript.append_message(protocol_name); Self::check_multiset_equality(preprocessing, &proof.multiset_hashes); proof.multiset_hashes.append_to_transcript(transcript); diff --git a/jolt-core/src/lasso/surge.rs b/jolt-core/src/lasso/surge.rs index 0b94b9aeb..2e3fba262 100644 --- a/jolt-core/src/lasso/surge.rs +++ b/jolt-core/src/lasso/surge.rs @@ -10,6 +10,9 @@ use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use rayon::iter::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator}; use std::marker::{PhantomData, Sync}; +use super::memory_checking::{ + Initializable, NoExogenousOpenings, StructuredPolynomialData, VerifierComputedOpening, +}; use crate::{ jolt::instruction::JoltInstruction, lasso::memory_checking::{MemoryCheckingProof, MemoryCheckingProver, MemoryCheckingVerifier}, @@ -20,11 +23,7 @@ use crate::{ identity_poly::IdentityPolynomial, }, subprotocols::sumcheck::SumcheckInstanceProof, - utils::{errors::ProofVerifyError, math::Math, mul_0_1_optimized, transcript::ProofTranscript}, -}; - -use super::memory_checking::{ - Initializable, NoExogenousOpenings, StructuredPolynomialData, VerifierComputedOpening, + utils::{errors::ProofVerifyError, math::Math, mul_0_1_optimized, transcript::Transcript}, }; #[derive(Default, CanonicalSerialize, CanonicalDeserialize)] @@ -44,7 +43,8 @@ pub struct SurgeStuff { pub type SurgePolynomials = SurgeStuff>; pub type SurgeOpenings = SurgeStuff; -pub type SurgeCommitments = SurgeStuff; +pub type SurgeCommitments, ProofTranscript: Transcript> = + SurgeStuff; impl Initializable> for SurgeStuff @@ -94,16 +94,17 @@ impl StructuredPolynomialData f } } -impl 
MemoryCheckingProver - for SurgeProof +impl + MemoryCheckingProver + for SurgeProof where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, Instruction: JoltInstruction + Default + Sync, { type Polynomials = SurgePolynomials; type Openings = SurgeOpenings; - type Commitments = SurgeCommitments; + type Commitments = SurgeCommitments; type Preprocessing = SurgePreprocessing; fn fingerprint(inputs: &(F, F, F), gamma: &F, tau: &F) -> F { @@ -183,12 +184,14 @@ where } } -impl MemoryCheckingVerifier - for SurgeProof +impl + MemoryCheckingVerifier + for SurgeProof where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, Instruction: JoltInstruction + Default + Sync, + ProofTranscript: Transcript, { fn compute_verifier_openings( openings: &mut Self::Openings, @@ -278,14 +281,16 @@ where } } -pub struct SurgePrimarySumcheck +pub struct SurgePrimarySumcheck where F: JoltField, + ProofTranscript: Transcript, { - sumcheck_proof: SumcheckInstanceProof, + sumcheck_proof: SumcheckInstanceProof, num_rounds: usize, claimed_evaluation: F, E_poly_openings: Vec, + _marker: PhantomData, } pub struct SurgePreprocessing @@ -298,20 +303,22 @@ where } #[allow(clippy::type_complexity)] -pub struct SurgeProof +pub struct SurgeProof where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, Instruction: JoltInstruction + Default, + ProofTranscript: Transcript, { _instruction: PhantomData, /// Commitments to all polynomials - commitments: SurgeCommitments, + commitments: SurgeCommitments, /// Primary collation sumcheck proof - primary_sumcheck: SurgePrimarySumcheck, + primary_sumcheck: SurgePrimarySumcheck, - memory_checking: MemoryCheckingProof, NoExogenousOpenings>, + memory_checking: + MemoryCheckingProof, NoExogenousOpenings, ProofTranscript>, } impl SurgePreprocessing @@ -338,11 +345,13 @@ where } } -impl SurgeProof +impl + SurgeProof where F: JoltField, - PCS: CommitmentScheme, + PCS: CommitmentScheme, Instruction: JoltInstruction + Default + Sync, + ProofTranscript: Transcript, { // TODO(moodlezoup): We can be more efficient (use fewer memories) if we use subtable_indices fn num_memories() -> usize { @@ -378,15 +387,17 @@ where preprocessing: &SurgePreprocessing, generators: &PCS::Setup, ops: Vec, - ) -> (Self, Option>) { + ) -> (Self, Option>) { let mut transcript = ProofTranscript::new(b"Surge transcript"); - let mut opening_accumulator: ProverOpeningAccumulator = ProverOpeningAccumulator::new(); - transcript.append_protocol_name(Self::protocol_name()); + let mut opening_accumulator: ProverOpeningAccumulator = + ProverOpeningAccumulator::new(); + let protocol_name = Self::protocol_name(); + transcript.append_message(protocol_name); let num_lookups = ops.len().next_power_of_two(); let polynomials = Self::generate_witness(preprocessing, &ops); - let mut commitments = SurgeCommitments::::initialize(preprocessing); + let mut commitments = SurgeCommitments::::initialize(preprocessing); let trace_polys = polynomials.read_write_values(); let trace_comitments = PCS::batch_commit_polys_ref(&trace_polys, generators, BatchType::SurgeReadWrite); @@ -422,7 +433,7 @@ where }; let (primary_sumcheck_proof, r_z, mut sumcheck_openings) = - SumcheckInstanceProof::::prove_arbitrary::<_>( + SumcheckInstanceProof::::prove_arbitrary::<_>( &sumcheck_claim, num_rounds, &mut combined_sumcheck_polys, @@ -447,6 +458,7 @@ where sumcheck_proof: primary_sumcheck_proof, num_rounds, E_poly_openings: sumcheck_openings, + _marker: PhantomData, }; let memory_checking = SurgeProof::prove_memory_checking( @@ 
-478,11 +490,11 @@ where pub fn verify( preprocessing: &SurgePreprocessing, generators: &PCS::Setup, - proof: SurgeProof, - _debug_info: Option>, + proof: SurgeProof, + _debug_info: Option>, ) -> Result<(), ProofVerifyError> { let mut transcript = ProofTranscript::new(b"Surge transcript"); - let mut opening_accumulator: VerifierOpeningAccumulator = + let mut opening_accumulator: VerifierOpeningAccumulator = VerifierOpeningAccumulator::new(); #[cfg(test)] if let Some(debug_info) = _debug_info { @@ -490,7 +502,8 @@ where opening_accumulator.compare_to(debug_info.opening_accumulator, &generators); } - transcript.append_protocol_name(Self::protocol_name()); + let protocol_name = Self::protocol_name(); + transcript.append_message(protocol_name); let instruction = Instruction::default(); let r_primary_sumcheck = transcript.challenge_vector(proof.primary_sumcheck.num_rounds); @@ -527,7 +540,7 @@ where generators, proof.memory_checking, &proof.commitments, - &JoltCommitments::::default(), + &JoltCommitments::::default(), &mut opening_accumulator, &mut transcript, ) @@ -643,6 +656,7 @@ where #[cfg(test)] mod tests { use super::SurgePreprocessing; + use crate::utils::transcript::KeccakTranscript; use crate::{ jolt::instruction::xor::XORInstruction, lasso::surge::SurgeProof, @@ -670,13 +684,18 @@ mod tests { .collect(); let preprocessing = SurgePreprocessing::preprocess(); - let generators = HyperKZG::setup(&[CommitShape::new(M, BatchType::SurgeReadWrite)]); - let (proof, debug_info) = - SurgeProof::, XORInstruction, C, M>::prove( - &preprocessing, - &generators, - ops, - ); + let generators = HyperKZG::<_, KeccakTranscript>::setup(&[CommitShape::new( + M, + BatchType::SurgeReadWrite, + )]); + let (proof, debug_info) = SurgeProof::< + Fr, + HyperKZG, + XORInstruction, + C, + M, + KeccakTranscript, + >::prove(&preprocessing, &generators, ops); SurgeProof::verify(&preprocessing, &generators, proof, debug_info).expect("should work"); } @@ -697,13 +716,18 @@ mod tests { .collect(); let preprocessing = SurgePreprocessing::preprocess(); - let generators = HyperKZG::setup(&[CommitShape::new(M, BatchType::SurgeReadWrite)]); - let (proof, debug_info) = - SurgeProof::, XORInstruction, C, M>::prove( - &preprocessing, - &generators, - ops, - ); + let generators = HyperKZG::<_, KeccakTranscript>::setup(&[CommitShape::new( + M, + BatchType::SurgeReadWrite, + )]); + let (proof, debug_info) = SurgeProof::< + Fr, + HyperKZG, + XORInstruction, + C, + M, + KeccakTranscript, + >::prove(&preprocessing, &generators, ops); SurgeProof::verify(&preprocessing, &generators, proof, debug_info).expect("should work"); } diff --git a/jolt-core/src/poly/commitment/binius.rs b/jolt-core/src/poly/commitment/binius.rs index d1c5918b0..2b37fe70a 100644 --- a/jolt-core/src/poly/commitment/binius.rs +++ b/jolt-core/src/poly/commitment/binius.rs @@ -5,17 +5,20 @@ use crate::poly::commitment::commitment_scheme::CommitShape; use crate::poly::commitment::commitment_scheme::CommitmentScheme; use crate::poly::dense_mlpoly::DensePolynomial; use crate::utils::errors::ProofVerifyError; -use crate::utils::transcript::{AppendToTranscript, ProofTranscript}; +use crate::utils::transcript::{AppendToTranscript, Transcript}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use std::marker::PhantomData; #[derive(Clone)] -pub struct Binius128Scheme {} +pub struct Binius128Scheme { + _phantom: PhantomData, +} #[derive(Default, Debug, PartialEq, CanonicalSerialize, CanonicalDeserialize)] pub struct BiniusCommitment {} impl AppendToTranscript 
for BiniusCommitment { - fn append_to_transcript(&self, _transcript: &mut ProofTranscript) { + fn append_to_transcript(&self, _transcript: &mut ProofTranscript) { todo!() } } @@ -29,7 +32,9 @@ pub struct BiniusBatchedProof {} #[derive(Clone)] pub struct None {} -impl CommitmentScheme for Binius128Scheme { +impl CommitmentScheme + for Binius128Scheme +{ type Field = crate::field::binius::BiniusField; type Setup = None; type Commitment = BiniusCommitment; diff --git a/jolt-core/src/poly/commitment/commitment_scheme.rs b/jolt-core/src/poly/commitment/commitment_scheme.rs index 89c99542e..6aef54bc0 100644 --- a/jolt-core/src/poly/commitment/commitment_scheme.rs +++ b/jolt-core/src/poly/commitment/commitment_scheme.rs @@ -1,13 +1,11 @@ use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use std::fmt::Debug; +use crate::utils::transcript::Transcript; use crate::{ field::JoltField, poly::dense_mlpoly::DensePolynomial, - utils::{ - errors::ProofVerifyError, - transcript::{AppendToTranscript, ProofTranscript}, - }, + utils::{errors::ProofVerifyError, transcript::AppendToTranscript}, }; #[derive(Clone, Debug)] @@ -34,7 +32,7 @@ pub enum BatchType { GrandProduct, } -pub trait CommitmentScheme: Clone + Sync + Send + 'static { +pub trait CommitmentScheme: Clone + Sync + Send + 'static { type Field: JoltField + Sized; type Setup: Clone + Sync + Send; type Commitment: Default diff --git a/jolt-core/src/poly/commitment/hyperkzg.rs b/jolt-core/src/poly/commitment/hyperkzg.rs index 9a9bfbd0b..8c4e3b2b4 100644 --- a/jolt-core/src/poly/commitment/hyperkzg.rs +++ b/jolt-core/src/poly/commitment/hyperkzg.rs @@ -16,13 +16,11 @@ use crate::field; use crate::poly::commitment::commitment_scheme::CommitShape; use crate::utils::mul_0_1_optimized; use crate::utils::thread::unsafe_allocate_zero_vec; +use crate::utils::transcript::Transcript; use crate::{ msm::VariableBaseMSM, poly::{commitment::kzg::SRS, dense_mlpoly::DensePolynomial, unipoly::UniPoly}, - utils::{ - errors::ProofVerifyError, - transcript::{AppendToTranscript, ProofTranscript}, - }, + utils::{errors::ProofVerifyError, transcript::AppendToTranscript}, }; use ark_ec::{pairing::Pairing, AffineRepr, CurveGroup}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; @@ -69,7 +67,7 @@ impl Default for HyperKZGCommitment
<P> { } impl<P: Pairing> AppendToTranscript for HyperKZGCommitment<P>
{ - fn append_to_transcript(&self, transcript: &mut ProofTranscript) { + fn append_to_transcript<ProofTranscript: Transcript>(&self, transcript: &mut ProofTranscript) { transcript.append_point(&self.0.into_group()); } } @@ -155,7 +153,7 @@ where B } -fn kzg_open_batch<P: Pairing>( +fn kzg_open_batch<P: Pairing, ProofTranscript: Transcript>( f: &[Vec<P::ScalarField>], u: &[P::ScalarField], pk: &HyperKZGProverKey
<P>
, @@ -198,7 +196,7 @@ where } // vk is hashed in transcript already, so we do not add it here -fn kzg_verify_batch<P: Pairing>( +fn kzg_verify_batch<P: Pairing, ProofTranscript: Transcript>( vk: &HyperKZGVerifierKey
<P>
, C: &[P::G1Affine], W: &[P::G1Affine], @@ -280,11 +278,11 @@ where } #[derive(Clone)] -pub struct HyperKZG<P: Pairing> { - _phantom: PhantomData
<P>
, +pub struct HyperKZG<P: Pairing, ProofTranscript: Transcript> { + _phantom: PhantomData<(P, ProofTranscript)>, } -impl HyperKZG
<P>
+impl<P: Pairing, ProofTranscript: Transcript> HyperKZG<P, ProofTranscript> where
<P as Pairing>
::ScalarField: field::JoltField, { @@ -469,7 +467,8 @@ where drop(span); let poly = DensePolynomial::new(f_batched); - HyperKZG::
<P>
::open(pk, &poly, point, &batched_evaluation, transcript).unwrap() + HyperKZG::<P, ProofTranscript>::open(pk, &poly, point, &batched_evaluation, transcript) .unwrap() } fn batch_verify( @@ -494,7 +493,7 @@ where (batched_evaluation, batched_commitment) }, ); - HyperKZG::
<P>
::verify( + HyperKZG::<P, ProofTranscript>::verify( vk, &HyperKZGCommitment(batched_commitment.into_affine()), point, @@ -505,7 +504,8 @@ where } } -impl CommitmentScheme for HyperKZG
<P>
+impl<P: Pairing, ProofTranscript: Transcript> CommitmentScheme<ProofTranscript> + for HyperKZG<P, ProofTranscript> where
<P as Pairing>
::ScalarField: field::JoltField, { @@ -579,7 +579,8 @@ where transcript: &mut ProofTranscript, ) -> Self::Proof { let eval = poly.evaluate(opening_point); - HyperKZG::
<P>
::open(&setup.0, poly, opening_point, &eval, transcript).unwrap() + HyperKZG::<P, ProofTranscript>::open(&setup.0, poly, opening_point, &eval, transcript) .unwrap() } fn batch_prove( @@ -590,7 +591,13 @@ where _batch_type: BatchType, transcript: &mut ProofTranscript, ) -> Self::BatchedProof { - HyperKZG::
<P>
::batch_open(&setup.0, polynomials, opening_point, openings, transcript) + HyperKZG::<P, ProofTranscript>::batch_open( &setup.0, polynomials, opening_point, openings, transcript, ) } fn combine_commitments( @@ -613,7 +620,7 @@ where opening: &Self::Field, // evaluation \widetilde{Z}(r) commitment: &Self::Commitment, ) -> Result<(), ProofVerifyError> { - HyperKZG::
<P>
::verify( + HyperKZG::<P, ProofTranscript>::verify( &setup.1, commitment, opening_point, @@ -631,7 +638,7 @@ where commitments: &[&Self::Commitment], transcript: &mut ProofTranscript, ) -> Result<(), ProofVerifyError> { - HyperKZG::
<P>
::batch_verify( + HyperKZG::::batch_verify( &setup.1, commitments, opening_point, @@ -649,6 +656,7 @@ where #[cfg(test)] mod tests { use super::*; + use crate::utils::transcript::{KeccakTranscript, Transcript}; use ark_bn254::{Bn254, Fr}; use ark_std::UniformRand; use rand_core::SeedableRng; @@ -663,12 +671,12 @@ mod tests { // poly is in eval. representation; evaluated at [(0,0), (0,1), (1,0), (1,1)] let poly = DensePolynomial::new(vec![Fr::from(1), Fr::from(2), Fr::from(2), Fr::from(4)]); - let C = HyperKZG::commit(&pk, &poly).unwrap(); + let C = HyperKZG::<_, KeccakTranscript>::commit(&pk, &poly).unwrap(); let test_inner = |point: Vec, eval: Fr| -> Result<(), ProofVerifyError> { - let mut tr = ProofTranscript::new(b"TestEval"); + let mut tr = KeccakTranscript::new(b"TestEval"); let proof = HyperKZG::open(&pk, &poly, &point, &eval, &mut tr).unwrap(); - let mut tr = ProofTranscript::new(b"TestEval"); + let mut tr = KeccakTranscript::new(b"TestEval"); HyperKZG::verify(&vk, &C, &point, &eval, &proof, &mut tr) }; @@ -721,15 +729,15 @@ mod tests { let (pk, vk): (HyperKZGProverKey, HyperKZGVerifierKey) = srs.trim(3); // make a commitment - let C = HyperKZG::commit(&pk, &poly).unwrap(); + let C = HyperKZG::<_, KeccakTranscript>::commit(&pk, &poly).unwrap(); // prove an evaluation - let mut tr = ProofTranscript::new(b"TestEval"); + let mut tr = KeccakTranscript::new(b"TestEval"); let proof = HyperKZG::open(&pk, &poly, &point, &eval, &mut tr).unwrap(); let post_c_p = tr.challenge_scalar::(); // verify the evaluation - let mut verifier_transcript = ProofTranscript::new(b"TestEval"); + let mut verifier_transcript = KeccakTranscript::new(b"TestEval"); assert!( HyperKZG::verify(&vk, &C, &point, &eval, &proof, &mut verifier_transcript,).is_ok() ); @@ -746,7 +754,7 @@ mod tests { let mut bad_proof = proof.clone(); let v1 = bad_proof.v[1].clone(); bad_proof.v[0].clone_from(&v1); - let mut verifier_transcript2 = ProofTranscript::new(b"TestEval"); + let mut verifier_transcript2 = KeccakTranscript::new(b"TestEval"); assert!(HyperKZG::verify( &vk, &C, @@ -780,22 +788,22 @@ mod tests { let (pk, vk): (HyperKZGProverKey, HyperKZGVerifierKey) = srs.trim(n); // make a commitment - let C = HyperKZG::commit(&pk, &poly).unwrap(); + let C = HyperKZG::<_, KeccakTranscript>::commit(&pk, &poly).unwrap(); // prove an evaluation - let mut prover_transcript = ProofTranscript::new(b"TestEval"); + let mut prover_transcript = KeccakTranscript::new(b"TestEval"); let proof: HyperKZGProof = HyperKZG::open(&pk, &poly, &point, &eval, &mut prover_transcript).unwrap(); // verify the evaluation - let mut verifier_tr = ProofTranscript::new(b"TestEval"); + let mut verifier_tr = KeccakTranscript::new(b"TestEval"); assert!(HyperKZG::verify(&vk, &C, &point, &eval, &proof, &mut verifier_tr,).is_ok()); // Change the proof and expect verification to fail let mut bad_proof = proof.clone(); let v1 = bad_proof.v[1].clone(); bad_proof.v[0].clone_from(&v1); - let mut verifier_tr2 = ProofTranscript::new(b"TestEval"); + let mut verifier_tr2 = KeccakTranscript::new(b"TestEval"); assert!( HyperKZG::verify(&vk, &C, &point, &eval, &bad_proof, &mut verifier_tr2,).is_err() ); diff --git a/jolt-core/src/poly/commitment/hyrax.rs b/jolt-core/src/poly/commitment/hyrax.rs index cfffd7061..1bd43e5bc 100644 --- a/jolt-core/src/poly/commitment/hyrax.rs +++ b/jolt-core/src/poly/commitment/hyrax.rs @@ -7,7 +7,7 @@ use crate::poly::dense_mlpoly::DensePolynomial; use crate::poly::eq_poly::EqPolynomial; use crate::utils::errors::ProofVerifyError; use 
crate::utils::math::Math; -use crate::utils::transcript::{AppendToTranscript, ProofTranscript}; +use crate::utils::transcript::{AppendToTranscript, Transcript}; use crate::utils::{compute_dotproduct, mul_0_1_optimized}; use ark_ec::CurveGroup; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; @@ -18,8 +18,8 @@ use tracing::trace_span; use crate::msm::VariableBaseMSM; #[derive(Clone)] -pub struct HyraxScheme { - marker: PhantomData, +pub struct HyraxScheme { + marker: PhantomData<(G, ProofTranscript)>, } const TRACE_LEN_R1CS_POLYS_BATCH_RATIO: usize = 64; @@ -48,12 +48,14 @@ pub fn matrix_dimensions(num_vars: usize, ratio: usize) -> (usize, usize) { (col_size, row_size) } -impl> CommitmentScheme for HyraxScheme { +impl, ProofTranscript: Transcript> + CommitmentScheme for HyraxScheme +{ type Field = G::ScalarField; type Setup = PedersenGenerators; type Commitment = HyraxCommitment; - type Proof = HyraxOpeningProof; - type BatchedProof = BatchedHyraxOpeningProof; + type Proof = HyraxOpeningProof; + type BatchedProof = BatchedHyraxOpeningProof; fn setup(shapes: &[CommitShape]) -> Self::Setup { let mut max_len: usize = 0; @@ -249,7 +251,7 @@ impl> HyraxCommitment { } impl AppendToTranscript for HyraxCommitment { - fn append_to_transcript(&self, transcript: &mut ProofTranscript) { + fn append_to_transcript(&self, transcript: &mut ProofTranscript) { transcript.append_message(b"poly_commitment_begin"); for i in 0..self.row_commitments.len() { transcript.append_point(&self.row_commitments[i]); @@ -259,12 +261,18 @@ impl AppendToTranscript for HyraxCommitment { } #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] -pub struct HyraxOpeningProof { +pub struct HyraxOpeningProof { pub vector_matrix_product: Vec, + _marker: PhantomData, } /// See Section 14.3 of Thaler's Proofs, Arguments, and Zero-Knowledge -impl> HyraxOpeningProof { +impl HyraxOpeningProof +where + F: JoltField, + G: CurveGroup, + ProofTranscript: Transcript, +{ fn protocol_name() -> &'static [u8] { b"Hyrax opening proof" } @@ -275,8 +283,9 @@ impl> HyraxOpeningProof { opening_point: &[G::ScalarField], // point at which the polynomial is evaluated ratio: usize, transcript: &mut ProofTranscript, - ) -> HyraxOpeningProof { - transcript.append_protocol_name(Self::protocol_name()); + ) -> HyraxOpeningProof { + let protocol_name = Self::protocol_name(); + transcript.append_message(protocol_name); // assert vectors are of the right size assert_eq!(poly.get_num_vars(), opening_point.len()); @@ -291,6 +300,7 @@ impl> HyraxOpeningProof { HyraxOpeningProof { vector_matrix_product, + _marker: PhantomData, } } @@ -303,7 +313,8 @@ impl> HyraxOpeningProof { commitment: &HyraxCommitment, ratio: usize, ) -> Result<(), ProofVerifyError> { - transcript.append_protocol_name(Self::protocol_name()); + let protocol_name = Self::protocol_name(); + transcript.append_message(protocol_name); // compute L and R let (L_size, R_size) = matrix_dimensions(opening_point.len(), ratio); @@ -356,13 +367,16 @@ impl> HyraxOpeningProof { } #[derive(Debug, CanonicalSerialize, CanonicalDeserialize)] -pub struct BatchedHyraxOpeningProof { - pub joint_proof: HyraxOpeningProof, +pub struct BatchedHyraxOpeningProof { + pub joint_proof: HyraxOpeningProof, pub ratio: usize, + _marker: PhantomData, } /// See Section 16.1 of Thaler's Proofs, Arguments, and Zero-Knowledge -impl> BatchedHyraxOpeningProof { +impl, ProofTranscript: Transcript> + BatchedHyraxOpeningProof +{ #[tracing::instrument(skip_all, name = "BatchedHyraxOpeningProof::prove")] pub fn prove( 
polynomials: &[&DensePolynomial], @@ -371,7 +385,8 @@ impl> BatchedHyraxOpeningProof { batch_type: BatchType, transcript: &mut ProofTranscript, ) -> Self { - transcript.append_protocol_name(Self::protocol_name()); + let protocol_name = Self::protocol_name(); + transcript.append_message(protocol_name); // append the claimed evaluations to transcript transcript.append_scalars(openings); @@ -431,7 +446,11 @@ impl> BatchedHyraxOpeningProof { transcript, ); - Self { joint_proof, ratio } + Self { + joint_proof, + ratio, + _marker: PhantomData, + } } #[tracing::instrument(skip_all, name = "BatchedHyraxOpeningProof::verify")] @@ -455,7 +474,8 @@ impl> BatchedHyraxOpeningProof { ) }); - transcript.append_protocol_name(Self::protocol_name()); + let protocol_name = Self::protocol_name(); + transcript.append_message(protocol_name); // append the claimed evaluations to transcript transcript.append_scalars(openings); @@ -506,6 +526,7 @@ impl> BatchedHyraxOpeningProof { #[cfg(test)] mod tests { use super::*; + use crate::utils::transcript::{KeccakTranscript, Transcript}; use ark_bn254::{Fr, G1Projective}; #[test] @@ -538,10 +559,10 @@ mod tests { let generators: PedersenGenerators = PedersenGenerators::new(1 << 8, b"test-two"); let poly_commitment: HyraxCommitment = HyraxCommitment::commit(&poly, &generators); - let mut prover_transcript = ProofTranscript::new(b"example"); + let mut prover_transcript = KeccakTranscript::new(b"example"); let proof = HyraxOpeningProof::prove(&poly, &r, RATIO, &mut prover_transcript); - let mut verifier_transcript = ProofTranscript::new(b"example"); + let mut verifier_transcript = KeccakTranscript::new(b"example"); assert!(proof .verify( diff --git a/jolt-core/src/poly/commitment/mock.rs b/jolt-core/src/poly/commitment/mock.rs index a5e3dfa90..9bf6a4417 100644 --- a/jolt-core/src/poly/commitment/mock.rs +++ b/jolt-core/src/poly/commitment/mock.rs @@ -7,15 +7,15 @@ use crate::{ poly::dense_mlpoly::DensePolynomial, utils::{ errors::ProofVerifyError, - transcript::{AppendToTranscript, ProofTranscript}, + transcript::{AppendToTranscript, Transcript}, }, }; use super::commitment_scheme::{BatchType, CommitShape, CommitmentScheme}; #[derive(Clone)] -pub struct MockCommitScheme { - _marker: PhantomData, +pub struct MockCommitScheme { + _marker: PhantomData<(F, ProofTranscript)>, } #[derive(CanonicalSerialize, CanonicalDeserialize, Default, Debug, PartialEq)] @@ -24,7 +24,7 @@ pub struct MockCommitment { } impl AppendToTranscript for MockCommitment { - fn append_to_transcript(&self, transcript: &mut ProofTranscript) { + fn append_to_transcript(&self, transcript: &mut ProofTranscript) { transcript.append_message(b"mocker"); } } @@ -34,7 +34,11 @@ pub struct MockProof { opening_point: Vec, } -impl CommitmentScheme for MockCommitScheme { +impl CommitmentScheme for MockCommitScheme +where + F: JoltField, + ProofTranscript: Transcript, +{ type Field = F; type Setup = (); type Commitment = MockCommitment; diff --git a/jolt-core/src/poly/commitment/zeromorph.rs b/jolt-core/src/poly/commitment/zeromorph.rs index c46ed7736..505bd629c 100644 --- a/jolt-core/src/poly/commitment/zeromorph.rs +++ b/jolt-core/src/poly/commitment/zeromorph.rs @@ -10,7 +10,7 @@ use crate::utils::mul_0_1_optimized; use crate::utils::thread::unsafe_allocate_zero_vec; use crate::utils::{ errors::ProofVerifyError, - transcript::{AppendToTranscript, ProofTranscript}, + transcript::{AppendToTranscript, Transcript}, }; use ark_ec::{pairing::Pairing, AffineRepr, CurveGroup}; use ark_ff::{batch_inversion, Field}; @@ -74,7 
+74,7 @@ impl Default for ZeromorphCommitment
<P> { } impl<P: Pairing> AppendToTranscript for ZeromorphCommitment<P>
{ - fn append_to_transcript(&self, transcript: &mut ProofTranscript) { + fn append_to_transcript<ProofTranscript: Transcript>(&self, transcript: &mut ProofTranscript) { transcript.append_point(&self.0.into_group()); } } @@ -222,13 +222,15 @@ where } #[derive(Clone)] -pub struct Zeromorph<P: Pairing> { - _phantom: PhantomData
<P>
, +pub struct Zeromorph<P: Pairing, ProofTranscript: Transcript> { + _phantom: PhantomData<(P, ProofTranscript)>, } -impl Zeromorph
<P>
+impl<P, ProofTranscript> Zeromorph<P, ProofTranscript> where
<P as Pairing>
::ScalarField: field::JoltField, + P: Pairing, + ProofTranscript: Transcript, { pub fn protocol_name() -> &'static [u8] { b"Zeromorph" @@ -258,7 +260,8 @@ where eval: &P::ScalarField, transcript: &mut ProofTranscript, ) -> Result<ZeromorphProof<P>, ProofVerifyError> { - transcript.append_protocol_name(Self::protocol_name()); + let protocol_name = Self::protocol_name(); + transcript.append_message(protocol_name); if pp.commit_pp.g1_powers().len() < poly.Z.len() { return Err(ProofVerifyError::KeyLengthError( @@ -375,7 +378,8 @@ where drop(span); let poly = DensePolynomial::new(f_batched); - Zeromorph::
<P>
::open(pk, &poly, point, &batched_evaluation, transcript).unwrap() + Zeromorph::<P, ProofTranscript>::open(pk, &poly, point, &batched_evaluation, transcript) .unwrap() } fn batch_verify( @@ -400,7 +404,7 @@ where (batched_evaluation, batched_commitment) }, ); - Zeromorph::
<P>
::verify( + Zeromorph::<P, ProofTranscript>::verify( vk, &ZeromorphCommitment(batched_commitment.into_affine()), point, @@ -418,7 +422,8 @@ where proof: &ZeromorphProof
<P>
, transcript: &mut ProofTranscript, ) -> Result<(), ProofVerifyError> { - transcript.append_protocol_name(Self::protocol_name()); + let protocol_name = Self::protocol_name(); + transcript.append_message(protocol_name); let q_comms: Vec<P::G1> = proof.q_k_com.iter().map(|c| c.into_group()).collect(); q_comms.iter().for_each(|c| transcript.append_point(c)); @@ -474,7 +479,8 @@ where } } -impl CommitmentScheme for Zeromorph
<P>
+impl<P: Pairing, ProofTranscript: Transcript> CommitmentScheme<ProofTranscript> + for Zeromorph<P, ProofTranscript> where
<P as Pairing>
::ScalarField: field::JoltField, { @@ -545,7 +551,8 @@ where transcript: &mut ProofTranscript, ) -> Self::Proof { let eval = poly.evaluate(opening_point); - Zeromorph::
<P>
::open(&setup.0, poly, opening_point, &eval, transcript).unwrap() + Zeromorph::<P, ProofTranscript>::open(&setup.0, poly, opening_point, &eval, transcript) .unwrap() } fn batch_prove( @@ -556,7 +563,13 @@ where _batch_type: BatchType, transcript: &mut ProofTranscript, ) -> Self::BatchedProof { - Zeromorph::
<P>
::batch_open(&setup.0, polynomials, opening_point, openings, transcript) + Zeromorph::<P, ProofTranscript>::batch_open( &setup.0, polynomials, opening_point, openings, transcript, ) } fn combine_commitments( @@ -579,7 +592,7 @@ where opening: &Self::Field, // evaluation \widetilde{Z}(r) commitment: &Self::Commitment, ) -> Result<(), ProofVerifyError> { - Zeromorph::
<P>
::verify( + Zeromorph::<P, ProofTranscript>::verify( &setup.1, commitment, opening_point, @@ -597,7 +610,7 @@ where commitments: &[&Self::Commitment], transcript: &mut ProofTranscript, ) -> Result<(), ProofVerifyError> { - Zeromorph::
<P>
::batch_verify( + Zeromorph::::batch_verify( &setup.1, commitments, opening_point, @@ -616,6 +629,7 @@ where mod test { use super::*; use crate::utils::math::Math; + use crate::utils::transcript::{KeccakTranscript, Transcript}; use ark_bn254::{Bn254, Fr}; use ark_ff::{BigInt, Zero}; use ark_std::{test_rng, UniformRand}; @@ -875,17 +889,23 @@ mod test { let srs = ZeromorphSRS::::setup(&mut rng, 1 << num_vars); let (pk, vk) = srs.trim(1 << num_vars); - let commitment = Zeromorph::::commit(&pk, &poly).unwrap(); + let commitment = Zeromorph::::commit(&pk, &poly).unwrap(); - let mut prover_transcript = ProofTranscript::new(b"TestEval"); - let proof = Zeromorph::::open(&pk, &poly, &point, &eval, &mut prover_transcript) - .unwrap(); + let mut prover_transcript = KeccakTranscript::new(b"TestEval"); + let proof = Zeromorph::::open( + &pk, + &poly, + &point, + &eval, + &mut prover_transcript, + ) + .unwrap(); let p_transcipt_squeeze: ::ScalarField = prover_transcript.challenge_scalar(); // Verify proof. - let mut verifier_transcript = ProofTranscript::new(b"TestEval"); - Zeromorph::::verify( + let mut verifier_transcript = KeccakTranscript::new(b"TestEval"); + Zeromorph::::verify( &vk, &commitment, &point, @@ -905,8 +925,8 @@ mod test { .map(|s| *s + ::ScalarField::one()) .collect::>(); let altered_verifier_eval = poly.evaluate(&altered_verifier_point); - let mut verifier_transcript = ProofTranscript::new(b"TestEval"); - assert!(Zeromorph::::verify( + let mut verifier_transcript = KeccakTranscript::new(b"TestEval"); + assert!(Zeromorph::::verify( &vk, &commitment, &altered_verifier_point, @@ -934,14 +954,14 @@ mod test { let (pk, vk) = srs.trim(1 << num_vars); let commitments: Vec<_> = polys .iter() - .map(|poly| Zeromorph::::commit(&pk, poly).unwrap()) + .map(|poly| Zeromorph::::commit(&pk, poly).unwrap()) .collect(); let commitments_refs: Vec<_> = commitments.iter().collect(); let polys_refs: Vec<_> = polys.iter().collect(); - let mut prover_transcript = ProofTranscript::new(b"TestEval"); - let proof = Zeromorph::::batch_open( + let mut prover_transcript = KeccakTranscript::new(b"TestEval"); + let proof = Zeromorph::::batch_open( &pk, &polys_refs, &point, @@ -952,8 +972,8 @@ mod test { prover_transcript.challenge_scalar(); // Verify proof. - let mut verifier_transcript = ProofTranscript::new(b"TestEval"); - Zeromorph::::batch_verify( + let mut verifier_transcript = KeccakTranscript::new(b"TestEval"); + Zeromorph::::batch_verify( &vk, &commitments_refs, &point, @@ -976,8 +996,8 @@ mod test { .iter() .map(|poly| poly.evaluate(&altered_verifier_point)) .collect(); - let mut verifier_transcript = ProofTranscript::new(b"TestEval"); - assert!(Zeromorph::::batch_verify( + let mut verifier_transcript = KeccakTranscript::new(b"TestEval"); + assert!(Zeromorph::::batch_verify( &vk, &commitments_refs, &altered_verifier_point, diff --git a/jolt-core/src/poly/opening_proof.rs b/jolt-core/src/poly/opening_proof.rs index c62213994..92b32ae06 100644 --- a/jolt-core/src/poly/opening_proof.rs +++ b/jolt-core/src/poly/opening_proof.rs @@ -3,26 +3,27 @@ //! For additively homomorphic commitment schemes (including Zeromorph, HyperKZG) we //! can use a sumcheck to reduce multiple opening proofs (multiple polynomials, not //! necessarily of the same size, each opened at a different point) into a single opening. 
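// Illustrative sketch (assumed helper, not part of the patch): the random linear
// combination the module doc above relies on. Given claimed evaluations v_i of
// polynomials all opened at the same point, a Fiat-Shamir challenge rho batches
// them into the single claim sum_i rho^i * v_i; because the commitment scheme is
// additively homomorphic, the matching commitment is sum_i rho^i * C_i, so one
// opening proof covers every accumulated claim.
fn rlc_batch_claims<F: JoltField>(rho: F, claims: &[F]) -> F {
    let mut coeff = F::one();
    let mut batched = F::zero();
    for v in claims {
        batched += coeff * *v; // accumulate rho^i * v_i
        coeff *= rho; // advance to rho^{i+1}
    }
    batched
}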
+ use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use rayon::prelude::*; +use std::marker::PhantomData; +use super::{ + commitment::commitment_scheme::CommitmentScheme, + dense_mlpoly::DensePolynomial, + eq_poly::EqPolynomial, + unipoly::{CompressedUniPoly, UniPoly}, +}; use crate::{ field::{JoltField, OptimizedMul}, subprotocols::sumcheck::SumcheckInstanceProof, utils::{ errors::ProofVerifyError, thread::unsafe_allocate_zero_vec, - transcript::{AppendToTranscript, ProofTranscript}, + transcript::{AppendToTranscript, Transcript}, }, }; -use super::{ - commitment::commitment_scheme::CommitmentScheme, - dense_mlpoly::DensePolynomial, - eq_poly::EqPolynomial, - unipoly::{CompressedUniPoly, UniPoly}, -}; - /// An opening computed by the prover. /// May be a batched opening, where multiple polynomials opened /// at the *same* point are reduced to a single polynomial opened @@ -53,7 +54,12 @@ pub struct ProverOpening { /// at the (same) point. /// Multiple `VerifierOpening`s can be accumulated and further /// batched/reduced using a `VerifierOpeningAccumulator`. -pub struct VerifierOpening> { +pub struct VerifierOpening +where + F: JoltField, + PCS: CommitmentScheme, + ProofTranscript: Transcript, +{ /// The commitments to the opened polynomial. May be a random linear combination /// of multiple (additively homomorphic) polynomials, all being opened at the /// same point. @@ -82,7 +88,12 @@ impl ProverOpening { } } -impl> VerifierOpening { +impl VerifierOpening +where + F: JoltField, + PCS: CommitmentScheme, + ProofTranscript: Transcript, +{ fn new(commitment: PCS::Commitment, opening_point: Vec, claim: F) -> Self { VerifierOpening { commitment, @@ -94,14 +105,20 @@ impl> VerifierOpening { /// Accumulates openings computed by the prover over the course of Jolt, /// so that they can all be reduced to a single opening proof using sumcheck. -pub struct ProverOpeningAccumulator { +pub struct ProverOpeningAccumulator { openings: Vec>, + _marker: PhantomData, } /// Accumulates openings encountered by the verifier over the course of Jolt, /// so that they can all be reduced to a single opening proof verification using sumcheck. -pub struct VerifierOpeningAccumulator> { - openings: Vec>, +pub struct VerifierOpeningAccumulator +where + F: JoltField, + PCS: CommitmentScheme, + ProofTranscript: Transcript, +{ + openings: Vec>, #[cfg(test)] /// In testing, the Jolt verifier may be provided the prover's openings so that we /// can detect any places where the openings don't match up. @@ -111,21 +128,30 @@ pub struct VerifierOpeningAccumulator> { - sumcheck_proof: SumcheckInstanceProof, +pub struct ReducedOpeningProof< + F: JoltField, + PCS: CommitmentScheme, + ProofTranscript: Transcript, +> { + sumcheck_proof: SumcheckInstanceProof, sumcheck_claims: Vec, joint_opening_proof: PCS::Proof, } -impl Default for ProverOpeningAccumulator { +impl Default + for ProverOpeningAccumulator +{ fn default() -> Self { Self::new() } } -impl ProverOpeningAccumulator { +impl ProverOpeningAccumulator { pub fn new() -> Self { - Self { openings: vec![] } + Self { + openings: vec![], + _marker: PhantomData, + } } pub fn len(&self) -> usize { @@ -226,11 +252,11 @@ impl ProverOpeningAccumulator { /// Reduces the multiple openings accumulated into a single opening proof, /// using a single sumcheck. 
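// Usage sketch under the new generic-transcript API (hypothetical driver code,
// not part of the patch; `pcs_setup` and the accumulated openings are assumed):
// the accumulator and the reduced proof are now parameterized by a concrete
// Transcript implementation such as KeccakTranscript, mirroring the updated
// call sites elsewhere in this diff.
//
//     type T = KeccakTranscript;
//     type Pcs = HyperKZG<Bn254, T>;
//     let mut transcript = T::new(b"example");
//     let mut acc: ProverOpeningAccumulator<Fr, T> = ProverOpeningAccumulator::new();
//     // ... openings are appended while proving ...
//     let reduced: ReducedOpeningProof<Fr, Pcs, T> =
//         acc.reduce_and_prove::<Pcs>(&pcs_setup, &mut transcript);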
#[tracing::instrument(skip_all, name = "ProverOpeningAccumulator::reduce_and_prove")] - pub fn reduce_and_prove>( + pub fn reduce_and_prove>( &mut self, pcs_setup: &PCS::Setup, transcript: &mut ProofTranscript, - ) -> ReducedOpeningProof { + ) -> ReducedOpeningProof { // Generate coefficients for random linear combination let rho: F = transcript.challenge_scalar(); let mut rho_powers = vec![F::one()]; @@ -302,7 +328,7 @@ impl ProverOpeningAccumulator { &mut self, coeffs: &[F], transcript: &mut ProofTranscript, - ) -> (SumcheckInstanceProof, Vec, Vec) { + ) -> (SumcheckInstanceProof, Vec, Vec) { let max_num_vars = self .openings .iter() @@ -448,15 +474,23 @@ impl ProverOpeningAccumulator { } } -impl> Default - for VerifierOpeningAccumulator +impl Default for VerifierOpeningAccumulator +where + F: JoltField, + PCS: CommitmentScheme, + ProofTranscript: Transcript, { fn default() -> Self { Self::new() } } -impl> VerifierOpeningAccumulator { +impl VerifierOpeningAccumulator +where + F: JoltField, + PCS: CommitmentScheme, + ProofTranscript: Transcript, +{ pub fn new() -> Self { Self { openings: vec![], @@ -472,7 +506,7 @@ impl> VerifierOpeningAccumulator< #[cfg(test)] pub fn compare_to( &mut self, - prover_openings: ProverOpeningAccumulator, + prover_openings: ProverOpeningAccumulator, pcs_setup: &PCS::Setup, ) { self.prover_openings = Some(prover_openings.openings); @@ -564,7 +598,7 @@ impl> VerifierOpeningAccumulator< pub fn reduce_and_verify( &self, pcs_setup: &PCS::Setup, - reduced_opening_proof: &ReducedOpeningProof, + reduced_opening_proof: &ReducedOpeningProof, transcript: &mut ProofTranscript, ) -> Result<(), ProofVerifyError> { let num_sumcheck_rounds = self @@ -655,7 +689,7 @@ impl> VerifierOpeningAccumulator< &self, coeffs: &[F], num_sumcheck_rounds: usize, - sumcheck_proof: &SumcheckInstanceProof, + sumcheck_proof: &SumcheckInstanceProof, transcript: &mut ProofTranscript, ) -> Result<(F, Vec), ProofVerifyError> { let combined_claim: F = coeffs diff --git a/jolt-core/src/poly/unipoly.rs b/jolt-core/src/poly/unipoly.rs index bc18c72de..85ba65a87 100644 --- a/jolt-core/src/poly/unipoly.rs +++ b/jolt-core/src/poly/unipoly.rs @@ -4,7 +4,7 @@ use std::cmp::Ordering; use std::ops::{AddAssign, Index, IndexMut, Mul, MulAssign}; use crate::utils::gaussian_elimination::gaussian_elimination; -use crate::utils::transcript::{AppendToTranscript, ProofTranscript}; +use crate::utils::transcript::{AppendToTranscript, Transcript}; use ark_serialize::*; use rand_core::{CryptoRng, RngCore}; use rayon::iter::{IntoParallelIterator, IntoParallelRefMutIterator, ParallelIterator}; @@ -252,7 +252,7 @@ impl CompressedUniPoly { } impl AppendToTranscript for CompressedUniPoly { - fn append_to_transcript(&self, transcript: &mut ProofTranscript) { + fn append_to_transcript(&self, transcript: &mut ProofTranscript) { transcript.append_message(b"UniPoly_begin"); for i in 0..self.coeffs_except_linear_term.len() { transcript.append_scalar(&self.coeffs_except_linear_term[i]); diff --git a/jolt-core/src/r1cs/builder.rs b/jolt-core/src/r1cs/builder.rs index e52944e8a..39c8efeeb 100644 --- a/jolt-core/src/r1cs/builder.rs +++ b/jolt-core/src/r1cs/builder.rs @@ -1,3 +1,10 @@ +use super::{ + inputs::ConstraintInput, + key::{NonUniformR1CS, NonUniformR1CSConstraint, SparseEqualityItem}, + ops::{Term, Variable, LC}, + special_polys::SparsePolynomial, +}; +use crate::utils::transcript::Transcript; use crate::{ field::JoltField, jolt::vm::JoltPolynomials, @@ -12,13 +19,6 @@ use crate::{ use rayon::prelude::*; use 
std::{collections::BTreeMap, marker::PhantomData}; -use super::{ - inputs::ConstraintInput, - key::{NonUniformR1CS, NonUniformR1CSConstraint, SparseEqualityItem}, - ops::{Term, Variable, LC}, - special_polys::SparsePolynomial, -}; - /// Constraints over a single row. Each variable points to a single item in Z and the corresponding coefficient. #[derive(Clone)] struct Constraint { @@ -636,7 +636,10 @@ impl CombinedUniformBuilder>( + pub fn compute_spartan_Az_Bz_Cz< + PCS: CommitmentScheme, + ProofTranscript: Transcript, + >( &self, flattened_polynomials: &[&DensePolynomial], ) -> ( diff --git a/jolt-core/src/r1cs/inputs.rs b/jolt-core/src/r1cs/inputs.rs index 929eccf5c..2755aeba6 100644 --- a/jolt-core/src/r1cs/inputs.rs +++ b/jolt-core/src/r1cs/inputs.rs @@ -13,7 +13,7 @@ use crate::poly::commitment::commitment_scheme::CommitmentScheme; use crate::poly::dense_mlpoly::DensePolynomial; use crate::poly::opening_proof::VerifierOpeningAccumulator; use crate::utils::thread::unsafe_allocate_zero_vec; -use crate::utils::transcript::ProofTranscript; +use crate::utils::transcript::Transcript; use super::key::UniformSpartanKey; use super::spartan::{SpartanError, UniformSpartanProof}; @@ -24,6 +24,7 @@ use ark_std::log2; use common::constants::RAM_OPS_PER_INSTRUCTION; use common::rv_trace::{CircuitFlags, NUM_CIRCUIT_FLAGS}; use std::fmt::Debug; +use std::marker::PhantomData; use strum::IntoEnumIterator; use strum_macros::EnumIter; @@ -157,7 +158,8 @@ pub type R1CSOpenings = R1CSStuff; /// See issue #112792 . /// Adding #![feature(lazy_type_alias)] to the crate attributes seem to break /// `alloy_sol_types`. -pub type R1CSCommitments = R1CSStuff; +pub type R1CSCommitments, ProofTranscript: Transcript> = + R1CSStuff; impl R1CSPolynomials { pub fn new< @@ -213,19 +215,27 @@ impl R1CSPolynomials { } #[derive(CanonicalSerialize, CanonicalDeserialize)] -pub struct R1CSProof { +pub struct R1CSProof +{ pub key: UniformSpartanKey, - pub proof: UniformSpartanProof, + pub proof: UniformSpartanProof, + pub _marker: PhantomData, } -impl R1CSProof { +impl + R1CSProof +{ #[tracing::instrument(skip_all, name = "R1CSProof::verify")] - pub fn verify>( + pub fn verify( &self, - commitments: &JoltCommitments, - opening_accumulator: &mut VerifierOpeningAccumulator, + commitments: &JoltCommitments, + opening_accumulator: &mut VerifierOpeningAccumulator, transcript: &mut ProofTranscript, - ) -> Result<(), SpartanError> { + ) -> Result<(), SpartanError> + where + PCS: CommitmentScheme, + ProofTranscript: Transcript, + { self.proof .verify(&self.key, commitments, opening_accumulator, transcript) } diff --git a/jolt-core/src/r1cs/spartan.rs b/jolt-core/src/r1cs/spartan.rs index 2a4df63d2..91728b40e 100644 --- a/jolt-core/src/r1cs/spartan.rs +++ b/jolt-core/src/r1cs/spartan.rs @@ -12,7 +12,7 @@ use crate::r1cs::key::UniformSpartanKey; use crate::utils::math::Math; use crate::utils::thread::drop_in_background_thread; -use crate::utils::transcript::ProofTranscript; +use crate::utils::transcript::Transcript; use ark_serialize::CanonicalDeserialize; use ark_serialize::CanonicalSerialize; @@ -66,15 +66,26 @@ pub enum SpartanError { /// The proof is produced using Spartan's combination of the sum-check and /// the commitment to a vector viewed as a polynomial commitment #[derive(CanonicalSerialize, CanonicalDeserialize)] -pub struct UniformSpartanProof { +pub struct UniformSpartanProof< + const C: usize, + I: ConstraintInput, + F: JoltField, + ProofTranscript: Transcript, +> { _inputs: PhantomData, - pub(crate) outer_sumcheck_proof: 
SumcheckInstanceProof, + pub(crate) outer_sumcheck_proof: SumcheckInstanceProof, pub(crate) outer_sumcheck_claims: (F, F, F), - pub(crate) inner_sumcheck_proof: SumcheckInstanceProof, + pub(crate) inner_sumcheck_proof: SumcheckInstanceProof, pub(crate) claimed_witness_evals: Vec, + _marker: PhantomData, } -impl UniformSpartanProof { +impl UniformSpartanProof +where + I: ConstraintInput, + F: JoltField, + ProofTranscript: Transcript, +{ #[tracing::instrument(skip_all, name = "Spartan::setup")] pub fn setup( constraint_builder: &CombinedUniformBuilder, @@ -88,13 +99,16 @@ impl UniformSpartanProof>( + pub fn prove( constraint_builder: &CombinedUniformBuilder, key: &UniformSpartanKey, polynomials: &JoltPolynomials, - opening_accumulator: &mut ProverOpeningAccumulator, + opening_accumulator: &mut ProverOpeningAccumulator, transcript: &mut ProofTranscript, - ) -> Result { + ) -> Result + where + PCS: CommitmentScheme, + { let flattened_polys: Vec<&DensePolynomial> = I::flatten::() .iter() .map(|var| var.get_ref(polynomials)) @@ -110,7 +124,7 @@ impl UniformSpartanProof(&flattened_polys); + constraint_builder.compute_spartan_Az_Bz_Cz::(&flattened_polys); let comb_func_outer = |eq: &F, az: &F, bz: &F, cz: &F| -> F { // Below is an optimized form of: eq * (Az * Bz - Cz) @@ -209,17 +223,22 @@ impl UniformSpartanProof>( + pub fn verify( &self, key: &UniformSpartanKey, - commitments: &JoltCommitments, - opening_accumulator: &mut VerifierOpeningAccumulator, + commitments: &JoltCommitments, + opening_accumulator: &mut VerifierOpeningAccumulator, transcript: &mut ProofTranscript, - ) -> Result<(), SpartanError> { + ) -> Result<(), SpartanError> + where + PCS: CommitmentScheme, + ProofTranscript: Transcript, + { let num_rounds_x = key.num_rows_total().log_2(); let num_rounds_y = key.num_cols_total().log_2(); diff --git a/jolt-core/src/subprotocols/grand_product.rs b/jolt-core/src/subprotocols/grand_product.rs index 0cdee5c80..5f69bb772 100644 --- a/jolt-core/src/subprotocols/grand_product.rs +++ b/jolt-core/src/subprotocols/grand_product.rs @@ -7,20 +7,22 @@ use crate::poly::opening_proof::{ProverOpeningAccumulator, VerifierOpeningAccumu use crate::poly::{dense_mlpoly::DensePolynomial, unipoly::UniPoly}; use crate::utils::math::Math; use crate::utils::thread::drop_in_background_thread; -use crate::utils::transcript::ProofTranscript; +use crate::utils::transcript::Transcript; use ark_ff::Zero; use ark_serialize::*; use itertools::Itertools; use rayon::prelude::*; +use std::marker::PhantomData; #[derive(CanonicalSerialize, CanonicalDeserialize)] -pub struct BatchedGrandProductLayerProof { - pub proof: SumcheckInstanceProof, +pub struct BatchedGrandProductLayerProof { + pub proof: SumcheckInstanceProof, pub left_claims: Vec, pub right_claims: Vec, + _marker: PhantomData, } -impl BatchedGrandProductLayerProof { +impl BatchedGrandProductLayerProof { pub fn verify( &self, claim: F, @@ -35,12 +37,21 @@ impl BatchedGrandProductLayerProof { } #[derive(CanonicalSerialize, CanonicalDeserialize)] -pub struct BatchedGrandProductProof { - pub layers: Vec>, - pub quark_proof: Option>, +pub struct BatchedGrandProductProof +where + PCS: CommitmentScheme, + ProofTranscript: Transcript, +{ + pub layers: Vec>, + pub quark_proof: Option>, } -pub trait BatchedGrandProduct>: Sized { +pub trait BatchedGrandProduct: Sized +where + F: JoltField, + PCS: CommitmentScheme, + ProofTranscript: Transcript, +{ /// The bottom/input layer of the grand products type Leaves; type Config: Default + Clone + Copy; @@ -58,16 +69,18 @@ pub trait 
BatchedGrandProduct>: S /// Returns an iterator over the layers of this batched grand product circuit. /// Each layer is mutable so that its polynomials can be bound over the course /// of proving. - fn layers(&'_ mut self) -> impl Iterator>; + fn layers( + &'_ mut self, + ) -> impl Iterator>; /// Computes a batched grand product proof, layer by layer. #[tracing::instrument(skip_all, name = "BatchedGrandProduct::prove_grand_product")] fn prove_grand_product( &mut self, - _opening_accumulator: Option<&mut ProverOpeningAccumulator>, + _opening_accumulator: Option<&mut ProverOpeningAccumulator>, transcript: &mut ProofTranscript, _setup: Option<&PCS::Setup>, - ) -> (BatchedGrandProductProof, Vec) { + ) -> (BatchedGrandProductProof, Vec) { let mut proof_layers = Vec::with_capacity(self.num_layers()); let mut claims_to_verify = self.claims(); let mut r_grand_product = Vec::new(); @@ -94,7 +107,7 @@ pub trait BatchedGrandProduct>: S /// This function may be overridden if the layer isn't just multiplication gates, e.g. in the /// case of `ToggledBatchedGrandProduct`. fn verify_sumcheck_claim( - layer_proofs: &[BatchedGrandProductLayerProof], + layer_proofs: &[BatchedGrandProductLayerProof], layer_index: usize, coeffs: &[F], sumcheck_claim: F, @@ -125,7 +138,7 @@ pub trait BatchedGrandProduct>: S /// Function used for layer sumchecks in the generic batch verifier as well as the quark layered sumcheck hybrid fn verify_layers( - proof_layers: &[BatchedGrandProductLayerProof], + proof_layers: &[BatchedGrandProductLayerProof], claims: &Vec, transcript: &mut ProofTranscript, r_start: Vec, @@ -188,9 +201,9 @@ pub trait BatchedGrandProduct>: S /// Verifies the given grand product proof. fn verify_grand_product( - proof: &BatchedGrandProductProof, + proof: &BatchedGrandProductProof, claims: &Vec, - _opening_accumulator: Option<&mut VerifierOpeningAccumulator>, + _opening_accumulator: Option<&mut VerifierOpeningAccumulator>, transcript: &mut ProofTranscript, _setup: Option<&PCS::Setup>, ) -> (Vec, Vec) { @@ -201,14 +214,19 @@ pub trait BatchedGrandProduct>: S } } -pub trait BatchedGrandProductLayer: BatchedCubicSumcheck { +pub trait BatchedGrandProductLayer: + BatchedCubicSumcheck +where + F: JoltField, + ProofTranscript: Transcript, +{ /// Proves a single layer of a batched grand product circuit fn prove_layer( &mut self, claims: &mut Vec, r_grand_product: &mut Vec, transcript: &mut ProofTranscript, - ) -> BatchedGrandProductLayerProof { + ) -> BatchedGrandProductLayerProof { // produce a fresh set of coeffs let coeffs: Vec = transcript.challenge_vector(claims.len()); // produce a joint claim @@ -251,6 +269,7 @@ pub trait BatchedGrandProductLayer: BatchedCubicSumcheck { proof: sumcheck_proof, left_claims, right_claims, + _marker: PhantomData, } } } @@ -266,23 +285,30 @@ pub type DenseGrandProductLayer = Vec; /// Represents a batch of `DenseGrandProductLayer`, all of the same length `layer_len`. 
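// Illustrative sketch (not part of the patch): a grand product circuit is a
// binary tree of multiplication gates, so each layer is half the length of the
// one below it, with output wire i computed from input wires 2*i and 2*i + 1.
// Assumes the layer length is a power of two, as construction guarantees.
fn next_grand_product_layer<F: JoltField>(layer: &[F]) -> Vec<F> {
    debug_assert!(layer.len().is_power_of_two());
    (0..layer.len() / 2)
        .map(|i| layer[2 * i] * layer[2 * i + 1]) // parent = left * right
        .collect()
}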
#[derive(Debug, Clone)] -pub struct BatchedDenseGrandProductLayer { +pub struct BatchedDenseGrandProductLayer { pub layers: Vec>, pub layer_len: usize, + _marker: PhantomData, } -impl BatchedDenseGrandProductLayer { +impl BatchedDenseGrandProductLayer { pub fn new(values: Vec>) -> Self { let layer_len = values[0].len(); Self { layers: values, layer_len, + _marker: PhantomData, } } } -impl BatchedGrandProductLayer for BatchedDenseGrandProductLayer {} -impl BatchedCubicSumcheck for BatchedDenseGrandProductLayer { +impl BatchedGrandProductLayer + for BatchedDenseGrandProductLayer +{ +} +impl BatchedCubicSumcheck + for BatchedDenseGrandProductLayer +{ fn num_rounds(&self) -> usize { self.layer_len.log_2() - 1 } @@ -416,12 +442,17 @@ impl BatchedCubicSumcheck for BatchedDenseGrandProductLayer /// / \ / \ /// o o o o <- layers[layers.len() - 2] /// ... -pub struct BatchedDenseGrandProduct { - layers: Vec>, +pub struct BatchedDenseGrandProduct { + layers: Vec>, + _marker: PhantomData, } -impl> BatchedGrandProduct - for BatchedDenseGrandProduct +impl BatchedGrandProduct + for BatchedDenseGrandProduct +where + F: JoltField, + PCS: CommitmentScheme, + ProofTranscript: Transcript, { type Leaves = Vec>; type Config = (); @@ -429,7 +460,8 @@ impl> BatchedGrandProduct #[tracing::instrument(skip_all, name = "BatchedDenseGrandProduct::construct")] fn construct(leaves: Self::Leaves) -> Self { let num_layers = leaves[0].len().log_2(); - let mut layers: Vec> = Vec::with_capacity(num_layers); + let mut layers: Vec> = + Vec::with_capacity(num_layers); layers.push(BatchedDenseGrandProductLayer::new(leaves)); for i in 0..num_layers - 1 { @@ -448,11 +480,14 @@ impl> BatchedGrandProduct layers.push(BatchedDenseGrandProductLayer::new(new_layers)); } - Self { layers } + Self { + layers, + _marker: PhantomData, + } } #[tracing::instrument(skip_all, name = "BatchedDenseGrandProduct::construct_with_config")] fn construct_with_config(leaves: Self::Leaves, _config: Self::Config) -> Self { - >::construct(leaves) + >::construct(leaves) } fn num_layers(&self) -> usize { @@ -460,8 +495,11 @@ impl> BatchedGrandProduct } fn claims(&self) -> Vec { - let num_layers = - as BatchedGrandProduct>::num_layers(self); + let num_layers = as BatchedGrandProduct< + F, + PCS, + ProofTranscript, + >>::num_layers(self); let last_layers = &self.layers[num_layers - 1]; assert_eq!(last_layers.layer_len, 2); last_layers @@ -471,10 +509,12 @@ impl> BatchedGrandProduct .collect() } - fn layers(&'_ mut self) -> impl Iterator> { + fn layers( + &'_ mut self, + ) -> impl Iterator> { self.layers .iter_mut() - .map(|layer| layer as &mut dyn BatchedGrandProductLayer) + .map(|layer| layer as &mut dyn BatchedGrandProductLayer) .rev() } } @@ -625,13 +665,19 @@ impl DynamicDensityGrandProductLayer { /// size `layer_len`. Note that within a single batch, some layers may be represented by /// sparse vectors and others by dense vectors. 
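// Illustrative sketch (not part of the patch): the dense/sparse duality the doc
// comment above describes. A sparse layer stores only (index, value) pairs for
// entries that differ from 1 (e.g. fingerprints whose flag is toggled on); every
// other entry is implicitly 1, so densifying just fills the gaps back in.
fn densify<F: JoltField>(sparse: &[(usize, F)], layer_len: usize) -> Vec<F> {
    let mut dense = vec![F::one(); layer_len]; // implicit entries are 1
    for (index, value) in sparse {
        dense[*index] = *value;
    }
    dense
}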
#[derive(Debug, Clone)] -pub struct BatchedSparseGrandProductLayer { +pub struct BatchedSparseGrandProductLayer { pub layer_len: usize, pub layers: Vec>, + _marker: PhantomData, } -impl BatchedGrandProductLayer for BatchedSparseGrandProductLayer {} -impl BatchedCubicSumcheck for BatchedSparseGrandProductLayer { +impl BatchedGrandProductLayer + for BatchedSparseGrandProductLayer +{ +} +impl BatchedCubicSumcheck + for BatchedSparseGrandProductLayer +{ fn num_rounds(&self) -> usize { self.layer_len.log_2() - 1 } @@ -1056,7 +1102,7 @@ impl BatchedCubicSumcheck for BatchedSparseGrandProductLayer /// o o o o ↑ /// / \ / \ / \ / \ ––––––––––––––––––––––––––––––––––––––––––– /// 🏴 o 🏳️ o 🏳️ o 🏴 o toggle layer ↓ -struct BatchedGrandProductToggleLayer { +struct BatchedGrandProductToggleLayer { /// The list of non-zero flag indices for each layer in the batch. flag_indices: Vec>, /// The list of non-zero flag values for each layer in the batch. @@ -1065,9 +1111,10 @@ struct BatchedGrandProductToggleLayer { flag_values: Vec>, fingerprints: Vec>, layer_len: usize, + _marker: PhantomData, } -impl BatchedGrandProductToggleLayer { +impl BatchedGrandProductToggleLayer { fn new(flag_indices: Vec>, fingerprints: Vec>) -> Self { let layer_len = fingerprints[0].len(); Self { @@ -1076,10 +1123,11 @@ impl BatchedGrandProductToggleLayer { flag_values: vec![], fingerprints, layer_len, + _marker: PhantomData, } } - fn layer_output(&self) -> BatchedSparseGrandProductLayer { + fn layer_output(&self) -> BatchedSparseGrandProductLayer { let output_layers = self .fingerprints .par_iter() @@ -1096,11 +1144,14 @@ impl BatchedGrandProductToggleLayer { BatchedSparseGrandProductLayer { layer_len: self.layer_len, layers: output_layers, + _marker: PhantomData, } } } -impl BatchedCubicSumcheck for BatchedGrandProductToggleLayer { +impl BatchedCubicSumcheck + for BatchedGrandProductToggleLayer +{ fn num_rounds(&self) -> usize { self.layer_len.log_2() } @@ -1387,13 +1438,15 @@ impl BatchedCubicSumcheck for BatchedGrandProductToggleLayer } } -impl BatchedGrandProductLayer for BatchedGrandProductToggleLayer { +impl BatchedGrandProductLayer + for BatchedGrandProductToggleLayer +{ fn prove_layer( &mut self, claims_to_verify: &mut Vec, r_grand_product: &mut Vec, transcript: &mut ProofTranscript, - ) -> BatchedGrandProductLayerProof { + ) -> BatchedGrandProductLayerProof { // produce a fresh set of coeffs let coeffs: Vec = transcript.challenge_vector(claims_to_verify.len()); // produce a joint claim @@ -1425,17 +1478,23 @@ impl BatchedGrandProductLayer for BatchedGrandProductToggleLaye proof: sumcheck_proof, left_claims, right_claims, + _marker: PhantomData, } } } -pub struct ToggledBatchedGrandProduct { - toggle_layer: BatchedGrandProductToggleLayer, - sparse_layers: Vec>, +pub struct ToggledBatchedGrandProduct { + toggle_layer: BatchedGrandProductToggleLayer, + sparse_layers: Vec>, + _marker: PhantomData, } -impl> BatchedGrandProduct - for ToggledBatchedGrandProduct +impl< + F: JoltField, + PCS: CommitmentScheme, + ProofTranscript: Transcript, + > BatchedGrandProduct + for ToggledBatchedGrandProduct { type Leaves = (Vec>, Vec>); // (flags, fingerprints) type Config = (); @@ -1446,7 +1505,8 @@ impl> BatchedGrandProduct let num_layers = fingerprints[0].len().log_2(); let toggle_layer = BatchedGrandProductToggleLayer::new(flags, fingerprints); - let mut layers: Vec> = Vec::with_capacity(num_layers); + let mut layers: Vec> = + Vec::with_capacity(num_layers); layers.push(toggle_layer.layer_output()); for i in 0..num_layers - 1 { @@ 
@@ -1460,18 +1520,20 @@ impl<F: JoltField, PCS: CommitmentScheme<Field = F>> BatchedGrandProduct<F, PCS>
             layers.push(BatchedSparseGrandProductLayer {
                 layer_len: len,
                 layers: new_layers,
+                _marker: PhantomData,
             });
         }
 
         Self {
             toggle_layer,
             sparse_layers: layers,
+            _marker: PhantomData,
         }
     }
 
     #[tracing::instrument(skip_all, name = "ToggledBatchedGrandProduct::construct_with_config")]
     fn construct_with_config(leaves: Self::Leaves, _config: Self::Config) -> Self {
-        <Self as BatchedGrandProduct<F, PCS>>::construct(leaves)
+        <Self as BatchedGrandProduct<F, PCS, ProofTranscript>>::construct(leaves)
     }
 
     fn num_layers(&self) -> usize {
@@ -1479,7 +1541,8 @@ impl<F: JoltField, PCS: CommitmentScheme<Field = F>> BatchedGrandProduct<F, PCS>
     }
 
     fn claims(&self) -> Vec<F> {
-        let last_layers = &self.sparse_layers.last().unwrap();
+        let last_layers: &BatchedSparseGrandProductLayer<F, ProofTranscript> =
+            self.sparse_layers.last().unwrap();
         let (left_claims, right_claims) = last_layers.final_claims();
         left_claims
             .iter()
@@ -1488,19 +1551,21 @@ impl<F: JoltField, PCS: CommitmentScheme<Field = F>> BatchedGrandProduct<F, PCS>
             .collect()
     }
 
-    fn layers(&'_ mut self) -> impl Iterator<Item = &'_ mut dyn BatchedGrandProductLayer<F>> {
-        [&mut self.toggle_layer as &mut dyn BatchedGrandProductLayer<F>]
+    fn layers(
+        &'_ mut self,
+    ) -> impl Iterator<Item = &'_ mut dyn BatchedGrandProductLayer<F, ProofTranscript>> {
+        [&mut self.toggle_layer as &mut dyn BatchedGrandProductLayer<F, ProofTranscript>]
             .into_iter()
             .chain(
                 self.sparse_layers
                     .iter_mut()
-                    .map(|layer| layer as &mut dyn BatchedGrandProductLayer<F>),
+                    .map(|layer| layer as &mut dyn BatchedGrandProductLayer<F, ProofTranscript>),
             )
             .rev()
     }
 
     fn verify_sumcheck_claim(
-        layer_proofs: &[BatchedGrandProductLayerProof<F>],
+        layer_proofs: &[BatchedGrandProductLayerProof<F, ProofTranscript>],
         layer_index: usize,
         coeffs: &[F],
         sumcheck_claim: F,
@@ -1563,6 +1628,7 @@ impl<F: JoltField, PCS: CommitmentScheme<Field = F>> BatchedGrandProduct<F, PCS>
 mod grand_product_tests {
     use super::*;
     use crate::poly::commitment::zeromorph::Zeromorph;
+    use crate::utils::transcript::{KeccakTranscript, Transcript};
    use ark_bn254::{Bn254, Fr};
     use ark_std::{test_rng, One};
     use rand_core::RngCore;
@@ -1580,25 +1646,28 @@ mod grand_product_tests {
         .take(BATCH_SIZE)
         .collect();
 
-        let mut batched_circuit = <BatchedDenseGrandProduct<Fr> as BatchedGrandProduct<
-            Fr,
-            Zeromorph<Bn254>,
-        >>::construct(leaves);
-        let mut transcript: ProofTranscript = ProofTranscript::new(b"test_transcript");
+        let mut batched_circuit =
+            <BatchedDenseGrandProduct<Fr, KeccakTranscript> as BatchedGrandProduct<
+                Fr,
+                Zeromorph<Bn254, KeccakTranscript>,
+                KeccakTranscript,
+            >>::construct(leaves);
+        let mut transcript: KeccakTranscript = KeccakTranscript::new(b"test_transcript");
 
         // I love the rust type system
-        let claims =
-            <BatchedDenseGrandProduct<Fr> as BatchedGrandProduct<Fr, Zeromorph<Bn254>>>::claims(
-                &batched_circuit,
-            );
-        let (proof, r_prover) = <BatchedDenseGrandProduct<Fr> as BatchedGrandProduct<
+        let claims = <BatchedDenseGrandProduct<Fr, KeccakTranscript> as BatchedGrandProduct<
             Fr,
-            Zeromorph<Bn254>,
-        >>::prove_grand_product(
-            &mut batched_circuit, None, &mut transcript, None
-        );
-
-        let mut transcript: ProofTranscript = ProofTranscript::new(b"test_transcript");
+            Zeromorph<Bn254, KeccakTranscript>,
+            KeccakTranscript,
+        >>::claims(&batched_circuit);
+        let (proof, r_prover) =
+            <BatchedDenseGrandProduct<Fr, KeccakTranscript> as BatchedGrandProduct<
+                Fr,
+                Zeromorph<Bn254, KeccakTranscript>,
+                KeccakTranscript,
+            >>::prove_grand_product(&mut batched_circuit, None, &mut transcript, None);
+
+        let mut transcript: KeccakTranscript = KeccakTranscript::new(b"test_transcript");
         let (_, r_verifier) = BatchedDenseGrandProduct::verify_grand_product(
             &proof,
             &claims,
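The test's "I love the rust type system" comment refers to the fully qualified call syntax that the extra type parameter forces: `BatchedGrandProduct` can be implemented for several (PCS, transcript) combinations, so a plain method call is ambiguous and each call site must spell out `<Type as Trait<...>>::method(...)`. A standalone sketch of why (toy traits and hypothetical names, not the Jolt API):

```rust
trait Prove<PCS> {
    fn claims(&self) -> Vec<u64>;
}

struct Circuit;
struct Zeromorph;
struct HyperKZG;

impl Prove<Zeromorph> for Circuit {
    fn claims(&self) -> Vec<u64> { vec![1] }
}
impl Prove<HyperKZG> for Circuit {
    fn claims(&self) -> Vec<u64> { vec![2] }
}

fn main() {
    let c = Circuit;
    // `c.claims()` would not compile: two impls apply. The fully qualified
    // path selects one, exactly as the test above has to.
    let claims = <Circuit as Prove<Zeromorph>>::claims(&c);
    assert_eq!(claims, vec![1]);
}
```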
@@ -1628,7 +1697,8 @@ mod grand_product_tests {
         })
         .take(BATCH_SIZE)
         .collect();
-        let mut batched_dense_layer = BatchedDenseGrandProductLayer::new(dense_layers.clone());
+        let mut batched_dense_layer =
+            BatchedDenseGrandProductLayer::<Fr, KeccakTranscript>::new(dense_layers.clone());
 
         let sparse_layers: Vec<DynamicDensityGrandProductLayer<Fr>> = dense_layers
             .iter()
@@ -1642,13 +1712,14 @@ mod grand_product_tests {
                 DynamicDensityGrandProductLayer::Sparse(sparse_layer)
             })
             .collect();
-        let mut batched_sparse_layer: BatchedSparseGrandProductLayer<Fr> =
+        let mut batched_sparse_layer: BatchedSparseGrandProductLayer<Fr, KeccakTranscript> =
             BatchedSparseGrandProductLayer {
                 layer_len: LAYER_SIZE,
                 layers: sparse_layers,
+                _marker: PhantomData,
             };
 
-        let condense = |sparse_layers: BatchedSparseGrandProductLayer<Fr>| {
+        let condense = |sparse_layers: BatchedSparseGrandProductLayer<Fr, KeccakTranscript>| {
             sparse_layers
                 .layers
                 .iter()
@@ -1730,10 +1801,12 @@ mod grand_product_tests {
         })
         .take(BATCH_SIZE)
         .collect();
-        let dense_layers: BatchedSparseGrandProductLayer<Fr> = BatchedSparseGrandProductLayer {
-            layer_len: LAYER_SIZE,
-            layers: dense_layers,
-        };
+        let dense_layers: BatchedSparseGrandProductLayer<Fr, KeccakTranscript> =
+            BatchedSparseGrandProductLayer {
+                layer_len: LAYER_SIZE,
+                layers: dense_layers,
+                _marker: PhantomData,
+            };
 
         let sparse_layers: Vec<DynamicDensityGrandProductLayer<Fr>> = dense_layers
             .layers
@@ -1752,10 +1825,12 @@ mod grand_product_tests {
                 DynamicDensityGrandProductLayer::Sparse(sparse_layer)
             })
             .collect();
-        let sparse_layers: BatchedSparseGrandProductLayer<Fr> = BatchedSparseGrandProductLayer {
-            layer_len: LAYER_SIZE,
-            layers: sparse_layers,
-        };
+        let sparse_layers: BatchedSparseGrandProductLayer<Fr, KeccakTranscript> =
+            BatchedSparseGrandProductLayer {
+                layer_len: LAYER_SIZE,
+                layers: sparse_layers,
+                _marker: PhantomData,
+            };
 
         let r_eq = std::iter::repeat_with(|| Fr::random(&mut rng))
             .take(LAYER_SIZE.log_2() - 1)
diff --git a/jolt-core/src/subprotocols/grand_product_quarks.rs b/jolt-core/src/subprotocols/grand_product_quarks.rs
index ab956ad1a..c67ecc568 100644
--- a/jolt-core/src/subprotocols/grand_product_quarks.rs
+++ b/jolt-core/src/subprotocols/grand_product_quarks.rs
@@ -9,25 +9,30 @@ use crate::poly::dense_mlpoly::DensePolynomial;
 use crate::poly::eq_poly::EqPolynomial;
 use crate::poly::opening_proof::{ProverOpeningAccumulator, VerifierOpeningAccumulator};
 use crate::utils::math::Math;
-use crate::utils::transcript::{AppendToTranscript, ProofTranscript};
+use crate::utils::transcript::{AppendToTranscript, Transcript};
 use ark_serialize::*;
 use ark_std::{One, Zero};
 use itertools::Itertools;
 use rayon::prelude::*;
+use std::marker::PhantomData;
 use thiserror::Error;
 
 #[derive(CanonicalSerialize, CanonicalDeserialize)]
-pub struct QuarkGrandProductProof<PCS: CommitmentScheme> {
-    sumcheck_proof: SumcheckInstanceProof<PCS::Field>,
+pub struct QuarkGrandProductProof<
+    PCS: CommitmentScheme<ProofTranscript>,
+    ProofTranscript: Transcript,
+> {
+    sumcheck_proof: SumcheckInstanceProof<PCS::Field, ProofTranscript>,
     g_commitment: Vec<PCS::Commitment>,
     claimed_eval_g_r: Vec<PCS::Field>,
     claimed_eval_g_r_x: (Vec<PCS::Field>, Vec<PCS::Field>),
     helper_values: (Vec<PCS::Field>, Vec<PCS::Field>),
     num_vars: usize,
 }
-pub struct QuarkGrandProduct<F: JoltField> {
+pub struct QuarkGrandProduct<F: JoltField, ProofTranscript: Transcript> {
     polynomials: Vec<Vec<F>>,
-    base_layers: Vec<BatchedDenseGrandProductLayer<F>>,
+    base_layers: Vec<BatchedDenseGrandProductLayer<F, ProofTranscript>>,
+    _marker: PhantomData<ProofTranscript>,
 }
 
 #[derive(Clone, Copy, Debug, Default)]
@@ -56,8 +61,12 @@ pub struct QuarkGrandProductConfig {
     pub hybrid_layer_depth: QuarkHybridLayerDepth,
 }
 
-impl<F: JoltField, PCS: CommitmentScheme<Field = F>> BatchedGrandProduct<F, PCS>
-    for QuarkGrandProduct<F>
+impl<F, PCS, ProofTranscript> BatchedGrandProduct<F, PCS, ProofTranscript>
+    for QuarkGrandProduct<F, ProofTranscript>
+where
+    F: JoltField,
+    PCS: CommitmentScheme<ProofTranscript, Field = F>,
+    ProofTranscript: Transcript,
 {
     /// The bottom/input layer of the grand products
     type Leaves = Vec<Vec<F>>;
@@ -66,7 +75,7 @@ impl<F: JoltField, PCS: CommitmentScheme<Field = F>> BatchedGrandProduct<F, PCS>
     /// Constructs the grand product circuit(s) from `leaves`
     #[tracing::instrument(skip_all, name = "BatchedGrandProduct::construct")]
     fn construct(leaves: Self::Leaves) -> Self {
-        <Self as BatchedGrandProduct<F, PCS>>::construct_with_config(
+        <Self as BatchedGrandProduct<F, PCS, ProofTranscript>>::construct_with_config(
             leaves,
             QuarkGrandProductConfig::default(),
         )
@@ -84,8 +93,10 @@ impl<F: JoltField, PCS: CommitmentScheme<Field = F>> BatchedGrandProduct<F, PCS>
         };
 
         // Taken 1 to 1 from the code in the BatchedDenseGrandProductLayer implementation
-        let mut layers = Vec::<BatchedDenseGrandProductLayer<F>>::new();
-        layers.push(BatchedDenseGrandProductLayer::<F>::new(leaves));
+        let mut layers = Vec::<BatchedDenseGrandProductLayer<F, ProofTranscript>>::new();
+        layers.push(BatchedDenseGrandProductLayer::<F, ProofTranscript>::new(
+            leaves,
+        ));
 
         for i in 0..num_layers {
             let previous_layers = &layers[i];
@@ -108,6 +119,7 @@ impl<F: JoltField, PCS: CommitmentScheme<Field = F>> BatchedGrandProduct<F, PCS>
             return Self {
                 polynomials: Vec::<Vec<F>>::new(),
                 base_layers: layers,
+                _marker: PhantomData,
             };
         }
 
@@ -117,6 +129,7 @@ impl<F: JoltField, PCS: CommitmentScheme<Field = F>> BatchedGrandProduct<F, PCS>
         Self {
             polynomials: quark_polys,
             base_layers: layers,
+            _marker: PhantomData,
         }
     }
     /// The number of layers in the grand product, in this case it is the log of the quark layer size plus the gkr layer depth.
@@ -134,7 +147,9 @@ impl<F: JoltField, PCS: CommitmentScheme<Field = F>> BatchedGrandProduct<F, PCS>
     /// Each layer is mutable so that its polynomials can be bound over the course
     /// of proving.
     #[allow(unreachable_code)]
-    fn layers(&'_ mut self) -> impl Iterator<Item = &'_ mut dyn BatchedGrandProductLayer<F>> {
+    fn layers(
+        &'_ mut self,
+    ) -> impl Iterator<Item = &'_ mut dyn BatchedGrandProductLayer<F, ProofTranscript>> {
         panic!("We don't use the default prover and so we don't need the generic iterator");
         std::iter::empty()
     }
 
     /// Computes a batched grand product proof, layer by layer.
     #[tracing::instrument(skip_all, name = "BatchedGrandProduct::prove_grand_product")]
     fn prove_grand_product(
         &mut self,
-        opening_accumulator: Option<&mut ProverOpeningAccumulator<F>>,
+        opening_accumulator: Option<&mut ProverOpeningAccumulator<F, ProofTranscript>>,
         transcript: &mut ProofTranscript,
         setup: Option<&PCS::Setup>,
-    ) -> (BatchedGrandProductProof<PCS>, Vec<F>) {
+    ) -> (BatchedGrandProductProof<PCS, ProofTranscript>, Vec<F>) {
         let mut proof_layers = Vec::with_capacity(self.base_layers.len());
 
         // For proofs of polynomials of size less than 16 we support these with no quark proof
@@ -154,7 +169,7 @@ impl<F: JoltField, PCS: CommitmentScheme<Field = F>> BatchedGrandProduct<F, PCS>
         // When doing the quark hybrid proof, we first prove the grand product of a layer of a polynomial which is 4 layers deep in the tree
         // of a standard layered sumcheck grand product, then we use the sumcheck layers to prove via gkr layers that the random point opened
         // by the quark proof is in fact the folded result of the base layer.
-        let (quark, random, claims) = QuarkGrandProductProof::<PCS>::prove(
+        let (quark, random, claims) = QuarkGrandProductProof::<PCS, ProofTranscript>::prove(
             &self.polynomials,
             opening_accumulator.unwrap(),
             transcript,
@@ -165,7 +180,11 @@ impl<F: JoltField, PCS: CommitmentScheme<Field = F>> BatchedGrandProduct<F, PCS>
             (
                 None,
                 Vec::<F>::new(),
-                <QuarkGrandProduct<F> as BatchedGrandProduct<F, PCS>>::claims(self),
+                <QuarkGrandProduct<F, ProofTranscript> as BatchedGrandProduct<
+                    F,
+                    PCS,
+                    ProofTranscript,
+                >>::claims(self),
             )
         };
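The hybrid construction above builds the GKR base the same way `BatchedDenseGrandProduct::construct` does: each layer multiplies adjacent pairs of the previous one, halving until the crossover point where the remaining values become the quark polynomials. A plain-integer sketch of that halving loop (not Jolt types) showing the invariant the circuit maintains:

```rust
// Each layer i has leaves.len() / 2^i entries; the final two entries of a
// fully-folded tree multiply to the grand product of all leaves.
fn build_layers(leaves: Vec<u64>) -> Vec<Vec<u64>> {
    assert!(leaves.len().is_power_of_two());
    let num_layers = leaves.len().ilog2() as usize;
    let mut layers = vec![leaves];
    for i in 0..num_layers - 1 {
        let prev = &layers[i];
        // "Taken 1 to 1" from the dense layer logic: pairwise products.
        let next: Vec<u64> = prev.chunks(2).map(|pair| pair[0] * pair[1]).collect();
        layers.push(next);
    }
    layers
}

fn main() {
    let layers = build_layers(vec![2, 3, 4, 5, 6, 7, 8, 9]);
    let last = layers.last().unwrap();
    assert_eq!(last.len(), 2);
    assert_eq!(last[0] * last[1], 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9);
}
```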
@@ -185,9 +204,9 @@ impl<F: JoltField, PCS: CommitmentScheme<Field = F>> BatchedGrandProduct<F, PCS>
     /// Verifies the given grand product proof.
     #[tracing::instrument(skip_all, name = "BatchedGrandProduct::verify_grand_product")]
     fn verify_grand_product(
-        proof: &BatchedGrandProductProof<PCS>,
+        proof: &BatchedGrandProductProof<PCS, ProofTranscript>,
         claims: &Vec<F>,
-        opening_accumulator: Option<&mut VerifierOpeningAccumulator<F, PCS>>,
+        opening_accumulator: Option<&mut VerifierOpeningAccumulator<F, PCS, ProofTranscript>>,
         transcript: &mut ProofTranscript,
         _setup: Option<&PCS::Setup>,
     ) -> (Vec<F>, Vec<F>) {
@@ -207,11 +226,12 @@ impl<F: JoltField, PCS: CommitmentScheme<Field = F>> BatchedGrandProduct<F, PCS>
         };
 
-        let (sumcheck_claims, sumcheck_r) = <Self as BatchedGrandProduct<F, PCS>>::verify_layers(
-            &proof.layers,
-            &v_points,
-            transcript,
-            rand,
+        let (sumcheck_claims, sumcheck_r) = <Self as BatchedGrandProduct<
+            F,
+            PCS,
+            ProofTranscript,
+        >>::verify_layers(
+            &proof.layers, &v_points, transcript, rand
         );
 
         (sumcheck_claims, sumcheck_r)
@@ -231,7 +251,11 @@ pub enum QuarkError {
     InvalidBinding,
 }
 
-impl<PCS: CommitmentScheme> QuarkGrandProductProof<PCS> {
+impl<PCS, ProofTranscript> QuarkGrandProductProof<PCS, ProofTranscript>
+where
+    PCS: CommitmentScheme<ProofTranscript>,
+    ProofTranscript: Transcript,
+{
     /// Computes a grand product proof using the Section 5 technique from Quarks Paper
     /// First - Extends the evals of v to create an f poly, then commits to it and evals
     /// Then - Constructs a g poly and preforms sumcheck proof that sum == 0
@@ -239,7 +263,7 @@ impl<PCS: CommitmentScheme> QuarkGrandProductProof<PCS> {
     /// Returns a random point and evaluation to be verified by the caller (which our hybrid prover does with GKR)
     fn prove(
         leaves: &[Vec<PCS::Field>],
-        opening_accumulator: &mut ProverOpeningAccumulator<PCS::Field>,
+        opening_accumulator: &mut ProverOpeningAccumulator<PCS::Field, ProofTranscript>,
         transcript: &mut ProofTranscript,
         setup: &PCS::Setup,
     ) -> (Self, Vec<PCS::Field>, Vec<PCS::Field>) {
@@ -253,7 +277,7 @@ impl<PCS: CommitmentScheme> QuarkGrandProductProof<PCS> {
         for v in leaves.iter() {
             let v_polynomial = DensePolynomial::<PCS::Field>::new(v.to_vec());
-            let (f_1_r, f_r_0, f_r_1, p) = v_into_f::<PCS>(&v_polynomial);
+            let (f_1_r, f_r_0, f_r_1, p) = v_into_f::<PCS, ProofTranscript>(&v_polynomial);
             v_polys.push(v_polynomial);
             g_polys.push(f_1_r.clone());
             sumcheck_polys.push(f_1_r);
@@ -272,14 +296,14 @@ impl<PCS: CommitmentScheme> QuarkGrandProductProof<PCS> {
         // Now we do the sumcheck using the prove arbitrary
         // First instantiate our polynomials
         let tau: Vec<PCS::Field> = transcript.challenge_vector(v_variables);
-        let evals: DensePolynomial<<PCS as CommitmentScheme>::Field> =
+        let evals: DensePolynomial<<PCS as CommitmentScheme<ProofTranscript>>::Field> =
             DensePolynomial::new(EqPolynomial::evals(&tau));
         //We add evals as the second to last polynomial in the sumcheck
         sumcheck_polys.push(evals);
 
         // Next we calculate the polynomial equal to 1 at all points but 1,1,1...,0
         let challenge_sum = vec![PCS::Field::one(); v_variables];
-        let eq_sum: DensePolynomial<<PCS as CommitmentScheme>::Field> =
+        let eq_sum: DensePolynomial<<PCS as CommitmentScheme<ProofTranscript>>::Field> =
             DensePolynomial::new(EqPolynomial::evals(&challenge_sum));
         //We add evals as the last polynomial in the sumcheck
         sumcheck_polys.push(eq_sum);
@@ -312,14 +336,15 @@ impl<PCS: CommitmentScheme> QuarkGrandProductProof<PCS> {
         // Now run the sumcheck in arbitrary mode
         // Note - We use the final randomness from binding all variables (x) as the source random for the openings so the verifier can
         // check that the base layer is the same as is committed too.
-        let (sumcheck_proof, x, _) = SumcheckInstanceProof::<PCS::Field>::prove_arbitrary::<_>(
-            &rlc_claims,
-            v_variables,
-            &mut sumcheck_polys,
-            output_check_fn,
-            3,
-            transcript,
-        );
+        let (sumcheck_proof, x, _) =
+            SumcheckInstanceProof::<PCS::Field, ProofTranscript>::prove_arbitrary::<_>(
+                &rlc_claims,
+                v_variables,
+                &mut sumcheck_polys,
+                output_check_fn,
+                3,
+                transcript,
+            );
 
         let borrowed: Vec<&DensePolynomial<PCS::Field>> = g_polys.iter().collect();
         let chis_r = EqPolynomial::evals(&x);
@@ -344,10 +369,15 @@ impl<PCS: CommitmentScheme> QuarkGrandProductProof<PCS> {
         // Therefore we do a line reduced opening on g(r', 0) and g(r', 1)e();
         let mut r_prime = vec![PCS::Field::zero(); x.len() - 1];
         r_prime.clone_from_slice(&x[1..x.len()]);
-        let claimed_eval_g_r_x =
-            open_and_prove::<PCS>(&r_prime, &g_polys, opening_accumulator, transcript);
+        let claimed_eval_g_r_x = open_and_prove::<PCS, ProofTranscript>(
+            &r_prime,
+            &g_polys,
+            opening_accumulator,
+            transcript,
+        );
         // next we need to make a claim about h(r', 0) and h(r', 1) so we use our line reduction to make one claim
-        let ((r_t, h_r_t), helper_values) = line_reduce::<PCS>(&r_prime, &v_polys, transcript);
+        let ((r_t, h_r_t), helper_values) =
+            line_reduce::<PCS, ProofTranscript>(&r_prime, &v_polys, transcript);
 
         let num_vars = v_variables;
 
@@ -370,7 +400,7 @@ impl<PCS: CommitmentScheme> QuarkGrandProductProof<PCS> {
     fn verify(
         &self,
         claims: &[PCS::Field],
-        opening_accumulator: &mut VerifierOpeningAccumulator<PCS::Field, PCS>,
+        opening_accumulator: &mut VerifierOpeningAccumulator<PCS::Field, PCS, ProofTranscript>,
         transcript: &mut ProofTranscript,
         n_rounds: usize,
     ) -> Result<(Vec<PCS::Field>, Vec<PCS::Field>), QuarkError> {
@@ -412,7 +442,7 @@ impl<PCS: CommitmentScheme> QuarkGrandProductProof<PCS> {
             transcript,
         );
         // Next do the line reduction verification of g(r', 0) and g(r', 1)
-        line_reduce_opening_verify::<PCS>(
+        line_reduce_opening_verify::<PCS, ProofTranscript>(
             &self.claimed_eval_g_r_x,
             &r_prime,
             &borrowed_g,
@@ -477,14 +507,18 @@ impl<PCS: CommitmentScheme> QuarkGrandProductProof<PCS> {
 
 // Computes slices of f for the sumcheck
 #[allow(clippy::type_complexity)]
-fn v_into_f<PCS: CommitmentScheme>(
+fn v_into_f<PCS, ProofTranscript>(
     v: &DensePolynomial<PCS::Field>,
 ) -> (
     DensePolynomial<PCS::Field>,
     DensePolynomial<PCS::Field>,
     DensePolynomial<PCS::Field>,
     PCS::Field,
-) {
+)
+where
+    PCS: CommitmentScheme<ProofTranscript>,
+    ProofTranscript: Transcript,
+{
     let v_length = v.len();
     let mut f_evals = vec![PCS::Field::zero(); 2 * v_length];
     let (evals, _) = v.split_evals(v.len());
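The line-reduction step used for `g(r', 0)` and `g(r', 1)` (and below for `h`) relies on a multilinear polynomial being degree 1 in each variable: the two claimed evaluations determine the line `t ↦ f(t, r')`, so the verifier can sample one `r_star` and check a single opening at `(r_star, r')` instead of two. A self-contained numeric sketch over a toy prime field (illustrative helpers, not the Jolt API):

```rust
const P: u128 = 2_305_843_009_213_693_951; // 2^61 - 1

fn add(a: u128, b: u128) -> u128 { (a + b) % P }
fn sub(a: u128, b: u128) -> u128 { (a + P - b) % P }
fn mul(a: u128, b: u128) -> u128 { (a * b) % P }

/// Evaluate a multilinear polynomial (given by its 2^n hypercube evaluations)
/// at an arbitrary point; point[0] binds the most significant index bit.
fn eval_multilinear(mut evals: Vec<u128>, point: &[u128]) -> u128 {
    for r in point {
        let half = evals.len() / 2;
        let mut next = Vec::with_capacity(half);
        for i in 0..half {
            // lo + r * (hi - lo) == (1 - r) * lo + r * hi
            next.push(add(evals[i], mul(*r, sub(evals[i + half], evals[i]))));
        }
        evals = next;
    }
    evals[0]
}

fn main() {
    let f: Vec<u128> = vec![3, 1, 4, 1, 5, 9, 2, 6]; // f over {0,1}^3
    let r = [10u128, 20]; // the shared suffix r'
    let r_star = 7u128;

    // Two claimed openings sharing the suffix r'.
    let f0 = eval_multilinear(f.clone(), &[0, r[0], r[1]]);
    let f1 = eval_multilinear(f.clone(), &[1, r[0], r[1]]);

    // The verifier interpolates the line l(t) = f0 + t * (f1 - f0) ...
    let line_at_star = add(f0, mul(r_star, sub(f1, f0)));

    // ... and needs only ONE opening of f, at (r_star, r'), to check it.
    assert_eq!(line_at_star, eval_multilinear(f, &[r_star, r[0], r[1]]));
}
```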
@@ -528,15 +562,15 @@ fn v_into_f<PCS, ProofTranscript>(
 // (or vice versa) as implicitly defining the line t*f(0r) + (t-1)f(1r) and so the evals data alone
 // is sufficient to calculate the claimed line, then we sample a random value r_star and do an opening proof
 // on (r_star - 1) * f(0r) + r_star * f(1r) in the commitment to f.
-fn open_and_prove<PCS: CommitmentScheme>(
+fn open_and_prove<PCS: CommitmentScheme<ProofTranscript>, ProofTranscript: Transcript>(
     r: &[PCS::Field],
     f_polys: &[DensePolynomial<PCS::Field>],
-    opening_accumulator: &mut ProverOpeningAccumulator<PCS::Field>,
+    opening_accumulator: &mut ProverOpeningAccumulator<PCS::Field, ProofTranscript>,
     transcript: &mut ProofTranscript,
 ) -> (Vec<PCS::Field>, Vec<PCS::Field>) {
     // Do the line reduction protocol
     let ((r_star, openings_star), (openings_0, openings_1)) =
-        line_reduce::<PCS>(r, f_polys, transcript);
+        line_reduce::<PCS, ProofTranscript>(r, f_polys, transcript);
     opening_accumulator.append(
         &f_polys.iter().collect::<Vec<_>>(),
         DensePolynomial::new(EqPolynomial::evals(&r_star)),
@@ -551,7 +585,7 @@ fn open_and_prove<PCS: CommitmentScheme<ProofTranscript>, ProofTranscript: Transcript>(
 #[allow(clippy::type_complexity)]
 /// Calculates the r0 r1 values and writes their evaluation to the transcript before calculating r star and
 /// the opening of this, but does not prove the opening as that is left to the calling function
-fn line_reduce<PCS: CommitmentScheme>(
+fn line_reduce<PCS: CommitmentScheme<ProofTranscript>, ProofTranscript: Transcript>(
     r: &[PCS::Field],
     f_polys: &[DensePolynomial<PCS::Field>],
     transcript: &mut ProofTranscript,
@@ -604,11 +638,14 @@ fn line_reduce<PCS: CommitmentScheme<ProofTranscript>, ProofTranscript: Transcript>(
 }
 
 /// Does the counterpart of the open_and_prove by computing an r_star vector point and then validating this opening
-fn line_reduce_opening_verify<PCS: CommitmentScheme>(
+fn line_reduce_opening_verify<
+    PCS: CommitmentScheme<ProofTranscript>,
+    ProofTranscript: Transcript,
+>(
     data: &(Vec<PCS::Field>, Vec<PCS::Field>),
     r: &[PCS::Field],
     commitments: &[&PCS::Commitment],
-    opening_accumulator: &mut VerifierOpeningAccumulator<PCS::Field, PCS>,
+    opening_accumulator: &mut VerifierOpeningAccumulator<PCS::Field, PCS, ProofTranscript>,
     transcript: &mut ProofTranscript,
 ) {
     // First compute the line reduction and points
@@ -623,7 +660,7 @@ fn line_reduce_opening_verify<
     );
 }
 
-fn line_reduce_verify<F: JoltField>(
+fn line_reduce_verify<F: JoltField, ProofTranscript: Transcript>(
     data: &(Vec<F>, Vec<F>),
     r: &[F],
     transcript: &mut ProofTranscript,
@@ -651,6 +688,7 @@ fn line_reduce_verify<F: JoltField, ProofTranscript: Transcript>(
 mod quark_grand_product_tests {
     use super::*;
     use crate::poly::commitment::zeromorph::*;
+    use crate::utils::transcript::{KeccakTranscript, Transcript};
     use ark_bn254::{Bn254, Fr};
     use rand_core::SeedableRng;
 
@@ -668,25 +706,27 @@
         .collect();
         let known_products = vec![leaves_1.iter().product(), leaves_2.iter().product()];
         let v = vec![leaves_1, leaves_2];
-        let mut transcript: ProofTranscript = ProofTranscript::new(b"test_transcript");
+        let mut transcript: KeccakTranscript = KeccakTranscript::new(b"test_transcript");
         let srs = ZeromorphSRS::<Bn254>::setup(&mut rng, 1 << 9);
         let setup = srs.trim(1 << 9);
-        let mut prover_accumulator: ProverOpeningAccumulator<Fr> = ProverOpeningAccumulator::new();
-        let mut verifier_accumulator: VerifierOpeningAccumulator<Fr, Zeromorph<Bn254>> =
-            VerifierOpeningAccumulator::new();
-
-        let (proof, _, _) = QuarkGrandProductProof::<Zeromorph<Bn254>>::prove(
-            &v,
-            &mut prover_accumulator,
-            &mut transcript,
-            &setup,
-        );
+        let mut prover_accumulator: ProverOpeningAccumulator<Fr, KeccakTranscript> =
+            ProverOpeningAccumulator::new();
+        let mut verifier_accumulator: VerifierOpeningAccumulator<
+            Fr,
+            Zeromorph<Bn254, KeccakTranscript>,
+            KeccakTranscript,
+        > = VerifierOpeningAccumulator::new();
+
+        let (proof, _, _) = QuarkGrandProductProof::<
+            Zeromorph<Bn254, KeccakTranscript>,
+            KeccakTranscript,
+        >::prove(&v, &mut prover_accumulator, &mut transcript, &setup);
 
         let batched_proof = prover_accumulator.reduce_and_prove(&setup, &mut transcript);
 
         // Note resetting the transcript is important
-        transcript = ProofTranscript::new(b"test_transcript");
+        transcript = KeccakTranscript::new(b"test_transcript");
         let result = proof.verify(
             &known_products,
             &mut verifier_accumulator,
@@ -714,26 +754,33 @@
         let known_products: Vec<Fr> = vec![leaves_1.iter().product(), leaves_2.iter().product()];
 
         let v = vec![leaves_1, leaves_2];
-        let mut transcript: ProofTranscript = ProofTranscript::new(b"test_transcript");
+        let mut transcript: KeccakTranscript = KeccakTranscript::new(b"test_transcript");
         let srs = ZeromorphSRS::<Bn254>::setup(&mut rng, 1 << 9);
         let setup = srs.trim(1 << 9);
-        let mut prover_accumulator: ProverOpeningAccumulator<Fr> = ProverOpeningAccumulator::new();
-        let mut verifier_accumulator: VerifierOpeningAccumulator<Fr, Zeromorph<Bn254>> =
-            VerifierOpeningAccumulator::new();
-
-        let mut hybrid_grand_product = <QuarkGrandProduct<Fr> as BatchedGrandProduct<
+        let mut prover_accumulator: ProverOpeningAccumulator<Fr, KeccakTranscript> =
+            ProverOpeningAccumulator::new();
+        let mut verifier_accumulator: VerifierOpeningAccumulator<
             Fr,
-            Zeromorph<Bn254>,
-        >>::construct_with_config(v, config);
-        let proof: BatchedGrandProductProof<Zeromorph<Bn254>> = hybrid_grand_product
-            .prove_grand_product(Some(&mut prover_accumulator), &mut transcript, Some(&setup))
-            .0;
+            Zeromorph<Bn254, KeccakTranscript>,
+            KeccakTranscript,
+        > = VerifierOpeningAccumulator::new();
+
+        let mut hybrid_grand_product =
+            <QuarkGrandProduct<Fr, KeccakTranscript> as BatchedGrandProduct<
+                Fr,
+                Zeromorph<Bn254, KeccakTranscript>,
+                KeccakTranscript,
+            >>::construct_with_config(v, config);
+        let proof: BatchedGrandProductProof<Zeromorph<Bn254, KeccakTranscript>, KeccakTranscript> =
+            hybrid_grand_product
+                .prove_grand_product(Some(&mut prover_accumulator), &mut transcript, Some(&setup))
+                .0;
 
         let batched_proof = prover_accumulator.reduce_and_prove(&setup, &mut transcript);
 
         // Note resetting the transcript is important
-        transcript = ProofTranscript::new(b"test_transcript");
+        transcript = KeccakTranscript::new(b"test_transcript");
         let _ = QuarkGrandProduct::verify_grand_product(
             &proof,
             &known_products,
diff --git a/jolt-core/src/subprotocols/sumcheck.rs b/jolt-core/src/subprotocols/sumcheck.rs
index 8fcee3c72..bb2371d8f 100644
--- a/jolt-core/src/subprotocols/sumcheck.rs
+++ b/jolt-core/src/subprotocols/sumcheck.rs
@@ -8,12 +8,17 @@ use crate::r1cs::special_polys::{SparsePolynomial, SparseTripleIterator};
 use crate::utils::errors::ProofVerifyError;
 use crate::utils::mul_0_optimized;
 use crate::utils::thread::drop_in_background_thread;
-use crate::utils::transcript::{AppendToTranscript, ProofTranscript};
+use crate::utils::transcript::{AppendToTranscript, Transcript};
 use ark_serialize::*;
 use rayon::prelude::*;
+use std::marker::PhantomData;
 
 /// Batched cubic sumcheck used in grand products
-pub trait BatchedCubicSumcheck<F: JoltField>: Sync {
+pub trait BatchedCubicSumcheck<F, ProofTranscript>: Sync
+where
+    F: JoltField,
+    ProofTranscript: Transcript,
+{
     fn num_rounds(&self) -> usize;
     fn bind(&mut self, eq_poly: &mut DensePolynomial<F>, r: &F);
     fn compute_cubic(
@@ -31,7 +36,11 @@ pub trait BatchedCubicSumcheck<F: JoltField>: Sync {
         coeffs: &[F],
         eq_poly: &mut DensePolynomial<F>,
         transcript: &mut ProofTranscript,
-    ) -> (SumcheckInstanceProof<F>, Vec<F>, (Vec<F>, Vec<F>)) {
+    ) -> (
+        SumcheckInstanceProof<F, ProofTranscript>,
+        Vec<F>,
+        (Vec<F>, Vec<F>),
+    ) {
         debug_assert_eq!(eq_poly.get_num_vars(), self.num_rounds());
 
         let mut previous_claim = *claim;
@@ -64,7 +73,7 @@ pub trait BatchedCubicSumcheck<F, ProofTranscript>: Sync
     }
 }
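`prove_sumcheck` above follows the standard round pattern: send a low-degree univariate message, derive a challenge, bind one variable of every polynomial, repeat. A toy version for a single multilinear polynomial (so the round messages are linear rather than the cubics this trait produces; fixed values stand in for transcript-derived challenges):

```rust
const P: u128 = 2_305_843_009_213_693_951; // 2^61 - 1

fn add(a: u128, b: u128) -> u128 { (a + b) % P }
fn sub(a: u128, b: u128) -> u128 { (a + P - b) % P }
fn mul(a: u128, b: u128) -> u128 { (a * b) % P }

/// Bind the top variable of a multilinear polynomial to `r`.
fn bind(evals: &[u128], r: u128) -> Vec<u128> {
    let (lo, hi) = evals.split_at(evals.len() / 2);
    lo.iter().zip(hi).map(|(l, h)| add(*l, mul(r, sub(*h, *l)))).collect()
}

fn main() {
    let mut g: Vec<u128> = vec![3, 1, 4, 1, 5, 9, 2, 6];
    let mut claim: u128 = g.iter().fold(0, |acc, x| add(acc, *x));
    let challenges = [11u128, 22, 33]; // Fiat-Shamir would sample these

    for r in challenges {
        // Round message: the univariate round polynomial at 0 and 1.
        let (lo, hi) = g.split_at(g.len() / 2);
        let p0 = lo.iter().fold(0, |acc, x| add(acc, *x));
        let p1 = hi.iter().fold(0, |acc, x| add(acc, *x));
        assert_eq!(claim, add(p0, p1)); // verifier's consistency check
        // Fold the claim at r (linear here because g is multilinear).
        claim = add(p0, mul(r, sub(p1, p0)));
        g = bind(&g, r);
    }
    // Final check: the folded claim equals g at the challenge point.
    assert_eq!(claim, g[0]);
}
```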
-impl<F: JoltField> SumcheckInstanceProof<F> {
+impl<F: JoltField, ProofTranscript: Transcript> SumcheckInstanceProof<F, ProofTranscript> {
     /// Create a sumcheck proof for polynomial(s) of arbitrary degree.
     ///
     /// Params
@@ -493,13 +502,19 @@ impl<F: JoltField> SumcheckInstanceProof<F> {
 }
 
 #[derive(CanonicalSerialize, CanonicalDeserialize, Debug)]
-pub struct SumcheckInstanceProof<F: JoltField> {
+pub struct SumcheckInstanceProof<F: JoltField, ProofTranscript: Transcript> {
     pub compressed_polys: Vec<CompressedUniPoly<F>>,
+    _marker: PhantomData<ProofTranscript>,
 }
 
-impl<F: JoltField> SumcheckInstanceProof<F> {
-    pub fn new(compressed_polys: Vec<CompressedUniPoly<F>>) -> SumcheckInstanceProof<F> {
-        SumcheckInstanceProof { compressed_polys }
+impl<F: JoltField, ProofTranscript: Transcript> SumcheckInstanceProof<F, ProofTranscript> {
+    pub fn new(
+        compressed_polys: Vec<CompressedUniPoly<F>>,
+    ) -> SumcheckInstanceProof<F, ProofTranscript> {
+        SumcheckInstanceProof {
+            compressed_polys,
+            _marker: PhantomData,
+        }
     }
 
     /// Verify this sumcheck proof.
diff --git a/jolt-core/src/utils/sol_types.rs b/jolt-core/src/utils/sol_types.rs
index 7b814825d..f4cc8f292 100644
--- a/jolt-core/src/utils/sol_types.rs
+++ b/jolt-core/src/utils/sol_types.rs
@@ -9,6 +9,7 @@ use crate::r1cs::spartan::UniformSpartanProof;
 use crate::subprotocols::grand_product::BatchedGrandProductLayerProof;
 use crate::subprotocols::grand_product::BatchedGrandProductProof;
 use crate::subprotocols::sumcheck::SumcheckInstanceProof;
+use crate::utils::transcript::Transcript;
 use alloy_primitives::U256;
 use alloy_sol_types::sol;
 use ark_bn254::FrConfig;
@@ -130,7 +131,9 @@ impl Into<VK> for &HyperKZGVerifierKey<Bn254> {
     }
 }
 
-impl Into<SumcheckProof> for &SumcheckInstanceProof<Fr> {
+impl<ProofTranscript: Transcript> Into<SumcheckProof>
+    for &SumcheckInstanceProof<Fr, ProofTranscript>
+{
     fn into(self) -> SumcheckProof {
         let mut compressed_polys = vec![];
@@ -156,8 +159,8 @@ pub fn into_uint256<F: PrimeField>(from: F) -> U256 {
 }
 
 const C: usize = 4;
-impl Into<SpartanProof>
-    for &UniformSpartanProof<C, JoltR1CSInputs, Fp<MontBackend<FrConfig, 4>, 4>>
+impl<ProofTranscript: Transcript> Into<SpartanProof>
+    for &UniformSpartanProof<C, JoltR1CSInputs, Fp<MontBackend<FrConfig, 4>, 4>, ProofTranscript>
 {
     fn into(self) -> SpartanProof {
         let claimed_evals = self
@@ -177,7 +180,9 @@
     }
 }
 
-impl Into<GKRLayer> for BatchedGrandProductLayerProof<Fr> {
+impl<ProofTranscript: Transcript> Into<GKRLayer>
+    for BatchedGrandProductLayerProof<Fr, ProofTranscript>
+{
     fn into(self) -> GKRLayer {
         let left = self.left_claims.into_iter().map(into_uint256).collect();
         let right = self.right_claims.into_iter().map(into_uint256).collect();
@@ -189,7 +194,9 @@
     }
 }
 
-impl Into<GrandProductProof> for BatchedGrandProductProof<HyperKZG<Bn254>> {
+impl<ProofTranscript: Transcript> Into<GrandProductProof>
+    for BatchedGrandProductProof<HyperKZG<Bn254, ProofTranscript>, ProofTranscript>
+{
     fn into(self) -> GrandProductProof {
         let layers: Vec<GKRLayer> = self.layers.into_iter().map(|i| i.into()).collect();
         assert!(self.quark_proof.is_none(), "Quarks are unsupported");
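These Solidity-facing `Into` impls hinge on `into_uint256`, whose body is elided by the hunk; the crux is byte order, since arkworks serializes field elements little-endian while the EVM reads 32-byte words big-endian. A sketch consistent with the visible signature (assumed behavior, not the verbatim body):

```rust
use alloy_primitives::U256;
use ark_ff::{BigInteger, PrimeField};

// Big-endian bytes of the canonical representative, packed into the U256
// word layout the Solidity verifier consumes directly.
pub fn into_uint256_sketch<F: PrimeField>(from: F) -> U256 {
    U256::from_be_slice(&from.into_bigint().to_bytes_be())
}

fn main() {
    let x = ark_bn254::Fr::from(42u64);
    assert_eq!(into_uint256_sketch(x), U256::from(42u64));
}
```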
diff --git a/jolt-core/src/utils/transcript.rs b/jolt-core/src/utils/transcript.rs
index 127f37978..7b2349bc3 100644
--- a/jolt-core/src/utils/transcript.rs
+++ b/jolt-core/src/utils/transcript.rs
@@ -5,7 +5,7 @@ use sha3::{Digest, Keccak256};
 
 /// Represents the current state of the protocol's Fiat-Shamir transcript.
 #[derive(Clone)]
-pub struct ProofTranscript {
+pub struct KeccakTranscript {
     /// Ethereum-compatible 256-bit running state
     pub state: [u8; 32],
     /// We append an ordinal to each invocation of the hash
@@ -21,8 +21,61 @@ pub struct ProofTranscript {
     expected_state_history: Option<Vec<[u8; 32]>>,
 }
 
-impl ProofTranscript {
-    pub fn new(label: &'static [u8]) -> Self {
+impl KeccakTranscript {
+    /// Gives the hasher object with the running seed and index added
+    /// To load hash you must call finalize, after appending u8 vectors
+    fn hasher(&self) -> Keccak256 {
+        let mut packed = [0_u8; 28].to_vec();
+        packed.append(&mut self.n_rounds.to_be_bytes().to_vec());
+        // Note we add the extra memory here to improve the ease of eth integrations
+        Keccak256::new()
+            .chain_update(self.state)
+            .chain_update(&packed)
+    }
+
+    // Loads arbitrary byte lengths using ceil(out/32) invocations of 32 byte randoms
+    // Discards top bits when the size is less than 32 bytes
+    fn challenge_bytes(&mut self, out: &mut [u8]) {
+        let mut remaining_len = out.len();
+        let mut start = 0;
+        while remaining_len > 32 {
+            self.challenge_bytes32(&mut out[start..start + 32]);
+            start += 32;
+            remaining_len -= 32;
+        }
+        // We load a full 32 byte random region
+        let mut full_rand = vec![0_u8; 32];
+        self.challenge_bytes32(&mut full_rand);
+        // Then only clone the first bits of this random region to perfectly fill out
+        out[start..start + remaining_len].clone_from_slice(&full_rand[0..remaining_len]);
+    }
+
+    // Loads exactly 32 bytes from the transcript by hashing the seed with the round constant
+    fn challenge_bytes32(&mut self, out: &mut [u8]) {
+        assert_eq!(32, out.len());
+        let rand: [u8; 32] = self.hasher().finalize().into();
+        out.clone_from_slice(rand.as_slice());
+        self.update_state(rand);
+    }
+
+    fn update_state(&mut self, new_state: [u8; 32]) {
+        self.state = new_state;
+        self.n_rounds += 1;
+        #[cfg(test)]
+        {
+            if let Some(expected_state_history) = &self.expected_state_history {
+                assert!(
+                    new_state == expected_state_history[self.n_rounds as usize],
+                    "Fiat-Shamir transcript mismatch"
+                );
+            }
+            self.state_history.push(new_state);
+        }
+    }
+}
+
+impl Transcript for KeccakTranscript {
+    fn new(label: &'static [u8]) -> Self {
         // Hash in the label
         assert!(label.len() < 33);
         let hasher = if label.len() == 32 {
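The `hasher`/`update_state` pair fixes the exact preimage layout: every squeeze hashes the 32-byte running state followed by a 32-byte word whose last four bytes are the big-endian round counter (the 28 zero bytes keep the Solidity mirror a cheap word-aligned hash). A standalone reproduction of that schedule (assumes the `sha3` crate, which the file itself imports):

```rust
use sha3::{Digest, Keccak256};

/// One transcript squeeze: hash `state || [0u8; 28] || n_rounds (BE)`.
/// The 32-byte digest is both the emitted challenge and the next state.
fn next_challenge(state: &mut [u8; 32], n_rounds: &mut u32) -> [u8; 32] {
    let mut packed = [0u8; 32];
    packed[28..].copy_from_slice(&n_rounds.to_be_bytes());
    let digest: [u8; 32] = Keccak256::new()
        .chain_update(*state)
        .chain_update(packed)
        .finalize()
        .into();
    *state = digest;
    *n_rounds += 1;
    digest
}

fn main() {
    let mut state = [0u8; 32];
    let mut rounds = 0u32;
    let c1 = next_challenge(&mut state, &mut rounds);
    let c2 = next_challenge(&mut state, &mut rounds);
    assert_ne!(c1, c2); // the round counter separates successive squeezes
}
```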
@@ -46,22 +99,11 @@ impl ProofTranscript {
     #[cfg(test)]
     /// Compare this transcript to `other` and panic if/when they deviate.
     /// Typically used to compare the verifier's transcript to the prover's.
-    pub fn compare_to(&mut self, other: Self) {
+    fn compare_to(&mut self, other: Self) {
         self.expected_state_history = Some(other.state_history);
     }
 
-    /// Gives the hasher object with the running seed and index added
-    /// To load hash you must call finalize, after appending u8 vectors
-    fn hasher(&self) -> Keccak256 {
-        let mut packed = [0_u8; 28].to_vec();
-        packed.append(&mut self.n_rounds.to_be_bytes().to_vec());
-        // Note we add the extra memory here to improve the ease of eth integrations
-        Keccak256::new()
-            .chain_update(self.state)
-            .chain_update(&packed)
-    }
-
-    pub fn append_message(&mut self, msg: &'static [u8]) {
+    fn append_message(&mut self, msg: &'static [u8]) {
         // We require all messages to fit into one evm word and then right pad them
         // right padding matches the format of the strings when cast to bytes 32 in solidity
         assert!(msg.len() < 33);
@@ -76,13 +118,13 @@ impl Transcript for KeccakTranscript {
         self.update_state(hasher.finalize().into());
     }
 
-    pub fn append_bytes(&mut self, bytes: &[u8]) {
+    fn append_bytes(&mut self, bytes: &[u8]) {
         // Add the message and label
         let hasher = self.hasher().chain_update(bytes);
         self.update_state(hasher.finalize().into());
     }
 
-    pub fn append_u64(&mut self, x: u64) {
+    fn append_u64(&mut self, x: u64) {
         // Allocate into a 32 byte region
         let mut packed = [0_u8; 24].to_vec();
         packed.append(&mut x.to_be_bytes().to_vec());
@@ -90,11 +132,7 @@ impl Transcript for KeccakTranscript {
         self.update_state(hasher.finalize().into());
     }
 
-    pub fn append_protocol_name(&mut self, protocol_name: &'static [u8]) {
-        self.append_message(protocol_name);
-    }
-
-    pub fn append_scalar<F: JoltField>(&mut self, scalar: &F) {
+    fn append_scalar<F: JoltField>(&mut self, scalar: &F) {
         let mut buf = vec![];
         scalar.serialize_uncompressed(&mut buf).unwrap();
         // Serialize uncompressed gives the scalar in LE byte order which is not
@@ -104,7 +142,7 @@ impl Transcript for KeccakTranscript {
         self.append_bytes(&buf);
     }
 
-    pub fn append_scalars<F: JoltField>(&mut self, scalars: &[F]) {
+    fn append_scalars<F: JoltField>(&mut self, scalars: &[F]) {
         self.append_message(b"begin_append_vector");
         for item in scalars.iter() {
             self.append_scalar(item);
@@ -112,7 +150,7 @@ impl Transcript for KeccakTranscript {
         self.append_message(b"end_append_vector");
     }
 
-    pub fn append_point<G: CurveGroup>(&mut self, point: &G) {
+    fn append_point<G: CurveGroup>(&mut self, point: &G) {
         // If we add the point at infinity then we hash over a region of zeros
         if point.is_zero() {
             self.append_bytes(&[0_u8; 64]);
@@ -135,7 +173,7 @@ impl Transcript for KeccakTranscript {
         self.update_state(hasher.finalize().into());
     }
 
-    pub fn append_points<G: CurveGroup>(&mut self, points: &[G]) {
+    fn append_points<G: CurveGroup>(&mut self, points: &[G]) {
         self.append_message(b"begin_append_vector");
         for item in points.iter() {
             self.append_point(item);
@@ -143,7 +181,7 @@ impl Transcript for KeccakTranscript {
         self.append_message(b"end_append_vector");
     }
 
-    pub fn challenge_scalar<F: JoltField>(&mut self) -> F {
+    fn challenge_scalar<F: JoltField>(&mut self) -> F {
         let mut buf = vec![0u8; F::NUM_BYTES];
         self.challenge_bytes(&mut buf);
         // Because onchain we don't want to do the bit reversal to get the LE ordering
@@ -152,14 +190,14 @@ impl ProofTranscript {
         F::from_bytes(&buf)
     }
 
-    pub fn challenge_vector<F: JoltField>(&mut self, len: usize) -> Vec<F> {
+    fn challenge_vector<F: JoltField>(&mut self, len: usize) -> Vec<F> {
         (0..len)
             .map(|_i| self.challenge_scalar())
             .collect::<Vec<F>>()
     }
 
     // Compute powers of scalar q : (1, q, q^2, ..., q^(len-1))
-    pub fn challenge_scalar_powers<F: JoltField>(&mut self, len: usize) -> Vec<F> {
+    fn challenge_scalar_powers<F: JoltField>(&mut self, len: usize) -> Vec<F> {
         let q: F = self.challenge_scalar();
         let mut q_powers = vec![F::one(); len];
         for i in 1..len {
@@ -167,48 +205,25 @@ impl ProofTranscript {
         }
         q_powers
     }
+}
 
-    // Loads arbitrary byte lengths using ceil(out/32) invocations of 32 byte randoms
-    // Discards top bits when the size is less than 32 bytes
-    fn challenge_bytes(&mut self, out: &mut [u8]) {
-        let mut remaining_len = out.len();
-        let mut start = 0;
-        while remaining_len > 32 {
-            self.challenge_bytes32(&mut out[start..start + 32]);
-            start += 32;
-            remaining_len -= 32;
-        }
-        // We load a full 32 byte random region
-        let mut full_rand = vec![0_u8; 32];
-        self.challenge_bytes32(&mut full_rand);
-        // Then only clone the first bits of this random region to perfectly fill out
-        out[start..start + remaining_len].clone_from_slice(&full_rand[0..remaining_len]);
-    }
-
-    // Loads exactly 32 bytes from the transcript by hashing the seed with the round constant
-    fn challenge_bytes32(&mut self, out: &mut [u8]) {
-        assert_eq!(32, out.len());
-        let rand: [u8; 32] = self.hasher().finalize().into();
-        out.clone_from_slice(rand.as_slice());
-        self.update_state(rand);
-    }
-
-    fn update_state(&mut self, new_state: [u8; 32]) {
-        self.state = new_state;
-        self.n_rounds += 1;
-        #[cfg(test)]
-        {
-            if let Some(expected_state_history) = &self.expected_state_history {
-                assert!(
-                    new_state == expected_state_history[self.n_rounds as usize],
-                    "Fiat-Shamir transcript mismatch"
-                );
-            }
-            self.state_history.push(new_state);
-        }
-    }
-}
+
+pub trait Transcript: Clone + Sync + Send + 'static {
+    fn new(label: &'static [u8]) -> Self;
+    #[cfg(test)]
+    fn compare_to(&mut self, other: Self);
+    fn append_message(&mut self, msg: &'static [u8]);
+    fn append_bytes(&mut self, bytes: &[u8]);
+    fn append_u64(&mut self, x: u64);
+    fn append_scalar<F: JoltField>(&mut self, scalar: &F);
+    fn append_scalars<F: JoltField>(&mut self, scalars: &[F]);
+    fn append_point<G: CurveGroup>(&mut self, point: &G);
+    fn append_points<G: CurveGroup>(&mut self, points: &[G]);
+    fn challenge_scalar<F: JoltField>(&mut self) -> F;
+    fn challenge_vector<F: JoltField>(&mut self, len: usize) -> Vec<F>;
+    // Compute powers of scalar q : (1, q, q^2, ..., q^(len-1))
+    fn challenge_scalar_powers<F: JoltField>(&mut self, len: usize) -> Vec<F>;
 }
 
 pub trait AppendToTranscript {
-    fn append_to_transcript(&self, transcript: &mut ProofTranscript);
+    fn append_to_transcript<ProofTranscript: Transcript>(&self, transcript: &mut ProofTranscript);
 }
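With the trait in place, protocol code is written once against `Transcript` and instantiated with `KeccakTranscript` (or any future hash) at the edges. A usage sketch of a generic helper built only from methods the trait exposes (the helper name is illustrative, not part of the PR):

```rust
use ark_bn254::Fr;
use jolt_core::field::JoltField;
use jolt_core::utils::transcript::{KeccakTranscript, Transcript};

/// Absorb a batch of claims and squeeze one batching coefficient per claim.
/// Works unchanged for any `Transcript` implementation.
fn batching_coefficients<F: JoltField, ProofTranscript: Transcript>(
    transcript: &mut ProofTranscript,
    claims: &[F],
) -> Vec<F> {
    transcript.append_scalars(claims);
    transcript.challenge_vector(claims.len())
}

fn main() {
    let mut transcript = KeccakTranscript::new(b"example");
    let claims = vec![Fr::from(2u64), Fr::from(3u64)];
    let coeffs = batching_coefficients(&mut transcript, &claims);
    assert_eq!(coeffs.len(), 2);
}
```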
diff --git a/jolt-evm-verifier/script/src/bin/grand_product_example.rs b/jolt-evm-verifier/script/src/bin/grand_product_example.rs
index a845e0ac2..1ccb5b163 100644
--- a/jolt-evm-verifier/script/src/bin/grand_product_example.rs
+++ b/jolt-evm-verifier/script/src/bin/grand_product_example.rs
@@ -2,8 +2,7 @@ use jolt_core::{
     field::JoltField,
     poly::commitment::hyperkzg::HyperKZG,
     subprotocols::grand_product::{BatchedDenseGrandProduct, BatchedGrandProduct},
-    utils::transcript::ProofTranscript,
-    utils::sol_types::GrandProductProof
+    utils::sol_types::GrandProductProof,
 };
 
 use std::env;
@@ -12,18 +11,22 @@ use alloy_sol_types::{sol, SolType};
 use ark_bn254::{Bn254, Fr};
 use ark_serialize::CanonicalSerialize;
 use ark_std::test_rng;
+use jolt_core::utils::transcript::{KeccakTranscript, Transcript};
 
-fn get_proof_data(batched_circuit: &mut BatchedDenseGrandProduct<Fr>) {
-    let mut transcript: ProofTranscript = ProofTranscript::new(b"test_transcript");
+fn get_proof_data(batched_circuit: &mut BatchedDenseGrandProduct<Fr, KeccakTranscript>) {
+    let mut transcript: KeccakTranscript = KeccakTranscript::new(b"test_transcript");
 
-    let (proof, r_prover) = <BatchedDenseGrandProduct<Fr> as BatchedGrandProduct<
+    let (proof, r_prover) =
+        <BatchedDenseGrandProduct<Fr, KeccakTranscript> as BatchedGrandProduct<
+            Fr,
+            HyperKZG<Bn254, KeccakTranscript>,
+            KeccakTranscript,
+        >>::prove_grand_product(batched_circuit, None, &mut transcript, None);
+    let claims = <BatchedDenseGrandProduct<Fr, KeccakTranscript> as BatchedGrandProduct<
         Fr,
-        HyperKZG<Bn254>,
-    >>::prove_grand_product(batched_circuit, None, &mut transcript, None);
-    let claims =
-        <BatchedDenseGrandProduct<Fr> as BatchedGrandProduct<Fr, HyperKZG<Bn254>>>::claims(
-            batched_circuit,
-        );
+        HyperKZG<Bn254, KeccakTranscript>,
+        KeccakTranscript,
+    >>::claims(batched_circuit);
 
     //encoding the proof into abi
 
@@ -33,10 +36,7 @@ fn get_proof_data(batched_circuit: &mut BatchedDenseGrandProduct<Fr, KeccakTranscript>) {
         uint256[] r_prover;
     });
 
-    let r_prover = r_prover
-        .iter()
-        .map(fr_to_uint256)
-        .collect::<Vec<U256>>();
+    let r_prover = r_prover.iter().map(fr_to_uint256).collect::<Vec<U256>>();
 
     let claims = claims.iter().map(fr_to_uint256).collect::<Vec<U256>>();
 
@@ -73,10 +73,12 @@ fn main() {
     .take(BATCH_SIZE)
     .collect();
 
-    let mut batched_circuit = <BatchedDenseGrandProduct<Fr> as BatchedGrandProduct<
-        Fr,
-        HyperKZG<Bn254>,
-    >>::construct(leaves);
+    let mut batched_circuit =
+        <BatchedDenseGrandProduct<Fr, KeccakTranscript> as BatchedGrandProduct<
+            Fr,
+            HyperKZG<Bn254, KeccakTranscript>,
+            KeccakTranscript,
+        >>::construct(leaves);
 
     get_proof_data(&mut batched_circuit);
 }
diff --git a/jolt-evm-verifier/script/src/bin/hyperkzg_batch_example.rs b/jolt-evm-verifier/script/src/bin/hyperkzg_batch_example.rs
index f2dbd349e..c8e2c66c0 100644
--- a/jolt-evm-verifier/script/src/bin/hyperkzg_batch_example.rs
+++ b/jolt-evm-verifier/script/src/bin/hyperkzg_batch_example.rs
@@ -9,7 +9,7 @@ use ark_std::UniformRand;
 use jolt_core::poly::commitment::commitment_scheme::{BatchType, CommitmentScheme};
 use jolt_core::poly::commitment::hyperkzg::*;
 use jolt_core::poly::dense_mlpoly::DensePolynomial;
-use jolt_core::utils::transcript::ProofTranscript;
+use jolt_core::utils::transcript::{KeccakTranscript, Transcript};
 use rand_core::SeedableRng;
 
 use jolt_core::utils::sol_types::{HyperKZGProofSol, VK};
@@ -42,7 +42,7 @@ fn main() {
             .collect::<Vec<Fr>>(),
         );
         let eval = poly.evaluate(&point);
-        commitments.push(HyperKZG::commit(&pk, &poly).unwrap());
+        commitments.push(HyperKZG::<_, KeccakTranscript>::commit(&pk, &poly).unwrap());
         polys.push(poly);
         evals.push(eval);
     }
@@ -52,7 +52,7 @@ fn main() {
     }
 
     // prove an evaluation
-    let mut prover_transcript = ProofTranscript::new(b"TestEval");
+    let mut prover_transcript = KeccakTranscript::new(b"TestEval");
     let proof: HyperKZGProof<Bn254> = HyperKZG::batch_prove(
         &(pk, vk),
         borrowed.as_slice(),
diff --git a/jolt-evm-verifier/script/src/bin/hyperkzg_example.rs b/jolt-evm-verifier/script/src/bin/hyperkzg_example.rs
index 8e2ed2147..bf142e883 100644
--- a/jolt-evm-verifier/script/src/bin/hyperkzg_example.rs
+++ b/jolt-evm-verifier/script/src/bin/hyperkzg_example.rs
@@ -8,7 +8,7 @@ use ark_ff::PrimeField;
 use ark_std::UniformRand;
 use jolt_core::poly::commitment::hyperkzg::*;
 use jolt_core::poly::dense_mlpoly::DensePolynomial;
-use jolt_core::utils::transcript::ProofTranscript;
+use jolt_core::utils::transcript::{KeccakTranscript, Transcript};
 use rand_core::SeedableRng;
 
 use jolt_core::utils::sol_types::{HyperKZGProofSol, VK};
@@ -37,14 +37,14 @@ fn main() {
     let eval = poly.evaluate(&point);
 
     // make a commitment
-    let c = HyperKZG::commit(&pk, &poly).unwrap();
+    let c = HyperKZG::<_, KeccakTranscript>::commit(&pk, &poly).unwrap();
 
     // prove an evaluation
-    let mut prover_transcript = ProofTranscript::new(b"TestEval");
+    let mut prover_transcript = KeccakTranscript::new(b"TestEval");
     let proof: HyperKZGProof<Bn254> =
         HyperKZG::open(&pk, &poly, &point, &eval, &mut prover_transcript).unwrap();
 
-    let mut verifier_tr = ProofTranscript::new(b"TestEval");
+    let mut verifier_tr = KeccakTranscript::new(b"TestEval");
     assert!(HyperKZG::verify(&vk, &c, &point, &eval, &proof, &mut verifier_tr).is_ok());
 
     sol!(struct Example {
diff --git a/jolt-evm-verifier/script/src/bin/transcript_example.rs b/jolt-evm-verifier/script/src/bin/transcript_example.rs
index 9e2847059..31a00bb71 100644
--- a/jolt-evm-verifier/script/src/bin/transcript_example.rs
+++ b/jolt-evm-verifier/script/src/bin/transcript_example.rs
@@ -1,4 +1,7 @@
-use jolt_core::{field::JoltField, utils::transcript::ProofTranscript};
+use jolt_core::{
+    field::JoltField,
+    utils::transcript::{KeccakTranscript, Transcript},
+};
 
 use ark_ff::{BigInteger, PrimeField};
 
@@ -34,7 +37,7 @@ fn main() {
     let mut vector_responses = Vec::<Vec<Fr>>::new();
 
     let usizes: Vec<u64> = vec![rng.gen(), rng.gen()];
-    let mut transcript: ProofTranscript = ProofTranscript::new(b"test_transcript");
+    let mut transcript = KeccakTranscript::new(b"test_transcript");
 
     transcript.append_u64(usizes[0]);
     transcript.append_u64(usizes[1]);
diff --git a/jolt-sdk/macros/src/lib.rs b/jolt-sdk/macros/src/lib.rs
index 1ec305002..3dba83691 100644
--- a/jolt-sdk/macros/src/lib.rs
+++ b/jolt-sdk/macros/src/lib.rs
@@ -190,7 +190,7 @@ impl MacroBuilder {
             #[cfg(all(not(target_arch = "wasm32"), not(feature = "guest")))]
             pub fn #preprocess_fn_name() -> (
                 jolt::host::Program,
-                jolt::JoltPreprocessing<4, jolt::F, jolt::PCS>
+                jolt::JoltPreprocessing<4, jolt::F, jolt::PCS, jolt::ProofTranscript>
             ) {
                 #imports
@@ -201,7 +201,7 @@ impl MacroBuilder {
                 let (bytecode, memory_init) = program.decode();
 
                 // TODO(moodlezoup): Feed in size parameters via macro
-                let preprocessing: JoltPreprocessing<4, jolt::F, jolt::PCS> =
+                let preprocessing: JoltPreprocessing<4, jolt::F, jolt::PCS, jolt::ProofTranscript> =
                     RV32IJoltVM::preprocess(
                         bytecode,
                         memory_init,
@@ -242,7 +242,7 @@ impl MacroBuilder {
         #[cfg(all(not(target_arch = "wasm32"), not(feature = "guest")))]
         pub fn #prove_fn_name(
             mut program: jolt::host::Program,
-            preprocessing: jolt::JoltPreprocessing<4, jolt::F, jolt::PCS>,
+            preprocessing: jolt::JoltPreprocessing<4, jolt::F, jolt::PCS, jolt::ProofTranscript>,
             #inputs
         ) -> #prove_output_ty {
             #imports
@@ -403,6 +403,7 @@ impl MacroBuilder {
             JoltPreprocessing,
             Jolt,
             JoltCommitments,
+            ProofTranscript,
             RV32IJoltVM,
             RV32I,
             RV32IJoltProof,
@@ -539,7 +540,7 @@ impl MacroBuilder {
         #[wasm_bindgen]
         #[cfg(all(target_arch = "wasm32", not(feature = "guest")))]
         pub fn #verify_wasm_fn_name(preprocessing_data: &[u8], proof_bytes: &[u8]) -> bool {
-            use jolt::{Jolt, JoltHyperKZGProof, RV32IJoltVM};
+            use jolt::{Jolt, JoltHyperKZGProof, RV32IJoltVM, ProofTranscript};
 
             let decoded_preprocessing_data: DecodedData = deserialize_from_bin(preprocessing_data).unwrap();
             let proof = JoltHyperKZGProof::deserialize_from_bytes(proof_bytes).unwrap();
diff --git a/jolt-sdk/src/host_utils.rs b/jolt-sdk/src/host_utils.rs
index f5386e403..deadb4304 100644
--- a/jolt-sdk/src/host_utils.rs
+++ b/jolt-sdk/src/host_utils.rs
@@ -10,7 +10,9 @@ pub use jolt_core::host;
 pub use jolt_core::jolt::instruction;
 pub use jolt_core::jolt::vm::{
     bytecode::BytecodeRow,
-    rv32i_vm::{JoltHyperKZGProof, RV32IJoltProof, RV32IJoltVM, Serializable, PCS, RV32I},
+    rv32i_vm::{
+        JoltHyperKZGProof, ProofTranscript, RV32IJoltProof, RV32IJoltVM, Serializable, PCS, RV32I,
+    },
     Jolt, JoltCommitments, JoltPreprocessing, JoltProof,
 };
 pub use tracer;
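With `ProofTranscript` re-exported from `host_utils`, downstream SDK code can name the transcript the same way the generated macro code does. A usage sketch (it assumes, as the re-export suggests, that `rv32i_vm::ProofTranscript` is a concrete alias for the Keccak transcript and that the `Transcript` trait is brought into scope from jolt-core):

```rust
use jolt::ProofTranscript; // hypothetical import path via the SDK prelude
use jolt_core::utils::transcript::Transcript;

fn main() {
    // Same Fiat-Shamir state a prover/verifier pair would share.
    let mut transcript = ProofTranscript::new(b"sdk_example");
    transcript.append_u64(42);
    transcript.append_message(b"done");
}
```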