From 4f3792a44bbff7735ad9a6600818a0e97a31ca9c Mon Sep 17 00:00:00 2001 From: Alan Szepieniec Date: Mon, 12 Feb 2024 14:20:00 +0100 Subject: [PATCH 1/4] feat: Derandomize wallet generation Add a new method `new_pseudorandom` on `WalletSecret` that takes a seed and produces the wallet secret, as opposed to sampling one from `thread_rng`. This is useful for tests and conceivably in production too. --- src/models/state/wallet/mod.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/models/state/wallet/mod.rs b/src/models/state/wallet/mod.rs index 26c894e84..d05f19da9 100644 --- a/src/models/state/wallet/mod.rs +++ b/src/models/state/wallet/mod.rs @@ -11,7 +11,8 @@ use anyhow::{bail, Context, Result}; use bip39::Mnemonic; use itertools::Itertools; use num_traits::Zero; -use rand::{thread_rng, Rng}; +use rand::rngs::StdRng; +use rand::{thread_rng, Rng, SeedableRng}; use serde::{Deserialize, Serialize}; use std::fs::{self}; use std::path::{Path, PathBuf}; @@ -91,9 +92,15 @@ impl WalletSecret { /// Create a new `Wallet` and populate it with a new secret seed, with entropy /// obtained via `thread_rng()` from the operating system. pub fn new_random() -> Self { + Self::new_pseudorandom(thread_rng().gen()) + } + + /// Create a new `Wallet` and populate it by expanding a given seed. + pub fn new_pseudorandom(seed: [u8; 32]) -> Self { + let mut rng: StdRng = SeedableRng::from_seed(seed); Self { name: STANDARD_WALLET_NAME.to_string(), - secret_seed: SecretKeyMaterial(thread_rng().gen()), + secret_seed: SecretKeyMaterial(rng.gen()), version: STANDARD_WALLET_VERSION, } } From c5785225e5a74498115f28435f3421e5d4416f51 Mon Sep 17 00:00:00 2001 From: Alan Szepieniec Date: Mon, 12 Feb 2024 14:23:02 +0100 Subject: [PATCH 2/4] Derandomize flaky test Adds a new test in `mutator_set_kernel` that is identical to the flaky one discussed in #100, except everything is derandomized. 
That is to say, every source of randomness is actually pseudorandom and the seed is hardcoded. This test fails reliably. --- .../mutator_set/mutator_set_kernel.rs | 456 ++++++++++++++++++ 1 file changed, 456 insertions(+) diff --git a/src/util_types/mutator_set/mutator_set_kernel.rs b/src/util_types/mutator_set/mutator_set_kernel.rs index 400676ed8..cb034653b 100644 --- a/src/util_types/mutator_set/mutator_set_kernel.rs +++ b/src/util_types/mutator_set/mutator_set_kernel.rs @@ -562,9 +562,22 @@ mod accumulation_scheme_tests { use rand::prelude::*; use rand::Rng; + use tasm_lib::twenty_first::util_types::storage_vec::StorageVec; use twenty_first::shared_math::tip5::Tip5; use twenty_first::util_types::mmr::mmr_accumulator::MmrAccumulator; + use crate::config_models::network::Network; + use crate::models::blockchain::block::Block; + use crate::models::blockchain::transaction::neptune_coins::NeptuneCoins; + use crate::models::blockchain::transaction::utxo::Utxo; + use crate::models::blockchain::transaction::PublicAnnouncement; + use crate::models::state::wallet::utxo_notification_pool::UtxoNotifier; + use crate::models::state::wallet::WalletSecret; + use crate::models::state::UtxoReceiverData; + use crate::tests::shared::add_block; + use crate::tests::shared::get_mock_global_state; + use crate::tests::shared::get_mock_wallet_state; + use crate::tests::shared::make_mock_block_with_valid_pow; use crate::util_types::mutator_set::mutator_set_accumulator::MutatorSetAccumulator; use crate::util_types::mutator_set::mutator_set_trait::commit; use crate::util_types::mutator_set::mutator_set_trait::MutatorSet; @@ -1124,4 +1137,447 @@ mod accumulation_scheme_tests { "Membership proof must fail after removal" ); } + + #[tokio::test] + async fn flaky_mutator_set_test() { + let mut rng: StdRng = + SeedableRng::from_rng(thread_rng()).expect("failure lifting thread_rng to StdRng"); + // let seed: [u8; 32] = rng.gen(); + let seed = [ + 0xf4, 0xc2, 0x1c, 0xd0, 0x5a, 0xac, 0x99, 0xe7, 
0x3a, 0x1e, 0x29, 0x7f, 0x16, 0xc1, + 0x50, 0x5e, 0x1e, 0xd, 0x4b, 0x49, 0x51, 0x9c, 0x1b, 0xa0, 0x38, 0x3c, 0xd, 0x83, 0x29, + 0xdb, 0xab, 0xe2, + ]; + println!( + "seed: [{}]", + seed.iter().map(|h| format!("{:#x}", h)).join(", ") + ); + rng = SeedableRng::from_seed(seed); + + // Test various parts of the state update when a block contains multiple inputs and outputs + let network = Network::Alpha; + let genesis_wallet_state = get_mock_wallet_state(None, network).await; + let genesis_spending_key = genesis_wallet_state + .wallet_secret + .nth_generation_spending_key(0); + let genesis_state_lock = + get_mock_global_state(network, 3, Some(genesis_wallet_state.wallet_secret)).await; + + let wallet_secret_alice = WalletSecret::new_pseudorandom(rng.gen()); + let alice_spending_key = wallet_secret_alice.nth_generation_spending_key(0); + let alice_state_lock = get_mock_global_state(network, 3, Some(wallet_secret_alice)).await; + + let wallet_secret_bob = WalletSecret::new_pseudorandom(rng.gen()); + let bob_spending_key = wallet_secret_bob.nth_generation_spending_key(0); + let bob_state_lock = get_mock_global_state(network, 3, Some(wallet_secret_bob)).await; + + let genesis_block = Block::genesis_block(); + + let (mut block_1, cb_utxo, cb_output_randomness) = + make_mock_block_with_valid_pow(&genesis_block, None, genesis_spending_key.to_address()); + + // Send two outputs each to Alice and Bob, from genesis receiver + let fee = NeptuneCoins::one(); + let sender_randomness: Digest = rng.gen(); + let receiver_data_for_alice = vec![ + UtxoReceiverData { + public_announcement: PublicAnnouncement::default(), + receiver_privacy_digest: alice_spending_key.to_address().privacy_digest, + sender_randomness, + utxo: Utxo { + lock_script_hash: alice_spending_key.to_address().lock_script().hash(), + coins: NeptuneCoins::new(41).to_native_coins(), + }, + }, + UtxoReceiverData { + public_announcement: PublicAnnouncement::default(), + receiver_privacy_digest: 
alice_spending_key.to_address().privacy_digest, + sender_randomness, + utxo: Utxo { + lock_script_hash: alice_spending_key.to_address().lock_script().hash(), + coins: NeptuneCoins::new(59).to_native_coins(), + }, + }, + ]; + // Two outputs for Bob + let receiver_data_for_bob = vec![ + UtxoReceiverData { + public_announcement: PublicAnnouncement::default(), + receiver_privacy_digest: bob_spending_key.to_address().privacy_digest, + sender_randomness, + utxo: Utxo { + lock_script_hash: bob_spending_key.to_address().lock_script().hash(), + coins: NeptuneCoins::new(141).to_native_coins(), + }, + }, + UtxoReceiverData { + public_announcement: PublicAnnouncement::default(), + receiver_privacy_digest: bob_spending_key.to_address().privacy_digest, + sender_randomness, + utxo: Utxo { + lock_script_hash: bob_spending_key.to_address().lock_script().hash(), + coins: NeptuneCoins::new(59).to_native_coins(), + }, + }, + ]; + { + let tx_to_alice_and_bob = genesis_state_lock + .lock_guard_mut() + .await + .create_transaction( + [ + receiver_data_for_alice.clone(), + receiver_data_for_bob.clone(), + ] + .concat(), + fee, + ) + .await + .unwrap(); + + // Absorb and verify validity + block_1.accumulate_transaction(tx_to_alice_and_bob); + assert!(block_1.is_valid(&genesis_block)); + } + + println!("Accumulated transaction into block_1."); + println!( + "Transaction has {} inputs (removal records) and {} outputs (addition records)", + block_1.kernel.body.transaction.kernel.inputs.len(), + block_1.kernel.body.transaction.kernel.outputs.len() + ); + + // Update chain states + for state_lock in [&genesis_state_lock, &alice_state_lock, &bob_state_lock] { + let mut state = state_lock.lock_guard_mut().await; + add_block(&mut state, block_1.clone()).await.unwrap(); + state + .chain + .archival_state_mut() + .update_mutator_set(&block_1) + .await + .unwrap(); + } + + { + // Update wallets + let mut genesis_state = genesis_state_lock.lock_guard_mut().await; + genesis_state + .wallet_state + 
.expected_utxos + .add_expected_utxo( + cb_utxo, + cb_output_randomness, + genesis_spending_key.privacy_preimage, + UtxoNotifier::OwnMiner, + ) + .unwrap(); + genesis_state + .wallet_state + .update_wallet_state_with_new_block( + &genesis_block.kernel.body.mutator_set_accumulator, + &block_1, + ) + .await + .unwrap(); + assert_eq!( + 3, + genesis_state + .wallet_state + .wallet_db + .monitored_utxos() + .len(), "Genesis receiver must have 3 UTXOs after block 1: change from transaction, coinbase from block 1, and the spent premine UTXO" + ); + } + + { + let mut alice_state = alice_state_lock.lock_guard_mut().await; + for rec_data in receiver_data_for_alice { + alice_state + .wallet_state + .expected_utxos + .add_expected_utxo( + rec_data.utxo.clone(), + rec_data.sender_randomness, + alice_spending_key.privacy_preimage, + UtxoNotifier::Cli, + ) + .unwrap(); + } + alice_state + .wallet_state + .update_wallet_state_with_new_block( + &genesis_block.kernel.body.mutator_set_accumulator, + &block_1, + ) + .await + .unwrap(); + } + + { + let mut bob_state = bob_state_lock.lock_guard_mut().await; + for rec_data in receiver_data_for_bob { + bob_state + .wallet_state + .expected_utxos + .add_expected_utxo( + rec_data.utxo.clone(), + rec_data.sender_randomness, + bob_spending_key.privacy_preimage, + UtxoNotifier::Cli, + ) + .unwrap(); + } + bob_state + .wallet_state + .update_wallet_state_with_new_block( + &genesis_block.kernel.body.mutator_set_accumulator, + &block_1, + ) + .await + .unwrap(); + } + + // Now Alice should have a balance of 100 and Bob a balance of 200 + + assert_eq!( + NeptuneCoins::new(100), + alice_state_lock + .lock_guard() + .await + .get_wallet_status_for_tip() + .await + .synced_unspent_amount + ); + assert_eq!( + NeptuneCoins::new(200), + bob_state_lock + .lock_guard() + .await + .get_wallet_status_for_tip() + .await + .synced_unspent_amount + ); + + // Make two transactions: Alice sends two UTXOs to Genesis and Bob sends three UTXOs to genesis + let 
receiver_data_from_alice = vec![ + UtxoReceiverData { + utxo: Utxo { + lock_script_hash: genesis_spending_key.to_address().lock_script().hash(), + coins: NeptuneCoins::new(50).to_native_coins(), + }, + sender_randomness: rng.gen(), + receiver_privacy_digest: genesis_spending_key.to_address().privacy_digest, + public_announcement: PublicAnnouncement::default(), + }, + UtxoReceiverData { + utxo: Utxo { + lock_script_hash: genesis_spending_key.to_address().lock_script().hash(), + coins: NeptuneCoins::new(49).to_native_coins(), + }, + sender_randomness: rng.gen(), + receiver_privacy_digest: genesis_spending_key.to_address().privacy_digest, + public_announcement: PublicAnnouncement::default(), + }, + ]; + let tx_from_alice = alice_state_lock + .lock_guard_mut() + .await + .create_transaction(receiver_data_from_alice.clone(), NeptuneCoins::new(1)) + .await + .unwrap(); + let receiver_data_from_bob = vec![ + UtxoReceiverData { + utxo: Utxo { + lock_script_hash: genesis_spending_key.to_address().lock_script().hash(), + coins: NeptuneCoins::new(50).to_native_coins(), + }, + sender_randomness: rng.gen(), + receiver_privacy_digest: genesis_spending_key.to_address().privacy_digest, + public_announcement: PublicAnnouncement::default(), + }, + UtxoReceiverData { + utxo: Utxo { + lock_script_hash: genesis_spending_key.to_address().lock_script().hash(), + coins: NeptuneCoins::new(50).to_native_coins(), + }, + sender_randomness: rng.gen(), + receiver_privacy_digest: genesis_spending_key.to_address().privacy_digest, + public_announcement: PublicAnnouncement::default(), + }, + UtxoReceiverData { + utxo: Utxo { + lock_script_hash: genesis_spending_key.to_address().lock_script().hash(), + coins: NeptuneCoins::new(98).to_native_coins(), + }, + sender_randomness: rng.gen(), + receiver_privacy_digest: genesis_spending_key.to_address().privacy_digest, + public_announcement: PublicAnnouncement::default(), + }, + ]; + let tx_from_bob = bob_state_lock + .lock_guard_mut() + .await + 
.create_transaction(receiver_data_from_bob.clone(), NeptuneCoins::new(2)) + .await + .unwrap(); + + // Make block_2 with tx that contains: + // - 4 inputs: 2 from Alice and 2 from Bob + // - 6 outputs: 2 from Alice to Genesis, 3 from Bob to Genesis, and 1 coinbase to Genesis + let (mut block_2, cb_utxo_block_2, cb_sender_randomness_block_2) = + make_mock_block_with_valid_pow(&block_1, None, genesis_spending_key.to_address()); + block_2.accumulate_transaction(tx_from_alice); + assert_eq!(2, block_2.kernel.body.transaction.kernel.inputs.len()); + assert_eq!(3, block_2.kernel.body.transaction.kernel.outputs.len()); + + // This test is flaky! + // It fails roughly every 3 out of 10 runs. If you run with `-- --nocapture` then + // (on Alan's machine) it runs with a 100% success rate. But doing that *and* + // commenting out the next two print statements boosts the failure rate again. + println!("accumulated Alice's transaction into block; number of inputs: {}; number of outputs: {}", block_2.kernel.body.transaction.kernel.inputs.len(), block_2.kernel.body.transaction.kernel.outputs.len()); + println!( + "Transaction from Bob has {} inputs and {} outputs", + tx_from_bob.kernel.inputs.len(), + tx_from_bob.kernel.outputs.len() + ); + + block_2.accumulate_transaction(tx_from_bob); + + // Sanity checks + assert_eq!(4, block_2.kernel.body.transaction.kernel.inputs.len()); + assert_eq!(6, block_2.kernel.body.transaction.kernel.outputs.len()); + assert!(block_2.is_valid(&block_1)); + + // Update chain states + for state_lock in [&genesis_state_lock, &alice_state_lock, &bob_state_lock] { + let mut state = state_lock.lock_guard_mut().await; + + add_block(&mut state, block_2.clone()).await.unwrap(); + state + .chain + .archival_state_mut() + .update_mutator_set(&block_2) + .await + .unwrap(); + } + + // Update wallets and verify that Alice and Bob's balances are zero + alice_state_lock + .lock_guard_mut() + .await + .wallet_state + .update_wallet_state_with_new_block( + 
&block_1.kernel.body.mutator_set_accumulator, + &block_2, + ) + .await + .unwrap(); + bob_state_lock + .lock_guard_mut() + .await + .wallet_state + .update_wallet_state_with_new_block( + &block_1.kernel.body.mutator_set_accumulator, + &block_2, + ) + .await + .unwrap(); + assert!(alice_state_lock + .lock_guard() + .await + .get_wallet_status_for_tip() + .await + .synced_unspent_amount + .is_zero()); + assert!(bob_state_lock + .lock_guard() + .await + .get_wallet_status_for_tip() + .await + .synced_unspent_amount + .is_zero()); + + // Update genesis wallet and verify that all ingoing UTXOs are recorded + for rec_data in receiver_data_from_alice { + genesis_state_lock + .lock_guard_mut() + .await + .wallet_state + .expected_utxos + .add_expected_utxo( + rec_data.utxo.clone(), + rec_data.sender_randomness, + genesis_spending_key.privacy_preimage, + UtxoNotifier::Cli, + ) + .unwrap(); + } + for rec_data in receiver_data_from_bob { + genesis_state_lock + .lock_guard_mut() + .await + .wallet_state + .expected_utxos + .add_expected_utxo( + rec_data.utxo.clone(), + rec_data.sender_randomness, + genesis_spending_key.privacy_preimage, + UtxoNotifier::Cli, + ) + .unwrap(); + } + genesis_state_lock + .lock_guard_mut() + .await + .wallet_state + .expected_utxos + .add_expected_utxo( + cb_utxo_block_2, + cb_sender_randomness_block_2, + genesis_spending_key.privacy_preimage, + UtxoNotifier::Cli, + ) + .unwrap(); + genesis_state_lock + .lock_guard_mut() + .await + .wallet_state + .update_wallet_state_with_new_block( + &block_1.kernel.body.mutator_set_accumulator, + &block_2, + ) + .await + .unwrap(); + + // Verify that states and wallets can be updated successfully + assert_eq!( + 9, + genesis_state_lock.lock_guard().await + .wallet_state + .wallet_db + .monitored_utxos() + .len(), "Genesis receiver must have 9 UTXOs after block 2: 3 after block 1, and 6 added by block 2" + ); + + // Verify that mutator sets are updated correctly and that last block is block 2 + for state_lock in 
[&genesis_state_lock, &alice_state_lock, &bob_state_lock] { + let state = state_lock.lock_guard().await; + + assert_eq!( + block_2.kernel.body.mutator_set_accumulator, + state + .chain + .archival_state() + .archival_mutator_set + .ams() + .accumulator(), + "AMS must be correctly updated" + ); + assert_eq!( + block_2, + state.chain.archival_state().get_latest_block().await + ); + } + } } From bd15ae7a30c4418cdc3c9edcf193f5523269058d Mon Sep 17 00:00:00 2001 From: Alan Szepieniec Date: Mon, 12 Feb 2024 20:19:24 +0100 Subject: [PATCH 3/4] fix: Fix flaky mutator set test This commit fixes `accumulate_transaction` so that the flaky mutator set test does not fail anymore. Closes #100. --- src/models/blockchain/block/mod.rs | 39 ++-- .../blockchain/block/mutator_set_update.rs | 6 +- src/models/blockchain/transaction/mod.rs | 6 +- src/models/state/archival_state.rs | 46 +++-- src/models/state/mempool.rs | 13 +- src/models/state/wallet/mod.rs | 9 +- src/models/state/wallet/wallet_state.rs | 33 ++-- .../mutator_set/ms_membership_proof.rs | 9 +- .../mutator_set/mutator_set_kernel.rs | 179 ++---------------- src/util_types/mutator_set/removal_record.rs | 23 +-- 10 files changed, 113 insertions(+), 250 deletions(-) diff --git a/src/models/blockchain/block/mod.rs b/src/models/blockchain/block/mod.rs index 921755d79..7c1d8d358 100644 --- a/src/models/blockchain/block/mod.rs +++ b/src/models/blockchain/block/mod.rs @@ -215,8 +215,12 @@ impl Block { /// Merge a transaction into this block's transaction. /// The mutator set data must be valid in all inputs. 
- pub fn accumulate_transaction(&mut self, transaction: Transaction) { - // merge + pub fn accumulate_transaction( + &mut self, + transaction: Transaction, + old_mutator_set_accumulator: &MutatorSetAccumulator, + ) { + // merge transactions let merged_timestamp = BFieldElement::new(max( self.kernel.header.timestamp.value(), max( @@ -224,24 +228,37 @@ impl Block { transaction.kernel.timestamp.value(), ), )); + let new_transaction = self + .kernel + .body + .transaction + .clone() + .merge_with(transaction.clone()); - // accumulate - let mut next_mutator_set_accumulator = self.kernel.body.mutator_set_accumulator.clone(); - + // accumulate mutator set updates + // Can't use the current mutator set accumulator because it is in an in-between state. + let mut new_mutator_set_accumulator = old_mutator_set_accumulator.clone(); let mutator_set_update = MutatorSetUpdate::new( - transaction.kernel.inputs.clone(), - transaction.kernel.outputs.clone(), + [ + self.kernel.body.transaction.kernel.inputs.clone(), + transaction.kernel.inputs, + ] + .concat(), + [ + self.kernel.body.transaction.kernel.outputs.clone(), + transaction.kernel.outputs.clone(), + ] + .concat(), ); - let new_transaction = self.kernel.body.transaction.clone().merge_with(transaction); // Apply the mutator set update to get the `next_mutator_set_accumulator` mutator_set_update - .apply(&mut next_mutator_set_accumulator) + .apply(&mut new_mutator_set_accumulator) .expect("Mutator set mutation must work"); let block_body: BlockBody = BlockBody { transaction: new_transaction, - mutator_set_accumulator: next_mutator_set_accumulator.clone(), + mutator_set_accumulator: new_mutator_set_accumulator.clone(), lock_free_mmr_accumulator: self.kernel.body.lock_free_mmr_accumulator.clone(), block_mmr_accumulator: self.kernel.body.block_mmr_accumulator.clone(), uncle_blocks: self.kernel.body.uncle_blocks.clone(), @@ -560,7 +577,7 @@ mod block_tests { .unwrap(); assert!(new_tx.is_valid(), "Created tx must be valid"); - 
block_1.accumulate_transaction(new_tx); + block_1.accumulate_transaction(new_tx, &genesis_block.kernel.body.mutator_set_accumulator); assert!( block_1.is_valid(&genesis_block), "Block 1 must be valid after adding a transaction; previous mutator set hash: {} and next mutator set hash: {}", diff --git a/src/models/blockchain/block/mutator_set_update.rs b/src/models/blockchain/block/mutator_set_update.rs index 528d5471e..8e6f447a7 100644 --- a/src/models/blockchain/block/mutator_set_update.rs +++ b/src/models/blockchain/block/mutator_set_update.rs @@ -36,15 +36,11 @@ impl MutatorSetUpdate { let mut removal_records: Vec<&mut RemovalRecord> = removal_records.iter_mut().collect::>(); while let Some(addition_record) = addition_records.pop() { - let update_res = RemovalRecord::batch_update_from_addition( + RemovalRecord::batch_update_from_addition( &mut removal_records, &mut ms_accumulator.kernel, ); - if update_res.is_err() { - bail!("Failed to update removal records with addition record"); - } - ms_accumulator.add(&addition_record); } diff --git a/src/models/blockchain/transaction/mod.rs b/src/models/blockchain/transaction/mod.rs index d23db5af8..5ceddd737 100644 --- a/src/models/blockchain/transaction/mod.rs +++ b/src/models/blockchain/transaction/mod.rs @@ -109,15 +109,13 @@ impl Transaction { RemovalRecord::batch_update_from_addition( &mut block_removal_records, &mut msa_state.kernel, - ) - .expect("MS removal record update from add must succeed in wallet handler"); + ); // Batch update transaction's removal records RemovalRecord::batch_update_from_addition( &mut transaction_removal_records, &mut msa_state.kernel, - ) - .expect("MS removal record update from add must succeed in wallet handler"); + ); // Batch update primitive witness membership proofs if let Witness::Primitive(witness) = &mut self.witness { diff --git a/src/models/state/archival_state.rs b/src/models/state/archival_state.rs index 309dca2f7..d9419bf93 100644 --- a/src/models/state/archival_state.rs +++ 
b/src/models/state/archival_state.rs @@ -739,7 +739,7 @@ impl ArchivalState { RemovalRecord::batch_update_from_addition( &mut removal_records, &mut self.archival_mutator_set.ams_mut().kernel, - ).expect("MS removal record update from add must succeed in update_mutator_set as block should already be verified"); + ); // Add the element to the mutator set self.archival_mutator_set.ams_mut().add(&addition_record); @@ -976,7 +976,10 @@ mod archival_state_tests { ) .await .unwrap(); - mock_block_2.accumulate_transaction(sender_tx); + mock_block_2.accumulate_transaction( + sender_tx, + &mock_block_1.kernel.body.mutator_set_accumulator, + ); // Remove an element from the mutator set, verify that the active window DB is updated. add_block(&mut genesis_receiver_global_state, mock_block_2.clone()).await?; @@ -1101,7 +1104,14 @@ mod archival_state_tests { .await .unwrap(); - block_1a.accumulate_transaction(sender_tx); + block_1a.accumulate_transaction( + sender_tx, + &archival_state + .genesis_block + .kernel + .body + .mutator_set_accumulator, + ); assert!(block_1a.is_valid(&genesis_block)); @@ -1216,7 +1226,10 @@ mod archival_state_tests { .await .unwrap(); - next_block.accumulate_transaction(sender_tx); + next_block.accumulate_transaction( + sender_tx, + &previous_block.kernel.body.mutator_set_accumulator, + ); assert!( next_block.is_valid(&previous_block), @@ -1359,7 +1372,10 @@ mod archival_state_tests { .await .unwrap(); - block_1_a.accumulate_transaction(sender_tx); + block_1_a.accumulate_transaction( + sender_tx, + &genesis_block.kernel.body.mutator_set_accumulator, + ); // Block with signed transaction must validate assert!(block_1_a.is_valid(&genesis_block)); @@ -1452,7 +1468,10 @@ mod archival_state_tests { .unwrap(); // Absorb and verify validity - block_1.accumulate_transaction(tx_to_alice_and_bob); + block_1.accumulate_transaction( + tx_to_alice_and_bob, + &genesis_block.kernel.body.mutator_set_accumulator, + ); assert!(block_1.is_valid(&genesis_block)); } @@ 
-1643,22 +1662,11 @@ mod archival_state_tests { // - 6 outputs: 2 from Alice to Genesis, 3 from Bob to Genesis, and 1 coinbase to Genesis let (mut block_2, cb_utxo_block_2, cb_sender_randomness_block_2) = make_mock_block_with_valid_pow(&block_1, None, genesis_spending_key.to_address()); - block_2.accumulate_transaction(tx_from_alice); + block_2.accumulate_transaction(tx_from_alice, &block_1.kernel.body.mutator_set_accumulator); assert_eq!(2, block_2.kernel.body.transaction.kernel.inputs.len()); assert_eq!(3, block_2.kernel.body.transaction.kernel.outputs.len()); - // This test is flaky! - // It fails roughly every 3 out of 10 runs. If you run with `-- --nocapture` then - // (on Alan's machine) it runs with a 100% success rate. But doing that *and* - // commenting out the next two print statements boosts the failure rate again. - println!("accumulated Alice's transaction into block; number of inputs: {}; number of outputs: {}", block_2.kernel.body.transaction.kernel.inputs.len(), block_2.kernel.body.transaction.kernel.outputs.len()); - println!( - "Transaction from Bob has {} inputs and {} outputs", - tx_from_bob.kernel.inputs.len(), - tx_from_bob.kernel.outputs.len() - ); - - block_2.accumulate_transaction(tx_from_bob); + block_2.accumulate_transaction(tx_from_bob, &block_1.kernel.body.mutator_set_accumulator); // Sanity checks assert_eq!(4, block_2.kernel.body.transaction.kernel.inputs.len()); diff --git a/src/models/state/mempool.rs b/src/models/state/mempool.rs index c883248e9..43b8889d3 100644 --- a/src/models/state/mempool.rs +++ b/src/models/state/mempool.rs @@ -641,7 +641,8 @@ mod tests { // Create next block which includes preminer's transaction let (mut block_2, _, _) = make_mock_block(&block_1, None, premine_receiver_address); - block_2.accumulate_transaction(tx_by_preminer); + block_2 + .accumulate_transaction(tx_by_preminer, &block_1.kernel.body.mutator_set_accumulator); // Update the mempool with block 2 and verify that the mempool now only contains 
one tx assert_eq!(2, mempool.len()); @@ -680,7 +681,10 @@ mod tests { "tx_by_other_updated has mutator set hash: {}", tx_by_other_updated.kernel.mutator_set_hash.emojihash() ); - block_3_with_updated_tx.accumulate_transaction(tx_by_other_updated.clone()); + block_3_with_updated_tx.accumulate_transaction( + tx_by_other_updated.clone(), + &block_2.kernel.body.mutator_set_accumulator, + ); assert!( block_3_with_updated_tx.is_valid(&block_2), "Block with tx with updated mutator set data must be valid" @@ -702,7 +706,10 @@ mod tests { let (mut block_14, _, _) = make_mock_block(&previous_block, None, other_receiver_address); assert_eq!(Into::::into(14), block_14.kernel.header.height); tx_by_other_updated = mempool.get_transactions_for_block(usize::MAX)[0].clone(); - block_14.accumulate_transaction(tx_by_other_updated); + block_14.accumulate_transaction( + tx_by_other_updated, + &previous_block.kernel.body.mutator_set_accumulator, + ); assert!( block_14.is_valid(&previous_block), "Block with tx with updated mutator set data must be valid after 10 blocks have been mined" diff --git a/src/models/state/wallet/mod.rs b/src/models/state/wallet/mod.rs index d05f19da9..858ed52fb 100644 --- a/src/models/state/wallet/mod.rs +++ b/src/models/state/wallet/mod.rs @@ -757,7 +757,7 @@ mod wallet_tests { NeptuneCoins::zero(), msa_tip_previous.clone(), ); - next_block.accumulate_transaction(tx); + next_block.accumulate_transaction(tx, &msa_tip_previous); own_wallet_state .update_wallet_state_with_new_block(&msa_tip_previous.clone(), &next_block) @@ -848,7 +848,7 @@ mod wallet_tests { .await .unwrap(); - block_1.accumulate_transaction(valid_tx); + block_1.accumulate_transaction(valid_tx, &previous_msa); // Verify the validity of the merged transaction and block assert!(block_1.is_valid(&genesis_block)); @@ -1099,7 +1099,10 @@ mod wallet_tests { .create_transaction(vec![receiver_data_six.clone()], NeptuneCoins::new(4)) .await .unwrap(); - block_3_b.accumulate_transaction(tx_from_preminer); 
+ block_3_b.accumulate_transaction( + tx_from_preminer, + &block_2_b.kernel.body.mutator_set_accumulator, + ); assert!( block_3_b.is_valid(&block_2_b), "Block must be valid after accumulating txs" diff --git a/src/models/state/wallet/wallet_state.rs b/src/models/state/wallet/wallet_state.rs index 476547c4c..628ef4db1 100644 --- a/src/models/state/wallet/wallet_state.rs +++ b/src/models/state/wallet/wallet_state.rs @@ -321,7 +321,7 @@ impl WalletState { return Ok(()); } - // Find the membership proofs that were valid at the previous tip, as these all have + // Find the membership proofs that were valid at the previous tip. They have // to be updated to the mutator set of the new block. let mut valid_membership_proofs_and_own_utxo_count: HashMap< StrongUtxoKey, @@ -335,12 +335,12 @@ impl WalletState { { Some(ms_mp) => { debug!("Found valid mp for UTXO"); - let insert_ret = valid_membership_proofs_and_own_utxo_count.insert( + let replacement_success = valid_membership_proofs_and_own_utxo_count.insert( StrongUtxoKey::new(utxo_digest, ms_mp.auth_path_aocl.leaf_index), (ms_mp, i), ); assert!( - insert_ret.is_none(), + replacement_success.is_none(), "Strong key must be unique in wallet DB" ); } @@ -365,8 +365,8 @@ impl WalletState { } } - // Loop over all input UTXOs, applying all addition records. To - // a) update all existing MS membership proofs + // Loop over all input UTXOs, applying all addition records. 
In each iteration, + // a) Update all existing MS membership proofs // b) Register incoming transactions and derive their membership proofs let mut changed_mps = vec![]; let mut msa_state: MutatorSetAccumulator = current_mutator_set_accumulator.clone(); @@ -376,15 +376,7 @@ impl WalletState { let mut removal_records: Vec<&mut RemovalRecord> = removal_records.iter_mut().collect::>(); - for addition_record in new_block - .kernel - .body - .transaction - .kernel - .outputs - .clone() - .into_iter() - { + for addition_record in new_block.kernel.body.transaction.kernel.outputs.iter() { // Don't pull this declaration out of the for-loop since the hash map can grow // within this loop. let utxo_digests = valid_membership_proofs_and_own_utxo_count @@ -393,7 +385,7 @@ impl WalletState { .collect_vec(); { - let res: Result, Box> = + let updated_mp_indices: Result, Box> = MsMembershipProof::batch_update_from_addition( &mut valid_membership_proofs_and_own_utxo_count .values_mut() @@ -401,9 +393,9 @@ impl WalletState { .collect_vec(), &utxo_digests, &msa_state.kernel, - &addition_record, + addition_record, ); - match res { + match updated_mp_indices { Ok(mut indices_of_mutated_mps) => { changed_mps.append(&mut indices_of_mutated_mps) } @@ -412,12 +404,11 @@ impl WalletState { } // Batch update removal records to keep them valid after next addition - RemovalRecord::batch_update_from_addition(&mut removal_records, &mut msa_state.kernel) - .expect("MS removal record update from add must succeed in wallet handler"); + RemovalRecord::batch_update_from_addition(&mut removal_records, &mut msa_state.kernel); // If output UTXO belongs to us, add it to the list of monitored UTXOs and // add its membership proof to the list of managed membership proofs. 
- if addition_record_to_utxo_info.contains_key(&addition_record) { + if addition_record_to_utxo_info.contains_key(addition_record) { let utxo = addition_record_to_utxo_info[&addition_record].0.clone(); let sender_randomness = addition_record_to_utxo_info[&addition_record].1; let receiver_preimage = addition_record_to_utxo_info[&addition_record].2; @@ -467,7 +458,7 @@ impl WalletState { } // Update mutator set to bring it to the correct state for the next call to batch-update - msa_state.add(&addition_record); + msa_state.add(addition_record); } // sanity check diff --git a/src/util_types/mutator_set/ms_membership_proof.rs b/src/util_types/mutator_set/ms_membership_proof.rs index a118a9d63..d5f19188b 100644 --- a/src/util_types/mutator_set/ms_membership_proof.rs +++ b/src/util_types/mutator_set/ms_membership_proof.rs @@ -65,7 +65,8 @@ impl MsMembershipProof { )) } - /// Update a list of membership proofs in anticipation of an addition + /// Update a list of membership proofs in anticipation of an addition. If successful, + /// return (wrapped in an Ok) a vector of all indices of updated membership proofs. 
pub fn batch_update_from_addition>( membership_proofs: &mut [&mut Self], own_items: &[Digest], @@ -695,7 +696,6 @@ mod ms_proof_tests { archival_mutator_set.add(&addition_record); } - println!("Added {n} items."); // assert that own mp is valid assert!( @@ -742,8 +742,6 @@ mod ms_proof_tests { } } - println!("Removed {} items.", removal_records.len()); - // assert valid assert!( archival_mutator_set.verify(own_item.unwrap(), own_membership_proof.as_ref().unwrap()) @@ -765,8 +763,6 @@ mod ms_proof_tests { // - nah, we don't need them for anything anymore } - println!("Reverted {} removals.", reversions.len()); - // assert valid assert!( archival_mutator_set.verify(own_item.unwrap(), own_membership_proof.as_ref().unwrap()) @@ -1063,7 +1059,6 @@ mod ms_proof_tests { // assert!(previous_mutator_set.set_commitment.verify(own_item, self)); } } - println!("Added {n} items."); // revert additions let (_petrified, revertible) = addition_records.split_at(own_index + 1); diff --git a/src/util_types/mutator_set/mutator_set_kernel.rs b/src/util_types/mutator_set/mutator_set_kernel.rs index cb034653b..7dc53138e 100644 --- a/src/util_types/mutator_set/mutator_set_kernel.rs +++ b/src/util_types/mutator_set/mutator_set_kernel.rs @@ -144,20 +144,19 @@ impl> MutatorSetKernel { // if window slides, update filter // First update the inactive part of the SWBF, the SWBF MMR - let chunk: Chunk = self.swbf_active.slid_chunk(); - let chunk_digest: Digest = H::hash(&chunk); + let new_chunk: Chunk = self.swbf_active.slid_chunk(); + let chunk_digest: Digest = H::hash(&new_chunk); + let new_chunk_index = self.swbf_inactive.count_leaves(); self.swbf_inactive.append(chunk_digest); // ignore auth path // Then move window to the right, equivalent to moving values // inside window to the left. self.swbf_active.slide_window(); - let chunk_index_for_inserted_chunk = self.swbf_inactive.count_leaves() - 1; - // Return the chunk that was added to the inactive part of the SWBF. 
// This chunk is needed by the Archival mutator set. The Regular // mutator set can ignore it. - Some((chunk_index_for_inserted_chunk, chunk)) + Some((new_chunk_index, new_chunk)) } /// Remove a record and return the chunks that have been updated in this process, @@ -189,8 +188,10 @@ impl> MutatorSetKernel { .get_mut(&chunk_index) .unwrap_or_else(|| { panic!( - "Can't get chunk index {chunk_index} from dictionary! dictionary: {:?}", - new_target_chunks_clone.dictionary + "Can't get chunk index {chunk_index} from dictionary! dictionary: {:?}\nAOCL size: {}\nbatch index: {}", + new_target_chunks_clone.dictionary, + self.aocl.count_leaves(), + batch_index ) }); for index in indices { @@ -1140,8 +1141,8 @@ mod accumulation_scheme_tests { #[tokio::test] async fn flaky_mutator_set_test() { - let mut rng: StdRng = - SeedableRng::from_rng(thread_rng()).expect("failure lifting thread_rng to StdRng"); + // let mut rng: StdRng = + // SeedableRng::from_rng(thread_rng()).expect("failure lifting thread_rng to StdRng"); // let seed: [u8; 32] = rng.gen(); let seed = [ 0xf4, 0xc2, 0x1c, 0xd0, 0x5a, 0xac, 0x99, 0xe7, 0x3a, 0x1e, 0x29, 0x7f, 0x16, 0xc1, @@ -1152,7 +1153,7 @@ mod accumulation_scheme_tests { "seed: [{}]", seed.iter().map(|h| format!("{:#x}", h)).join(", ") ); - rng = SeedableRng::from_seed(seed); + let mut rng: StdRng = SeedableRng::from_seed(seed); // Test various parts of the state update when a block contains multiple inputs and outputs let network = Network::Alpha; @@ -1236,7 +1237,10 @@ mod accumulation_scheme_tests { .unwrap(); // Absorb and verify validity - block_1.accumulate_transaction(tx_to_alice_and_bob); + block_1.accumulate_transaction( + tx_to_alice_and_bob, + &genesis_block.kernel.body.mutator_set_accumulator, + ); assert!(block_1.is_valid(&genesis_block)); } @@ -1425,159 +1429,12 @@ mod accumulation_scheme_tests { // Make block_2 with tx that contains: // - 4 inputs: 2 from Alice and 2 from Bob // - 6 outputs: 2 from Alice to Genesis, 3 from Bob to 
Genesis, and 1 coinbase to Genesis - let (mut block_2, cb_utxo_block_2, cb_sender_randomness_block_2) = + let (mut block_2, _cb_utxo_block_2, _cb_sender_randomness_block_2) = make_mock_block_with_valid_pow(&block_1, None, genesis_spending_key.to_address()); - block_2.accumulate_transaction(tx_from_alice); + block_2.accumulate_transaction(tx_from_alice, &block_1.kernel.body.mutator_set_accumulator); assert_eq!(2, block_2.kernel.body.transaction.kernel.inputs.len()); assert_eq!(3, block_2.kernel.body.transaction.kernel.outputs.len()); - // This test is flaky! - // It fails roughly every 3 out of 10 runs. If you run with `-- --nocapture` then - // (on Alan's machine) it runs with a 100% success rate. But doing that *and* - // commenting out the next two print statements boosts the failure rate again. - println!("accumulated Alice's transaction into block; number of inputs: {}; number of outputs: {}", block_2.kernel.body.transaction.kernel.inputs.len(), block_2.kernel.body.transaction.kernel.outputs.len()); - println!( - "Transaction from Bob has {} inputs and {} outputs", - tx_from_bob.kernel.inputs.len(), - tx_from_bob.kernel.outputs.len() - ); - - block_2.accumulate_transaction(tx_from_bob); - - // Sanity checks - assert_eq!(4, block_2.kernel.body.transaction.kernel.inputs.len()); - assert_eq!(6, block_2.kernel.body.transaction.kernel.outputs.len()); - assert!(block_2.is_valid(&block_1)); - - // Update chain states - for state_lock in [&genesis_state_lock, &alice_state_lock, &bob_state_lock] { - let mut state = state_lock.lock_guard_mut().await; - - add_block(&mut state, block_2.clone()).await.unwrap(); - state - .chain - .archival_state_mut() - .update_mutator_set(&block_2) - .await - .unwrap(); - } - - // Update wallets and verify that Alice and Bob's balances are zero - alice_state_lock - .lock_guard_mut() - .await - .wallet_state - .update_wallet_state_with_new_block( - &block_1.kernel.body.mutator_set_accumulator, - &block_2, - ) - .await - .unwrap(); - 
bob_state_lock - .lock_guard_mut() - .await - .wallet_state - .update_wallet_state_with_new_block( - &block_1.kernel.body.mutator_set_accumulator, - &block_2, - ) - .await - .unwrap(); - assert!(alice_state_lock - .lock_guard() - .await - .get_wallet_status_for_tip() - .await - .synced_unspent_amount - .is_zero()); - assert!(bob_state_lock - .lock_guard() - .await - .get_wallet_status_for_tip() - .await - .synced_unspent_amount - .is_zero()); - - // Update genesis wallet and verify that all ingoing UTXOs are recorded - for rec_data in receiver_data_from_alice { - genesis_state_lock - .lock_guard_mut() - .await - .wallet_state - .expected_utxos - .add_expected_utxo( - rec_data.utxo.clone(), - rec_data.sender_randomness, - genesis_spending_key.privacy_preimage, - UtxoNotifier::Cli, - ) - .unwrap(); - } - for rec_data in receiver_data_from_bob { - genesis_state_lock - .lock_guard_mut() - .await - .wallet_state - .expected_utxos - .add_expected_utxo( - rec_data.utxo.clone(), - rec_data.sender_randomness, - genesis_spending_key.privacy_preimage, - UtxoNotifier::Cli, - ) - .unwrap(); - } - genesis_state_lock - .lock_guard_mut() - .await - .wallet_state - .expected_utxos - .add_expected_utxo( - cb_utxo_block_2, - cb_sender_randomness_block_2, - genesis_spending_key.privacy_preimage, - UtxoNotifier::Cli, - ) - .unwrap(); - genesis_state_lock - .lock_guard_mut() - .await - .wallet_state - .update_wallet_state_with_new_block( - &block_1.kernel.body.mutator_set_accumulator, - &block_2, - ) - .await - .unwrap(); - - // Verify that states and wallets can be updated successfully - assert_eq!( - 9, - genesis_state_lock.lock_guard().await - .wallet_state - .wallet_db - .monitored_utxos() - .len(), "Genesis receiver must have 9 UTXOs after block 2: 3 after block 1, and 6 added by block 2" - ); - - // Verify that mutator sets are updated correctly and that last block is block 2 - for state_lock in [&genesis_state_lock, &alice_state_lock, &bob_state_lock] { - let state = 
state_lock.lock_guard().await; - - assert_eq!( - block_2.kernel.body.mutator_set_accumulator, - state - .chain - .archival_state() - .archival_mutator_set - .ams() - .accumulator(), - "AMS must be correctly updated" - ); - assert_eq!( - block_2, - state.chain.archival_state().get_latest_block().await - ); - } + block_2.accumulate_transaction(tx_from_bob, &block_1.kernel.body.mutator_set_accumulator); } } diff --git a/src/util_types/mutator_set/removal_record.rs b/src/util_types/mutator_set/removal_record.rs index c0abe5db8..71f1f6772 100644 --- a/src/util_types/mutator_set/removal_record.rs +++ b/src/util_types/mutator_set/removal_record.rs @@ -133,15 +133,18 @@ pub struct RemovalRecord { } impl RemovalRecord { + /// Update a batch of removal records that are synced to a given mutator set, given + /// that that mutator set will be updated with an addition. (The addition record + /// does not matter; all necessary information is in the mutator set.) pub fn batch_update_from_addition>( removal_records: &mut [&mut Self], mutator_set: &mut MutatorSetKernel, - ) -> Result<(), Box> { + ) { let new_item_index = mutator_set.aocl.count_leaves(); // if window does not slide, do nothing if !MutatorSetKernel::::window_slides(new_item_index) { - return Ok(()); + return; } // window does slide @@ -236,8 +239,6 @@ impl RemovalRecord { new_chunk_digest, &mutator_set.swbf_inactive.get_peaks(), ); - - Ok(()) } pub fn batch_update_from_remove( @@ -474,18 +475,13 @@ mod removal_record_tests { let mp = accumulator.prove(item, sender_randomness, receiver_preimage); // Update all removal records from addition, then add the element - let update_res_rr = RemovalRecord::batch_update_from_addition( + RemovalRecord::batch_update_from_addition( &mut removal_records .iter_mut() .map(|x| &mut x.1) .collect::>(), &mut accumulator.kernel, ); - assert!( - update_res_rr.is_ok(), - "batch update must return OK, i = {}", - i - ); let update_res_mp = MsMembershipProof::batch_update_from_addition( &mut 
mps.iter_mut().collect::>(), &items, @@ -563,18 +559,13 @@ mod removal_record_tests { let mp = accumulator.prove(item, sender_randomness, receiver_preimage); // Update all removal records from addition, then add the element - let update_res_rr = RemovalRecord::batch_update_from_addition( + RemovalRecord::batch_update_from_addition( &mut removal_records .iter_mut() .map(|x| &mut x.1) .collect::>(), &mut accumulator.kernel, ); - assert!( - update_res_rr.is_ok(), - "batch update must return OK, i = {}", - i - ); let update_res_mp = MsMembershipProof::batch_update_from_addition( &mut mps.iter_mut().collect::>(), &items, From c85273fa8a8729859fec7bbb9d33cd3d3fd82c4b Mon Sep 17 00:00:00 2001 From: Alan Szepieniec Date: Mon, 12 Feb 2024 22:04:25 +0100 Subject: [PATCH 4/4] chore(mutator set): Drop generic type argument The dependence on the generic type argument `H` of trait `AlgebraicHasher` comes from having only the relatively slow Rescue-Prime and wanting to use the much faster Blake3 for tests. However, Tip5 is on par with Blake3 and used everywhere else in this codebase. 
--- src/mine_loop.rs | 6 +- src/models/blockchain/block/block_body.rs | 2 +- src/models/blockchain/block/mod.rs | 8 +- .../blockchain/block/mutator_set_update.rs | 26 +-- .../validity/correct_mutator_set_update.rs | 7 +- src/models/blockchain/transaction/mod.rs | 24 +-- .../transaction/transaction_kernel.rs | 11 +- .../validity/removal_records_integrity.rs | 2 +- .../tasm/compute_canonical_commitment.rs | 8 +- .../validity/tasm/compute_indices.rs | 12 +- .../tasm/hash_removal_record_indices.rs | 13 +- .../tasm/removal_records_integrity.rs | 4 +- .../validity/tasm/verify_aocl_membership.rs | 6 +- src/models/state/archival_state.rs | 16 +- src/models/state/mempool.rs | 2 +- src/models/state/mod.rs | 12 +- .../wallet/address/generation_address.rs | 5 +- src/models/state/wallet/monitored_utxo.rs | 9 +- .../state/wallet/utxo_notification_pool.rs | 10 +- src/models/state/wallet/wallet_state.rs | 17 +- src/models/state/wallet/wallet_status.rs | 3 +- src/tests/shared.rs | 20 +- src/util_types/mutator_set/active_window.rs | 64 +++--- src/util_types/mutator_set/addition_record.rs | 52 ++--- .../mutator_set/archival_mutator_set.rs | 141 ++++++------- .../mutator_set/chunk_dictionary.rs | 76 +++---- .../mutator_set/ms_membership_proof.rs | 153 +++++++------- .../mutator_set/mutator_set_accumulator.rs | 80 ++++---- .../mutator_set/mutator_set_kernel.rs | 188 +++++++++--------- .../mutator_set/mutator_set_trait.rs | 24 +-- src/util_types/mutator_set/removal_record.rs | 106 ++++------ .../mutator_set/rusty_archival_mutator_set.rs | 39 ++-- src/util_types/mutator_set/shared.rs | 42 ++-- src/util_types/test_shared/mutator_set.rs | 65 +++--- 34 files changed, 557 insertions(+), 696 deletions(-) diff --git a/src/mine_loop.rs b/src/mine_loop.rs index 158637b1f..8ce9a00c8 100644 --- a/src/mine_loop.rs +++ b/src/mine_loop.rs @@ -50,7 +50,7 @@ fn make_block_template( ) -> (BlockHeader, BlockBody) { let additions = transaction.kernel.outputs.clone(); let removals = 
transaction.kernel.inputs.clone(); - let mut next_mutator_set_accumulator: MutatorSetAccumulator = + let mut next_mutator_set_accumulator: MutatorSetAccumulator = previous_block.kernel.body.mutator_set_accumulator.clone(); // Apply the mutator set update to the mutator set accumulator @@ -176,7 +176,7 @@ fn make_coinbase_transaction( receiver_digest: Digest, wallet_secret: &WalletSecret, block_height: BlockHeight, - mutator_set_accumulator: MutatorSetAccumulator, + mutator_set_accumulator: MutatorSetAccumulator, ) -> (Transaction, Digest) { let sender_randomness: Digest = wallet_secret.generate_sender_randomness(block_height, receiver_digest); @@ -190,7 +190,7 @@ fn make_coinbase_transaction( .expect("Make coinbase transaction: failed to parse coin state as amount.") }) .sum(); - let coinbase_addition_record = commit::( + let coinbase_addition_record = commit( Hash::hash(coinbase_utxo), sender_randomness, receiver_digest, diff --git a/src/models/blockchain/block/block_body.rs b/src/models/blockchain/block/block_body.rs index a03010e6d..a69cd2dc2 100644 --- a/src/models/blockchain/block/block_body.rs +++ b/src/models/blockchain/block/block_body.rs @@ -37,7 +37,7 @@ pub struct BlockBody { /// The mutator set accumulator represents the UTXO set. It is simultaneously an /// accumulator (=> compact representation and membership proofs) and an anonymity /// construction (=> outputs from one transaction do not look like inputs to another). - pub mutator_set_accumulator: MutatorSetAccumulator, + pub mutator_set_accumulator: MutatorSetAccumulator, /// Lock-free UTXOs do not come with lock scripts and do not live in the mutator set. 
pub lock_free_mmr_accumulator: MmrAccumulator, diff --git a/src/models/blockchain/block/mod.rs b/src/models/blockchain/block/mod.rs index 7c1d8d358..e02b26e76 100644 --- a/src/models/blockchain/block/mod.rs +++ b/src/models/blockchain/block/mod.rs @@ -121,7 +121,7 @@ impl Block { } pub fn genesis_block() -> Self { - let mut genesis_mutator_set = MutatorSetAccumulator::::default(); + let mut genesis_mutator_set = MutatorSetAccumulator::default(); let mut ms_update = MutatorSetUpdate::default(); let premine_distribution = Self::premine_distribution(); @@ -141,7 +141,7 @@ impl Block { timestamp, public_announcements: vec![], coinbase: Some(total_premine_amount), - mutator_set_hash: MutatorSetAccumulator::::new().hash(), + mutator_set_hash: MutatorSetAccumulator::new().hash(), }, witness: Witness::Faith, }; @@ -157,7 +157,7 @@ impl Block { let receiver_digest = receiving_address.privacy_digest; // Add pre-mine UTXO to MutatorSet - let addition_record = commit::(utxo_digest, bad_randomness, receiver_digest); + let addition_record = commit(utxo_digest, bad_randomness, receiver_digest); ms_update.additions.push(addition_record); genesis_mutator_set.add(&addition_record); @@ -218,7 +218,7 @@ impl Block { pub fn accumulate_transaction( &mut self, transaction: Transaction, - old_mutator_set_accumulator: &MutatorSetAccumulator, + old_mutator_set_accumulator: &MutatorSetAccumulator, ) { // merge transactions let merged_timestamp = BFieldElement::new(max( diff --git a/src/models/blockchain/block/mutator_set_update.rs b/src/models/blockchain/block/mutator_set_update.rs index 8e6f447a7..ccaa376a3 100644 --- a/src/models/blockchain/block/mutator_set_update.rs +++ b/src/models/blockchain/block/mutator_set_update.rs @@ -1,25 +1,22 @@ -use anyhow::{bail, Result}; +use anyhow::Result; use serde::{Deserialize, Serialize}; -use crate::{ - models::blockchain::shared::Hash, - util_types::mutator_set::{ - addition_record::AdditionRecord, mutator_set_accumulator::MutatorSetAccumulator, - 
mutator_set_trait::MutatorSet, removal_record::RemovalRecord, - }, +use crate::util_types::mutator_set::{ + addition_record::AdditionRecord, mutator_set_accumulator::MutatorSetAccumulator, + mutator_set_trait::MutatorSet, removal_record::RemovalRecord, }; #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Default)] pub struct MutatorSetUpdate { // The ordering of the removal/addition records must match that of // the block. - pub removals: Vec>, + pub removals: Vec, pub additions: Vec, } impl MutatorSetUpdate { - pub fn new(removals: Vec>, additions: Vec) -> Self { + pub fn new(removals: Vec, additions: Vec) -> Self { Self { additions, removals, @@ -28,12 +25,12 @@ impl MutatorSetUpdate { /// Apply a mutator set update to a mutator set accumulator. Changes the input mutator set /// accumulator according to the provided additions and removals. - pub fn apply(&self, ms_accumulator: &mut MutatorSetAccumulator) -> Result<()> { + pub fn apply(&self, ms_accumulator: &mut MutatorSetAccumulator) -> Result<()> { let mut addition_records: Vec = self.additions.clone(); addition_records.reverse(); let mut removal_records = self.removals.clone(); removal_records.reverse(); - let mut removal_records: Vec<&mut RemovalRecord> = + let mut removal_records: Vec<&mut RemovalRecord> = removal_records.iter_mut().collect::>(); while let Some(addition_record) = addition_records.pop() { RemovalRecord::batch_update_from_addition( @@ -45,12 +42,7 @@ impl MutatorSetUpdate { } while let Some(removal_record) = removal_records.pop() { - let update_res = - RemovalRecord::batch_update_from_remove(&mut removal_records, removal_record); - - if update_res.is_err() { - bail!("Failed to update removal records with addition record"); - } + RemovalRecord::batch_update_from_remove(&mut removal_records, removal_record); ms_accumulator.remove(removal_record); } diff --git a/src/models/blockchain/block/validity/correct_mutator_set_update.rs 
b/src/models/blockchain/block/validity/correct_mutator_set_update.rs index 553dd5ed7..b76619f32 100644 --- a/src/models/blockchain/block/validity/correct_mutator_set_update.rs +++ b/src/models/blockchain/block/validity/correct_mutator_set_update.rs @@ -6,16 +6,13 @@ use tasm_lib::{ }; use crate::{ - models::{ - blockchain::shared::Hash, - consensus::{SecretWitness, SupportedClaim}, - }, + models::consensus::{SecretWitness, SupportedClaim}, util_types::mutator_set::mutator_set_accumulator::MutatorSetAccumulator, }; #[derive(Debug, Clone, BFieldCodec, GetSize, PartialEq, Eq, Serialize, Deserialize)] pub struct CorrectMutatorSetUpdateWitness { - previous_mutator_set_accumulator: MutatorSetAccumulator, + previous_mutator_set_accumulator: MutatorSetAccumulator, } impl SecretWitness for CorrectMutatorSetUpdateWitness { diff --git a/src/models/blockchain/transaction/mod.rs b/src/models/blockchain/transaction/mod.rs index 5ceddd737..c4a9186fb 100644 --- a/src/models/blockchain/transaction/mod.rs +++ b/src/models/blockchain/transaction/mod.rs @@ -58,10 +58,10 @@ pub struct TransactionPrimitiveWitness { pub input_lock_scripts: Vec, pub type_scripts: Vec, pub lock_script_witnesses: Vec>, - pub input_membership_proofs: Vec>, + pub input_membership_proofs: Vec, pub output_utxos: Vec, pub public_announcements: Vec, - pub mutator_set_accumulator: MutatorSetAccumulator, + pub mutator_set_accumulator: MutatorSetAccumulator, } #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, GetSize, BFieldCodec)] @@ -89,18 +89,18 @@ impl Transaction { /// witnesses the witness data can be and is updated. 
pub fn update_mutator_set_records( &mut self, - previous_mutator_set_accumulator: &MutatorSetAccumulator, + previous_mutator_set_accumulator: &MutatorSetAccumulator, block: &Block, ) -> Result<()> { - let mut msa_state: MutatorSetAccumulator = previous_mutator_set_accumulator.clone(); + let mut msa_state: MutatorSetAccumulator = previous_mutator_set_accumulator.clone(); let block_addition_records: Vec = block.kernel.body.transaction.kernel.outputs.clone(); - let mut transaction_removal_records: Vec> = self.kernel.inputs.clone(); - let mut transaction_removal_records: Vec<&mut RemovalRecord> = + let mut transaction_removal_records: Vec = self.kernel.inputs.clone(); + let mut transaction_removal_records: Vec<&mut RemovalRecord> = transaction_removal_records.iter_mut().collect(); let mut block_removal_records = block.kernel.body.transaction.kernel.inputs.clone(); block_removal_records.reverse(); - let mut block_removal_records: Vec<&mut RemovalRecord> = + let mut block_removal_records: Vec<&mut RemovalRecord> = block_removal_records.iter_mut().collect::>(); // Apply all addition records in the block @@ -136,16 +136,14 @@ impl Transaction { while let Some(removal_record) = block_removal_records.pop() { // Batch update block's removal records to keep them valid after next removal - RemovalRecord::batch_update_from_remove(&mut block_removal_records, removal_record) - .expect("MS removal record update from remove must succeed in wallet handler"); + RemovalRecord::batch_update_from_remove(&mut block_removal_records, removal_record); // batch update transaction's removal records // Batch update block's removal records to keep them valid after next removal RemovalRecord::batch_update_from_remove( &mut transaction_removal_records, removal_record, - ) - .expect("MS removal record update from remove must succeed in wallet handler"); + ); // Batch update primitive witness membership proofs if let Witness::Primitive(witness) = &mut self.witness { @@ -320,7 +318,7 @@ impl 
Transaction { /// window Bloom filter, and whether the MMR membership proofs are valid. pub fn is_confirmable_relative_to( &self, - mutator_set_accumulator: &MutatorSetAccumulator, + mutator_set_accumulator: &MutatorSetAccumulator, ) -> bool { self.kernel .inputs @@ -530,7 +528,7 @@ mod transaction_tests { coins: NeptuneCoins::new(42).to_native_coins(), lock_script_hash: LockScript::anyone_can_spend().hash(), }; - let ar = commit::(Hash::hash(&output_1), random(), random()); + let ar = commit(Hash::hash(&output_1), random(), random()); // Verify that a sane timestamp is returned. `make_mock_transaction` must follow // the correct time convention for this test to work. diff --git a/src/models/blockchain/transaction/transaction_kernel.rs b/src/models/blockchain/transaction/transaction_kernel.rs index 68a06f251..9e21c7e2b 100644 --- a/src/models/blockchain/transaction/transaction_kernel.rs +++ b/src/models/blockchain/transaction/transaction_kernel.rs @@ -13,12 +13,9 @@ use twenty_first::shared_math::{ }; use super::{neptune_coins::pseudorandom_amount, NeptuneCoins, PublicAnnouncement}; -use crate::{ - util_types::mutator_set::{ - addition_record::{pseudorandom_addition_record, AdditionRecord}, - removal_record::{pseudorandom_removal_record, RemovalRecord}, - }, - Hash, +use crate::util_types::mutator_set::{ + addition_record::{pseudorandom_addition_record, AdditionRecord}, + removal_record::{pseudorandom_removal_record, RemovalRecord}, }; pub fn pseudorandom_public_announcement(seed: [u8; 32]) -> PublicAnnouncement { @@ -30,7 +27,7 @@ pub fn pseudorandom_public_announcement(seed: [u8; 32]) -> PublicAnnouncement { #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, GetSize, BFieldCodec, TasmObject)] pub struct TransactionKernel { - pub inputs: Vec>, + pub inputs: Vec, // `outputs` contains the commitments (addition records) that go into the AOCL pub outputs: Vec, diff --git a/src/models/blockchain/transaction/validity/removal_records_integrity.rs 
b/src/models/blockchain/transaction/validity/removal_records_integrity.rs index e5b6b8dcf..6ab08eca7 100644 --- a/src/models/blockchain/transaction/validity/removal_records_integrity.rs +++ b/src/models/blockchain/transaction/validity/removal_records_integrity.rs @@ -40,7 +40,7 @@ use crate::{ )] pub struct RemovalRecordsIntegrityWitness { pub input_utxos: Vec, - pub membership_proofs: Vec>, + pub membership_proofs: Vec, pub aocl: MmrAccumulator, pub swbfi: MmrAccumulator, pub swbfa_hash: Digest, diff --git a/src/models/blockchain/transaction/validity/tasm/compute_canonical_commitment.rs b/src/models/blockchain/transaction/validity/tasm/compute_canonical_commitment.rs index e59d61f26..4ee18158a 100644 --- a/src/models/blockchain/transaction/validity/tasm/compute_canonical_commitment.rs +++ b/src/models/blockchain/transaction/validity/tasm/compute_canonical_commitment.rs @@ -45,7 +45,7 @@ impl BasicSnippet for ComputeCanonicalCommitment { } fn code(&self, library: &mut Library) -> Vec { - type MsMpH = MsMembershipProof; + type MsMpH = MsMembershipProof; let mp_to_sr = tasm_lib::field!(MsMpH::sender_randomness); let mp_to_rp = tasm_lib::field!(MsMpH::receiver_preimage); let commit = library.import(Box::new(Commit)); @@ -135,7 +135,7 @@ impl Function for ComputeCanonicalCommitment { } // decode object - let membership_proof = *MsMembershipProof::::decode(&encoding).unwrap(); + let membership_proof = *MsMembershipProof::decode(&encoding).unwrap(); // compute commitment println!("receiver_preimage: {}", membership_proof.receiver_preimage); @@ -146,7 +146,7 @@ impl Function for ComputeCanonicalCommitment { membership_proof.sender_randomness ); println!("\nitem:\n{}", item); - let c = commit::(item, membership_proof.sender_randomness, receiver_digest); + let c = commit(item, membership_proof.sender_randomness, receiver_digest); // push onto stack stack.push(mp_pointer); @@ -165,7 +165,7 @@ impl Function for ComputeCanonicalCommitment { let mut rng: StdRng = 
SeedableRng::from_seed(seed); // generate random ms membership proof object - let membership_proof = pseudorandom_mutator_set_membership_proof::(rng.gen()); + let membership_proof = pseudorandom_mutator_set_membership_proof(rng.gen()); // populate memory, with the size of the encoding prepended let address = BFieldElement::new(rng.next_u64() % (1 << 20)); diff --git a/src/models/blockchain/transaction/validity/tasm/compute_indices.rs b/src/models/blockchain/transaction/validity/tasm/compute_indices.rs index f4e4cc415..470bbee9f 100644 --- a/src/models/blockchain/transaction/validity/tasm/compute_indices.rs +++ b/src/models/blockchain/transaction/validity/tasm/compute_indices.rs @@ -46,7 +46,7 @@ impl BasicSnippet for ComputeIndices { } fn code(&self, library: &mut Library) -> Vec { - type MsMpH = MsMembershipProof; + type MsMpH = MsMembershipProof; let mp_to_sr = tasm_lib::field!(MsMpH::sender_randomness); let mp_to_rp = tasm_lib::field!(MsMpH::receiver_preimage); let mp_to_ap = tasm_lib::field!(MsMpH::auth_path_aocl); @@ -159,7 +159,7 @@ impl Function for ComputeIndices { for i in 0..size { sequence.push(*memory.get(&BFieldElement::new(2u64 + i)).unwrap()); } - let msmp = *MsMembershipProof::::decode(&sequence).unwrap(); + let msmp = *MsMembershipProof::decode(&sequence).unwrap(); let leaf_index = msmp.auth_path_aocl.leaf_index; let leaf_index_hi = leaf_index >> 32; let leaf_index_lo = leaf_index & (u32::MAX as u64); @@ -218,8 +218,7 @@ impl Function for ComputeIndices { let mut rng: StdRng = SeedableRng::from_seed(seed); - let mut msmp = - pseudorandom_mutator_set_membership_proof::(rand::Rng::gen(&mut rng)); + let mut msmp = pseudorandom_mutator_set_membership_proof(rand::Rng::gen(&mut rng)); msmp.auth_path_aocl.leaf_index = rng.next_u32() as u64; let msmp_encoded = twenty_first::shared_math::bfield_codec::BFieldCodec::encode(&msmp); @@ -271,7 +270,6 @@ mod tests { use triton_vm::prelude::NonDeterminism; use twenty_first::shared_math::bfield_codec::BFieldCodec; 
- use crate::models::blockchain::shared::Hash; use crate::util_types::mutator_set::mutator_set_kernel::get_swbf_indices; use super::*; @@ -292,7 +290,7 @@ mod tests { // sample membership proofs let membership_proofs = (0..num_items) - .map(|_| pseudorandom_mutator_set_membership_proof::(rng.gen())) + .map(|_| pseudorandom_mutator_set_membership_proof(rng.gen())) .collect_vec(); // sample items @@ -400,7 +398,7 @@ mod tests { .into_iter() .zip(membership_proofs) .map(|(item, mp)| { - get_swbf_indices::( + get_swbf_indices( item, mp.sender_randomness, mp.receiver_preimage, diff --git a/src/models/blockchain/transaction/validity/tasm/hash_removal_record_indices.rs b/src/models/blockchain/transaction/validity/tasm/hash_removal_record_indices.rs index 2f0cf6433..56271ce8f 100644 --- a/src/models/blockchain/transaction/validity/tasm/hash_removal_record_indices.rs +++ b/src/models/blockchain/transaction/validity/tasm/hash_removal_record_indices.rs @@ -40,7 +40,7 @@ impl BasicSnippet for HashRemovalRecordIndices { } fn code(&self, library: &mut Library) -> Vec { - type Rrh = RemovalRecord; + type Rrh = RemovalRecord; let rr_to_ais_with_size = tasm_lib::field_with_size!(Rrh::absolute_indices); let hash_varlen = library.import(Box::new(HashVarlen)); let entrypoint = self.entrypoint(); @@ -79,7 +79,7 @@ impl Function for HashRemovalRecordIndices { for i in 0..size { encoding.push(*memory.get(&(address + BFieldElement::new(i))).unwrap()); } - let removal_record = *RemovalRecord::::decode(&encoding).unwrap(); + let removal_record = *RemovalRecord::decode(&encoding).unwrap(); // hash absolute index set let digest = Hash::hash_varlen(&removal_record.absolute_indices.encode()); @@ -98,7 +98,7 @@ impl Function for HashRemovalRecordIndices { _bench_case: Option, ) -> FunctionInitialState { let mut rng: StdRng = SeedableRng::from_seed(seed); - let removal_record = pseudorandom_removal_record::(rng.gen()); + let removal_record = pseudorandom_removal_record(rng.gen()); let address: 
BFieldElement = BFieldElement::new(rng.gen_range(2..(1 << 20))); let mut memory: HashMap = HashMap::new(); @@ -160,7 +160,7 @@ mod tests { // generate removal records list let num_removal_records = 2; let removal_records = (0..num_removal_records) - .map(|_| pseudorandom_removal_record::(rng.gen())) + .map(|_| pseudorandom_removal_record(rng.gen())) .collect_vec(); let address = BFieldElement::new(rng.gen_range(2..(1 << 20))); @@ -173,7 +173,7 @@ mod tests { // populate memory let mut memory: HashMap = HashMap::new(); let removal_records_encoded = removal_records.encode(); - Vec::>::decode(&removal_records_encoded).unwrap(); + Vec::::decode(&removal_records_encoded).unwrap(); for (i, v) in removal_records_encoded.iter().enumerate() { memory.insert(address + BFieldElement::new(i as u64), *v); } @@ -240,8 +240,7 @@ mod tests { .unwrap(), ); } - read_removal_records - .push(*RemovalRecord::::decode(&removal_record_encoding).unwrap()); + read_removal_records.push(*RemovalRecord::decode(&removal_record_encoding).unwrap()); } // assert equality of removal records lists diff --git a/src/models/blockchain/transaction/validity/tasm/removal_records_integrity.rs b/src/models/blockchain/transaction/validity/tasm/removal_records_integrity.rs index 156d4109f..5a64718da 100644 --- a/src/models/blockchain/transaction/validity/tasm/removal_records_integrity.rs +++ b/src/models/blockchain/transaction/validity/tasm/removal_records_integrity.rs @@ -129,7 +129,7 @@ impl CompiledProgram for RemovalRecordsIntegrity { .iter() .zip(removal_record_integrity_witness.membership_proofs.iter()) .map(|(&item, msmp)| { - AbsoluteIndexSet::new(&get_swbf_indices::( + AbsoluteIndexSet::new(&get_swbf_indices( item, msmp.sender_randomness, msmp.receiver_preimage, @@ -157,7 +157,7 @@ impl CompiledProgram for RemovalRecordsIntegrity { .zip(removal_record_integrity_witness.membership_proofs.iter()) .map(|(item, msmp)| { ( - commit::( + commit( item, msmp.sender_randomness, 
msmp.receiver_preimage.hash::(), diff --git a/src/models/blockchain/transaction/validity/tasm/verify_aocl_membership.rs b/src/models/blockchain/transaction/validity/tasm/verify_aocl_membership.rs index 12148b5e6..0da0b4be7 100644 --- a/src/models/blockchain/transaction/validity/tasm/verify_aocl_membership.rs +++ b/src/models/blockchain/transaction/validity/tasm/verify_aocl_membership.rs @@ -57,7 +57,7 @@ impl BasicSnippet for VerifyAoclMembership { // We do not need to use get field for MmrMembershipProof because // it has a custom implementation of BFieldCodec. However, we do // need it for MsMembershipProof. - type MsMpH = MsMembershipProof; + type MsMpH = MsMembershipProof; type MmrMpH = MmrMembershipProof; let msmp_to_mmrmp = tasm_lib::field!(MsMpH::auth_path_aocl); let mmr_mp_to_li = tasm_lib::field!(MmrMpH::leaf_index); @@ -164,7 +164,7 @@ impl Function for VerifyAoclMembership { for i in 0..mp_size { mp_encoding.push(*memory.get(&(mp_ptr + BFieldElement::new(i))).unwrap()); } - let memproof = *MsMembershipProof::::decode(&mp_encoding).unwrap(); + let memproof = *MsMembershipProof::decode(&mp_encoding).unwrap(); println!("memproof li: {}", memproof.auth_path_aocl.leaf_index); println!( "memproof ap: {}", @@ -199,7 +199,7 @@ impl Function for VerifyAoclMembership { let leaf_index = rng.next_u64() % num_leafs; let leaf = mmr.get_leaf(leaf_index); let (mmr_mp, peaks) = mmr.prove_membership(leaf_index); - let mut msmp = pseudorandom_mutator_set_membership_proof::(rng.gen()); + let mut msmp = pseudorandom_mutator_set_membership_proof(rng.gen()); msmp.auth_path_aocl = mmr_mp; // populate memory diff --git a/src/models/state/archival_state.rs b/src/models/state/archival_state.rs index d9419bf93..3b2096d7e 100644 --- a/src/models/state/archival_state.rs +++ b/src/models/state/archival_state.rs @@ -20,7 +20,6 @@ use crate::config_models::data_directory::DataDirectory; use crate::database::{create_db_if_missing, NeptuneLevelDb}; use 
crate::models::blockchain::block::block_header::{BlockHeader, PROOF_OF_WORK_COUNT_U32_SIZE}; use crate::models::blockchain::block::{block_height::BlockHeight, Block}; -use crate::models::blockchain::shared::Hash; use crate::models::database::{ BlockFileLocation, BlockIndexKey, BlockIndexValue, BlockRecord, FileRecord, LastFileRecord, }; @@ -60,7 +59,7 @@ pub struct ArchivalState { // The archival mutator set is persisted to one database that also records a sync label, // which corresponds to the hash of the block to which the mutator set is synced. - pub archival_mutator_set: RustyArchivalMutatorSet, + pub archival_mutator_set: RustyArchivalMutatorSet, } // The only reason we have this `Debug` implementation is that it's required @@ -95,7 +94,7 @@ impl ArchivalState { /// Initialize an `ArchivalMutatorSet` by opening or creating its databases. pub async fn initialize_mutator_set( data_dir: &DataDirectory, - ) -> Result> { + ) -> Result { let ms_db_dir_path = data_dir.mutator_set_database_dir_path(); DataDirectory::create_dir_if_not_exists(&ms_db_dir_path)?; @@ -118,7 +117,7 @@ impl ArchivalState { } }; - let mut archival_set = RustyArchivalMutatorSet::::connect(db); + let mut archival_set = RustyArchivalMutatorSet::connect(db); archival_set.restore_or_new(); Ok(archival_set) @@ -190,7 +189,7 @@ impl ArchivalState { pub async fn new( data_dir: DataDirectory, block_index_db: NeptuneLevelDb, - mut archival_mutator_set: RustyArchivalMutatorSet, + mut archival_mutator_set: RustyArchivalMutatorSet, ) -> Self { let genesis_block = Box::new(Block::genesis_block()); @@ -730,7 +729,7 @@ impl ArchivalState { .inputs .clone(); removal_records.reverse(); - let mut removal_records: Vec<&mut RemovalRecord> = + let mut removal_records: Vec<&mut RemovalRecord> = removal_records.iter_mut().collect::>(); // Add items, thus adding the output UTXOs to the mutator set @@ -748,10 +747,7 @@ impl ArchivalState { // Remove items, thus removing the input UTXOs from the mutator set while let 
Some(removal_record) = removal_records.pop() { // Batch-update all removal records to keep them valid after next removal - RemovalRecord::batch_update_from_remove( - &mut removal_records, - removal_record, - ).expect("MS removal record update from remove must succeed in update_mutator_set as block should already be verified"); + RemovalRecord::batch_update_from_remove(&mut removal_records, removal_record); // Remove the element from the mutator set self.archival_mutator_set.ams_mut().remove(removal_record); diff --git a/src/models/state/mempool.rs b/src/models/state/mempool.rs index 43b8889d3..9209d2b74 100644 --- a/src/models/state/mempool.rs +++ b/src/models/state/mempool.rs @@ -294,7 +294,7 @@ impl Mempool { /// transactions that were not removed due to being included in the block. pub fn update_with_block( &mut self, - previous_mutator_set_accumulator: MutatorSetAccumulator, + previous_mutator_set_accumulator: MutatorSetAccumulator, block: &Block, ) { // Check if the sets of inserted indices in the block transaction diff --git a/src/models/state/mod.rs b/src/models/state/mod.rs index 87f96493d..d63af1997 100644 --- a/src/models/state/mod.rs +++ b/src/models/state/mod.rs @@ -383,14 +383,14 @@ impl GlobalState { + fee; // todo: accomodate a future change whereby this function also returns the matching spending keys - let spendable_utxos_and_mps: Vec<(Utxo, LockScript, MsMembershipProof)> = self + let spendable_utxos_and_mps: Vec<(Utxo, LockScript, MsMembershipProof)> = self .wallet_state .allocate_sufficient_input_funds_from_lock(total_spend, bc_tip.hash()) .await?; // Create all removal records. These must be relative to the block tip. 
let msa_tip = &bc_tip.kernel.body.mutator_set_accumulator; - let mut inputs: Vec> = vec![]; + let mut inputs: Vec = vec![]; let mut input_amount: NeptuneCoins = NeptuneCoins::zero(); for (spendable_utxo, _lock_script, mp) in spendable_utxos_and_mps.iter() { let removal_record = msa_tip.kernel.drop(Hash::hash(spendable_utxo), mp); @@ -402,7 +402,7 @@ impl GlobalState { let mut transaction_outputs: Vec = vec![]; let mut output_utxos: Vec = vec![]; for rd in receiver_data.iter() { - let addition_record = commit::( + let addition_record = commit( Hash::hash(&rd.utxo), rd.sender_randomness, rd.receiver_privacy_digest, @@ -437,7 +437,7 @@ impl GlobalState { .wallet_state .wallet_secret .generate_sender_randomness(bc_tip.kernel.header.height, receiver_digest); - let change_addition_record = commit::( + let change_addition_record = commit( Hash::hash(&change_utxo), change_sender_randomness, receiver_digest, @@ -772,7 +772,7 @@ impl GlobalState { .await?; let previous_mutator_set = match maybe_revert_block_predecessor { Some(block) => block.kernel.body.mutator_set_accumulator, - None => MutatorSetAccumulator::::default(), + None => MutatorSetAccumulator::default(), }; debug!("MUTXO confirmed at height {confirming_block_height}, reverting for height {} on abandoned chain", revert_block.kernel.header.height); @@ -824,7 +824,7 @@ impl GlobalState { .await?; let mut block_msa = match maybe_apply_block_predecessor { Some(block) => block.kernel.body.mutator_set_accumulator, - None => MutatorSetAccumulator::::default(), + None => MutatorSetAccumulator::default(), }; let addition_records = apply_block.kernel.body.transaction.kernel.outputs; let removal_records = apply_block.kernel.body.transaction.kernel.inputs; diff --git a/src/models/state/wallet/address/generation_address.rs b/src/models/state/wallet/address/generation_address.rs index aec7a8559..f38999a0b 100644 --- a/src/models/state/wallet/address/generation_address.rs +++ 
b/src/models/state/wallet/address/generation_address.rs @@ -196,8 +196,7 @@ impl SpendingKey { // Note: the commitment is computed in the same way as in the mutator set. let receiver_preimage = self.privacy_preimage; let receiver_digest = receiver_preimage.hash::(); - let addition_record = - commit::(Hash::hash(&utxo), sender_randomness, receiver_digest); + let addition_record = commit(Hash::hash(&utxo), sender_randomness, receiver_digest); // push to list received_utxos_with_randomnesses.push(( @@ -592,7 +591,7 @@ mod test_generation_addresses { announced_txs[0].clone(); assert_eq!(utxo, read_utxo); - let expected_addition_record = commit::( + let expected_addition_record = commit( Hash::hash(&utxo), sender_randomness, receiving_address.privacy_digest, diff --git a/src/models/state/wallet/monitored_utxo.rs b/src/models/state/wallet/monitored_utxo.rs index e0661ab2e..9fae7b3b0 100644 --- a/src/models/state/wallet/monitored_utxo.rs +++ b/src/models/state/wallet/monitored_utxo.rs @@ -5,7 +5,6 @@ use std::{collections::VecDeque, time::Duration}; use crate::{ models::{blockchain::block::block_height::BlockHeight, state::archival_state::ArchivalState}, util_types::mutator_set::ms_membership_proof::MsMembershipProof, - Hash, }; use serde::{Deserialize, Serialize}; use twenty_first::shared_math::tip5::Digest; @@ -17,7 +16,7 @@ pub struct MonitoredUtxo { pub utxo: Utxo, // Mapping from block digest to membership proof - pub blockhash_to_membership_proof: VecDeque<(Digest, MsMembershipProof)>, + pub blockhash_to_membership_proof: VecDeque<(Digest, MsMembershipProof)>, pub number_of_mps_per_utxo: usize, @@ -52,7 +51,7 @@ impl MonitoredUtxo { pub fn add_membership_proof_for_tip( &mut self, block_digest: Digest, - updated_membership_proof: MsMembershipProof, + updated_membership_proof: MsMembershipProof, ) { while self.blockhash_to_membership_proof.len() >= self.number_of_mps_per_utxo { self.blockhash_to_membership_proof.pop_back(); @@ -65,14 +64,14 @@ impl MonitoredUtxo { pub 
fn get_membership_proof_for_block( &self, block_digest: Digest, - ) -> Option> { + ) -> Option { self.blockhash_to_membership_proof .iter() .find(|x| x.0 == block_digest) .map(|x| x.1.clone()) } - pub fn get_latest_membership_proof_entry(&self) -> Option<(Digest, MsMembershipProof)> { + pub fn get_latest_membership_proof_entry(&self) -> Option<(Digest, MsMembershipProof)> { self.blockhash_to_membership_proof.iter().next().cloned() } diff --git a/src/models/state/wallet/utxo_notification_pool.rs b/src/models/state/wallet/utxo_notification_pool.rs index deebafb07..1e1573390 100644 --- a/src/models/state/wallet/utxo_notification_pool.rs +++ b/src/models/state/wallet/utxo_notification_pool.rs @@ -49,7 +49,7 @@ impl ExpectedUtxo { received_from: UtxoNotifier, ) -> Self { Self { - addition_record: commit::( + addition_record: commit( Hash::hash(&utxo), sender_randomness, receiver_preimage.hash::(), @@ -228,7 +228,7 @@ impl UtxoNotificationPool { // TODO: Add check that we can actually unlock the UTXO's lock script. // Also check that receiver preimage belongs to us etc. // Or should this be the caller's responsibility? - let addition_record = commit::( + let addition_record = commit( Hash::hash(&utxo), sender_randomness, receiver_preimage.hash::(), @@ -377,7 +377,7 @@ mod wallet_state_tests { let sender_randomness: Digest = random(); let receiver_preimage: Digest = random(); let peer_instance_id: InstanceId = random(); - let expected_addition_record = commit::( + let expected_addition_record = commit( Hash::hash(&mock_utxo), sender_randomness, receiver_preimage.hash::(), @@ -405,7 +405,7 @@ mod wallet_state_tests { assert_eq!(1, ret_with_tx_containing_utxo.len()); // Call scan but with another input. Verify that it returns the empty list - let another_addition_record = commit::( + let another_addition_record = commit( Hash::hash(&mock_utxo), random(), receiver_preimage.hash::(), @@ -501,7 +501,7 @@ mod wallet_state_tests { // notification can be stored. 
notification_pool .mark_as_received( - commit::( + commit( Hash::hash(&mock_utxo), first_sender_randomness, receiver_preimage.hash::(), diff --git a/src/models/state/wallet/wallet_state.rs b/src/models/state/wallet/wallet_state.rs index 628ef4db1..e73ba2e95 100644 --- a/src/models/state/wallet/wallet_state.rs +++ b/src/models/state/wallet/wallet_state.rs @@ -279,7 +279,7 @@ impl WalletState { /// is valid and that the wallet state is not up to date yet. pub async fn update_wallet_state_with_new_block( &mut self, - current_mutator_set_accumulator: &MutatorSetAccumulator, + current_mutator_set_accumulator: &MutatorSetAccumulator, new_block: &Block, ) -> Result<()> { let transaction: Transaction = new_block.kernel.body.transaction.clone(); @@ -325,7 +325,7 @@ impl WalletState { // to be updated to the mutator set of the new block. let mut valid_membership_proofs_and_own_utxo_count: HashMap< StrongUtxoKey, - (MsMembershipProof, u64), + (MsMembershipProof, u64), > = HashMap::default(); for (i, monitored_utxo) in monitored_utxos.iter() { let utxo_digest = Hash::hash(&monitored_utxo.utxo); @@ -369,11 +369,11 @@ impl WalletState { // a) Update all existing MS membership proofs // b) Register incoming transactions and derive their membership proofs let mut changed_mps = vec![]; - let mut msa_state: MutatorSetAccumulator = current_mutator_set_accumulator.clone(); + let mut msa_state: MutatorSetAccumulator = current_mutator_set_accumulator.clone(); let mut removal_records = transaction.kernel.inputs.clone(); removal_records.reverse(); - let mut removal_records: Vec<&mut RemovalRecord> = + let mut removal_records: Vec<&mut RemovalRecord> = removal_records.iter_mut().collect::>(); for addition_record in new_block.kernel.body.transaction.kernel.outputs.iter() { @@ -496,8 +496,7 @@ impl WalletState { }; // Batch update removal records to keep them valid after next removal - RemovalRecord::batch_update_from_remove(&mut removal_records, removal_record) - .expect("MS removal record 
update from remove must succeed in wallet handler"); + RemovalRecord::batch_update_from_remove(&mut removal_records, removal_record); // TODO: We mark membership proofs as spent, so they can be deleted. But // how do we ensure that we can recover them in case of a fork? For now we maintain @@ -660,7 +659,7 @@ impl WalletState { &self, requested_amount: NeptuneCoins, tip_digest: Digest, - ) -> Result)>> { + ) -> Result> { // TODO: Should return the correct spending keys associated with the UTXOs // We only attempt to generate a transaction using those UTXOs that have up-to-date // membership proofs. @@ -676,7 +675,7 @@ impl WalletState { tip_digest.emojihash()); } - let mut ret: Vec<(Utxo, LockScript, MsMembershipProof)> = vec![]; + let mut ret: Vec<(Utxo, LockScript, MsMembershipProof)> = vec![]; let mut allocated_amount = NeptuneCoins::zero(); let lock_script = self .wallet_secret @@ -703,7 +702,7 @@ impl WalletState { &self, requested_amount: NeptuneCoins, tip_digest: Digest, - ) -> Result)>> { + ) -> Result> { self.allocate_sufficient_input_funds_from_lock(requested_amount, tip_digest) .await } diff --git a/src/models/state/wallet/wallet_status.rs b/src/models/state/wallet/wallet_status.rs index 467a125f4..ebf491db9 100644 --- a/src/models/state/wallet/wallet_status.rs +++ b/src/models/state/wallet/wallet_status.rs @@ -5,7 +5,6 @@ use serde::{Deserialize, Serialize}; use crate::models::blockchain::transaction::{neptune_coins::NeptuneCoins, utxo::Utxo}; use crate::util_types::mutator_set::ms_membership_proof::MsMembershipProof; -use crate::Hash; #[derive(Clone, Debug, Deserialize, Serialize)] pub struct WalletStatusElement(pub u64, pub Utxo); @@ -20,7 +19,7 @@ impl Display for WalletStatusElement { #[derive(Clone, Debug, Deserialize, Serialize)] pub struct WalletStatus { pub synced_unspent_amount: NeptuneCoins, - pub synced_unspent: Vec<(WalletStatusElement, MsMembershipProof)>, + pub synced_unspent: Vec<(WalletStatusElement, MsMembershipProof)>, pub 
unsynced_unspent_amount: NeptuneCoins, pub unsynced_unspent: Vec, pub synced_spent_amount: NeptuneCoins, diff --git a/src/tests/shared.rs b/src/tests/shared.rs index 5996b7049..4a0f6c297 100644 --- a/src/tests/shared.rs +++ b/src/tests/shared.rs @@ -452,7 +452,7 @@ pub fn pseudorandom_removal_record_integrity_witness( .iter() .zip(membership_proofs.iter()) .map(|(utxo, msmp)| { - commit::( + commit( Hash::hash(utxo), msmp.sender_randomness, msmp.receiver_preimage.hash::(), @@ -496,7 +496,7 @@ pub fn pseudorandom_removal_record_integrity_witness( msmp.auth_path_aocl.leaf_index, ) }) - .map(|(item, sr, rp, li)| get_swbf_indices::(item, sr, rp, li)) + .map(|(item, sr, rp, li)| get_swbf_indices(item, sr, rp, li)) .map(|ais| RemovalRecord { absolute_indices: AbsoluteIndexSet::new(&ais), target_chunks: pseudorandom_chunk_dictionary(rng.gen()), @@ -732,14 +732,10 @@ pub fn random_option(thing: T) -> Option { // TODO: Consider moving this to to the appropriate place in global state, // keep fn interface. Can be helper function to `create_transaction`. pub fn make_mock_transaction_with_generation_key( - input_utxos_mps_keys: Vec<( - Utxo, - MsMembershipProof, - generation_address::SpendingKey, - )>, + input_utxos_mps_keys: Vec<(Utxo, MsMembershipProof, generation_address::SpendingKey)>, receiver_data: Vec, fee: NeptuneCoins, - tip_msa: MutatorSetAccumulator, + tip_msa: MutatorSetAccumulator, ) -> Transaction { // Generate removal records let mut inputs = vec![]; @@ -750,7 +746,7 @@ pub fn make_mock_transaction_with_generation_key( let mut outputs = vec![]; for rd in receiver_data.iter() { - let addition_record = commit::( + let addition_record = commit( Hash::hash(&rd.utxo), rd.sender_randomness, rd.receiver_privacy_digest, @@ -825,7 +821,7 @@ pub fn make_mock_transaction_with_generation_key( // `make_mock_transaction`, in contrast to `make_mock_transaction2`, assumes you // already have created `DevNetInput`s. 
pub fn make_mock_transaction( - inputs: Vec>, + inputs: Vec, outputs: Vec, ) -> Transaction { let timestamp: BFieldElement = BFieldElement::new( @@ -853,7 +849,7 @@ pub fn make_mock_transaction( // TODO: Change this function into something more meaningful! pub fn make_mock_transaction_with_wallet( - inputs: Vec>, + inputs: Vec, outputs: Vec, fee: NeptuneCoins, _wallet_state: &WalletState, @@ -912,7 +908,7 @@ pub fn make_mock_block( let coinbase_digest: Digest = Hash::hash(&coinbase_utxo); let coinbase_addition_record: AdditionRecord = - commit::(coinbase_digest, coinbase_output_randomness, receiver_digest); + commit(coinbase_digest, coinbase_output_randomness, receiver_digest); next_mutator_set.add(&coinbase_addition_record); let block_timestamp = match block_timestamp { diff --git a/src/util_types/mutator_set/active_window.rs b/src/util_types/mutator_set/active_window.rs index 0644e3ba5..8ed8496a2 100644 --- a/src/util_types/mutator_set/active_window.rs +++ b/src/util_types/mutator_set/active_window.rs @@ -3,40 +3,33 @@ use crate::prelude::twenty_first; use get_size::GetSize; use itertools::Itertools; use serde::{Deserialize, Serialize}; -use std::marker::PhantomData; use std::ops::Range; use twenty_first::shared_math::bfield_codec::BFieldCodec; -use twenty_first::util_types::algebraic_hasher::AlgebraicHasher; use super::chunk::Chunk; use super::shared::{CHUNK_SIZE, WINDOW_SIZE}; #[derive(Clone, Debug, Eq, Serialize, Deserialize, GetSize, BFieldCodec)] -pub struct ActiveWindow { +pub struct ActiveWindow { // It's OK to store this in memory, since it's on the size of kilobytes, not gigabytes. 
pub sbf: Vec, - #[bfield_codec(ignore)] - _hasher: PhantomData, } -impl PartialEq for ActiveWindow { +impl PartialEq for ActiveWindow { fn eq(&self, other: &Self) -> bool { self.sbf == other.sbf } } -impl Default for ActiveWindow { +impl Default for ActiveWindow { fn default() -> Self { Self::new() } } -impl ActiveWindow { +impl ActiveWindow { pub fn new() -> Self { - Self { - sbf: Vec::new(), - _hasher: PhantomData, - } + Self { sbf: Vec::new() } } /// Grab a slice from the sparse Bloom filter by supplying an @@ -174,7 +167,6 @@ impl ActiveWindow { pub fn from_vec_u32(vector: &[u32]) -> Self { Self { sbf: vector.to_vec(), - _hasher: PhantomData, } } } @@ -182,23 +174,22 @@ impl ActiveWindow { #[cfg(test)] mod active_window_tests { + use crate::models::blockchain::shared::Hash; + use super::*; use rand::{thread_rng, RngCore}; - use twenty_first::shared_math::tip5::Tip5; + use tasm_lib::twenty_first::util_types::algebraic_hasher::AlgebraicHasher; - impl ActiveWindow { + impl ActiveWindow { fn new_from(sbf: Vec) -> Self { - Self { - sbf, - _hasher: PhantomData, - } + Self { sbf } } } #[test] fn aw_is_reversible_bloom_filter() { let sbf = Vec::::new(); - let mut aw = ActiveWindow::::new_from(sbf); + let mut aw = ActiveWindow::new_from(sbf); // Insert an index twice, remove it once and the verify that // it is still there @@ -217,7 +208,7 @@ mod active_window_tests { #[test] fn insert_remove_probe_indices_pbt() { let sbf = Vec::::new(); - let mut aw = ActiveWindow::::new_from(sbf); + let mut aw = ActiveWindow::new_from(sbf); for i in 0..100 { assert!(!aw.contains(i as u32)); } @@ -242,7 +233,7 @@ mod active_window_tests { #[test] fn test_slide_window() { - let mut aw = ActiveWindow::::new(); + let mut aw = ActiveWindow::new(); let num_insertions = 100; let mut rng = thread_rng(); @@ -258,9 +249,7 @@ mod active_window_tests { #[test] fn test_slide_window_back() { - type Hasher = Tip5; - - let mut active_window = ActiveWindow::::new(); + let mut active_window = 
ActiveWindow::new(); let num_insertions = 1000; let mut rng = thread_rng(); for _ in 0..num_insertions { @@ -278,9 +267,7 @@ mod active_window_tests { #[test] fn test_slide_window_and_back() { - type Hasher = Tip5; - - let mut active_window = ActiveWindow::::new(); + let mut active_window = ActiveWindow::new(); let num_insertions = 1000; let mut rng = thread_rng(); for _ in 0..num_insertions { @@ -301,14 +288,14 @@ mod active_window_tests { ); } - fn hash_unequal_prop() { - H::hash(&ActiveWindow::::new()); + fn hash_unequal_prop() { + Hash::hash(&ActiveWindow::new()); - let mut aw_1 = ActiveWindow::::new(); + let mut aw_1 = ActiveWindow::new(); aw_1.insert(1u32); - let aw_2 = ActiveWindow::::new(); + let aw_2 = ActiveWindow::new(); - assert_ne!(H::hash(&aw_1), H::hash(&aw_2)); + assert_ne!(Hash::hash(&aw_1), Hash::hash(&aw_2)); } #[test] @@ -316,25 +303,22 @@ mod active_window_tests { // This is just a test to ensure that the hashing of the active part of the SWBF // works in the runtime, for relevant hash functions. It also tests that different // indices being inserted results in different digests. 
- hash_unequal_prop::(); + hash_unequal_prop(); } #[test] fn test_active_window_serialization() { - type H = Tip5; - - let aw0 = ActiveWindow::::new(); + let aw0 = ActiveWindow::new(); let json_aw0 = serde_json::to_string(&aw0).unwrap(); - let aw0_back = serde_json::from_str::>(&json_aw0).unwrap(); + let aw0_back = serde_json::from_str::(&json_aw0).unwrap(); assert_eq!(aw0.sbf, aw0_back.sbf); } #[test] fn test_active_window_decode() { - type H = Tip5; let mut rng = thread_rng(); - let mut aw0 = ActiveWindow::::new(); + let mut aw0 = ActiveWindow::new(); for _ in 0..37 { aw0.insert(rng.next_u32() % WINDOW_SIZE); } diff --git a/src/util_types/mutator_set/addition_record.rs b/src/util_types/mutator_set/addition_record.rs index 5dddc523e..6d0e3bc82 100644 --- a/src/util_types/mutator_set/addition_record.rs +++ b/src/util_types/mutator_set/addition_record.rs @@ -30,59 +30,65 @@ pub fn pseudorandom_addition_record(seed: [u8; 32]) -> AdditionRecord { #[cfg(test)] mod addition_record_tests { + use crate::models::blockchain::shared::Hash; use crate::util_types::mutator_set::mutator_set_trait::commit; use rand::random; - use twenty_first::shared_math::tip5::Tip5; use twenty_first::util_types::algebraic_hasher::AlgebraicHasher; use super::*; #[test] fn get_size_test() { - type H = Tip5; - - let addition_record_0: AdditionRecord = - commit::(H::hash(&1492u128), H::hash(&1522u128), H::hash(&1521u128)); + let addition_record_0: AdditionRecord = commit( + Hash::hash(&1492u128), + Hash::hash(&1522u128), + Hash::hash(&1521u128), + ); assert_eq!(std::mem::size_of::(), addition_record_0.get_size()); } #[test] fn hash_identity_test() { - type H = Tip5; - - let addition_record_0: AdditionRecord = - commit::(H::hash(&1492u128), H::hash(&1522u128), H::hash(&1521u128)); + let addition_record_0: AdditionRecord = commit( + Hash::hash(&1492u128), + Hash::hash(&1522u128), + Hash::hash(&1521u128), + ); - let addition_record_1: AdditionRecord = - commit::(H::hash(&1492u128), H::hash(&1522u128), 
H::hash(&1521u128)); + let addition_record_1: AdditionRecord = commit( + Hash::hash(&1492u128), + Hash::hash(&1522u128), + Hash::hash(&1521u128), + ); assert_eq!( - H::hash(&addition_record_0), - H::hash(&addition_record_1), + Hash::hash(&addition_record_0), + Hash::hash(&addition_record_1), "Two addition records with same commitments and same MMR AOCLs must agree." ); - let addition_record_2: AdditionRecord = - commit::(H::hash(&1451u128), H::hash(&1480u128), H::hash(&1481u128)); + let addition_record_2: AdditionRecord = commit( + Hash::hash(&1451u128), + Hash::hash(&1480u128), + Hash::hash(&1481u128), + ); // Verify behavior with empty mutator sets. All empty MS' are the same. assert_ne!( - H::hash(&addition_record_0), - H::hash(&addition_record_2), + Hash::hash(&addition_record_0), + Hash::hash(&addition_record_2), "Two addition records with differing commitments but same MMR AOCLs must differ." ); } #[test] fn serialization_test() { - type H = Tip5; - - let item = H::hash(&1492u128); - let sender_randomness = H::hash(&1522u128); - let receiver_digest = H::hash(&1583u128); - let addition_record: AdditionRecord = commit::(item, sender_randomness, receiver_digest); + let item = Hash::hash(&1492u128); + let sender_randomness = Hash::hash(&1522u128); + let receiver_digest = Hash::hash(&1583u128); + let addition_record: AdditionRecord = commit(item, sender_randomness, receiver_digest); let json = serde_json::to_string(&addition_record).unwrap(); let s_back = serde_json::from_str::(&json).unwrap(); assert_eq!( diff --git a/src/util_types/mutator_set/archival_mutator_set.rs b/src/util_types/mutator_set/archival_mutator_set.rs index 17805574f..3029e875c 100644 --- a/src/util_types/mutator_set/archival_mutator_set.rs +++ b/src/util_types/mutator_set/archival_mutator_set.rs @@ -1,8 +1,8 @@ +use crate::models::blockchain::shared::Hash; use crate::prelude::twenty_first; use std::collections::{BTreeSet, HashMap}; use std::error::Error; -use 
twenty_first::shared_math::bfield_codec::BFieldCodec; use twenty_first::shared_math::tip5::Digest; use twenty_first::util_types::algebraic_hasher::AlgebraicHasher; use twenty_first::util_types::mmr; @@ -22,19 +22,17 @@ use super::mutator_set_trait::MutatorSet; use super::removal_record::RemovalRecord; use super::shared::CHUNK_SIZE; -pub struct ArchivalMutatorSet +pub struct ArchivalMutatorSet where - H: AlgebraicHasher + BFieldCodec, MmrStorage: StorageVec, ChunkStorage: StorageVec, { - pub kernel: MutatorSetKernel>, + pub kernel: MutatorSetKernel>, pub chunks: ChunkStorage, } -impl MutatorSet for ArchivalMutatorSet +impl MutatorSet for ArchivalMutatorSet where - H: AlgebraicHasher + BFieldCodec, MmrStorage: StorageVec, ChunkStorage: StorageVec, { @@ -43,16 +41,16 @@ where item: Digest, sender_randomness: Digest, receiver_preimage: Digest, - ) -> MsMembershipProof { + ) -> MsMembershipProof { self.kernel .prove(item, sender_randomness, receiver_preimage) } - fn verify(&self, item: Digest, membership_proof: &MsMembershipProof) -> bool { + fn verify(&self, item: Digest, membership_proof: &MsMembershipProof) -> bool { self.kernel.verify(item, membership_proof) } - fn drop(&self, item: Digest, membership_proof: &MsMembershipProof) -> RemovalRecord { + fn drop(&self, item: Digest, membership_proof: &MsMembershipProof) -> RemovalRecord { self.kernel.drop(item, membership_proof) } @@ -72,7 +70,7 @@ where } } - fn remove(&mut self, removal_record: &RemovalRecord) { + fn remove(&mut self, removal_record: &RemovalRecord) { let new_chunks: HashMap = self.kernel.remove_helper(removal_record); // note: set_many() is atomic. 
self.chunks.set_many(new_chunks); @@ -86,8 +84,8 @@ where /// updated fn batch_remove( &mut self, - removal_records: Vec>, - preserved_membership_proofs: &mut [&mut MsMembershipProof], + removal_records: Vec, + preserved_membership_proofs: &mut [&mut MsMembershipProof], ) { let chunk_index_to_chunk_mutation = self .kernel @@ -99,9 +97,8 @@ where } /// Methods that only work when implementing using archival MMRs as the underlying two MMRs -impl ArchivalMutatorSet +impl ArchivalMutatorSet where - H: AlgebraicHasher + BFieldCodec, MmrStorage: StorageVec, ChunkStorage: StorageVec, { @@ -109,8 +106,8 @@ where assert_eq!(0, aocl.len()); assert_eq!(0, swbf_inactive.len()); assert_eq!(0, chunks.len()); - let aocl: ArchivalMmr = ArchivalMmr::new(aocl); - let swbf_inactive: ArchivalMmr = ArchivalMmr::new(swbf_inactive); + let aocl: ArchivalMmr = ArchivalMmr::new(aocl); + let swbf_inactive: ArchivalMmr = ArchivalMmr::new(swbf_inactive); Self { kernel: MutatorSetKernel { aocl, @@ -125,10 +122,10 @@ where aocl: MmrStorage, swbf_inactive: MmrStorage, chunks: ChunkStorage, - active_window: ActiveWindow, + active_window: ActiveWindow, ) -> Self { - let aocl: ArchivalMmr = ArchivalMmr::new(aocl); - let swbf_inactive: ArchivalMmr = ArchivalMmr::new(swbf_inactive); + let aocl: ArchivalMmr = ArchivalMmr::new(aocl); + let swbf_inactive: ArchivalMmr = ArchivalMmr::new(swbf_inactive); Self { kernel: MutatorSetKernel { @@ -144,7 +141,7 @@ where pub fn get_aocl_authentication_path( &self, index: u64, - ) -> Result, Box> { + ) -> Result, Box> { if self.kernel.aocl.count_leaves() <= index { return Err(Box::new( MutatorSetKernelError::RequestedAoclAuthPathOutOfBounds(( @@ -161,7 +158,7 @@ where pub fn get_chunk_and_auth_path( &self, chunk_index: u64, - ) -> Result<(mmr::mmr_membership_proof::MmrMembershipProof, Chunk), Box> { + ) -> Result<(mmr::mmr_membership_proof::MmrMembershipProof, Chunk), Box> { if self.kernel.swbf_inactive.count_leaves() <= chunk_index { return Err(Box::new( 
MutatorSetKernelError::RequestedSwbfAuthPathOutOfBounds(( @@ -171,7 +168,7 @@ where )); } - let chunk_auth_path: mmr::mmr_membership_proof::MmrMembershipProof = + let chunk_auth_path: mmr::mmr_membership_proof::MmrMembershipProof = self.kernel.swbf_inactive.prove_membership(chunk_index).0; // This check should never fail. It would mean that chunks are missing but that the @@ -195,14 +192,13 @@ where sender_randomness: Digest, receiver_preimage: Digest, aocl_index: u64, - ) -> Result, Box> { + ) -> Result> { if self.kernel.aocl.is_empty() { return Err(Box::new(MutatorSetKernelError::MutatorSetIsEmpty)); } let auth_path_aocl = self.get_aocl_authentication_path(aocl_index)?; - let swbf_indices = - get_swbf_indices::(item, sender_randomness, receiver_preimage, aocl_index); + let swbf_indices = get_swbf_indices(item, sender_randomness, receiver_preimage, aocl_index); let batch_index = self.kernel.get_batch_index(); let window_start = batch_index as u128 * CHUNK_SIZE as u128; @@ -212,13 +208,13 @@ where .filter(|bi| **bi < window_start) .map(|bi| (*bi / CHUNK_SIZE as u128) as u64) .collect(); - let mut target_chunks: ChunkDictionary = ChunkDictionary::default(); + let mut target_chunks: ChunkDictionary = ChunkDictionary::default(); for (chunk_index, chunk) in self.chunks.many_iter(chunk_indices) { assert!( self.chunks.len() > chunk_index, "Chunks must be known if its authentication path is known." ); - let chunk_membership_proof: mmr::mmr_membership_proof::MmrMembershipProof = + let chunk_membership_proof: mmr::mmr_membership_proof::MmrMembershipProof = self.kernel.swbf_inactive.prove_membership(chunk_index).0; target_chunks .dictionary @@ -236,7 +232,7 @@ where /// Revert the `RemovalRecord` by removing the indices that /// were inserted by it. These live in either the active window, or /// in a relevant chunk. 
- pub fn revert_remove(&mut self, removal_record: &RemovalRecord) { + pub fn revert_remove(&mut self, removal_record: &RemovalRecord) { let removal_record_indices: Vec = removal_record.absolute_indices.to_vec(); let batch_index = self.kernel.get_batch_index(); let active_window_start = batch_index as u128 * CHUNK_SIZE as u128; @@ -269,7 +265,7 @@ where // update archival mmr self.kernel .swbf_inactive - .mutate_leaf_raw(chunk_index, H::hash(&new_chunk)); + .mutate_leaf_raw(chunk_index, Hash::hash(&new_chunk)); self.chunks.set(chunk_index, new_chunk); } @@ -299,7 +295,7 @@ where // 2. Possibly shrink bloom filter by moving a chunk back into active window // // This happens when the batch index changes (i.e. every `BATCH_SIZE` addition). - if !MutatorSetKernel::>::window_slides_back(removed_add_index) + if !MutatorSetKernel::>::window_slides_back(removed_add_index) { return; } @@ -331,8 +327,8 @@ where } } - pub fn accumulator(&self) -> MutatorSetAccumulator { - let set_commitment = MutatorSetKernel::> { + pub fn accumulator(&self) -> MutatorSetAccumulator { + let set_commitment = MutatorSetKernel::> { aocl: MmrAccumulator::init( self.kernel.aocl.get_peaks(), self.kernel.aocl.count_leaves(), @@ -354,7 +350,6 @@ mod archival_mutator_set_tests { use itertools::Itertools; use rand::rngs::StdRng; use rand::{Rng, SeedableRng}; - use twenty_first::shared_math::tip5::Tip5; use crate::util_types::mutator_set::mutator_set_trait::commit; use crate::util_types::mutator_set::removal_record::AbsoluteIndexSet; @@ -367,19 +362,17 @@ mod archival_mutator_set_tests { #[test] fn archival_set_commitment_test() { - type H = Tip5; - let mut rms = empty_rusty_mutator_set::(); + let mut rms = empty_rusty_mutator_set(); let archival_mutator_set = rms.ams_mut(); let num_additions = 65; - let mut membership_proofs: Vec> = vec![]; + let mut membership_proofs: Vec = vec![]; let mut items: Vec = vec![]; for i in 0..num_additions { let (item, sender_randomness, receiver_preimage) = 
make_item_and_randomnesses(); - let addition_record = - commit::(item, sender_randomness, receiver_preimage.hash::()); + let addition_record = commit(item, sender_randomness, receiver_preimage.hash::()); let membership_proof = archival_mutator_set.prove(item, sender_randomness, receiver_preimage); @@ -426,9 +419,7 @@ mod archival_mutator_set_tests { #[test] fn archival_mutator_set_revert_add_test() { - type H = Tip5; - - let mut rms = empty_rusty_mutator_set::(); + let mut rms = empty_rusty_mutator_set(); let archival_mutator_set = rms.ams_mut(); // Repeatedly insert `AdditionRecord` into empty MutatorSet and revert it @@ -484,8 +475,6 @@ mod archival_mutator_set_tests { #[test] fn bloom_filter_is_reversible() { - type H = Tip5; - // With the `3086841408u32` seed a collission is generated at i = 1 and i = 38, on index 510714 let seed_integer = 3086841408u32; let seed = seed_integer.to_be_bytes(); @@ -496,11 +485,11 @@ mod archival_mutator_set_tests { let mut seeded_rng = StdRng::from_seed(seed_as_bytes); - let mut rms = empty_rusty_mutator_set::(); + let mut rms = empty_rusty_mutator_set(); let archival_mutator_set = rms.ams_mut(); // Also keep track of a mutator set accumulator to verify that this uses an invertible Bloom filter - let mut msa = MutatorSetAccumulator::::default(); + let mut msa = MutatorSetAccumulator::default(); let mut items = vec![]; let mut mps = vec![]; @@ -546,8 +535,8 @@ mod archival_mutator_set_tests { // Verify that the MPs with colliding indices are still valid for ms in [ - &msa as &dyn MutatorSet, - archival_mutator_set as &dyn MutatorSet, + &msa as &dyn MutatorSet, + archival_mutator_set as &dyn MutatorSet, ] { assert!( ms.verify( @@ -584,8 +573,8 @@ mod archival_mutator_set_tests { MsMembershipProof::batch_update_from_remove(&mut mps.iter_mut().collect_vec(), &rem0) .unwrap(); for ms in [ - &msa as &dyn MutatorSet, - archival_mutator_set as &dyn MutatorSet, + &msa as &dyn MutatorSet, + archival_mutator_set as &dyn MutatorSet, ] { 
assert!( !ms.verify( @@ -610,8 +599,8 @@ mod archival_mutator_set_tests { MsMembershipProof::batch_update_from_remove(&mut mps.iter_mut().collect_vec(), &rem1) .unwrap(); for ms in [ - &msa as &dyn MutatorSet, - archival_mutator_set as &dyn MutatorSet, + &msa as &dyn MutatorSet, + archival_mutator_set as &dyn MutatorSet, ] { assert!( !ms.verify( @@ -669,9 +658,7 @@ mod archival_mutator_set_tests { #[should_panic(expected = "Decremented integer is already zero.")] #[test] fn revert_remove_from_active_bloom_filter_panic() { - type H = Tip5; - - let mut rms = empty_rusty_mutator_set::(); + let mut rms = empty_rusty_mutator_set(); let archival_mutator_set = rms.ams_mut(); let record = prepare_random_addition(archival_mutator_set); let (item, addition_record, membership_proof) = record; @@ -687,9 +674,7 @@ mod archival_mutator_set_tests { #[should_panic(expected = "Attempted to remove index that was not present in chunk.")] #[test] fn revert_remove_invalid_panic() { - type H = Tip5; - - let mut rms = empty_rusty_mutator_set::(); + let mut rms = empty_rusty_mutator_set(); let archival_mutator_set = rms.ams_mut(); for _ in 0..2 * BATCH_SIZE { @@ -712,9 +697,7 @@ mod archival_mutator_set_tests { #[test] fn archival_mutator_set_revert_remove_test() { - type H = Tip5; - - let mut rms = empty_rusty_mutator_set::(); + let mut rms = empty_rusty_mutator_set(); let archival_mutator_set = rms.ams_mut(); let n_iterations = 11 * BATCH_SIZE as usize; let mut records = Vec::with_capacity(n_iterations); @@ -756,20 +739,18 @@ mod archival_mutator_set_tests { #[test] fn archival_set_batch_remove_simple_test() { - type H = Tip5; - let mut rms = empty_rusty_mutator_set::(); + let mut rms = empty_rusty_mutator_set(); let archival_mutator_set = rms.ams_mut(); let num_additions = 130; - let mut membership_proofs: Vec> = vec![]; + let mut membership_proofs: Vec = vec![]; let mut items: Vec = vec![]; for _ in 0..num_additions { let (item, sender_randomness, receiver_preimage) = 
make_item_and_randomnesses(); - let addition_record = - commit::(item, sender_randomness, receiver_preimage.hash::()); + let addition_record = commit(item, sender_randomness, receiver_preimage.hash::()); let membership_proof = archival_mutator_set.prove(item, sender_randomness, receiver_preimage); @@ -787,7 +768,7 @@ mod archival_mutator_set_tests { items.push(item); } - let mut removal_records: Vec> = vec![]; + let mut removal_records: Vec = vec![]; for (mp, &item) in membership_proofs.iter().zip_eq(items.iter()) { removal_records.push(archival_mutator_set.drop(item, mp)); } @@ -803,20 +784,19 @@ mod archival_mutator_set_tests { #[test] fn archival_set_batch_remove_dynamic_test() { - type H = Tip5; - let mut rms = empty_rusty_mutator_set::(); + let mut rms = empty_rusty_mutator_set(); let archival_mutator_set = rms.ams_mut(); let num_additions = 4 * BATCH_SIZE; for remove_factor in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0] { - let mut membership_proofs: Vec> = vec![]; + let mut membership_proofs: Vec = vec![]; let mut items: Vec = vec![]; for _ in 0..num_additions { let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); let addition_record = - commit::(item, sender_randomness, receiver_preimage.hash::()); + commit(item, sender_randomness, receiver_preimage.hash::()); let membership_proof = archival_mutator_set.prove(item, sender_randomness, receiver_preimage); @@ -836,7 +816,7 @@ mod archival_mutator_set_tests { let mut rng = rand::thread_rng(); let mut skipped_removes: Vec = vec![]; - let mut removal_records: Vec> = vec![]; + let mut removal_records: Vec = vec![]; for (mp, &item) in membership_proofs.iter().zip_eq(items.iter()) { let skipped = rng.gen_range(0.0..1.0) < remove_factor; skipped_removes.push(skipped); @@ -875,32 +855,27 @@ mod archival_mutator_set_tests { } fn prepare_seeded_prng_addition< - H: AlgebraicHasher + BFieldCodec, MmrStorage: StorageVec, ChunkStorage: StorageVec, >( - archival_mutator_set: &mut 
ArchivalMutatorSet, + archival_mutator_set: &mut ArchivalMutatorSet, rng: &mut StdRng, - ) -> (Digest, AdditionRecord, MsMembershipProof) { + ) -> (Digest, AdditionRecord, MsMembershipProof) { let item: Digest = rng.gen(); let sender_randomness: Digest = rng.gen(); let receiver_preimage: Digest = rng.gen(); - let addition_record = commit::(item, sender_randomness, receiver_preimage.hash::()); + let addition_record = commit(item, sender_randomness, receiver_preimage.hash::()); let membership_proof = archival_mutator_set.prove(item, sender_randomness, receiver_preimage); (item, addition_record, membership_proof) } - fn prepare_random_addition< - H: AlgebraicHasher + BFieldCodec, - MmrStorage: StorageVec, - ChunkStorage: StorageVec, - >( - archival_mutator_set: &mut ArchivalMutatorSet, - ) -> (Digest, AdditionRecord, MsMembershipProof) { + fn prepare_random_addition, ChunkStorage: StorageVec>( + archival_mutator_set: &mut ArchivalMutatorSet, + ) -> (Digest, AdditionRecord, MsMembershipProof) { let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); - let addition_record = commit::(item, sender_randomness, receiver_preimage.hash::()); + let addition_record = commit(item, sender_randomness, receiver_preimage.hash::()); let membership_proof = archival_mutator_set.prove(item, sender_randomness, receiver_preimage); diff --git a/src/util_types/mutator_set/chunk_dictionary.rs b/src/util_types/mutator_set/chunk_dictionary.rs index 191c9f092..6161d7446 100644 --- a/src/util_types/mutator_set/chunk_dictionary.rs +++ b/src/util_types/mutator_set/chunk_dictionary.rs @@ -1,3 +1,4 @@ +use crate::models::blockchain::shared::Hash; use crate::prelude::{triton_vm, twenty_first}; use anyhow::bail; @@ -12,30 +13,21 @@ use twenty_first::shared_math::bfield_codec::BFieldCodec; use super::chunk::Chunk; use twenty_first::shared_math::b_field_element::BFieldElement; -use twenty_first::util_types::algebraic_hasher::AlgebraicHasher; use 
twenty_first::util_types::mmr::mmr_membership_proof::MmrMembershipProof; -#[derive(Clone, Debug, Serialize, Deserialize, GetSize, PartialEq, Eq)] -pub struct ChunkDictionary { +#[derive(Clone, Debug, Serialize, Deserialize, GetSize, PartialEq, Eq, Default)] +pub struct ChunkDictionary { // {chunk index => (MMR membership proof for the whole chunk to which index belongs, chunk value)} - pub dictionary: HashMap, Chunk)>, + pub dictionary: HashMap, Chunk)>, } -impl ChunkDictionary { - pub fn new(dictionary: HashMap, Chunk)>) -> Self { +impl ChunkDictionary { + pub fn new(dictionary: HashMap, Chunk)>) -> Self { Self { dictionary } } } -impl Default for ChunkDictionary { - fn default() -> Self { - Self { - dictionary: HashMap::new(), - } - } -} - -impl BFieldCodec for ChunkDictionary { +impl BFieldCodec for ChunkDictionary { type Error = anyhow::Error; fn encode(&self) -> Vec { @@ -74,9 +66,8 @@ impl BFieldCodec for ChunkDictionary { } let memproof_length = sequence[read_index].value() as usize; read_index += 1; - let membership_proof = *MmrMembershipProof::::decode( - &sequence[read_index..read_index + memproof_length], - )?; + let membership_proof = + *MmrMembershipProof::decode(&sequence[read_index..read_index + memproof_length])?; read_index += memproof_length; // read chunk @@ -100,7 +91,7 @@ impl BFieldCodec for ChunkDictionary { } /// Generate pseudorandom chunk dictionary from the given seed, for testing purposes. 
-pub fn pseudorandom_chunk_dictionary(seed: [u8; 32]) -> ChunkDictionary { +pub fn pseudorandom_chunk_dictionary(seed: [u8; 32]) -> ChunkDictionary { let mut rng: StdRng = SeedableRng::from_seed(seed); let mut dictionary = HashMap::new(); @@ -119,7 +110,7 @@ pub fn pseudorandom_chunk_dictionary(seed: [u8; 32]) -> Chun ), ); } - ChunkDictionary::::new(dictionary) + ChunkDictionary::new(dictionary) } #[cfg(test)] @@ -127,6 +118,7 @@ mod chunk_dict_tests { use crate::util_types::mutator_set::shared::CHUNK_SIZE; use crate::util_types::test_shared::mutator_set::random_chunk_dictionary; + use tasm_lib::twenty_first::util_types::algebraic_hasher::AlgebraicHasher; use twenty_first::shared_math::other::random_elements; use twenty_first::shared_math::tip5::{Digest, Tip5}; use twenty_first::test_shared::mmr::get_rustyleveldb_ammr_from_digests; @@ -138,9 +130,9 @@ mod chunk_dict_tests { fn hash_test() { type H = Tip5; - let chunkdict0 = ChunkDictionary::::default(); - let chunkdict00 = ChunkDictionary::::default(); - assert_eq!(H::hash(&chunkdict0), H::hash(&chunkdict00)); + let chunkdict0 = ChunkDictionary::default(); + let chunkdict00 = ChunkDictionary::default(); + assert_eq!(Hash::hash(&chunkdict0), Hash::hash(&chunkdict00)); // Insert elements let num_leaves = 3; @@ -155,7 +147,7 @@ mod chunk_dict_tests { } }; let value1 = (mp1, chunk1); - let chunkdict1 = ChunkDictionary::::new(HashMap::from([(key1, value1.clone())])); + let chunkdict1 = ChunkDictionary::new(HashMap::from([(key1, value1.clone())])); // Insert two more element and verify that the hash is deterministic which implies that the // elements in the preimage are sorted deterministically. 
@@ -164,27 +156,27 @@ mod chunk_dict_tests { let mut chunk2 = Chunk::empty_chunk(); chunk2.insert(CHUNK_SIZE / 2 + 1); let value2 = (mp2, chunk2); - let chunkdict2 = ChunkDictionary::::new(HashMap::from([ + let chunkdict2 = ChunkDictionary::new(HashMap::from([ (key1, value1.clone()), (key2, value2.clone()), ])); let key3: u64 = 89; - let chunkdict3 = ChunkDictionary::::new(HashMap::from([ + let chunkdict3 = ChunkDictionary::new(HashMap::from([ (key1, value1.clone()), (key2, value2.clone()), (key3, value2.clone()), ])); - assert_ne!(H::hash(&chunkdict0), H::hash(&chunkdict1)); - assert_ne!(H::hash(&chunkdict0), H::hash(&chunkdict2)); - assert_ne!(H::hash(&chunkdict0), H::hash(&chunkdict3)); - assert_ne!(H::hash(&chunkdict1), H::hash(&chunkdict2)); - assert_ne!(H::hash(&chunkdict1), H::hash(&chunkdict3)); - assert_ne!(H::hash(&chunkdict2), H::hash(&chunkdict3)); + assert_ne!(Hash::hash(&chunkdict0), Hash::hash(&chunkdict1)); + assert_ne!(Hash::hash(&chunkdict0), Hash::hash(&chunkdict2)); + assert_ne!(Hash::hash(&chunkdict0), Hash::hash(&chunkdict3)); + assert_ne!(Hash::hash(&chunkdict1), Hash::hash(&chunkdict2)); + assert_ne!(Hash::hash(&chunkdict1), Hash::hash(&chunkdict3)); + assert_ne!(Hash::hash(&chunkdict2), Hash::hash(&chunkdict3)); // Construct similar data structure to `two_elements` but insert key/value pairs in opposite order - let chunkdict3_alt = ChunkDictionary::::new(HashMap::from([ + let chunkdict3_alt = ChunkDictionary::new(HashMap::from([ (key3, value2.clone()), (key1, value1.clone()), (key2, value2.clone()), @@ -193,17 +185,17 @@ mod chunk_dict_tests { // Verify that keys are sorted deterministically when hashing chunk dictionary. 
// This test fails if the hash method does not sort the keys for _ in 0..10 { - assert_eq!(H::hash(&chunkdict3), H::hash(&chunkdict3_alt)); + assert_eq!(Hash::hash(&chunkdict3), Hash::hash(&chunkdict3_alt)); } // Negative: Construct data structure where the keys and values are switched - let chunkdict3_switched = ChunkDictionary::::new(HashMap::from([ + let chunkdict3_switched = ChunkDictionary::new(HashMap::from([ (key1, value2.clone()), (key2, value1), (key3, value2), ])); - assert_ne!(H::hash(&chunkdict3), H::hash(&chunkdict3_switched)); + assert_ne!(Hash::hash(&chunkdict3), Hash::hash(&chunkdict3_switched)); } #[test] @@ -212,10 +204,10 @@ mod chunk_dict_tests { // an imported library. I included it here, though, because the setup seems a bit clumsy // to me so far. type H = Tip5; - let s_empty: ChunkDictionary = ChunkDictionary::new(HashMap::new()); + let s_empty: ChunkDictionary = ChunkDictionary::new(HashMap::new()); let json = serde_json::to_string(&s_empty).unwrap(); println!("json = {}", json); - let s_back = serde_json::from_str::>(&json).unwrap(); + let s_back = serde_json::from_str::(&json).unwrap(); assert!(s_back.dictionary.is_empty()); // Build a non-empty chunk dict and verify that it still works @@ -227,11 +219,10 @@ mod chunk_dict_tests { relative_indices: (0..CHUNK_SIZE).collect(), }; - let s_non_empty = - ChunkDictionary::::new(HashMap::from([(key, (mp.clone(), chunk.clone()))])); + let s_non_empty = ChunkDictionary::new(HashMap::from([(key, (mp.clone(), chunk.clone()))])); let json_non_empty = serde_json::to_string(&s_non_empty).unwrap(); println!("json_non_empty = {}", json_non_empty); - let s_back_non_empty = serde_json::from_str::>(&json_non_empty).unwrap(); + let s_back_non_empty = serde_json::from_str::(&json_non_empty).unwrap(); assert!(!s_back_non_empty.dictionary.is_empty()); assert!(s_back_non_empty.dictionary.contains_key(&key)); assert_eq!((mp, chunk), s_back_non_empty.dictionary[&key]); @@ -239,8 +230,7 @@ mod chunk_dict_tests { 
#[test] fn test_chunk_dictionary_decode() { - type H = Tip5; - let chunk_dictionary = random_chunk_dictionary::(); + let chunk_dictionary = random_chunk_dictionary(); let encoded = chunk_dictionary.encode(); let decoded = *ChunkDictionary::decode(&encoded).unwrap(); diff --git a/src/util_types/mutator_set/ms_membership_proof.rs b/src/util_types/mutator_set/ms_membership_proof.rs index d5f19188b..05bfe20d2 100644 --- a/src/util_types/mutator_set/ms_membership_proof.rs +++ b/src/util_types/mutator_set/ms_membership_proof.rs @@ -1,3 +1,4 @@ +use crate::models::blockchain::shared::Hash; use crate::prelude::twenty_first; use get_size::GetSize; @@ -47,17 +48,17 @@ pub enum MembershipProofError { // In order to store this structure in the database, it needs to be serializable. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, GetSize, BFieldCodec, TasmObject)] -pub struct MsMembershipProof { +pub struct MsMembershipProof { pub sender_randomness: Digest, pub receiver_preimage: Digest, - pub auth_path_aocl: MmrMembershipProof, - pub target_chunks: ChunkDictionary, + pub auth_path_aocl: MmrMembershipProof, + pub target_chunks: ChunkDictionary, } -impl MsMembershipProof { +impl MsMembershipProof { /// Compute the indices that will be added to the SWBF if this item is removed. pub fn compute_indices(&self, item: Digest) -> AbsoluteIndexSet { - AbsoluteIndexSet::new(&get_swbf_indices::( + AbsoluteIndexSet::new(&get_swbf_indices( item, self.sender_randomness, self.receiver_preimage, @@ -67,10 +68,10 @@ impl MsMembershipProof { /// Update a list of membership proofs in anticipation of an addition. If successful, /// return (wrapped in an Ok) a vector of all indices of updated membership proofs. 
- pub fn batch_update_from_addition>( + pub fn batch_update_from_addition>( membership_proofs: &mut [&mut Self], own_items: &[Digest], - mutator_set: &MutatorSetKernel, + mutator_set: &MutatorSetKernel, addition_record: &AdditionRecord, ) -> Result, Box> { assert!( @@ -101,7 +102,7 @@ impl MsMembershipProof { ); // if window does not slide, we are done - if !MutatorSetKernel::::window_slides(new_item_index) { + if !MutatorSetKernel::::window_slides(new_item_index) { return Ok(indices_for_updated_mps); } @@ -109,7 +110,7 @@ impl MsMembershipProof { let batch_index = new_item_index / BATCH_SIZE as u64; let old_window_start_batch_index = batch_index - 1; let new_chunk = mutator_set.swbf_active.slid_chunk(); - let new_chunk_digest: Digest = H::hash(&new_chunk); + let new_chunk_digest: Digest = Hash::hash(&new_chunk); // Insert the new chunk digest into the accumulator-version of the // SWBF MMR to get its authentication path. It's important to convert the MMR @@ -117,8 +118,8 @@ impl MsMembershipProof { // a whole archival MMR for this operation, as the archival MMR can be in the // size of gigabytes, whereas the MMR accumulator should be in the size of // kilobytes. 
- let mut mmra: MmrAccumulator = mutator_set.swbf_inactive.to_accumulator(); - let new_swbf_auth_path: MmrMembershipProof = mmra.append(new_chunk_digest); + let mut mmra: MmrAccumulator = mutator_set.swbf_inactive.to_accumulator(); + let new_swbf_auth_path: MmrMembershipProof = mmra.append(new_chunk_digest); // Collect all indices for all membership proofs that are being updated // Notice that this is a *very* expensive operation if the indices are @@ -129,7 +130,7 @@ impl MsMembershipProof { .zip(own_items.iter()) .enumerate() .for_each(|(i, (mp, &item))| { - let indices = AbsoluteIndexSet::new(&get_swbf_indices::( + let indices = AbsoluteIndexSet::new(&get_swbf_indices( item, mp.sender_randomness, mp.receiver_preimage, @@ -190,7 +191,7 @@ impl MsMembershipProof { // So relegating that bookkeeping to this function instead would not be more // efficient. let mut mmr_membership_proofs_for_append: Vec< - &mut mmr::mmr_membership_proof::MmrMembershipProof, + &mut mmr::mmr_membership_proof::MmrMembershipProof, > = vec![]; // The `mmr_membership_proof_index_to_membership_proof_index` variable is to remember @@ -208,7 +209,7 @@ impl MsMembershipProof { } let indices_for_mutated_values = - mmr::mmr_membership_proof::MmrMembershipProof::::batch_update_from_append( + mmr::mmr_membership_proof::MmrMembershipProof::::batch_update_from_append( &mut mmr_membership_proofs_for_append, mutator_set.swbf_inactive.count_leaves(), new_chunk_digest, @@ -240,7 +241,7 @@ impl MsMembershipProof { pub fn update_from_addition( &mut self, own_item: Digest, - mutator_set: &MutatorSetAccumulator, + mutator_set: &MutatorSetAccumulator, addition_record: &AdditionRecord, ) -> Result> { assert!(self.auth_path_aocl.leaf_index < mutator_set.kernel.aocl.count_leaves()); @@ -254,16 +255,16 @@ impl MsMembershipProof { ); // if window does not slide, we are done - if !MutatorSetKernel::>::window_slides(new_item_index) { + if !MutatorSetKernel::>::window_slides(new_item_index) { return 
Ok(aocl_mp_updated); } // window does slide let new_chunk = mutator_set.kernel.swbf_active.slid_chunk(); - let new_chunk_digest: Digest = H::hash(&new_chunk); + let new_chunk_digest: Digest = Hash::hash(&new_chunk); // Get indices by recalculating them. (We do not cache indices any more.) - let all_indices = get_swbf_indices::( + let all_indices = get_swbf_indices( own_item, self.sender_randomness, self.receiver_preimage, @@ -281,8 +282,8 @@ impl MsMembershipProof { // a whole archival MMR for this operation, as the archival MMR can be in the // size of gigabytes, whereas the MMR accumulator should be in the size of // kilobytes. - let mut mmra: MmrAccumulator = mutator_set.kernel.swbf_inactive.to_accumulator(); - let new_auth_path: mmr::mmr_membership_proof::MmrMembershipProof = + let mut mmra: MmrAccumulator = mutator_set.kernel.swbf_inactive.to_accumulator(); + let new_auth_path: mmr::mmr_membership_proof::MmrMembershipProof = mmra.append(new_chunk_digest); let mut swbf_chunk_dictionary_updated = false; @@ -344,7 +345,7 @@ impl MsMembershipProof { /// the state of the mutator set kernel prior to adding them. pub fn revert_update_from_batch_addition( &mut self, - previous_mutator_set: &MutatorSetAccumulator, + previous_mutator_set: &MutatorSetAccumulator, ) { // calculate AOCL MMR MP length let previous_leaf_count = previous_mutator_set.kernel.aocl.count_leaves(); @@ -383,11 +384,11 @@ impl MsMembershipProof { /// that have been mutated. pub fn batch_update_from_remove( membership_proofs: &mut [&mut Self], - removal_record: &RemovalRecord, + removal_record: &RemovalRecord, ) -> Result, Box> { // Set all chunk values to the new values and calculate the mutation argument // for the batch updating of the MMR membership proofs. 
- let mut chunk_dictionaries: Vec<&mut ChunkDictionary> = membership_proofs + let mut chunk_dictionaries: Vec<&mut ChunkDictionary> = membership_proofs .iter_mut() .map(|mp| &mut mp.target_chunks) .collect(); @@ -400,7 +401,7 @@ impl MsMembershipProof { // mutated. // The chunk values contained in the MS membership proof's chunk dictionary has already // been updated by the `get_batch_mutation_argument_for_removal_record` function. - let mut own_mmr_mps: Vec<&mut mmr::mmr_membership_proof::MmrMembershipProof> = vec![]; + let mut own_mmr_mps: Vec<&mut mmr::mmr_membership_proof::MmrMembershipProof> = vec![]; let mut mmr_mp_index_to_input_index: Vec = vec![]; for (i, chunk_dict) in chunk_dictionaries.iter_mut().enumerate() { for (_, (mp, _)) in chunk_dict.dictionary.iter_mut() { @@ -430,7 +431,7 @@ impl MsMembershipProof { pub fn update_from_remove( &mut self, - removal_record: &RemovalRecord, + removal_record: &RemovalRecord, ) -> Result> { // Removing items does not slide the active window. We only // need to take into account new indices in the sparse Bloom @@ -450,7 +451,7 @@ impl MsMembershipProof { // It would be sufficient to only update the membership proofs that live in the Merkle // trees that have been updated, but it probably will not give a measureable speedup // since this change would not reduce the amount of hashing needed - let mut chunk_mmr_mps: Vec<&mut mmr::mmr_membership_proof::MmrMembershipProof> = self + let mut chunk_mmr_mps: Vec<&mut mmr::mmr_membership_proof::MmrMembershipProof> = self .target_chunks .dictionary .iter_mut() @@ -470,7 +471,7 @@ impl MsMembershipProof { /// with a removal record. pub fn revert_update_from_remove( &mut self, - removal_record: &RemovalRecord, + removal_record: &RemovalRecord, ) -> Result> { // The logic here is essentially the same as in // `update_from_remove` but with the new and old chunks @@ -489,7 +490,7 @@ impl MsMembershipProof { // Note that *all* MMR membership proofs must be updated. 
It's not sufficient to update // those whose leaf has changed, since an authentication path changes if *any* leaf // in the same Merkle tree (under the same MMR peak) changes. - let mut chunk_mmr_mps: Vec<&mut mmr::mmr_membership_proof::MmrMembershipProof> = self + let mut chunk_mmr_mps: Vec<&mut mmr::mmr_membership_proof::MmrMembershipProof> = self .target_chunks .dictionary .iter_mut() @@ -508,14 +509,12 @@ impl MsMembershipProof { /// Generate a pseudorandom mutator set membership proof from the given seed, for testing /// purposes. -pub fn pseudorandom_mutator_set_membership_proof( - seed: [u8; 32], -) -> MsMembershipProof { +pub fn pseudorandom_mutator_set_membership_proof(seed: [u8; 32]) -> MsMembershipProof { let mut rng: StdRng = SeedableRng::from_seed(seed); let sender_randomness: Digest = rng.gen(); let receiver_preimage: Digest = rng.gen(); - let auth_path_aocl: MmrMembershipProof = pseudorandom_mmr_membership_proof::(rng.gen()); - let target_chunks: ChunkDictionary = pseudorandom_chunk_dictionary(rng.gen()); + let auth_path_aocl: MmrMembershipProof = pseudorandom_mmr_membership_proof(rng.gen()); + let target_chunks: ChunkDictionary = pseudorandom_chunk_dictionary(rng.gen()); MsMembershipProof { sender_randomness, receiver_preimage, @@ -554,41 +553,39 @@ mod ms_proof_tests { use rand::rngs::StdRng; use rand::{random, thread_rng, Rng, RngCore, SeedableRng}; use twenty_first::shared_math::other::random_elements; - use twenty_first::shared_math::tip5::Tip5; use twenty_first::util_types::mmr::mmr_membership_proof::MmrMembershipProof; #[test] fn mp_equality_test() { - type H = Tip5; let mut rng = thread_rng(); let (_item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); - let base_mp = MsMembershipProof:: { + let base_mp = MsMembershipProof { sender_randomness, receiver_preimage, - auth_path_aocl: MmrMembershipProof::::new(0, vec![]), + auth_path_aocl: MmrMembershipProof::::new(0, vec![]), target_chunks: ChunkDictionary::default(), }; - 
let mp_with_different_leaf_index = MsMembershipProof:: { + let mp_with_different_leaf_index = MsMembershipProof { sender_randomness, receiver_preimage, - auth_path_aocl: MmrMembershipProof::::new(100073, vec![]), + auth_path_aocl: MmrMembershipProof::::new(100073, vec![]), target_chunks: ChunkDictionary::default(), }; - let mp_with_different_sender_randomness = MsMembershipProof:: { + let mp_with_different_sender_randomness = MsMembershipProof { sender_randomness: rng.gen(), receiver_preimage, - auth_path_aocl: MmrMembershipProof::::new(0, vec![]), + auth_path_aocl: MmrMembershipProof::::new(0, vec![]), target_chunks: ChunkDictionary::default(), }; - let mp_with_different_receiver_preimage = MsMembershipProof:: { + let mp_with_different_receiver_preimage = MsMembershipProof { receiver_preimage: rng.gen(), sender_randomness, - auth_path_aocl: MmrMembershipProof::::new(0, vec![]), + auth_path_aocl: MmrMembershipProof::::new(0, vec![]), target_chunks: ChunkDictionary::default(), }; @@ -606,18 +603,18 @@ mod ms_proof_tests { // Construct an MMR with 7 leafs let mmr_digests = random_elements::(7); - let mut mmra: MmrAccumulator = MmrAccumulator::new(mmr_digests); + let mut mmra: MmrAccumulator = MmrAccumulator::new(mmr_digests); // Get an MMR membership proof by adding the 8th leaf let zero_chunk = Chunk::empty_chunk(); - let mmr_mp = mmra.append(H::hash(&zero_chunk)); + let mmr_mp = mmra.append(Hash::hash(&zero_chunk)); // Verify that the MMR membership proof has the expected length of 3 (sanity check) assert_eq!(3, mmr_mp.authentication_path.len()); // Create a new mutator set membership proof with a non-empty chunk dictionary // and verify that it is considered a different membership proof - let mut mp_mutated: MsMembershipProof = base_mp.clone(); + let mut mp_mutated: MsMembershipProof = base_mp.clone(); mp_mutated .target_chunks .dictionary @@ -629,8 +626,7 @@ mod ms_proof_tests { fn serialization_test() { // This test belongs here since the serialization for 
`Option<[T; $len]>` is implemented // in this code base as a macro. So this is basically a test of that macro. - type H = Tip5; - let accumulator: MutatorSetAccumulator = MutatorSetAccumulator::default(); + let accumulator: MutatorSetAccumulator = MutatorSetAccumulator::default(); for _ in 0..10 { let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); @@ -639,7 +635,7 @@ mod ms_proof_tests { .prove(item, sender_randomness, receiver_preimage); let json: String = serde_json::to_string(&mp).unwrap(); - let mp_again = serde_json::from_str::>(&json).unwrap(); + let mp_again = serde_json::from_str::(&json).unwrap(); assert_eq!(mp_again.target_chunks, mp.target_chunks); assert_eq!(mp_again, mp); @@ -648,7 +644,6 @@ mod ms_proof_tests { #[test] fn revert_update_from_remove_test() { - type H = Tip5; let n = 100; let mut rng = thread_rng(); @@ -657,17 +652,16 @@ mod ms_proof_tests { let mut own_item = None; // set up mutator set - let mut rms = empty_rusty_mutator_set::(); + let mut rms = empty_rusty_mutator_set(); let archival_mutator_set = rms.ams_mut(); - let mut membership_proofs: Vec<(Digest, MsMembershipProof)> = vec![]; + let mut membership_proofs: Vec<(Digest, MsMembershipProof)> = vec![]; // add items for i in 0..n { let item: Digest = random(); let sender_randomness: Digest = random(); let receiver_preimage: Digest = random(); - let addition_record = - commit::(item, sender_randomness, receiver_preimage.hash::()); + let addition_record = commit(item, sender_randomness, receiver_preimage.hash::()); for (oi, mp) in membership_proofs.iter_mut() { mp.update_from_addition(*oi, &archival_mutator_set.accumulator(), &addition_record) @@ -726,8 +720,7 @@ mod ms_proof_tests { RemovalRecord::batch_update_from_remove( &mut mutable_records.iter_mut().collect::>(), applied_removal_record, - ) - .expect("Could not apply removal record."); + ); own_membership_proof .as_mut() @@ -777,8 +770,7 @@ mod ms_proof_tests { #[test] fn 
revert_update_single_remove_test() { - type H = Tip5; - let mut rms = empty_rusty_mutator_set::(); + let mut rms = empty_rusty_mutator_set(); let ams = rms.ams_mut(); let mut mps = vec![]; let mut items = vec![]; @@ -788,8 +780,7 @@ mod ms_proof_tests { let item: Digest = random(); let sender_randomness: Digest = random(); let receiver_preimage: Digest = random(); - let addition_record = - commit::(item, sender_randomness, receiver_preimage.hash::()); + let addition_record = commit(item, sender_randomness, receiver_preimage.hash::()); MsMembershipProof::batch_update_from_addition( &mut mps.iter_mut().collect_vec(), &items, @@ -858,10 +849,8 @@ mod ms_proof_tests { #[test] fn revert_update_single_addition_test() { - type H = Tip5; - for j in 2..30 { - let mut rms = empty_rusty_mutator_set::(); + let mut rms = empty_rusty_mutator_set(); let ams = rms.ams_mut(); // Add `j` items to MSA @@ -873,7 +862,7 @@ mod ms_proof_tests { let sender_randomness: Digest = random(); let receiver_preimage: Digest = random(); let addition_record = - commit::(item, sender_randomness, receiver_preimage.hash::()); + commit(item, sender_randomness, receiver_preimage.hash::()); MsMembershipProof::batch_update_from_addition( &mut mps.iter_mut().collect_vec(), &items, @@ -909,9 +898,7 @@ mod ms_proof_tests { #[test] fn revert_update_from_addition_batches_test() { - type H = Tip5; - - let mut msa: MutatorSetAccumulator = MutatorSetAccumulator::new(); + let mut msa: MutatorSetAccumulator = MutatorSetAccumulator::new(); let mut rng = thread_rng(); for _ in 0..10 { @@ -925,7 +912,7 @@ mod ms_proof_tests { let sender_randomness: Digest = random(); let receiver_preimage: Digest = random(); let addition_record = - commit::(item, sender_randomness, receiver_preimage.hash::()); + commit(item, sender_randomness, receiver_preimage.hash::()); msa.add(&addition_record); } @@ -933,10 +920,10 @@ mod ms_proof_tests { let own_item: Digest = random(); let own_sender_randomness: Digest = random(); let 
own_receiver_preimage: Digest = random(); - let own_addition_record = commit::( + let own_addition_record = commit( own_item, own_sender_randomness, - own_receiver_preimage.hash::(), + own_receiver_preimage.hash::(), ); let mut own_mp = msa.prove(own_item, own_sender_randomness, own_receiver_preimage); msa.add(&own_addition_record); @@ -948,7 +935,7 @@ mod ms_proof_tests { let sender_randomness: Digest = random(); let receiver_preimage: Digest = random(); let addition_record = - commit::(item, sender_randomness, receiver_preimage.hash::()); + commit(item, sender_randomness, receiver_preimage.hash::()); own_mp .update_from_addition(own_item, &msa, &addition_record) .unwrap(); @@ -967,7 +954,7 @@ mod ms_proof_tests { let sender_randomness: Digest = random(); let receiver_preimage: Digest = random(); let addition_record = - commit::(item, sender_randomness, receiver_preimage.hash::()); + commit(item, sender_randomness, receiver_preimage.hash::()); own_mp .update_from_addition(own_item, &msa, &addition_record) .unwrap(); @@ -990,7 +977,6 @@ mod ms_proof_tests { #[test] fn revert_update_from_addition_test() { - type H = Tip5; let mut rng = thread_rng(); let n = rng.next_u32() as usize % 100 + 1; // let n = 55; @@ -1001,7 +987,7 @@ mod ms_proof_tests { let mut own_item = None; // set up mutator set - let mut rms = empty_rusty_mutator_set::(); + let mut rms = empty_rusty_mutator_set(); let archival_mutator_set = rms.ams_mut(); // add items @@ -1010,8 +996,7 @@ mod ms_proof_tests { let item: Digest = random(); let sender_randomness: Digest = random(); let receiver_preimage: Digest = random(); - let addition_record = - commit::(item, sender_randomness, receiver_preimage.hash::()); + let addition_record = commit(item, sender_randomness, receiver_preimage.hash::()); addition_records.push(addition_record); let membership_proof = @@ -1076,7 +1061,6 @@ mod ms_proof_tests { #[test] fn revert_updates_mixed_test() { - type H = Tip5; let mut rng_seeder = thread_rng(); // let 
seed_integer = rng.next_u32(); let error_tuple: (usize, u32) = ( @@ -1095,7 +1079,7 @@ mod ms_proof_tests { let mut rng = StdRng::from_seed(seed_as_bytes); - let mut rms = empty_rusty_mutator_set::(); + let mut rms = empty_rusty_mutator_set(); let archival_mutator_set = rms.ams_mut(); let own_index = rng.next_u32() as usize % 10; @@ -1106,10 +1090,10 @@ mod ms_proof_tests { rates.insert("additions".to_owned(), 0.7); rates.insert("removals".to_owned(), 0.95); - let mut tracked_items_and_membership_proofs: Vec<(Digest, MsMembershipProof)> = vec![]; - let mut removed_items_and_membership_proofs: Vec<(Digest, MsMembershipProof, usize)> = + let mut tracked_items_and_membership_proofs: Vec<(Digest, MsMembershipProof)> = vec![]; + let mut removed_items_and_membership_proofs: Vec<(Digest, MsMembershipProof, usize)> = vec![]; - let mut records: Vec>> = vec![]; + let mut records: Vec> = vec![]; for i in 0..2000 { let sample: f64 = rng.gen(); @@ -1128,7 +1112,7 @@ mod ms_proof_tests { // generate addition record let addition_record = - commit::(item, sender_randomness, receiver_preimage.hash::()); + commit(item, sender_randomness, receiver_preimage.hash::()); // record membership proof let membership_proof = @@ -1335,11 +1319,10 @@ mod ms_proof_tests { #[test] fn test_decode_mutator_set_membership_proof() { - type H = Tip5; for _ in 0..100 { - let msmp = random_mutator_set_membership_proof::(); + let msmp = random_mutator_set_membership_proof(); let encoded = msmp.encode(); - let decoded: MsMembershipProof = *MsMembershipProof::decode(&encoded).unwrap(); + let decoded: MsMembershipProof = *MsMembershipProof::decode(&encoded).unwrap(); assert_eq!(msmp, decoded); } } diff --git a/src/util_types/mutator_set/mutator_set_accumulator.rs b/src/util_types/mutator_set/mutator_set_accumulator.rs index 6df84a728..59b06d516 100644 --- a/src/util_types/mutator_set/mutator_set_accumulator.rs +++ b/src/util_types/mutator_set/mutator_set_accumulator.rs @@ -1,3 +1,4 @@ +use 
crate::models::blockchain::shared::Hash; use crate::prelude::twenty_first; use get_size::GetSize; @@ -16,15 +17,15 @@ use super::{ }; #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, GetSize, BFieldCodec)] -pub struct MutatorSetAccumulator { - pub kernel: MutatorSetKernel>, +pub struct MutatorSetAccumulator { + pub kernel: MutatorSetKernel>, } -impl MutatorSetAccumulator { +impl MutatorSetAccumulator { pub fn new() -> Self { - let set_commitment = MutatorSetKernel::> { - aocl: MmrAccumulator::::new(vec![]), - swbf_inactive: MmrAccumulator::::new(vec![]), + let set_commitment = MutatorSetKernel::> { + aocl: MmrAccumulator::new(vec![]), + swbf_inactive: MmrAccumulator::new(vec![]), swbf_active: ActiveWindow::new(), }; @@ -34,11 +35,11 @@ impl MutatorSetAccumulator { } } -impl Default for MutatorSetAccumulator { +impl Default for MutatorSetAccumulator { fn default() -> Self { - let set_commitment = MutatorSetKernel::> { - aocl: MmrAccumulator::::new(vec![]), - swbf_inactive: MmrAccumulator::::new(vec![]), + let set_commitment = MutatorSetKernel::> { + aocl: MmrAccumulator::new(vec![]), + swbf_inactive: MmrAccumulator::new(vec![]), swbf_active: ActiveWindow::new(), }; @@ -48,22 +49,22 @@ impl Default for MutatorSetAccumulator { } } -impl MutatorSet for MutatorSetAccumulator { +impl MutatorSet for MutatorSetAccumulator { fn prove( &mut self, item: Digest, sender_randomness: Digest, receiver_preimage: Digest, - ) -> MsMembershipProof { + ) -> MsMembershipProof { self.kernel .prove(item, sender_randomness, receiver_preimage) } - fn verify(&self, item: Digest, membership_proof: &MsMembershipProof) -> bool { + fn verify(&self, item: Digest, membership_proof: &MsMembershipProof) -> bool { self.kernel.verify(item, membership_proof) } - fn drop(&self, item: Digest, membership_proof: &MsMembershipProof) -> RemovalRecord { + fn drop(&self, item: Digest, membership_proof: &MsMembershipProof) -> RemovalRecord { self.kernel.drop(item, membership_proof) } @@ -71,26 
+72,26 @@ impl MutatorSet for MutatorSetAccumulator) { + fn remove(&mut self, removal_record: &RemovalRecord) { self.kernel.remove_helper(removal_record); } fn hash(&self) -> Digest { let aocl_mmr_bagged = self.kernel.aocl.bag_peaks(); let inactive_swbf_bagged = self.kernel.swbf_inactive.bag_peaks(); - let active_swbf_bagged = H::hash(&self.kernel.swbf_active); + let active_swbf_bagged = Hash::hash(&self.kernel.swbf_active); let default = Digest::default(); - H::hash_pair( - H::hash_pair(aocl_mmr_bagged, inactive_swbf_bagged), - H::hash_pair(active_swbf_bagged, default), + Hash::hash_pair( + Hash::hash_pair(aocl_mmr_bagged, inactive_swbf_bagged), + Hash::hash_pair(active_swbf_bagged, default), ) } fn batch_remove( &mut self, - removal_records: Vec>, - preserved_membership_proofs: &mut [&mut MsMembershipProof], + removal_records: Vec, + preserved_membership_proofs: &mut [&mut MsMembershipProof], ) { self.kernel .batch_remove(removal_records, preserved_membership_proofs); @@ -105,7 +106,6 @@ mod ms_accumulator_tests { }; use itertools::{izip, Itertools}; use rand::{thread_rng, Rng}; - use twenty_first::shared_math::tip5::Tip5; use crate::util_types::mutator_set::mutator_set_trait::commit; @@ -114,9 +114,8 @@ mod ms_accumulator_tests { #[test] fn mutator_set_batch_remove_accumulator_test() { // Test the batch-remove function for mutator set accumulator - type H = Tip5; - let mut accumulator: MutatorSetAccumulator = MutatorSetAccumulator::default(); - let mut membership_proofs: Vec> = vec![]; + let mut accumulator: MutatorSetAccumulator = MutatorSetAccumulator::default(); + let mut membership_proofs: Vec = vec![]; let mut items: Vec = vec![]; // Add N elements to the MS @@ -124,8 +123,7 @@ mod ms_accumulator_tests { for _ in 0..num_additions { let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); - let addition_record = - commit::(item, sender_randomness, receiver_preimage.hash::()); + let addition_record = commit(item, sender_randomness, 
receiver_preimage.hash::()); let membership_proof = accumulator.prove(item, sender_randomness, receiver_preimage); MsMembershipProof::batch_update_from_addition( @@ -145,7 +143,7 @@ mod ms_accumulator_tests { // Now build removal records for about half of the elements let mut rng = rand::thread_rng(); let mut skipped_removes: Vec = vec![]; - let mut removal_records: Vec> = vec![]; + let mut removal_records: Vec = vec![]; for (mp, &item) in membership_proofs.iter().zip_eq(items.iter()) { let skipped = rng.gen_range(0.0..1.0) < 0.5; skipped_removes.push(skipped); @@ -185,12 +183,11 @@ mod ms_accumulator_tests { // This function mixes both archival and accumulator testing. // It *may* be considered bad style to do it this way, but there is a // lot of code duplication that is avoided by doing that. - type H = Tip5; - let mut accumulator: MutatorSetAccumulator = MutatorSetAccumulator::default(); - let mut rms_after = empty_rusty_mutator_set::(); + let mut accumulator: MutatorSetAccumulator = MutatorSetAccumulator::default(); + let mut rms_after = empty_rusty_mutator_set(); let archival_after_remove = rms_after.ams_mut(); - let mut rms_before = empty_rusty_mutator_set::(); + let mut rms_before = empty_rusty_mutator_set(); let archival_before_remove = rms_before.ams_mut(); let number_of_interactions = 100; let mut rng = rand::thread_rng(); @@ -200,8 +197,8 @@ mod ms_accumulator_tests { // 2. 
Randomly insert and remove `number_of_interactions` times // This should test both inserting/removing in an empty MS and in a non-empty MS for start_fill in [false, true] { - let mut membership_proofs_batch: Vec> = vec![]; - let mut membership_proofs_sequential: Vec> = vec![]; + let mut membership_proofs_batch: Vec = vec![]; + let mut membership_proofs_sequential: Vec = vec![]; let mut items: Vec = vec![]; let mut rands: Vec<(Digest, Digest)> = vec![]; let mut last_ms_commitment: Option = None; @@ -227,7 +224,7 @@ mod ms_accumulator_tests { let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); let addition_record: AdditionRecord = - commit::(item, sender_randomness, receiver_preimage.hash::()); + commit(item, sender_randomness, receiver_preimage.hash::()); let membership_proof_acc = accumulator.prove(item, sender_randomness, receiver_preimage); @@ -299,8 +296,7 @@ mod ms_accumulator_tests { let _removal_rand = rands.remove(item_index); // generate removal record - let removal_record: RemovalRecord = - accumulator.drop(removal_item, &removal_mp); + let removal_record: RemovalRecord = accumulator.drop(removal_item, &removal_mp); assert!(removal_record.validate(&accumulator.kernel)); // update membership proofs @@ -411,12 +407,10 @@ mod ms_accumulator_tests { #[test] fn test_mutator_set_accumulator_decode() { - type H = Tip5; for _ in 0..100 { - let msa = random_mutator_set_accumulator::(); + let msa = random_mutator_set_accumulator(); let encoded = msa.encode(); - let decoded: MutatorSetAccumulator = - *MutatorSetAccumulator::decode(&encoded).unwrap(); + let decoded: MutatorSetAccumulator = *MutatorSetAccumulator::decode(&encoded).unwrap(); assert_eq!(msa, decoded); } } @@ -432,7 +426,7 @@ mod ms_accumulator_tests { WINDOW_SIZE, BATCH_SIZE, CHUNK_SIZE, NUM_TRIALS ); let mut msa = MutatorSetAccumulator::new(); - let mut items_and_membership_proofs: Vec<(Digest, MsMembershipProof)> = vec![]; + let mut items_and_membership_proofs: Vec<(Digest, 
MsMembershipProof)> = vec![]; let target_set_size = 100; let num_iterations = 10000; @@ -463,7 +457,7 @@ mod ms_accumulator_tests { let item = rng.gen::(); let sender_randomness = rng.gen::(); let receiver_preimage = rng.gen::(); - let addition_record = commit::(item, sender_randomness, receiver_preimage); + let addition_record = commit(item, sender_randomness, receiver_preimage); for (it, mp) in items_and_membership_proofs.iter_mut() { mp.update_from_addition(*it, &msa, &addition_record) .unwrap(); diff --git a/src/util_types/mutator_set/mutator_set_kernel.rs b/src/util_types/mutator_set/mutator_set_kernel.rs index 7dc53138e..135b53abf 100644 --- a/src/util_types/mutator_set/mutator_set_kernel.rs +++ b/src/util_types/mutator_set/mutator_set_kernel.rs @@ -1,3 +1,4 @@ +use crate::models::blockchain::shared::Hash; use crate::prelude::twenty_first; use get_size::GetSize; @@ -39,14 +40,14 @@ pub enum MutatorSetKernelError { } #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, GetSize)] -pub struct MutatorSetKernel> { +pub struct MutatorSetKernel> { pub aocl: MMR, pub swbf_inactive: MMR, - pub swbf_active: ActiveWindow, + pub swbf_active: ActiveWindow, } -// FIXME: Apply over-sampling to circumvent risk of duplicates. -pub fn get_swbf_indices( +/// Get the (absolute) indices for removing this item from the mutator set. 
+pub fn get_swbf_indices( item: Digest, sender_randomness: Digest, receiver_preimage: Digest, @@ -74,9 +75,9 @@ pub fn get_swbf_indices( "Input to sponge must be a multiple digest length" ); - let mut sponge = ::init(); - H::absorb_repeatedly(&mut sponge, input.iter()); - H::sample_indices(&mut sponge, WINDOW_SIZE, NUM_TRIALS as usize) + let mut sponge = ::init(); + Hash::absorb_repeatedly(&mut sponge, input.iter()); + Hash::sample_indices(&mut sponge, WINDOW_SIZE, NUM_TRIALS as usize) .into_iter() .map(|sample_index| sample_index as u128 + batch_offset) .collect_vec() @@ -84,10 +85,10 @@ pub fn get_swbf_indices( .unwrap() } -impl> MutatorSetKernel { +impl> MutatorSetKernel { /// Generates a removal record with which to update the set commitment. - pub fn drop(&self, item: Digest, membership_proof: &MsMembershipProof) -> RemovalRecord { - let indices: AbsoluteIndexSet = AbsoluteIndexSet::new(&get_swbf_indices::( + pub fn drop(&self, item: Digest, membership_proof: &MsMembershipProof) -> RemovalRecord { + let indices: AbsoluteIndexSet = AbsoluteIndexSet::new(&get_swbf_indices( item, membership_proof.sender_randomness, membership_proof.receiver_preimage, @@ -145,7 +146,7 @@ impl> MutatorSetKernel { // if window slides, update filter // First update the inactive part of the SWBF, the SWBF MMR let new_chunk: Chunk = self.swbf_active.slid_chunk(); - let chunk_digest: Digest = H::hash(&new_chunk); + let chunk_digest: Digest = Hash::hash(&new_chunk); let new_chunk_index = self.swbf_inactive.count_leaves(); self.swbf_inactive.append(chunk_digest); // ignore auth path @@ -161,12 +162,12 @@ impl> MutatorSetKernel { /// Remove a record and return the chunks that have been updated in this process, /// after applying the update. Does not mutate the removal record. 
- pub fn remove_helper(&mut self, removal_record: &RemovalRecord) -> HashMap { + pub fn remove_helper(&mut self, removal_record: &RemovalRecord) -> HashMap { let batch_index = self.get_batch_index(); let active_window_start = batch_index as u128 * CHUNK_SIZE as u128; // insert all indices - let mut new_target_chunks: ChunkDictionary = removal_record.target_chunks.clone(); + let mut new_target_chunks: ChunkDictionary = removal_record.target_chunks.clone(); let chunkindices_to_indices_dict: HashMap> = removal_record.get_chunkidx_to_indices_dict(); @@ -209,8 +210,8 @@ impl> MutatorSetKernel { let all_leafs = new_target_chunks .dictionary .values() - .map(|(_p, chunk)| H::hash(chunk)); - let mutation_data: Vec<(MmrMembershipProof, Digest)> = + .map(|(_p, chunk)| Hash::hash(chunk)); + let mutation_data: Vec<(MmrMembershipProof, Digest)> = all_mmr_membership_proofs.zip(all_leafs).collect(); // If we want to update the membership proof with this removal, we @@ -232,13 +233,13 @@ impl> MutatorSetKernel { item: Digest, sender_randomness: Digest, receiver_preimage: Digest, - ) -> MsMembershipProof { + ) -> MsMembershipProof { // compute commitment - let item_commitment = H::hash_pair(item, sender_randomness); + let item_commitment = Hash::hash_pair(item, sender_randomness); // simulate adding to commitment list let auth_path_aocl = self.aocl.to_accumulator().append(item_commitment); - let target_chunks: ChunkDictionary = ChunkDictionary::default(); + let target_chunks: ChunkDictionary = ChunkDictionary::default(); // return membership proof MsMembershipProof { @@ -249,7 +250,7 @@ impl> MutatorSetKernel { } } - pub fn verify(&self, item: Digest, membership_proof: &MsMembershipProof) -> bool { + pub fn verify(&self, item: Digest, membership_proof: &MsMembershipProof) -> bool { // If data index does not exist in AOCL, return false // This also ensures that no "future" indices will be // returned from `get_indices`, so we don't have to check for @@ -259,9 +260,9 @@ impl> 
MutatorSetKernel { } // verify that a commitment to the item lives in the aocl mmr - let leaf = H::hash_pair( - H::hash_pair(item, membership_proof.sender_randomness), - H::hash_pair( + let leaf = Hash::hash_pair( + Hash::hash_pair(item, membership_proof.sender_randomness), + Hash::hash_pair( membership_proof.receiver_preimage, Digest::new([BFieldElement::zero(); DIGEST_LENGTH]), ), @@ -285,7 +286,7 @@ impl> MutatorSetKernel { let window_start = current_batch_index as u128 * CHUNK_SIZE as u128; // Get all bloom filter indices - let all_indices = AbsoluteIndexSet::new(&get_swbf_indices::( + let all_indices = AbsoluteIndexSet::new(&get_swbf_indices( item, membership_proof.sender_randomness, membership_proof.receiver_preimage, @@ -305,7 +306,7 @@ impl> MutatorSetKernel { break 'outer; } - let mp_and_chunk: &(mmr::mmr_membership_proof::MmrMembershipProof, Chunk) = + let mp_and_chunk: &(mmr::mmr_membership_proof::MmrMembershipProof, Chunk) = membership_proof .target_chunks .dictionary @@ -313,7 +314,7 @@ impl> MutatorSetKernel { .unwrap(); let (valid_auth_path, _) = mp_and_chunk.0.verify( &self.swbf_inactive.get_peaks(), - H::hash(&mp_and_chunk.1), + Hash::hash(&mp_and_chunk.1), self.swbf_inactive.count_leaves(), ); @@ -346,8 +347,8 @@ impl> MutatorSetKernel { /// { chunk index => updated_chunk }. pub fn batch_remove( &mut self, - mut removal_records: Vec>, - preserved_membership_proofs: &mut [&mut MsMembershipProof], + mut removal_records: Vec, + preserved_membership_proofs: &mut [&mut MsMembershipProof], ) -> HashMap { let batch_index = self.get_batch_index(); let active_window_start = batch_index as u128 * CHUNK_SIZE as u128; @@ -377,19 +378,19 @@ impl> MutatorSetKernel { // Collect all affected chunks as they look before these removal records are applied // These chunks are part of the removal records, so we fetch them there. 
- let mut mutation_data_preimage: HashMap)> = + let mut mutation_data_preimage: HashMap)> = HashMap::new(); for removal_record in removal_records.iter_mut() { for (chunk_index, (mmr_mp, chunk)) in removal_record.target_chunks.dictionary.iter_mut() { - let chunk_hash = H::hash(chunk); + let chunk_hash = Hash::hash(chunk); let prev_val = mutation_data_preimage.insert(*chunk_index, (chunk, mmr_mp.to_owned())); // Sanity check that all removal records agree on both chunks and MMR membership // proofs. if let Some((chnk, mm)) = prev_val { - assert!(mm == *mmr_mp && chunk_hash == H::hash(chnk)) + assert!(mm == *mmr_mp && chunk_hash == Hash::hash(chnk)) } } } @@ -417,16 +418,16 @@ impl> MutatorSetKernel { // Calculate the digests of the affected leafs in the inactive part of the sliding-window // Bloom filter such that we can apply a batch-update operation to the MMR through which // this part of the Bloom filter is represented. - let swbf_inactive_mutation_data: Vec<(MmrMembershipProof, Digest)> = + let swbf_inactive_mutation_data: Vec<(MmrMembershipProof, Digest)> = mutation_data_preimage .into_values() - .map(|x| (x.1, H::hash(x.0))) + .map(|x| (x.1, Hash::hash(x.0))) .collect(); // Create a vector of pointers to the MMR-membership part of the mutator set membership // proofs that we want to preserve. This is used as input to a batch-call to the // underlying MMR. - let mut preseved_mmr_membership_proofs: Vec<&mut MmrMembershipProof> = + let mut preseved_mmr_membership_proofs: Vec<&mut MmrMembershipProof> = preserved_membership_proofs .iter_mut() .flat_map(|x| { @@ -450,7 +451,7 @@ impl> MutatorSetKernel { /// Check if a removal record can be applied to a mutator set. Returns false if either /// the MMR membership proofs are unsynced, or if all its indices are already set. 
- pub fn can_remove(&self, removal_record: &RemovalRecord) -> bool { + pub fn can_remove(&self, removal_record: &RemovalRecord) -> bool { let mut have_absent_index = false; if !removal_record.validate(self) { return false; @@ -486,9 +487,7 @@ impl> MutatorSetKernel { } } -impl + BFieldCodec> BFieldCodec - for MutatorSetKernel -{ +impl + BFieldCodec> BFieldCodec for MutatorSetKernel { type Error = anyhow::Error; fn decode(sequence: &[BFieldElement]) -> anyhow::Result> { let mut index = 0; @@ -564,7 +563,6 @@ mod accumulation_scheme_tests { use rand::Rng; use tasm_lib::twenty_first::util_types::storage_vec::StorageVec; - use twenty_first::shared_math::tip5::Tip5; use twenty_first::util_types::mmr::mmr_accumulator::MmrAccumulator; use crate::config_models::network::Network; @@ -589,8 +587,8 @@ mod accumulation_scheme_tests { #[test] fn get_batch_index_test() { // Verify that the method to get batch index returns sane results - type H = Tip5; - let mut mutator_set = MutatorSetAccumulator::::default(); + + let mut mutator_set = MutatorSetAccumulator::default(); assert_eq!( 0, mutator_set.kernel.get_batch_index(), @@ -599,8 +597,7 @@ mod accumulation_scheme_tests { for i in 0..BATCH_SIZE { let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); - let addition_record = - commit::(item, sender_randomness, receiver_preimage.hash::()); + let addition_record = commit(item, sender_randomness, receiver_preimage.hash::()); mutator_set.add(&addition_record); assert_eq!( 0, @@ -611,7 +608,7 @@ mod accumulation_scheme_tests { } let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); - let addition_record = commit::(item, sender_randomness, receiver_preimage.hash::()); + let addition_record = commit(item, sender_randomness, receiver_preimage.hash::()); mutator_set.add(&addition_record); assert_eq!( 1, @@ -622,13 +619,11 @@ mod accumulation_scheme_tests { #[test] fn mutator_set_hash_test() { - type H = Tip5; - - let empty_set = 
MutatorSetAccumulator::::default(); + let empty_set = MutatorSetAccumulator::default(); let empty_hash = empty_set.hash(); // Add one element to append-only commitment list - let mut set_with_aocl_append = MutatorSetAccumulator::::default(); + let mut set_with_aocl_append = MutatorSetAccumulator::default(); let (item0, _sender_randomness, _receiver_preimage) = make_item_and_randomnesses(); @@ -641,7 +636,7 @@ mod accumulation_scheme_tests { ); // Manipulate inactive SWBF - let mut set_with_swbf_inactive_append = MutatorSetAccumulator::::default(); + let mut set_with_swbf_inactive_append = MutatorSetAccumulator::default(); set_with_swbf_inactive_append .kernel .swbf_inactive @@ -679,11 +674,10 @@ mod accumulation_scheme_tests { // Test that `get_indices` behaves as expected, i.e. // that it always returns something of length `NUM_TRIALS`, and that the // returned values are in the expected range. - type H = Tip5; let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); let ret: [u128; NUM_TRIALS as usize] = - get_swbf_indices::(item, sender_randomness, receiver_preimage, 0); + get_swbf_indices(item, sender_randomness, receiver_preimage, 0); assert_eq!(NUM_TRIALS as usize, ret.len()); assert!(ret.iter().all(|&x| x < WINDOW_SIZE as u128)); } @@ -692,18 +686,18 @@ mod accumulation_scheme_tests { fn ms_get_indices_test_big() { // Test that `get_indices` behaves as expected. I.e. that it returns indices in the correct range, // and always returns something of length `NUM_TRIALS`. 
- type H = Tip5; + for _ in 0..1000 { let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); let ret: [u128; NUM_TRIALS as usize] = - get_swbf_indices::(item, sender_randomness, receiver_preimage, 0); + get_swbf_indices(item, sender_randomness, receiver_preimage, 0); assert_eq!(NUM_TRIALS as usize, ret.len()); assert!(ret.iter().all(|&x| x < WINDOW_SIZE as u128)); } for _ in 0..1000 { let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); - let ret: [u128; NUM_TRIALS as usize] = get_swbf_indices::( + let ret: [u128; NUM_TRIALS as usize] = get_swbf_indices( item, sender_randomness, receiver_preimage, @@ -719,10 +713,8 @@ mod accumulation_scheme_tests { #[test] fn init_test() { - type H = Tip5; - - let accumulator = MutatorSetAccumulator::::default(); - let mut rms = empty_rusty_mutator_set::(); + let accumulator = MutatorSetAccumulator::default(); + let mut rms = empty_rusty_mutator_set(); let archival = rms.ams_mut(); // Verify that function to get batch index does not overflow for the empty MS @@ -742,16 +734,16 @@ mod accumulation_scheme_tests { fn verify_future_indices_test() { // Ensure that `verify` does not crash when given a membership proof // that represents a future addition to the AOCL. 
- type H = Tip5; - let mut mutator_set = MutatorSetAccumulator::::default().kernel; - let empty_mutator_set = MutatorSetAccumulator::::default().kernel; + + let mut mutator_set = MutatorSetAccumulator::default().kernel; + let empty_mutator_set = MutatorSetAccumulator::default().kernel; for _ in 0..2 * BATCH_SIZE + 2 { let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); let addition_record: AdditionRecord = - commit::(item, sender_randomness, receiver_preimage.hash::()); - let membership_proof: MsMembershipProof = + commit(item, sender_randomness, receiver_preimage.hash::()); + let membership_proof: MsMembershipProof = mutator_set.prove(item, sender_randomness, receiver_preimage); mutator_set.add_helper(&addition_record); assert!(mutator_set.verify(item, &membership_proof)); @@ -763,23 +755,24 @@ mod accumulation_scheme_tests { #[test] fn test_membership_proof_update_from_add() { - type H = Tip5; - - let mut mutator_set = MutatorSetAccumulator::::default(); + let mut mutator_set = MutatorSetAccumulator::default(); let (own_item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); - let addition_record = - commit::(own_item, sender_randomness, receiver_preimage.hash::()); + let addition_record = commit( + own_item, + sender_randomness, + receiver_preimage.hash::(), + ); let mut membership_proof = mutator_set.prove(own_item, sender_randomness, receiver_preimage); mutator_set.kernel.add_helper(&addition_record); // Update membership proof with add operation. Verify that it has changed, and that it now fails to verify. 
let (new_item, new_sender_randomness, new_receiver_preimage) = make_item_and_randomnesses(); - let new_addition_record = commit::( + let new_addition_record = commit( new_item, new_sender_randomness, - new_receiver_preimage.hash::(), + new_receiver_preimage.hash::(), ); let original_membership_proof = membership_proof.clone(); let changed_mp = match membership_proof.update_from_addition( @@ -822,10 +815,9 @@ mod accumulation_scheme_tests { #[test] fn membership_proof_updating_from_add_pbt() { - type H = Tip5; let mut rng = thread_rng(); - let mut mutator_set = MutatorSetAccumulator::::default(); + let mut mutator_set = MutatorSetAccumulator::default(); let num_additions = rng.gen_range(0..=100i32); println!( @@ -833,14 +825,13 @@ mod accumulation_scheme_tests { num_additions ); - let mut membership_proofs_and_items: Vec<(MsMembershipProof, Digest)> = vec![]; + let mut membership_proofs_and_items: Vec<(MsMembershipProof, Digest)> = vec![]; for i in 0..num_additions { println!("loop iteration {}", i); let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); - let addition_record = - commit::(item, sender_randomness, receiver_preimage.hash::()); + let addition_record = commit(item, sender_randomness, receiver_preimage.hash::()); let membership_proof = mutator_set.prove(item, sender_randomness, receiver_preimage); // Update all membership proofs @@ -869,13 +860,10 @@ mod accumulation_scheme_tests { #[test] fn test_add_and_prove() { - type H = Tip5; - - let mut mutator_set = MutatorSetAccumulator::::default(); + let mut mutator_set = MutatorSetAccumulator::default(); let (item0, sender_randomness0, receiver_preimage0) = make_item_and_randomnesses(); - let addition_record = - commit::(item0, sender_randomness0, receiver_preimage0.hash::()); + let addition_record = commit(item0, sender_randomness0, receiver_preimage0.hash::()); let membership_proof = mutator_set.prove(item0, sender_randomness0, receiver_preimage0); 
assert!(!mutator_set.verify(item0, &membership_proof)); @@ -886,7 +874,7 @@ mod accumulation_scheme_tests { // Insert a new item and verify that this still works let (item1, sender_randomness1, receiver_preimage1) = make_item_and_randomnesses(); - let new_ar = commit::(item1, sender_randomness1, receiver_preimage1.hash::()); + let new_ar = commit(item1, sender_randomness1, receiver_preimage1.hash::()); let new_mp = mutator_set.prove(item1, sender_randomness1, receiver_preimage1); assert!(!mutator_set.verify(item1, &new_mp)); @@ -899,7 +887,7 @@ mod accumulation_scheme_tests { // position. for _ in 0..2 * BATCH_SIZE + 4 { let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); - let other_ar = commit::(item, sender_randomness, receiver_preimage.hash::()); + let other_ar = commit(item, sender_randomness, receiver_preimage.hash::()); let other_mp = mutator_set.prove(item, sender_randomness, receiver_preimage); assert!(!mutator_set.verify(item, &other_mp)); @@ -910,8 +898,7 @@ mod accumulation_scheme_tests { #[test] fn batch_update_from_addition_and_removal_test() { - type H = Tip5; - let mut mutator_set = MutatorSetAccumulator::::default(); + let mut mutator_set = MutatorSetAccumulator::default(); // It's important to test number of additions around the shifting of the window, // i.e. around batch size. 
@@ -926,20 +913,23 @@ mod accumulation_scheme_tests { 6 * BATCH_SIZE + 1, ]; - let mut membership_proofs: Vec> = vec![]; + let mut membership_proofs: Vec = vec![]; let mut items = vec![]; for num_additions in num_additions_list { for _ in 0..num_additions { let (new_item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); - let addition_record = - commit::(new_item, sender_randomness, receiver_preimage.hash::()); + let addition_record = commit( + new_item, + sender_randomness, + receiver_preimage.hash::(), + ); let membership_proof = mutator_set.prove(new_item, sender_randomness, receiver_preimage); // Update *all* membership proofs with newly added item - let batch_update_res = MsMembershipProof::::batch_update_from_addition( + let batch_update_res = MsMembershipProof::batch_update_from_addition( &mut membership_proofs.iter_mut().collect::>(), &items, &mutator_set.kernel, @@ -965,7 +955,7 @@ mod accumulation_scheme_tests { assert!(mutator_set.verify(item, &mp)); // generate removal record - let removal_record: RemovalRecord = mutator_set.drop(item, &mp); + let removal_record: RemovalRecord = mutator_set.drop(item, &mp); assert!(removal_record.validate(&mutator_set.kernel)); assert!(mutator_set.kernel.can_remove(&removal_record)); @@ -989,19 +979,20 @@ mod accumulation_scheme_tests { #[test] fn test_multiple_adds() { - type H = Tip5; - - let mut mutator_set = MutatorSetAccumulator::::default(); + let mut mutator_set = MutatorSetAccumulator::default(); let num_additions = 65; - let mut items_and_membership_proofs: Vec<(Digest, MsMembershipProof)> = vec![]; + let mut items_and_membership_proofs: Vec<(Digest, MsMembershipProof)> = vec![]; for _ in 0..num_additions { let (new_item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); - let addition_record = - commit::(new_item, sender_randomness, receiver_preimage.hash::()); + let addition_record = commit( + new_item, + sender_randomness, + receiver_preimage.hash::(), + ); let 
membership_proof = mutator_set.prove(new_item, sender_randomness, receiver_preimage); @@ -1049,7 +1040,7 @@ mod accumulation_scheme_tests { assert!(mutator_set.verify(item, &mp)); // generate removal record - let removal_record: RemovalRecord = mutator_set.drop(item, &mp); + let removal_record: RemovalRecord = mutator_set.drop(item, &mp); assert!(removal_record.validate(&mutator_set.kernel)); assert!(mutator_set.kernel.can_remove(&removal_record)); (i..items_and_membership_proofs.len()).for_each(|k| { @@ -1092,10 +1083,9 @@ mod accumulation_scheme_tests { // in the runtime. This test is to verify that that does not happen. // Cf. https://stackoverflow.com/questions/72618777/how-to-deserialize-a-nested-big-array // and https://stackoverflow.com/questions/72621410/how-do-i-use-serde-stacker-in-my-deserialize-implementation - type H = Tip5; - type Mmr = MmrAccumulator; - type Ms = MutatorSetKernel; - let mut mutator_set: Ms = MutatorSetAccumulator::::default().kernel; + type Mmr = MmrAccumulator; + type Ms = MutatorSetKernel; + let mut mutator_set: Ms = MutatorSetAccumulator::default().kernel; let json_empty = serde_json::to_string(&mutator_set).unwrap(); println!("json = \n{}", json_empty); diff --git a/src/util_types/mutator_set/mutator_set_trait.rs b/src/util_types/mutator_set/mutator_set_trait.rs index cd6a71769..23674da15 100644 --- a/src/util_types/mutator_set/mutator_set_trait.rs +++ b/src/util_types/mutator_set/mutator_set_trait.rs @@ -1,3 +1,4 @@ +use crate::models::blockchain::shared::Hash; use crate::prelude::twenty_first; use twenty_first::shared_math::tip5::Digest; @@ -9,17 +10,14 @@ use super::removal_record::RemovalRecord; /// Generates an addition record from an item and explicit random- /// ness. The addition record is itself a commitment to the item. 
-pub fn commit( - item: Digest, - sender_randomness: Digest, - receiver_digest: Digest, -) -> AdditionRecord { - let canonical_commitment = H::hash_pair(H::hash_pair(item, sender_randomness), receiver_digest); +pub fn commit(item: Digest, sender_randomness: Digest, receiver_digest: Digest) -> AdditionRecord { + let canonical_commitment = + Hash::hash_pair(Hash::hash_pair(item, sender_randomness), receiver_digest); AdditionRecord::new(canonical_commitment) } -pub trait MutatorSet { +pub trait MutatorSet { /// Generates a membership proof that will be valid when the item /// is added to the mutator set. fn prove( @@ -27,27 +25,27 @@ pub trait MutatorSet { item: Digest, sender_randomness: Digest, receiver_preimage: Digest, - ) -> MsMembershipProof; + ) -> MsMembershipProof; - fn verify(&self, item: Digest, membership_proof: &MsMembershipProof) -> bool; + fn verify(&self, item: Digest, membership_proof: &MsMembershipProof) -> bool; /// Generates a removal record with which to update the set commitment. - fn drop(&self, item: Digest, membership_proof: &MsMembershipProof) -> RemovalRecord; + fn drop(&self, item: Digest, membership_proof: &MsMembershipProof) -> RemovalRecord; /// Updates the set-commitment with an addition record. fn add(&mut self, addition_record: &AdditionRecord); /// Updates the mutator set so as to remove the item determined by /// its removal record. - fn remove(&mut self, removal_record: &RemovalRecord); + fn remove(&mut self, removal_record: &RemovalRecord); /// batch_remove /// Apply multiple removal records, and update a list of membership proofs to /// be valid after the application of these removal records. 
fn batch_remove( &mut self, - removal_records: Vec>, - preserved_membership_proofs: &mut [&mut MsMembershipProof], + removal_records: Vec, + preserved_membership_proofs: &mut [&mut MsMembershipProof], ); /// hash diff --git a/src/util_types/mutator_set/removal_record.rs b/src/util_types/mutator_set/removal_record.rs index 71f1f6772..443c7be1f 100644 --- a/src/util_types/mutator_set/removal_record.rs +++ b/src/util_types/mutator_set/removal_record.rs @@ -1,3 +1,4 @@ +use crate::models::blockchain::shared::Hash; use crate::prelude::twenty_first; use get_size::GetSize; @@ -9,7 +10,6 @@ use serde::ser::SerializeTuple; use serde::Deserialize; use serde_derive::Serialize; use std::collections::{HashMap, HashSet}; -use std::error::Error; use std::marker::PhantomData; use std::ops::IndexMut; use tasm_lib::structure::tasm_object::TasmObject; @@ -127,29 +127,29 @@ impl<'de> Deserialize<'de> for AbsoluteIndexSet { } #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, GetSize, BFieldCodec, TasmObject)] -pub struct RemovalRecord { +pub struct RemovalRecord { pub absolute_indices: AbsoluteIndexSet, - pub target_chunks: ChunkDictionary, + pub target_chunks: ChunkDictionary, } -impl RemovalRecord { +impl RemovalRecord { /// Update a batch of removal records that are synced to a given mutator set, given /// that that mutator set will be updated with an addition. (The addition record /// does not matter; all necessary information is in the mutator set.) 
- pub fn batch_update_from_addition>( + pub fn batch_update_from_addition>( removal_records: &mut [&mut Self], - mutator_set: &mut MutatorSetKernel, + mutator_set: &mut MutatorSetKernel, ) { let new_item_index = mutator_set.aocl.count_leaves(); // if window does not slide, do nothing - if !MutatorSetKernel::::window_slides(new_item_index) { + if !MutatorSetKernel::::window_slides(new_item_index) { return; } // window does slide let new_chunk = mutator_set.swbf_active.slid_chunk(); - let new_chunk_digest: Digest = H::hash(&new_chunk); + let new_chunk_digest: Digest = Hash::hash(&new_chunk); // Insert the new chunk digest into the accumulator-version of the // SWBF MMR to get its authentication path. It's important to convert the MMR @@ -157,8 +157,8 @@ impl RemovalRecord { // a whole archival MMR for this operation, as the archival MMR can be in the // size of gigabytes, whereas the MMR accumulator should be in the size of // kilobytes. - let mut mmra: MmrAccumulator = mutator_set.swbf_inactive.to_accumulator(); - let new_swbf_auth_path: mmr::mmr_membership_proof::MmrMembershipProof = + let mut mmra: MmrAccumulator = mutator_set.swbf_inactive.to_accumulator(); + let new_swbf_auth_path: mmr::mmr_membership_proof::MmrMembershipProof = mmra.append(new_chunk_digest); // Collect all indices for all removal records that are being updated @@ -222,7 +222,7 @@ impl RemovalRecord { // So relegating that bookkeeping to this function instead would not be more // efficient. 
let mut mmr_membership_proofs_for_append: Vec< - &mut mmr::mmr_membership_proof::MmrMembershipProof, + &mut mmr::mmr_membership_proof::MmrMembershipProof, > = vec![]; for (i, rr) in removal_records.iter_mut().enumerate() { if rrs_for_batch_append.contains(&i) { @@ -233,7 +233,7 @@ impl RemovalRecord { } // Perform the update of all the MMR membership proofs contained in the removal records - mmr::mmr_membership_proof::MmrMembershipProof::::batch_update_from_append( + mmr::mmr_membership_proof::MmrMembershipProof::::batch_update_from_append( &mut mmr_membership_proofs_for_append, mutator_set.swbf_inactive.count_leaves(), new_chunk_digest, @@ -243,11 +243,11 @@ impl RemovalRecord { pub fn batch_update_from_remove( removal_records: &mut [&mut Self], - applied_removal_record: &RemovalRecord, - ) -> Result<(), Box> { + applied_removal_record: &RemovalRecord, + ) { // Set all chunk values to the new values and calculate the mutation argument // for the batch updating of the MMR membership proofs. - let mut chunk_dictionaries: Vec<&mut ChunkDictionary> = removal_records + let mut chunk_dictionaries: Vec<&mut ChunkDictionary> = removal_records .iter_mut() .map(|mp| &mut mp.target_chunks) .collect(); @@ -258,7 +258,7 @@ impl RemovalRecord { ); // Collect all the MMR membership proofs from the chunk dictionaries. 
- let mut own_mmr_mps: Vec<&mut mmr::mmr_membership_proof::MmrMembershipProof> = vec![]; + let mut own_mmr_mps: Vec<&mut mmr::mmr_membership_proof::MmrMembershipProof> = vec![]; for chunk_dict in chunk_dictionaries.iter_mut() { for (_, (mp, _)) in chunk_dict.dictionary.iter_mut() { own_mmr_mps.push(mp); @@ -270,21 +270,19 @@ impl RemovalRecord { &mut own_mmr_mps, mutation_argument, ); - - Ok(()) } /// Validates that a removal record is synchronized against the inactive part of the SWBF - pub fn validate(&self, mutator_set: &MutatorSetKernel) -> bool + pub fn validate(&self, mutator_set: &MutatorSetKernel) -> bool where - M: Mmr, + M: Mmr, { let peaks = mutator_set.swbf_inactive.get_peaks(); self.target_chunks .dictionary .iter() .all(|(_i, (proof, chunk))| { - let leaf_digest = H::hash(chunk); + let leaf_digest = Hash::hash(chunk); let leaf_count = mutator_set.swbf_inactive.count_leaves(); let (verified, _final_state) = proof.verify(&peaks, leaf_digest, leaf_count); @@ -299,7 +297,7 @@ impl RemovalRecord { } /// Generate a pseudorandom removal record from the given seed, for testing purposes. 
-pub fn pseudorandom_removal_record(seed: [u8; 32]) -> RemovalRecord { +pub fn pseudorandom_removal_record(seed: [u8; 32]) -> RemovalRecord { let mut rng: StdRng = SeedableRng::from_seed(seed); let absolute_indices = AbsoluteIndexSet::new( &(0..NUM_TRIALS as usize) @@ -321,7 +319,6 @@ mod removal_record_tests { use itertools::Itertools; use rand::seq::SliceRandom; use rand::{thread_rng, Rng, RngCore}; - use twenty_first::shared_math::tip5::Tip5; use crate::util_types::mutator_set::addition_record::AdditionRecord; use crate::util_types::mutator_set::ms_membership_proof::MsMembershipProof; @@ -332,13 +329,11 @@ mod removal_record_tests { use super::*; - fn get_item_mp_and_removal_record() -> (Digest, MsMembershipProof, RemovalRecord) { - type H = Tip5; - let mut accumulator: MutatorSetAccumulator = MutatorSetAccumulator::default(); + fn get_item_mp_and_removal_record() -> (Digest, MsMembershipProof, RemovalRecord) { + let mut accumulator: MutatorSetAccumulator = MutatorSetAccumulator::default(); let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); - let mp: MsMembershipProof = - accumulator.prove(item, sender_randomness, receiver_preimage); - let removal_record: RemovalRecord = accumulator.drop(item, &mp); + let mp: MsMembershipProof = accumulator.prove(item, sender_randomness, receiver_preimage); + let removal_record: RemovalRecord = accumulator.drop(item, &mp); (item, mp, removal_record) } @@ -372,22 +367,20 @@ mod removal_record_tests { #[test] fn hash_test() { - type H = Tip5; - let (_item, _mp, removal_record) = get_item_mp_and_removal_record(); - let mut removal_record_alt: RemovalRecord = removal_record.clone(); + let mut removal_record_alt: RemovalRecord = removal_record.clone(); assert_eq!( - H::hash(&removal_record), - H::hash(&removal_record_alt), + Hash::hash(&removal_record), + Hash::hash(&removal_record_alt), "Same removal record must hash to same value" ); // Verify that changing the absolute indices, changes the hash value 
removal_record_alt.absolute_indices.to_array_mut()[NUM_TRIALS as usize / 4] += 1; assert_ne!( - H::hash(&removal_record), - H::hash(&removal_record_alt), + Hash::hash(&removal_record), + Hash::hash(&removal_record_alt), "Changing an index must produce a new hash" ); } @@ -419,12 +412,11 @@ mod removal_record_tests { // TODO: You could argue that this test doesn't belong here, as it tests the behavior of // an imported library. I included it here, though, because the setup seems a bit clumsy // to me so far. - type H = Tip5; let (_item, _mp, removal_record) = get_item_mp_and_removal_record(); let json: String = serde_json::to_string(&removal_record).unwrap(); - let s_back = serde_json::from_str::>(&json).unwrap(); + let s_back = serde_json::from_str::(&json).unwrap(); assert_eq!(s_back.absolute_indices, removal_record.absolute_indices); assert_eq!(s_back.target_chunks, removal_record.target_chunks); } @@ -432,11 +424,10 @@ mod removal_record_tests { #[test] fn simple_remove_test() { // Verify that a single element can be added to and removed from the mutator set - type H = Tip5; - let mut accumulator: MutatorSetAccumulator = MutatorSetAccumulator::default(); + let mut accumulator: MutatorSetAccumulator = MutatorSetAccumulator::default(); let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); let addition_record: AdditionRecord = - commit::(item, sender_randomness, receiver_preimage.hash::()); + commit(item, sender_randomness, receiver_preimage.hash::()); let mp = accumulator.prove(item, sender_randomness, receiver_preimage); assert!( @@ -459,19 +450,18 @@ mod removal_record_tests { #[test] fn batch_update_from_addition_pbt() { // Verify that a single element can be added to and removed from the mutator set - type H = Tip5; let test_iterations = 10; for _ in 0..test_iterations { - let mut accumulator: MutatorSetAccumulator = MutatorSetAccumulator::default(); - let mut removal_records: Vec<(usize, RemovalRecord)> = vec![]; + let mut 
accumulator: MutatorSetAccumulator = MutatorSetAccumulator::default(); + let mut removal_records: Vec<(usize, RemovalRecord)> = vec![]; let mut items = vec![]; let mut mps = vec![]; for i in 0..2 * BATCH_SIZE + 4 { let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); let addition_record: AdditionRecord = - commit::(item, sender_randomness, receiver_preimage.hash::()); + commit(item, sender_randomness, receiver_preimage.hash::()); let mp = accumulator.prove(item, sender_randomness, receiver_preimage); // Update all removal records from addition, then add the element @@ -544,10 +534,10 @@ mod removal_record_tests { #[test] fn batch_update_from_addition_and_remove_pbt() { // Verify that a single element can be added to and removed from the mutator set - type H = Tip5; - let mut accumulator: MutatorSetAccumulator = MutatorSetAccumulator::default(); - let mut removal_records: Vec<(usize, RemovalRecord)> = vec![]; + let mut accumulator: MutatorSetAccumulator = MutatorSetAccumulator::default(); + + let mut removal_records: Vec<(usize, RemovalRecord)> = vec![]; let mut original_first_removal_record = None; let mut items = vec![]; let mut mps = vec![]; @@ -555,7 +545,7 @@ mod removal_record_tests { let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); let addition_record: AdditionRecord = - commit::(item, sender_randomness, receiver_preimage.hash::()); + commit(item, sender_randomness, receiver_preimage.hash::()); let mp = accumulator.prove(item, sender_randomness, receiver_preimage); // Update all removal records from addition, then add the element @@ -606,18 +596,13 @@ mod removal_record_tests { for i in 0..12 * BATCH_SIZE + 4 { let remove_idx = rand::thread_rng().gen_range(0..removal_records.len()); let random_removal_record = removal_records.remove(remove_idx).1; - let update_res_rr = RemovalRecord::batch_update_from_remove( + RemovalRecord::batch_update_from_remove( &mut removal_records .iter_mut() .map(|x| &mut 
x.1) .collect::>(), &random_removal_record, ); - assert!( - update_res_rr.is_ok(), - "batch update must return OK, i = {}", - i - ); accumulator.remove(&random_removal_record); @@ -661,9 +646,8 @@ mod removal_record_tests { #[test] fn test_removal_record_decode() { - type H = Tip5; for _ in 0..10 { - let removal_record = random_removal_record::(); + let removal_record = random_removal_record(); let encoded = removal_record.encode(); let decoded = *RemovalRecord::decode(&encoded).unwrap(); assert_eq!(removal_record, decoded); @@ -672,22 +656,20 @@ mod removal_record_tests { #[test] fn test_removal_record_vec_decode() { - type H = Tip5; let mut rng = thread_rng(); for _ in 0..10 { let length = rng.gen_range(0..10); - let removal_records = vec![random_removal_record::(); length]; + let removal_records = vec![random_removal_record(); length]; let encoded = removal_records.encode(); - let decoded = *Vec::>::decode(&encoded).unwrap(); + let decoded = *Vec::::decode(&encoded).unwrap(); assert_eq!(removal_records, decoded); } } #[test] fn test_absindexset_record_decode() { - type H = Tip5; for _ in 0..100 { - let removal_record = random_removal_record::(); + let removal_record = random_removal_record(); let encoded_absindexset = removal_record.absolute_indices.encode(); let decoded_absindexset = *AbsoluteIndexSet::decode(&encoded_absindexset).unwrap(); assert_eq!(removal_record.absolute_indices, decoded_absindexset); diff --git a/src/util_types/mutator_set/rusty_archival_mutator_set.rs b/src/util_types/mutator_set/rusty_archival_mutator_set.rs index e76a9c58a..051f3d818 100644 --- a/src/util_types/mutator_set/rusty_archival_mutator_set.rs +++ b/src/util_types/mutator_set/rusty_archival_mutator_set.rs @@ -1,11 +1,9 @@ use crate::prelude::twenty_first; +use crate::Hash; use twenty_first::storage::level_db::DB; use twenty_first::storage::storage_schema::{traits::*, DbtSingleton, DbtVec, SimpleRustyStorage}; -use twenty_first::{ - shared_math::{bfield_codec::BFieldCodec, 
tip5::Digest}, - util_types::{algebraic_hasher::AlgebraicHasher, mmr::archival_mmr::ArchivalMmr}, -}; +use twenty_first::{shared_math::tip5::Digest, util_types::mmr::archival_mmr::ArchivalMmr}; use super::{ active_window::ActiveWindow, archival_mutator_set::ArchivalMutatorSet, chunk::Chunk, @@ -14,17 +12,14 @@ use super::{ type AmsMmrStorage = DbtVec; type AmsChunkStorage = DbtVec; -pub struct RustyArchivalMutatorSet -where - H: AlgebraicHasher + BFieldCodec, -{ - ams: ArchivalMutatorSet, +pub struct RustyArchivalMutatorSet { + ams: ArchivalMutatorSet, storage: SimpleRustyStorage, active_window_storage: DbtSingleton>, sync_label: DbtSingleton, } -impl RustyArchivalMutatorSet { +impl RustyArchivalMutatorSet { pub fn connect(db: DB) -> Self { let mut storage = SimpleRustyStorage::new_with_callback( db, @@ -39,13 +34,13 @@ impl RustyArchivalMutatorSet { let sync_label = storage.schema.new_singleton::("sync_label"); storage.restore_or_new(); - let kernel = MutatorSetKernel::> { - aocl: ArchivalMmr::::new(aocl), - swbf_inactive: ArchivalMmr::::new(swbfi), - swbf_active: ActiveWindow::::new(), + let kernel = MutatorSetKernel::> { + aocl: ArchivalMmr::::new(aocl), + swbf_inactive: ArchivalMmr::::new(swbfi), + swbf_active: ActiveWindow::new(), }; - let ams = ArchivalMutatorSet:: { chunks, kernel }; + let ams = ArchivalMutatorSet:: { chunks, kernel }; Self { ams, @@ -56,12 +51,12 @@ impl RustyArchivalMutatorSet { } #[inline] - pub fn ams(&self) -> &ArchivalMutatorSet { + pub fn ams(&self) -> &ArchivalMutatorSet { &self.ams } #[inline] - pub fn ams_mut(&mut self) -> &mut ArchivalMutatorSet { + pub fn ams_mut(&mut self) -> &mut ArchivalMutatorSet { &mut self.ams } @@ -76,7 +71,7 @@ impl RustyArchivalMutatorSet { } } -impl StorageWriter for RustyArchivalMutatorSet { +impl StorageWriter for RustyArchivalMutatorSet { fn persist(&mut self) { self.active_window_storage .set(self.ams().kernel.swbf_active.sbf.clone()); @@ -122,8 +117,7 @@ mod tests { let db = 
DB::open_new_test_database(false, None, None, None).unwrap(); let db_path = db.path().clone(); - let mut rusty_mutator_set: RustyArchivalMutatorSet = - RustyArchivalMutatorSet::connect(db); + let mut rusty_mutator_set: RustyArchivalMutatorSet = RustyArchivalMutatorSet::connect(db); println!("Connected to database"); rusty_mutator_set.restore_or_new(); println!("Restored or new odne."); @@ -138,8 +132,7 @@ mod tests { for _ in 0..num_additions { let (item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); - let addition_record = - commit::(item, sender_randomness, receiver_preimage.hash::()); + let addition_record = commit(item, sender_randomness, receiver_preimage.hash::()); let mp = rusty_mutator_set .ams() @@ -213,7 +206,7 @@ mod tests { // new database let new_db = DB::open_test_database(&db_path, true, None, None, None) .expect("should open existing database"); - let mut new_rusty_mutator_set: RustyArchivalMutatorSet = + let mut new_rusty_mutator_set: RustyArchivalMutatorSet = RustyArchivalMutatorSet::connect(new_db); new_rusty_mutator_set.restore_or_new(); diff --git a/src/util_types/mutator_set/shared.rs b/src/util_types/mutator_set/shared.rs index e8d5c2eab..cf06a1201 100644 --- a/src/util_types/mutator_set/shared.rs +++ b/src/util_types/mutator_set/shared.rs @@ -1,9 +1,9 @@ +use crate::models::blockchain::shared::Hash; use crate::prelude::twenty_first; use std::collections::{HashMap, HashSet}; -use twenty_first::shared_math::bfield_codec::BFieldCodec; -use twenty_first::shared_math::tip5::Digest; +use tasm_lib::Digest; use twenty_first::util_types::algebraic_hasher::AlgebraicHasher; use twenty_first::util_types::mmr::mmr_membership_proof::MmrMembershipProof; @@ -54,12 +54,12 @@ pub fn indices_to_hash_map(all_indices: &[u128; NUM_TRIALS as usize]) -> HashMap /// This function is factored out because it is shared by `update_from_remove` /// and `batch_update_from_remove`. 
#[allow(clippy::type_complexity)] -pub fn get_batch_mutation_argument_for_removal_record( - removal_record: &RemovalRecord, - chunk_dictionaries: &mut [&mut ChunkDictionary], -) -> (HashSet, Vec<(MmrMembershipProof, Digest)>) { +pub fn get_batch_mutation_argument_for_removal_record( + removal_record: &RemovalRecord, + chunk_dictionaries: &mut [&mut ChunkDictionary], +) -> (HashSet, Vec<(MmrMembershipProof, Digest)>) { // chunk index -> (mmr mp, chunk hash) - let mut batch_modification_hash_map: HashMap, Digest)> = + let mut batch_modification_hash_map: HashMap, Digest)> = HashMap::new(); // `mutated_chunk_dictionaries` records the indices into the // input `chunk_dictionaries` slice that shows which elements @@ -85,7 +85,7 @@ pub fn get_batch_mutation_argument_for_removal_record( - removal_record: &RemovalRecord, - chunk_dictionaries: &mut [&mut ChunkDictionary], -) -> (HashSet, Vec<(MmrMembershipProof, Digest)>) { +pub fn prepare_authenticated_batch_modification_for_removal_record_reversion( + removal_record: &RemovalRecord, + chunk_dictionaries: &mut [&mut ChunkDictionary], +) -> (HashSet, Vec<(MmrMembershipProof, Digest)>) { // chunk index -> (mmr mp, chunk hash) - let mut batch_modification_hash_map: HashMap, Digest)> = + let mut batch_modification_hash_map: HashMap, Digest)> = HashMap::new(); // `mutated_chunk_dictionaries` records the indices in `chunk_dictionaries` @@ -180,7 +180,7 @@ pub fn prepare_authenticated_batch_modification_for_removal_record_reversion< // *old* (before reversion) MMR membership proof. if !batch_modification_hash_map.contains_key(chunk_index) { batch_modification_hash_map - .insert(*chunk_index, (mmr_mp.to_owned(), H::hash(chunk))); + .insert(*chunk_index, (mmr_mp.to_owned(), Hash::hash(chunk))); } } @@ -209,8 +209,10 @@ pub fn prepare_authenticated_batch_modification_for_removal_record_reversion< // Since all indices have been applied to the chunk in the above // for-loop, we can calculate the hash of the updated chunk now. 
- batch_modification_hash_map - .insert(*chunk_index, (mp.to_owned(), H::hash(&target_chunk))); + batch_modification_hash_map.insert( + *chunk_index, + (mp.to_owned(), Hash::hash(&target_chunk)), + ); } } }; diff --git a/src/util_types/test_shared/mutator_set.rs b/src/util_types/test_shared/mutator_set.rs index 00717c8c0..6f877595c 100644 --- a/src/util_types/test_shared/mutator_set.rs +++ b/src/util_types/test_shared/mutator_set.rs @@ -7,7 +7,6 @@ use itertools::Itertools; use rand::rngs::StdRng; use rand::{thread_rng, Rng, RngCore, SeedableRng}; -use twenty_first::shared_math::bfield_codec::BFieldCodec; use twenty_first::shared_math::other::{log_2_ceil, log_2_floor}; use twenty_first::shared_math::tip5::Digest; use twenty_first::storage::level_db::DB; @@ -33,18 +32,18 @@ use crate::util_types::mutator_set::mutator_set_trait::commit; use crate::util_types::mutator_set::removal_record::{pseudorandom_removal_record, RemovalRecord}; use crate::util_types::mutator_set::rusty_archival_mutator_set::RustyArchivalMutatorSet; use crate::util_types::mutator_set::shared::{CHUNK_SIZE, WINDOW_SIZE}; +use crate::Hash; -pub fn random_chunk_dictionary() -> ChunkDictionary { +pub fn random_chunk_dictionary() -> ChunkDictionary { let mut rng = thread_rng(); pseudorandom_chunk_dictionary(rng.gen::<[u8; 32]>()) } pub fn get_all_indices_with_duplicates< - H: AlgebraicHasher + BFieldCodec, MmrStorage: StorageVec, ChunkStorage: StorageVec, >( - archival_mutator_set: &mut ArchivalMutatorSet, + archival_mutator_set: &mut ArchivalMutatorSet, ) -> Vec { let mut ret: Vec = vec![]; @@ -72,43 +71,45 @@ pub fn make_item_and_randomnesses() -> (Digest, Digest, Digest) { } #[allow(clippy::type_complexity)] -pub fn empty_rusty_mutator_set() -> RustyArchivalMutatorSet { +pub fn empty_rusty_mutator_set() -> RustyArchivalMutatorSet { let db = DB::open_new_test_database(true, None, None, None).unwrap(); - let rusty_mutator_set: RustyArchivalMutatorSet = RustyArchivalMutatorSet::connect(db); + let 
rusty_mutator_set: RustyArchivalMutatorSet = RustyArchivalMutatorSet::connect(db); rusty_mutator_set } -pub fn insert_mock_item>( - mutator_set: &mut MutatorSetKernel, -) -> (MsMembershipProof, Digest) { +pub fn insert_mock_item>( + mutator_set: &mut MutatorSetKernel, +) -> (MsMembershipProof, Digest) { let (new_item, sender_randomness, receiver_preimage) = make_item_and_randomnesses(); - let addition_record = commit::(new_item, sender_randomness, receiver_preimage.hash::()); + let addition_record = commit( + new_item, + sender_randomness, + receiver_preimage.hash::(), + ); let membership_proof = mutator_set.prove(new_item, sender_randomness, receiver_preimage); mutator_set.add_helper(&addition_record); (membership_proof, new_item) } -pub fn remove_mock_item>( - mutator_set: &mut MutatorSetKernel, +pub fn remove_mock_item>( + mutator_set: &mut MutatorSetKernel, item: Digest, - mp: &MsMembershipProof, + mp: &MsMembershipProof, ) { - let removal_record: RemovalRecord = mutator_set.drop(item, mp); + let removal_record: RemovalRecord = mutator_set.drop(item, mp); mutator_set.remove_helper(&removal_record); } /// Generate a random MSA. For serialization testing. Might not be a consistent or valid object. -pub fn random_mutator_set_accumulator() -> MutatorSetAccumulator -{ +pub fn random_mutator_set_accumulator() -> MutatorSetAccumulator { let kernel = random_mutator_set_kernel(); MutatorSetAccumulator { kernel } } /// Generate a random MSK. For serialization testing. Might not be a consistent or valid object. 
-pub fn random_mutator_set_kernel( -) -> MutatorSetKernel> { +pub fn random_mutator_set_kernel() -> MutatorSetKernel> { let aocl = random_mmra(); let swbf_inactive = random_mmra(); let swbf_active = random_swbf_active(); @@ -345,11 +346,11 @@ pub fn pseudorandom_merkle_root_with_authentication_paths( (root, paths) } -pub fn random_swbf_active() -> ActiveWindow { +pub fn random_swbf_active() -> ActiveWindow { let mut rng = thread_rng(); let num_indices = 10 + (rng.next_u32() % 100) as usize; - let mut aw = ActiveWindow::::new(); + let mut aw = ActiveWindow::new(); for _ in 0..num_indices { aw.insert(rng.next_u32() % WINDOW_SIZE); } @@ -362,11 +363,11 @@ pub fn _random_mmr_membership_proof() -> MmrMembershipProof< } /// Generate a random MsMembershipProof. For serialization testing. Might not be a consistent or valid object. -pub fn random_mutator_set_membership_proof() -> MsMembershipProof { +pub fn random_mutator_set_membership_proof() -> MsMembershipProof { pseudorandom_mutator_set_membership_proof(thread_rng().gen()) } -pub fn random_removal_record() -> RemovalRecord { +pub fn random_removal_record() -> RemovalRecord { let mut rng = thread_rng(); pseudorandom_removal_record(rng.gen::<[u8; 32]>()) } @@ -391,17 +392,14 @@ fn merkle_verify_tester_helper( #[cfg(test)] mod shared_tests_test { - use twenty_first::shared_math::tip5::Tip5; - use super::*; #[test] fn can_call() { - type H = Tip5; - let rcd = random_chunk_dictionary::(); + let rcd = random_chunk_dictionary(); assert!(!rcd.dictionary.is_empty()); - let _ = random_removal_record::(); - let mut rms = empty_rusty_mutator_set::(); + let _ = random_removal_record(); + let mut rms = empty_rusty_mutator_set(); let ams = rms.ams_mut(); let _ = get_all_indices_with_duplicates(ams); let _ = make_item_and_randomnesses(); @@ -410,16 +408,14 @@ mod shared_tests_test { #[test] fn test_pseudorandom_mmra_with_single_mp() { - type H = Tip5; let mut rng = thread_rng(); let leaf: Digest = rng.gen(); - let (mmra, mp) = 
pseudorandom_mmra_with_mp::(rng.gen(), leaf); + let (mmra, mp) = pseudorandom_mmra_with_mp::(rng.gen(), leaf); assert!(mp.verify(&mmra.get_peaks(), leaf, mmra.count_leaves()).0); } #[test] fn test_pseudorandom_root_with_authentication_paths() { - type H = Tip5; let seed: [u8; 32] = thread_rng().gen(); let mut outer_rng: StdRng = SeedableRng::from_seed(seed); for num_leafs in 0..20 { @@ -438,14 +434,14 @@ mod shared_tests_test { } let leafs: Vec = (0..num_leafs).map(|_| inner_rng.gen()).collect_vec(); let leafs_and_indices = leafs.into_iter().zip(indices.into_iter()).collect_vec(); - let (root, paths) = pseudorandom_merkle_root_with_authentication_paths::( + let (root, paths) = pseudorandom_merkle_root_with_authentication_paths::( inner_rng.gen(), tree_height, &leafs_and_indices, ); for ((leaf, index), path) in leafs_and_indices.into_iter().zip(paths.into_iter()) { assert!( - merkle_verify_tester_helper::(root, index, &path, leaf), + merkle_verify_tester_helper::(root, index, &path, leaf), "failure observed for num_leafs: {num_leafs} and seed: {inner_seed:?}" ); } @@ -454,7 +450,6 @@ mod shared_tests_test { #[test] fn test_pseudorandom_mmra_with_mps() { - type H = Tip5; let seed: [u8; 32] = thread_rng().gen(); let mut outer_rng: StdRng = SeedableRng::from_seed(seed); for num_leafs in 0..20 { @@ -462,7 +457,7 @@ mod shared_tests_test { let mut inner_rng: StdRng = SeedableRng::from_seed(inner_seed); let leafs: Vec = (0..num_leafs).map(|_| inner_rng.gen()).collect_vec(); - let (mmra, mps) = pseudorandom_mmra_with_mps::(inner_rng.gen(), &leafs); + let (mmra, mps) = pseudorandom_mmra_with_mps::(inner_rng.gen(), &leafs); for (leaf, mp) in leafs.into_iter().zip(mps) { assert!( mp.verify(&mmra.get_peaks(), leaf, mmra.count_leaves()).0,