diff --git a/.gitignore b/.gitignore index 8f6bdec8..e20a84bc 100644 --- a/.gitignore +++ b/.gitignore @@ -26,6 +26,7 @@ working_dir # Walrus binary and configuration walrus +!/contracts/walrus examples/CONFIG/bin/walrus client_config.yaml examples/CONFIG/config_dir/client_config.yaml diff --git a/contracts/blob_store/Move.lock b/contracts/blob_store/Move.lock deleted file mode 100644 index 0b6f01a9..00000000 --- a/contracts/blob_store/Move.lock +++ /dev/null @@ -1,26 +0,0 @@ -# @generated by Move, please check-in and do not edit manually. - -[move] -version = 2 -manifest_digest = "C461A25DAED3234921DF6DD2B4AE93FF11BA907C5A5CF469400757C595C15B68" -deps_digest = "F8BBB0CCB2491CA29A3DF03D6F92277A4F3574266507ACD77214D37ECA3F3082" -dependencies = [ - { name = "Sui" }, -] - -[[move.package]] -name = "MoveStdlib" -source = { git = "https://github.com/MystenLabs/sui.git", rev = "testnet-v1.31.1", subdir = "crates/sui-framework/packages/move-stdlib" } - -[[move.package]] -name = "Sui" -source = { git = "https://github.com/MystenLabs/sui.git", rev = "testnet-v1.31.1", subdir = "crates/sui-framework/packages/sui-framework" } - -dependencies = [ - { name = "MoveStdlib" }, -] - -[move.toolchain-version] -compiler-version = "1.30.1" -edition = "2024.beta" -flavor = "sui" diff --git a/contracts/blob_store/sources/blob.move b/contracts/blob_store/sources/blob.move deleted file mode 100644 index 85877a80..00000000 --- a/contracts/blob_store/sources/blob.move +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -module blob_store::blob { - use sui::bcs; - use sui::hash; - - use blob_store::committee::{Self, CertifiedMessage}; - use blob_store::system::System; - use blob_store::storage_resource::{ - Storage, - start_epoch, - end_epoch, - storage_size, - fuse_periods, - destroy, - }; - use blob_store::encoding; - use blob_store::blob_events::{emit_blob_registered, emit_blob_certified}; - - // A certify blob message structure - const BLOB_CERT_MSG_TYPE: u8 = 1; - - // Error codes - const EInvalidMsgType: u64 = 1; - const EResourceBounds: u64 = 2; - const EResourceSize: u64 = 3; - const EWrongEpoch: u64 = 4; - const EAlreadyCertified: u64 = 5; - const EInvalidBlobId: u64 = 6; - const ENotCertified: u64 = 7; - - // Object definitions - - /// The blob structure represents a blob that has been registered to with some storage, - /// and then may eventually be certified as being available in the system. - public struct Blob has key, store { - id: UID, - stored_epoch: u64, - blob_id: u256, - size: u64, - erasure_code_type: u8, - certified_epoch: option::Option, // Store the epoch first certified - storage: Storage, - } - - // Accessor functions - - public fun stored_epoch(b: &Blob): u64 { - b.stored_epoch - } - - public fun blob_id(b: &Blob): u256 { - b.blob_id - } - - public fun size(b: &Blob): u64 { - b.size - } - - public fun erasure_code_type(b: &Blob): u8 { - b.erasure_code_type - } - - public fun certified_epoch(b: &Blob): &Option { - &b.certified_epoch - } - - public fun storage(b: &Blob): &Storage { - &b.storage - } - - public struct BlobIdDerivation has drop { - erasure_code_type: u8, - size: u64, - root_hash: u256, - } - - /// Derive the blob_id for a blob given the root_hash, erasure_code_type and size. 
- public fun derive_blob_id(root_hash: u256, erasure_code_type: u8, size: u64): u256 { - let blob_id_struct = BlobIdDerivation { - erasure_code_type, - size, - root_hash, - }; - - let serialized = bcs::to_bytes(&blob_id_struct); - let encoded = hash::blake2b256(&serialized); - let mut decoder = bcs::new(encoded); - let blob_id = decoder.peel_u256(); - blob_id - } - - /// Register a new blob in the system. - /// `size` is the size of the unencoded blob. The reserved space in `storage` must be at - /// least the size of the encoded blob. - public fun register( - sys: &System, - storage: Storage, - blob_id: u256, - root_hash: u256, - size: u64, - erasure_code_type: u8, - ctx: &mut TxContext, - ): Blob { - let id = object::new(ctx); - let stored_epoch = sys.epoch(); - - // Check resource bounds. - assert!(stored_epoch >= start_epoch(&storage), EResourceBounds); - assert!(stored_epoch < end_epoch(&storage), EResourceBounds); - - // check that the encoded size is less than the storage size - let encoded_size = encoding::encoded_blob_length( - size, - erasure_code_type, - sys.n_shards(), - ); - assert!(encoded_size <= storage_size(&storage), EResourceSize); - - // Cryptographically verify that the Blob ID authenticates - // both the size and fe_type. - assert!( - derive_blob_id(root_hash, erasure_code_type, size) == blob_id, - EInvalidBlobId, - ); - - // Emit register event - emit_blob_registered( - stored_epoch, - blob_id, - size, - erasure_code_type, - end_epoch(&storage), - ); - - Blob { - id, - stored_epoch, - blob_id, - size, - // - erasure_code_type, - certified_epoch: option::none(), - storage, - } - } - - public struct CertifiedBlobMessage has drop { - epoch: u64, - blob_id: u256, - } - - /// Construct the certified blob message, note that constructing - /// implies a certified message, that is already checked. 
- public fun certify_blob_message(message: CertifiedMessage): CertifiedBlobMessage { - // Assert type is correct - assert!(message.intent_type() == BLOB_CERT_MSG_TYPE, EInvalidMsgType); - - // The certified blob message contain a blob_id : u256 - let epoch = message.cert_epoch(); - let message_body = message.into_message(); - - let mut bcs_body = bcs::new(message_body); - let blob_id = bcs_body.peel_u256(); - - // On purpose we do not check that nothing is left in the message - // to allow in the future for extensibility. - - CertifiedBlobMessage { epoch, blob_id } - } - - /// Certify that a blob will be available in the storage system until the end epoch of the - /// storage associated with it, given a [`CertifiedBlobMessage`]. - public fun certify_with_certified_msg( - sys: &System, - message: CertifiedBlobMessage, - blob: &mut Blob, - ) { - // Check that the blob is registered in the system - assert!(blob_id(blob) == message.blob_id, EInvalidBlobId); - - // Check that the blob is not already certified - assert!(!blob.certified_epoch.is_some(), EAlreadyCertified); - - // Check that the message is from the current epoch - assert!(message.epoch == sys.epoch(), EWrongEpoch); - - // Check that the storage in the blob is still valid - assert!(message.epoch < end_epoch(storage(blob)), EResourceBounds); - - // Mark the blob as certified - blob.certified_epoch = option::some(message.epoch); - - // Emit certified event - emit_blob_certified( - message.epoch, - message.blob_id, - end_epoch(storage(blob)), - ); - } - - /// Certify that a blob will be available in the storage system until the end epoch of the - /// storage associated with it. 
- public fun certify( - sys: &System, - blob: &mut Blob, - signature: vector, - members: vector, - message: vector, - ) { - let certified_msg = committee::verify_quorum_in_epoch( - sys.current_committee(), - signature, - members, - message, - ); - let certified_blob_msg = certify_blob_message(certified_msg); - certify_with_certified_msg(sys, certified_blob_msg, blob); - } - - /// After the period of validity expires for the blob we can destroy the blob resource. - public fun destroy_blob(sys: &System, blob: Blob) { - let current_epoch = sys.epoch(); - assert!(current_epoch >= end_epoch(storage(&blob)), EResourceBounds); - - // Destroy the blob - let Blob { - id, - stored_epoch: _, - blob_id: _, - size: _, - erasure_code_type: _, - certified_epoch: _, - storage, - } = blob; - - id.delete(); - destroy(storage); - } - - /// Extend the period of validity of a blob with a new storage resource. - /// The new storage resource must be the same size as the storage resource - /// used in the blob, and have a longer period of validity. - public fun extend(sys: &System, blob: &mut Blob, extension: Storage) { - // We only extend certified blobs within their period of validity - // with storage that extends this period. First we check for these - // conditions. - - // Assert this is a certified blob - assert!(blob.certified_epoch.is_some(), ENotCertified); - - // Check the blob is within its availability period - assert!(sys.epoch() < end_epoch(storage(blob)), EResourceBounds); - - // Check that the extension is valid, and the end - // period of the extension is after the current period. - assert!(end_epoch(&extension) > end_epoch(storage(blob)), EResourceBounds); - - // Note: if the amounts do not match there will be an abort here. - fuse_periods(&mut blob.storage, extension); - - // Emit certified event - // - // Note: We use the original certified period since for the purposes of - // reconfiguration this is the committee that has a quorum that hold the - // resource. 
- emit_blob_certified( - *option::borrow(&blob.certified_epoch), - blob.blob_id, - end_epoch(storage(blob)), - ); - } - - // Testing Functions - - #[test_only] - public fun drop_for_testing(b: Blob) { - // deconstruct - let Blob { - id, - stored_epoch: _, - blob_id: _, - size: _, - erasure_code_type: _, - certified_epoch: _, - storage, - } = b; - - id.delete(); - destroy(storage); - } - - #[test_only] - // Accessor for blob - public fun message_blob_id(m: &CertifiedBlobMessage): u256 { - m.blob_id - } - - #[test_only] - public fun certified_blob_message_for_testing(epoch: u64, blob_id: u256): CertifiedBlobMessage { - CertifiedBlobMessage { epoch, blob_id } - } -} diff --git a/contracts/blob_store/sources/blob_events.move b/contracts/blob_store/sources/blob_events.move deleted file mode 100644 index 6b87dafb..00000000 --- a/contracts/blob_store/sources/blob_events.move +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. -// SPDX-License-Identifier: Apache-2.0 - -/// Module to emit blob events. Used to allow filtering all blob events in the -/// rust client (as work-around for the lack of composable event filters). -module blob_store::blob_events { - use sui::event; - - // Event definitions - - /// Signals a blob with meta-data is registered. - public struct BlobRegistered has copy, drop { - epoch: u64, - blob_id: u256, - size: u64, - erasure_code_type: u8, - end_epoch: u64, - } - - /// Signals a blob is certified. - public struct BlobCertified has copy, drop { - epoch: u64, - blob_id: u256, - end_epoch: u64, - } - - /// Signals that a BlobID is invalid. 
- public struct InvalidBlobID has copy, drop { - epoch: u64, // The epoch in which the blob ID is first registered as invalid - blob_id: u256, - } - - public(package) fun emit_blob_registered( - epoch: u64, - blob_id: u256, - size: u64, - erasure_code_type: u8, - end_epoch: u64, - ) { - event::emit(BlobRegistered { epoch, blob_id, size, erasure_code_type, end_epoch }); - } - - public(package) fun emit_blob_certified(epoch: u64, blob_id: u256, end_epoch: u64) { - event::emit(BlobCertified { epoch, blob_id, end_epoch }); - } - - public(package) fun emit_invalid_blob_id(epoch: u64, blob_id: u256) { - event::emit(InvalidBlobID { epoch, blob_id }); - } -} diff --git a/contracts/blob_store/sources/bls_aggregate.move b/contracts/blob_store/sources/bls_aggregate.move deleted file mode 100644 index 8568dcc6..00000000 --- a/contracts/blob_store/sources/bls_aggregate.move +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. -// SPDX-License-Identifier: Apache-2.0 - -// editorconfig-checker-disable-file - -module blob_store::bls_aggregate { - use sui::group_ops::Self; - use sui::bls12381::{Self, bls12381_min_pk_verify}; - - use blob_store::storage_node::StorageNodeInfo; - - // Error codes - const ETotalMemberOrder: u64 = 0; - const ESigVerification: u64 = 1; - const ENotEnoughStake: u64 = 2; - const EIncorrectCommittee: u64 = 3; - - /// This represents a BLS signing committee. 
- public struct BlsCommittee has store, drop { - /// A vector of committee members - members: vector, - /// The total number of shards held by the committee - n_shards: u16, - } - - /// Constructor - public fun new_bls_committee(members: vector): BlsCommittee { - // Compute the total number of shards - let mut n_shards = 0; - let mut i = 0; - while (i < members.length()) { - let added_weight = members[i].weight(); - assert!(added_weight > 0, EIncorrectCommittee); - n_shards = n_shards + added_weight; - i = i + 1; - }; - assert!(n_shards != 0, EIncorrectCommittee); - - BlsCommittee { members, n_shards } - } - - /// Returns the number of shards held by the committee. - public fun n_shards(self: &BlsCommittee): u16 { - self.n_shards - } - - /// Verify an aggregate BLS signature is a certificate in the epoch, and return the type of - /// certificate and the bytes certified. The `signers` vector is an increasing list of indexes - /// into the `members` vector of the committee. If there is a certificate, the function - /// returns the total stake. Otherwise, it aborts. - public fun verify_certificate( - self: &BlsCommittee, - signature: &vector, - signers: &vector, - message: &vector, - ): u16 { - // Use the signers flags to construct the key and the weights. 
- - // Lower bound for the next `member_index` to ensure they are monotonically increasing - let mut min_next_member_index = 0; - let mut i = 0; - - let mut aggregate_key = bls12381::g1_identity(); - let mut aggregate_weight = 0; - - while (i < signers.length()) { - let member_index = signers[i] as u64; - assert!(member_index >= min_next_member_index, ETotalMemberOrder); - min_next_member_index = member_index + 1; - - // Bounds check happens here - let member = &self.members[member_index]; - let key = member.public_key(); - let weight = member.weight(); - - aggregate_key = bls12381::g1_add(&aggregate_key, key); - aggregate_weight = aggregate_weight + weight; - - i = i + 1; - }; - - // The expression below is the solution to the inequality: - // n_shards = 3 f + 1 - // stake >= 2f + 1 - assert!( - 3 * (aggregate_weight as u64) >= 2 * (self.n_shards as u64) + 1, - ENotEnoughStake, - ); - - // Verify the signature - let pub_key_bytes = group_ops::bytes(&aggregate_key); - assert!( - bls12381_min_pk_verify( - signature, - pub_key_bytes, - message, - ), - ESigVerification, - ); - - (aggregate_weight as u16) - } - - - #[test_only] - use blob_store::storage_node::Self; - - #[test_only] - /// Test committee - public fun new_bls_committee_for_testing(): BlsCommittee { - // Pk corresponding to secret key scalar(117) - let pub_key_bytes = x"95eacc3adc09c827593f581e8e2de068bf4cf5d0c0eb29e5372f0d23364788ee0f9beb112c8a7e9c2f0c720433705cf0"; - let storage_node = storage_node::new_for_testing(pub_key_bytes, 100); - BlsCommittee { members: vector[storage_node], n_shards: 100 } - } -} diff --git a/contracts/blob_store/sources/committee.move b/contracts/blob_store/sources/committee.move deleted file mode 100644 index 04116952..00000000 --- a/contracts/blob_store/sources/committee.move +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -module blob_store::committee { - use sui::bcs; - - const APP_ID: u8 = 3; - - // Errors - const EIncorrectAppId: u64 = 0; - const EIncorrectEpoch: u64 = 1; - - #[test_only] - use blob_store::bls_aggregate::new_bls_committee_for_testing; - - use blob_store::bls_aggregate::{Self, BlsCommittee, new_bls_committee, verify_certificate}; - use blob_store::storage_node::StorageNodeInfo; - - /// Represents a committee for a given epoch - /// - /// The construction of a committee for a type is a controlled operation - /// and signifies that the committee is valid for the given epoch. It has - /// no drop since valid committees must be stored for ever. And no copy - /// since each epoch must only have one committee. Finally, no key since - /// It must never be stored outside controlled places. - /// - /// The above restrictions allow us to implement a separation between committee - /// formation and the actual System object. One structure - /// can take care of the epoch management including the committee formation, and - /// the System object can simply receive a committee of the correct type as a - /// signal that the new epoch has started. - public struct Committee has store { - epoch: u64, - bls_committee: BlsCommittee, - } - - /// Get the epoch of the committee. - public fun epoch(self: &Committee): u64 { - self.epoch - } - - /// A capability that allows the creation of committees - public struct CreateCommitteeCap has copy, store, drop {} - - /// A constructor for the capability to create committees - /// This is only accessible through friend modules. - public(package) fun create_committee_cap(): CreateCommitteeCap { - CreateCommitteeCap {} - } - - /// Returns the number of shards held by the committee. 
- public fun n_shards(self: &Committee): u16 { - bls_aggregate::n_shards(&self.bls_committee) - } - - #[test_only] - /// A constructor for the capability to create committees for tests - public fun create_committee_cap_for_tests(): CreateCommitteeCap { - CreateCommitteeCap {} - } - - /// Creating a committee for a given epoch. - /// Requires a capability - public fun create_committee( - _cap: &CreateCommitteeCap, - epoch: u64, - members: vector, - ): Committee { - // Make BlsCommittee - let bls_committee = new_bls_committee(members); - - Committee { epoch, bls_committee } - } - - #[test_only] - public fun committee_for_testing(epoch: u64): Committee { - let bls_committee = new_bls_committee_for_testing(); - Committee { epoch, bls_committee } - } - - #[test_only] - public fun committee_for_testing_with_bls(epoch: u64, bls_committee: BlsCommittee): Committee { - Committee { epoch, bls_committee } - } - - public struct CertifiedMessage has drop { - intent_type: u8, - intent_version: u8, - cert_epoch: u64, - stake_support: u16, - message: vector, - } - - #[test_only] - public fun certified_message_for_testing( - intent_type: u8, - intent_version: u8, - cert_epoch: u64, - stake_support: u16, - message: vector, - ): CertifiedMessage { - CertifiedMessage { intent_type, intent_version, cert_epoch, stake_support, message } - } - - // Make accessors for the CertifiedMessage - public fun intent_type(self: &CertifiedMessage): u8 { - self.intent_type - } - - public fun intent_version(self: &CertifiedMessage): u8 { - self.intent_version - } - - public fun cert_epoch(self: &CertifiedMessage): u64 { - self.cert_epoch - } - - public fun stake_support(self: &CertifiedMessage): u16 { - self.stake_support - } - - public fun message(self: &CertifiedMessage): &vector { - &self.message - } - - // Deconstruct into the vector of message bytes - public fun into_message(self: CertifiedMessage): vector { - self.message - } - - /// Verifies that a message is signed by a quorum of the members 
of a committee. - /// - /// The members are listed in increasing order and with no repetitions. And the signatures - /// match the order of the members. The total stake is returned, but if a quorum is not reached - /// the function aborts with an error. - public fun verify_quorum_in_epoch( - committee: &Committee, - signature: vector, - members: vector, - message: vector, - ): CertifiedMessage { - let stake_support = verify_certificate( - &committee.bls_committee, - &signature, - &members, - &message, - ); - - // Here we BCS decode the header of the message to check intents, epochs, etc. - - let mut bcs_message = bcs::new(message); - let intent_type = bcs_message.peel_u8(); - let intent_version = bcs_message.peel_u8(); - - let intent_app = bcs_message.peel_u8(); - assert!(intent_app == APP_ID, EIncorrectAppId); - - let cert_epoch = bcs_message.peel_u64(); - assert!(cert_epoch == epoch(committee), EIncorrectEpoch); - - let message = bcs_message.into_remainder_bytes(); - - CertifiedMessage { intent_type, intent_version, cert_epoch, stake_support, message } - } -} diff --git a/contracts/blob_store/sources/e2etest.move b/contracts/blob_store/sources/e2etest.move deleted file mode 100644 index e42c6cd7..00000000 --- a/contracts/blob_store/sources/e2etest.move +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. -// SPDX-License-Identifier: Apache-2.0 - -module blob_store::e2e_test { - use blob_store::committee::{Self, CreateCommitteeCap}; - use blob_store::storage_node; - - public struct CommitteeCapHolder has key, store { - id: UID, - cap: CreateCommitteeCap, - } - - // NOTE: the function below is means to be used as part of a PTB to construct a committee - // The PTB contains a number of `create_storage_node_info` invocations, then - // a `MakeMoveVec` invocation, and finally a `make_committee` invocation. 
- - /// Create a committee given a capability and a list of storage nodes - public fun make_committee( - cap: &CommitteeCapHolder, - epoch: u64, - storage_nodes: vector, - ): committee::Committee { - committee::create_committee( - &cap.cap, - epoch, - storage_nodes, - ) - } - - fun init(ctx: &mut TxContext) { - // Create a committee caps - let committee_cap = committee::create_committee_cap(); - - // We send the wrapped cap to the creator of the package - transfer::public_transfer( - CommitteeCapHolder { id: object::new(ctx), cap: committee_cap }, - ctx.sender(), - ); - } -} diff --git a/contracts/blob_store/sources/encoding.move b/contracts/blob_store/sources/encoding.move deleted file mode 100644 index 7b67c375..00000000 --- a/contracts/blob_store/sources/encoding.move +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. -// SPDX-License-Identifier: Apache-2.0 - -module blob_store::encoding { - use blob_store::redstuff; - - // Supported Encoding Types - const RED_STUFF_ENCODING: u8 = 0; - - // Errors - const EInvalidEncoding: u64 = 0; - - /// Computes the encoded length of a blob given its unencoded length, encoding type - /// and number of shards `n_shards`. - public fun encoded_blob_length(unencoded_length: u64, encoding_type: u8, n_shards: u16): u64 { - // Currently only supports a single encoding type - assert!(encoding_type == RED_STUFF_ENCODING, EInvalidEncoding); - redstuff::encoded_blob_length(unencoded_length, n_shards) - } -} diff --git a/contracts/blob_store/sources/redstuff.move b/contracts/blob_store/sources/redstuff.move deleted file mode 100644 index ca0208ca..00000000 --- a/contracts/blob_store/sources/redstuff.move +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -module blob_store::redstuff { - // The length of a hash used for the Red Stuff metadata - const DIGEST_LEN: u64 = 32; - - // The length of a blob id in the stored metadata - const BLOB_ID_LEN: u64 = 32; - - /// Computes the encoded length of a blob for the Red Stuff encoding, given its - /// unencoded size and the number of shards. The output length includes the - /// size of the metadata hashes and the blob ID. - public(package) fun encoded_blob_length(unencoded_length: u64, n_shards: u16): u64 { - let slivers_size = (source_symbols_primary(n_shards) as u64 - + (source_symbols_secondary(n_shards) as u64)) - * (symbol_size(unencoded_length, n_shards) as u64); - - (n_shards as u64) * (slivers_size + metadata_size(n_shards)) - } - - /// The number of primary source symbols per sliver given `n_shards`. - fun source_symbols_primary(n_shards: u16): u16 { - n_shards - max_byzantine(n_shards) - decoding_safety_limit(n_shards) - } - - /// The number of secondary source symbols per sliver given `n_shards`. - fun source_symbols_secondary(n_shards: u16): u16 { - n_shards - 2 * max_byzantine(n_shards) - decoding_safety_limit(n_shards) - } - - /// The total number of source symbols given `n_shards`. - fun n_source_symbols(n_shards: u16): u64 { - (source_symbols_primary(n_shards) as u64) * (source_symbols_secondary(n_shards) as u64) - } - - /// Computes the symbol size given the `unencoded_length` and number of shards - /// `n_shards`. If the resulting symbols would be larger than a `u16`, this - /// results in an Error. - fun symbol_size(mut unencoded_length: u64, n_shards: u16): u16 { - if (unencoded_length == 0) { - unencoded_length = 1; - }; - let n_symbols = n_source_symbols(n_shards); - ((unencoded_length - 1) / n_symbols + 1) as u16 - } - - /// The size of the metadata, i.e. sliver root hashes and blob_id. 
- fun metadata_size(n_shards: u16): u64 { - (n_shards as u64) * DIGEST_LEN * 2 + BLOB_ID_LEN - } - - /// Returns the decoding safety limit. - fun decoding_safety_limit(n_shards: u16): u16 { - // These ranges are chosen to ensure that the safety limit is at most 20% of f, - // up to a safety limit of 5. - min_u16(max_byzantine(n_shards) / 5, 5) - } - - /// Maximum number of byzantine shards, given `n_shards`. - fun max_byzantine(n_shards: u16): u16 { - (n_shards - 1) / 3 - } - - fun min_u16(a: u16, b: u16): u16 { - if (a < b) { - a - } else { - b - } - } - - // Tests - - #[test_only] - fun assert_encoded_size(unencoded_length: u64, n_shards: u16, encoded_size: u64) { - assert!(encoded_blob_length(unencoded_length, n_shards) == encoded_size, 0); - } - - #[test] - fun test_encoded_size() { - assert_encoded_size(1, 10, 10 * ((4 + 7) + 10 * 2 * 32 + 32)); - assert_encoded_size(1, 1000, 1000 * ((329 + 662) + 1000 * 2 * 32 + 32)); - assert_encoded_size((4 * 7) * 100, 10, 10 * ((4 + 7) * 100 + 10 * 2 * 32 + 32)); - assert_encoded_size( - (329 * 662) * 100, - 1000, - 1000 * ((329 + 662) * 100 + 1000 * 2 * 32 + 32), - ); - } - - #[test] - fun test_zero_size() { - //test should fail here - encoded_blob_length(0, 10); - } - - #[test,expected_failure] - fun test_symbol_too_large() { - let n_shards = 100; - // Create an unencoded length for which each symbol must be larger than the maximum size - let unencoded_length = (0xffff + 1) * n_source_symbols(n_shards); - // Test should fail here - let _ = symbol_size(unencoded_length, n_shards); - } -} diff --git a/contracts/blob_store/sources/storage_accounting.move b/contracts/blob_store/sources/storage_accounting.move deleted file mode 100644 index f971f1bf..00000000 --- a/contracts/blob_store/sources/storage_accounting.move +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -module blob_store::storage_accounting { - use sui::balance::{Self, Balance}; - - // Errors - const EIndexOutOfBounds: u64 = 3; - - /// Holds information about a future epoch, namely how much - /// storage needs to be reclaimed and the rewards to be distributed. - public struct FutureAccounting has store { - epoch: u64, - storage_to_reclaim: u64, - rewards_to_distribute: Balance, - } - - /// Constructor for FutureAccounting - public fun new_future_accounting( - epoch: u64, - storage_to_reclaim: u64, - rewards_to_distribute: Balance, - ): FutureAccounting { - FutureAccounting { epoch, storage_to_reclaim, rewards_to_distribute } - } - - /// Accessor for epoch, read-only - public fun epoch(accounting: &FutureAccounting): u64 { - *&accounting.epoch - } - - /// Accessor for storage_to_reclaim, mutable. - public fun storage_to_reclaim(accounting: &mut FutureAccounting): u64 { - accounting.storage_to_reclaim - } - - /// Increase storage to reclaim - public fun increase_storage_to_reclaim( - accounting: &mut FutureAccounting, - amount: u64, - ) { - accounting.storage_to_reclaim = accounting.storage_to_reclaim + amount; - } - - /// Accessor for rewards_to_distribute, mutable. - public fun rewards_to_distribute( - accounting: &mut FutureAccounting, - ): &mut Balance { - &mut accounting.rewards_to_distribute - } - - /// Destructor for FutureAccounting, when empty. - public fun delete_empty_future_accounting(self: FutureAccounting) { - let FutureAccounting { - epoch: _, - storage_to_reclaim: _, - rewards_to_distribute, - } = self; - - rewards_to_distribute.destroy_zero() - } - - #[test_only] - public fun burn_for_testing(self: FutureAccounting) { - let FutureAccounting { - epoch: _, - storage_to_reclaim: _, - rewards_to_distribute, - } = self; - - rewards_to_distribute.destroy_for_testing(); - } - - /// A ring buffer holding future accounts for a continuous range of epochs. 
- public struct FutureAccountingRingBuffer has store { - current_index: u64, - length: u64, - ring_buffer: vector>, - } - - /// Constructor for FutureAccountingRingBuffer - public fun ring_new(length: u64): FutureAccountingRingBuffer { - let mut ring_buffer: vector> = vector::empty(); - let mut i = 0; - while (i < length) { - ring_buffer.push_back(FutureAccounting { - epoch: i, - storage_to_reclaim: 0, - rewards_to_distribute: balance::zero(), - }); - i = i + 1; - }; - - FutureAccountingRingBuffer { current_index: 0, length: length, ring_buffer: ring_buffer } - } - - /// Lookup an entry a number of epochs in the future. - public fun ring_lookup_mut( - self: &mut FutureAccountingRingBuffer, - epochs_in_future: u64, - ): &mut FutureAccounting { - // Check for out-of-bounds access. - assert!(epochs_in_future < self.length, EIndexOutOfBounds); - - let actual_index = (epochs_in_future + self.current_index) % self.length; - &mut self.ring_buffer[actual_index] - } - - public fun ring_pop_expand( - self: &mut FutureAccountingRingBuffer, - ): FutureAccounting { - // Get current epoch - let current_index = self.current_index; - let current_epoch = self.ring_buffer[current_index].epoch; - - // Expand the ring buffer - self - .ring_buffer - .push_back(FutureAccounting { - epoch: current_epoch + self.length, - storage_to_reclaim: 0, - rewards_to_distribute: balance::zero(), - }); - - // Now swap remove the current element and increment the current_index - let accounting = self.ring_buffer.swap_remove(current_index); - self.current_index = (current_index + 1) % self.length; - - accounting - } -} diff --git a/contracts/blob_store/sources/storage_node.move b/contracts/blob_store/sources/storage_node.move deleted file mode 100644 index bbd6b9f1..00000000 --- a/contracts/blob_store/sources/storage_node.move +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -module blob_store::storage_node { - use std::string::String; - use sui::group_ops::Element; - use sui::bls12381::{G1, g1_from_bytes}; - - // Error codes - const EInvalidNetworkPublicKey: u64 = 1; - - /// Represents a storage node and its meta-data. - /// - /// Creation and deletion of storage node info is an - /// uncontrolled operation, but it lacks key so cannot - /// be stored outside the context of another object. - public struct StorageNodeInfo has store, drop { - name: String, - network_address: String, - public_key: Element, - network_public_key: vector, - shard_ids: vector, - } - - /// A public constructor for the StorageNodeInfo. - public fun create_storage_node_info( - name: String, - network_address: String, - public_key: vector, - network_public_key: vector, - shard_ids: vector, - ): StorageNodeInfo { - assert!(network_public_key.length() == 32, EInvalidNetworkPublicKey); - StorageNodeInfo { - name, - network_address, - public_key: g1_from_bytes(&public_key), - network_public_key, - shard_ids - } - } - - public fun public_key(self: &StorageNodeInfo): &Element { - &self.public_key - } - - public fun network_public_key(self: &StorageNodeInfo): &vector { - &self.network_public_key - } - - public fun shard_ids(self: &StorageNodeInfo): &vector { - &self.shard_ids - } - - public fun weight(self: &StorageNodeInfo): u16 { - self.shard_ids.length() as u16 - } - - #[test_only] - /// Create a storage node with dummy name & address - public fun new_for_testing(public_key: vector, weight: u16): StorageNodeInfo { - let mut i: u16 = 0; - let mut shard_ids = vector[]; - while (i < weight) { - shard_ids.push_back(i); - i = i + 1; - }; - StorageNodeInfo { - name: b"node".to_string(), - network_address: b"127.0.0.1".to_string(), - public_key: g1_from_bytes(&public_key), - network_public_key: x"820e2b273530a00de66c9727c40f48be985da684286983f398ef7695b8a44677", - shard_ids, - } - } -} diff --git 
a/contracts/blob_store/sources/storage_resource.move b/contracts/blob_store/sources/storage_resource.move deleted file mode 100644 index 7e7fc1a0..00000000 --- a/contracts/blob_store/sources/storage_resource.move +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. -// SPDX-License-Identifier: Apache-2.0 - -module blob_store::storage_resource { - const EInvalidEpoch: u64 = 0; - const EIncompatibleEpochs: u64 = 1; - const EIncompatibleAmount: u64 = 2; - - /// Reservation for storage for a given period, which is inclusive start, exclusive end. - public struct Storage has key, store { - id: UID, - start_epoch: u64, - end_epoch: u64, - storage_size: u64, - } - - public fun start_epoch(self: &Storage): u64 { - self.start_epoch - } - - public fun end_epoch(self: &Storage): u64 { - self.end_epoch - } - - public fun storage_size(self: &Storage): u64 { - self.storage_size - } - - /// Constructor for [Storage] objects. - /// Necessary to allow `blob_store::system` to create storage objects. - /// Cannot be called outside of the current module and [blob_store::system]. - public(package) fun create_storage( - start_epoch: u64, - end_epoch: u64, - storage_size: u64, - ctx: &mut TxContext, - ): Storage { - Storage { id: object::new(ctx), start_epoch, end_epoch, storage_size } - } - - /// Split the storage object into two based on `split_epoch` - /// - /// `storage` is modified to cover the period from `start_epoch` to `split_epoch` - /// and a new storage object covering `split_epoch` to `end_epoch` is returned. 
- public fun split_by_epoch( - storage: &mut Storage, - split_epoch: u64, - ctx: &mut TxContext, - ): Storage { - assert!( - split_epoch >= storage.start_epoch && split_epoch <= storage.end_epoch, - EInvalidEpoch, - ); - let end_epoch = storage.end_epoch; - storage.end_epoch = split_epoch; - Storage { - id: object::new(ctx), - start_epoch: split_epoch, - end_epoch, - storage_size: storage.storage_size, - } - } - - /// Split the storage object into two based on `split_size` - /// - /// `storage` is modified to cover `split_size` and a new object covering - /// `storage.storage_size - split_size` is created. - public fun split_by_size(storage: &mut Storage, split_size: u64, ctx: &mut TxContext): Storage { - let storage_size = storage.storage_size - split_size; - storage.storage_size = split_size; - Storage { - id: object::new(ctx), - start_epoch: storage.start_epoch, - end_epoch: storage.end_epoch, - storage_size, - } - } - - /// Fuse two storage objects that cover adjacent periods with the same storage size. 
- public fun fuse_periods(first: &mut Storage, second: Storage) { - let Storage { - id, - start_epoch: second_start, - end_epoch: second_end, - storage_size: second_size, - } = second; - id.delete(); - assert!(first.storage_size == second_size, EIncompatibleAmount); - if (first.end_epoch == second_start) { - first.end_epoch = second_end; - } else { - assert!(first.start_epoch == second_end, EIncompatibleEpochs); - first.start_epoch = second_start; - } - } - - /// Fuse two storage objects that cover the same period - public fun fuse_amount(first: &mut Storage, second: Storage) { - let Storage { - id, - start_epoch: second_start, - end_epoch: second_end, - storage_size: second_size, - } = second; - id.delete(); - assert!( - first.start_epoch == second_start && first.end_epoch == second_end, - EIncompatibleEpochs, - ); - first.storage_size = first.storage_size + second_size; - } - - /// Fuse two storage objects that either cover the same period - /// or adjacent periods with the same storage size. - public fun fuse(first: &mut Storage, second: Storage) { - if (first.start_epoch == second.start_epoch) { - // Fuse by storage_size - fuse_amount(first, second); - } else { - // Fuse by period - fuse_periods(first, second); - } - } - - #[test_only] - /// Constructor for [Storage] objects for tests - public fun create_for_test( - start_epoch: u64, - end_epoch: u64, - storage_size: u64, - ctx: &mut TxContext, - ): Storage { - Storage { id: object::new(ctx), start_epoch, end_epoch, storage_size } - } - - /// Destructor for [Storage] objects - public fun destroy(storage: Storage) { - let Storage { - id, - start_epoch: _, - end_epoch: _, - storage_size: _, - } = storage; - id.delete(); - } -} diff --git a/contracts/blob_store/sources/system.move b/contracts/blob_store/sources/system.move deleted file mode 100644 index 50927f89..00000000 --- a/contracts/blob_store/sources/system.move +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -module blob_store::system { - use sui::coin::Coin; - use sui::event; - use sui::table::{Self, Table}; - use sui::bcs; - - use blob_store::committee::{Self, Committee}; - use blob_store::storage_accounting::{Self, FutureAccounting, FutureAccountingRingBuffer}; - use blob_store::storage_resource::{Self, Storage}; - use blob_store::blob_events::emit_invalid_blob_id; - - // Errors - const EIncorrectCommittee: u64 = 0; - const ESyncEpochChange: u64 = 1; - const EInvalidPeriodsAhead: u64 = 2; - const EStorageExceeded: u64 = 3; - const EInvalidMsgType: u64 = 4; - const EInvalidIdEpoch: u64 = 5; - - // Message types: - const EPOCH_DONE_MSG_TYPE: u8 = 0; - const INVALID_BLOB_ID_MSG_TYPE: u8 = 2; - - // Epoch status values - #[allow(unused_const)] - const EPOCH_STATUS_DONE: u8 = 0; - #[allow(unused_const)] - const EPOCH_STATUS_SYNC: u8 = 1; - - /// The maximum number of periods ahead we allow for storage reservations. - /// This number is a placeholder, and assumes an epoch is a week, - /// and therefore 2 x 52 weeks = 2 years. - const MAX_PERIODS_AHEAD: u64 = 104; - - // Keep in sync with the same constant in `crates/walrus-sui/utils.rs`. - const BYTES_PER_UNIT_SIZE: u64 = 1_024; - - // Event types - - /// Signals an epoch change, and entering the SYNC state for the new epoch. - public struct EpochChangeSync has copy, drop { - epoch: u64, - total_capacity_size: u64, - used_capacity_size: u64, - } - - /// Signals that the epoch change is DONE now. - public struct EpochChangeDone has copy, drop { - epoch: u64, - } - - // Object definitions - - #[allow(unused_field)] - public struct System has key, store { - id: UID, - /// The current committee, with the current epoch. - /// The option is always Some, but need it for swap. - current_committee: Option, - /// When we first enter the current epoch we SYNC, - /// and then we are DONE after a cert from a quorum. 
- epoch_status: u8, - // Some accounting - total_capacity_size: u64, - used_capacity_size: u64, - /// The price per unit size of storage. - price_per_unit_size: u64, - /// Tables about the future and the past. - past_committees: Table, - future_accounting: FutureAccountingRingBuffer, - } - - /// Get epoch. Uses the committee to get the epoch. - public fun epoch(self: &System): u64 { - committee::epoch(option::borrow(&self.current_committee)) - } - - /// Accessor for total capacity size. - public fun total_capacity_size(self: &System): u64 { - self.total_capacity_size - } - - /// Accessor for used capacity size. - public fun used_capacity_size(self: &System): u64 { - self.used_capacity_size - } - - /// A privileged constructor for an initial system object, - /// at epoch 0 with a given committee, and a given - /// capacity and price. Here ownership of a committee at time 0 - /// acts as a capability to create a init a new system object. - public fun new( - first_committee: Committee, - capacity: u64, - price: u64, - ctx: &mut TxContext, - ): System { - assert!(first_committee.epoch() == 0, EIncorrectCommittee); - - // We emit both sync and done events for the first epoch. - event::emit(EpochChangeSync { - epoch: 0, - total_capacity_size: capacity, - used_capacity_size: 0, - }); - event::emit(EpochChangeDone { epoch: 0 }); - - System { - id: object::new(ctx), - current_committee: option::some(first_committee), - epoch_status: EPOCH_STATUS_DONE, - total_capacity_size: capacity, - used_capacity_size: 0, - price_per_unit_size: price, - past_committees: table::new(ctx), - future_accounting: storage_accounting::ring_new(MAX_PERIODS_AHEAD), - } - } - - // We actually create a new objects that does not exist before, so all is good. - #[allow(lint(share_owned))] - /// Create and share a new system object, using ownership of a committee - /// at epoch 0 as a capability to create a new system object. 
- public fun share_new( - first_committee: Committee, - capacity: u64, - price: u64, - ctx: &mut TxContext, - ) { - let sys: System = new(first_committee, capacity, price, ctx); - transfer::share_object(sys); - } - - /// An accessor for the current committee. - public fun current_committee(self: &System): &Committee { - self.current_committee.borrow() - } - - public fun n_shards(self: &System): u16 { - current_committee(self).n_shards() - } - - /// Update epoch to next epoch, and also update the committee, price and capacity. - public fun next_epoch( - self: &mut System, - new_committee: Committee, - new_capacity: u64, - new_price: u64, - ): FutureAccounting { - // Must be in DONE state to move epochs. This is the way. - assert!(self.epoch_status == EPOCH_STATUS_DONE, ESyncEpochChange); - - // Check new committee is valid, the existence of a committee for the next epoch - // is proof that the time has come to move epochs. - let old_epoch = epoch(self); - let new_epoch = old_epoch + 1; - assert!(new_committee.epoch() == new_epoch, EIncorrectCommittee); - let old_committee = self.current_committee.swap(new_committee); - - // Add the old committee to the past_committees table. - self.past_committees.add(old_epoch, old_committee); - - // Update the system object. - self.total_capacity_size = new_capacity; - self.price_per_unit_size = new_price; - self.epoch_status = EPOCH_STATUS_SYNC; - - let mut accounts_old_epoch = self.future_accounting.ring_pop_expand(); - assert!(accounts_old_epoch.epoch() == old_epoch, ESyncEpochChange); - - // Update storage based on the accounts data. - self.used_capacity_size = self.used_capacity_size - accounts_old_epoch.storage_to_reclaim(); - - // Emit Sync event. - event::emit(EpochChangeSync { - epoch: new_epoch, - total_capacity_size: self.total_capacity_size, - used_capacity_size: self.used_capacity_size, - }); - - accounts_old_epoch - } - - /// Allow buying a storage reservation for a given period of epochs. 
- public fun reserve_space( - self: &mut System, - storage_amount: u64, - periods_ahead: u64, - mut payment: Coin, - ctx: &mut TxContext, - ): (Storage, Coin) { - // Check the period is within the allowed range. - assert!(periods_ahead > 0, EInvalidPeriodsAhead); - assert!(periods_ahead <= MAX_PERIODS_AHEAD, EInvalidPeriodsAhead); - - // Check capacity is available. - assert!( - self.used_capacity_size + storage_amount <= self.total_capacity_size, - EStorageExceeded, - ); - - // Pay rewards for each future epoch into the future accounting. - let storage_units = (storage_amount + BYTES_PER_UNIT_SIZE - 1) / BYTES_PER_UNIT_SIZE; - let period_payment_due = self.price_per_unit_size * storage_units; - let coin_balance = payment.balance_mut(); - - let mut i = 0; - while (i < periods_ahead) { - let accounts = self.future_accounting.ring_lookup_mut(i); - - // Distribute rewards - let rewards_balance = accounts.rewards_to_distribute(); - // Note this will abort if the balance is not enough. - let epoch_payment = coin_balance.split(period_payment_due); - rewards_balance.join(epoch_payment); - - i = i + 1; - }; - - // Update the storage accounting. - self.used_capacity_size = self.used_capacity_size + storage_amount; - - // Account the space to reclaim in the future. - let final_account = self.future_accounting.ring_lookup_mut(periods_ahead - 1); - final_account.increase_storage_to_reclaim(storage_amount); - - let self_epoch = epoch(self); - ( - storage_resource::create_storage( - self_epoch, - self_epoch + periods_ahead, - storage_amount, - ctx, - ), - payment, - ) - } - - #[test_only] - public fun set_done_for_testing(self: &mut System) { - self.epoch_status = EPOCH_STATUS_DONE; - } - - // The logic to move epoch from SYNC to DONE. - - /// Define a message type for the SyncDone message. - /// It may only be constructed when a valid certified message is - /// passed in. 
- public struct CertifiedSyncDone has drop { - epoch: u64, - } - - /// Construct the certified sync done message, note that constructing - /// implies a certified message, that is already checked. - public fun certify_sync_done_message(message: committee::CertifiedMessage): CertifiedSyncDone { - // Assert type is correct - assert!(message.intent_type() == EPOCH_DONE_MSG_TYPE, EInvalidMsgType); - - // The SyncDone message has no payload besides the epoch. - // Which happens to already be parsed in the header of the - // certified message. - - CertifiedSyncDone { epoch: message.cert_epoch() } - } - - // make a test only certified message. - #[test_only] - public fun make_sync_done_message_for_testing(epoch: u64): CertifiedSyncDone { - CertifiedSyncDone { epoch } - } - - /// Use the certified message to advance the epoch status to DONE. - public fun sync_done_for_epoch(system: &mut System, message: CertifiedSyncDone) { - // Assert the epoch is correct. - assert!(message.epoch == epoch(system), ESyncEpochChange); - - // Assert we are in the sync state. - assert!(system.epoch_status == EPOCH_STATUS_SYNC, ESyncEpochChange); - - // Move to done state. - system.epoch_status = EPOCH_STATUS_DONE; - - event::emit(EpochChangeDone { epoch: message.epoch }); - } - - // The logic to register an invalid Blob ID - - /// Define a message type for the InvalidBlobID message. - /// It may only be constructed when a valid certified message is - /// passed in. - public struct CertifiedInvalidBlobID has drop { - epoch: u64, - blob_id: u256, - } - - // read the blob id - public fun invalid_blob_id(self: &CertifiedInvalidBlobID): u256 { - self.blob_id - } - - /// Construct the certified invalid Blob ID message, note that constructing - /// implies a certified message, that is already checked. 
- public fun invalid_blob_id_message( - message: committee::CertifiedMessage, - ): CertifiedInvalidBlobID { - // Assert type is correct - assert!( - message.intent_type() == INVALID_BLOB_ID_MSG_TYPE, - EInvalidMsgType, - ); - - // The InvalidBlobID message has no payload besides the blob_id. - // The certified blob message contain a blob_id : u256 - let epoch = message.cert_epoch(); - let message_body = message.into_message(); - - let mut bcs_body = bcs::new(message_body); - let blob_id = bcs_body.peel_u256(); - - // This output is provided as a service in case anything else needs to rely on - // certified invalid blob ID information in the future. But out base design only - // uses the event emitted here. - CertifiedInvalidBlobID { epoch, blob_id } - } - - /// Private System call to process invalid blob id message. This checks that the epoch - /// in which the message was certified is correct, before emitting an event. Correct - /// nodes will only certify invalid blob ids within their period of validity, and this - /// endures we are not flooded with invalid events from past epochs. - public(package) fun inner_declare_invalid_blob_id( - system: &System, - message: CertifiedInvalidBlobID, - ) { - // Assert the epoch is correct. - let epoch = message.epoch; - assert!(epoch == epoch(system), EInvalidIdEpoch); - - // Emit the event about a blob id being invalid here. - emit_invalid_blob_id( - epoch, - message.blob_id, - ); - } - - /// Public system call to process invalid blob id message. Will check the - /// the certificate in the current committee and ensure that the epoch is - /// correct as well. 
- public fun invalidate_blob_id( - system: &System, - signature: vector, - members: vector, - message: vector, - ): u256 { - let committee = system.current_committee.borrow(); - - let certified_message = committee.verify_quorum_in_epoch( - signature, - members, - message, - ); - - let invalid_blob_message = invalid_blob_id_message(certified_message); - let blob_id = invalid_blob_message.blob_id; - inner_declare_invalid_blob_id(system, invalid_blob_message); - blob_id - } -} diff --git a/contracts/blob_store/sources/tests/blob_tests.move b/contracts/blob_store/sources/tests/blob_tests.move deleted file mode 100644 index 75a834d4..00000000 --- a/contracts/blob_store/sources/tests/blob_tests.move +++ /dev/null @@ -1,647 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. -// SPDX-License-Identifier: Apache-2.0 - -#[test_only] -module blob_store::blob_tests { - use sui::coin; - use sui::bcs; - - use std::string; - - use blob_store::committee; - use blob_store::system; - use blob_store::storage_accounting as sa; - use blob_store::blob; - use blob_store::storage_node; - - use blob_store::storage_resource::{split_by_epoch, destroy}; - - const RED_STUFF: u8 = 0; - const NETWORK_PUBLIC_KEY: vector = - x"820e2b273530a00de66c9727c40f48be985da684286983f398ef7695b8a44677"; - - public struct TESTWAL has store, drop {} - - #[test] - public fun test_blob_register_happy_path(): system::System { - let mut ctx = tx_context::dummy(); - - // A test coin. 
- let fake_coin = coin::mint_for_testing(100000000, &mut ctx); - - // Create a new committee - let committee = committee::committee_for_testing(0); - - // Create a new system object - let mut system: system::System = system::new(committee, 1000000000, 5, &mut ctx); - - // Get some space for a few epochs - let (storage, fake_coin) = system::reserve_space( - &mut system, - 1_000_000, - 3, - fake_coin, - &mut ctx, - ); - - // Register a Blob - let blob_id = blob::derive_blob_id(0xABC, RED_STUFF, 5000); - let blob1 = blob::register(&system, storage, blob_id, 0xABC, 5000, RED_STUFF, &mut ctx); - - coin::burn_for_testing(fake_coin); - blob::drop_for_testing(blob1); - system - } - - #[test, expected_failure(abort_code=blob::EResourceSize)] - public fun test_blob_insufficient_space(): system::System { - let mut ctx = tx_context::dummy(); - - // A test coin. - let fake_coin = coin::mint_for_testing(100000000, &mut ctx); - - // Create a new committee - let committee = committee::committee_for_testing(0); - - // Create a new system object - let mut system: system::System = system::new(committee, 1000000000, 5, &mut ctx); - - // Get some space for a few epochs - TOO LITTLE SPACE - let (storage, fake_coin) = system::reserve_space( - &mut system, - 5000, - 3, - fake_coin, - &mut ctx, - ); - - // Register a Blob - let blob_id = blob::derive_blob_id(0xABC, RED_STUFF, 5000); - let blob1 = blob::register(&system, storage, blob_id, 0xABC, 5000, RED_STUFF, &mut ctx); - - coin::burn_for_testing(fake_coin); - blob::drop_for_testing(blob1); - system - } - - #[test] - public fun test_blob_certify_happy_path(): system::System { - let mut ctx = tx_context::dummy(); - - // A test coin. 
- let fake_coin = coin::mint_for_testing(100000000, &mut ctx); - - // Create a new committee - let committee = committee::committee_for_testing(0); - - // Create a new system object - let mut system: system::System = system::new(committee, 1000000000, 5, &mut ctx); - - // Get some space for a few epochs - let (storage, fake_coin) = system::reserve_space( - &mut system, - 1_000_000, - 3, - fake_coin, - &mut ctx, - ); - - // Register a Blob - let blob_id = blob::derive_blob_id(0xABC, RED_STUFF, 5000); - let mut blob1 = blob::register(&system, storage, blob_id, 0xABC, 5000, RED_STUFF, &mut ctx); - - let certify_message = blob::certified_blob_message_for_testing(0, blob_id); - - // Set certify - blob::certify_with_certified_msg(&system, certify_message, &mut blob1); - - // Assert certified - assert!(option::is_some(blob::certified_epoch(&blob1)), 0); - - coin::burn_for_testing(fake_coin); - blob::drop_for_testing(blob1); - system - } - - #[test] - public fun test_blob_certify_single_function(): system::System { - let mut ctx = tx_context::dummy(); - - // Derive blob ID and root_hash from bytes - let root_hash_vec = vector[ - 1, 2, 3, 4, 5, 6, 7, 8, - 1, 2, 3, 4, 5, 6, 7, 8, - 1, 2, 3, 4, 5, 6, 7, 8, - 1, 2, 3, 4, 5, 6, 7, 8, - ]; - - let mut encode = bcs::new(root_hash_vec); - let root_hash = bcs::peel_u256(&mut encode); - - let blob_id_vec = vector[ - 119, 174, 25, 167, 128, 57, 96, 1, - 163, 56, 61, 132, 191, 35, 44, 18, - 231, 224, 79, 178, 85, 51, 69, 53, - 214, 95, 198, 203, 56, 221, 111, 83 - ]; - - let mut encode = bcs::new(blob_id_vec); - let blob_id = bcs::peel_u256(&mut encode); - - // Derive and check blob ID - let blob_id_bis = blob::derive_blob_id(root_hash, RED_STUFF, 10000); - assert!(blob_id == blob_id_bis, 0); - - // BCS confirmation message for epoch 0 and blob id `blob_id` with intents - let confirmation = vector[ - 1, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, - 119, 174, 25, 167, 128, 57, 96, 1, 163, 56, 61, 132, - 191, 35, 44, 18, 231, 224, 79, 178, 85, 51, 
69, 53, 214, - 95, 198, 203, 56, 221, 111, 83 - ]; - // Signature from private key scalar(117) on `confirmation` - let signature = vector[ - 184, 138, 78, 92, 221, 170, 180, 107, 75, 249, 222, 177, 183, 25, 107, 214, 237, - 214, 213, 12, 239, 65, 88, 112, 65, 229, 225, 23, 62, 158, 144, 67, 206, 37, 148, - 1, 69, 64, 190, 180, 121, 153, 39, 149, 41, 2, 112, 69, 23, 68, 69, 159, 192, 116, - 41, 113, 21, 116, 123, 169, 204, 165, 232, 70, 146, 1, 175, 70, 126, 14, 20, 206, - 113, 234, 141, 195, 218, 52, 172, 56, 78, 168, 114, 213, 241, 83, 188, 215, 123, - 191, 111, 136, 26, 193, 60, 246 - ]; - - // A test coin. - let fake_coin = coin::mint_for_testing(100000000, &mut ctx); - - // Create storage node - // Pk corresponding to secret key scalar(117) - let public_key = vector[ - 149, 234, 204, 58, 220, 9, 200, 39, 89, 63, 88, 30, 142, 45, - 224, 104, 191, 76, 245, 208, 192, 235, 41, 229, 55, 47, 13, 35, 54, 71, 136, 238, 15, - 155, 235, 17, 44, 138, 126, 156, 47, 12, 114, 4, 51, 112, 92, 240 - ]; - let storage_node = storage_node::create_storage_node_info( - string::utf8(b"node"), - string::utf8(b"127.0.0.1"), - public_key, - NETWORK_PUBLIC_KEY, - vector[0, 1, 2, 3, 4, 5], - ); - - // Create a new committee - let cap = committee::create_committee_cap_for_tests(); - let committee = committee::create_committee(&cap, 0, vector[storage_node]); - - // Create a new system object - let mut system: system::System = system::new(committee, 1000000000, 5, &mut ctx); - - // Get some space for a few epochs - let (storage, fake_coin) = system::reserve_space( - &mut system, - 1_000_000, - 3, - fake_coin, - &mut ctx, - ); - - // Register a Blob - let mut blob1 = blob::register( - &system, - storage, - blob_id, - root_hash, - 10000, - RED_STUFF, - &mut ctx, - ); - - // Set certify - blob::certify(&system, &mut blob1, signature, vector[0], confirmation); - - // Assert certified - assert!(option::is_some(blob::certified_epoch(&blob1)), 0); - - coin::burn_for_testing(fake_coin); - 
blob::drop_for_testing(blob1); - system - } - - #[test, expected_failure(abort_code=blob::EWrongEpoch)] - public fun test_blob_certify_bad_epoch(): system::System { - let mut ctx = tx_context::dummy(); - - // A test coin. - let fake_coin = coin::mint_for_testing(100000000, &mut ctx); - - // Create a new committee - let committee = committee::committee_for_testing(0); - - // Create a new system object - let mut system: system::System = system::new(committee, 1000000000, 5, &mut ctx); - - // Get some space for a few epochs - let (storage, fake_coin) = system::reserve_space( - &mut system, - 1_000_000, - 3, - fake_coin, - &mut ctx, - ); - - // Register a Blob - let blob_id = blob::derive_blob_id(0xABC, RED_STUFF, 5000); - let mut blob1 = blob::register(&system, storage, blob_id, 0xABC, 5000, RED_STUFF, &mut ctx); - - // Set INCORRECT EPOCH TO 1 - let certify_message = blob::certified_blob_message_for_testing(1, blob_id); - - // Set certify - blob::certify_with_certified_msg(&system, certify_message, &mut blob1); - - coin::burn_for_testing(fake_coin); - blob::drop_for_testing(blob1); - system - } - - #[test, expected_failure(abort_code=blob::EInvalidBlobId)] - public fun test_blob_certify_bad_blob_id(): system::System { - let mut ctx = tx_context::dummy(); - - // A test coin. 
- let fake_coin = coin::mint_for_testing(100000000, &mut ctx); - - // Create a new committee - let committee = committee::committee_for_testing(0); - - // Create a new system object - let mut system: system::System = system::new(committee, 1000000000, 5, &mut ctx); - - // Get some space for a few epochs - let (storage, fake_coin) = system::reserve_space( - &mut system, - 1_000_000, - 3, - fake_coin, - &mut ctx, - ); - - // Register a Blob - let blob_id = blob::derive_blob_id(0xABC, RED_STUFF, 5000); - let mut blob1 = blob::register(&system, storage, blob_id, 0xABC, 5000, RED_STUFF, &mut ctx); - - // DIFFERENT blob id - let certify_message = blob::certified_blob_message_for_testing(0, 0xFFF); - - // Set certify - blob::certify_with_certified_msg(&system, certify_message, &mut blob1); - - coin::burn_for_testing(fake_coin); - blob::drop_for_testing(blob1); - system - } - - #[test, expected_failure(abort_code=blob::EResourceBounds)] - public fun test_blob_certify_past_epoch(): system::System { - let mut ctx = tx_context::dummy(); - - // A test coin. 
- let fake_coin = coin::mint_for_testing(100000000, &mut ctx); - - // Create a new committee - let committee = committee::committee_for_testing(0); - - // Create a new system object - let mut system: system::System = system::new(committee, 1000000000, 5, &mut ctx); - - // Get some space for a few epochs - let (storage, fake_coin) = system::reserve_space( - &mut system, - 1_000_000, - 3, - fake_coin, - &mut ctx, - ); - - // Register a Blob - let blob_id = blob::derive_blob_id(0xABC, RED_STUFF, 5000); - let mut blob1 = blob::register(&system, storage, blob_id, 0xABC, 5000, RED_STUFF, &mut ctx); - - // Advance epoch -- to epoch 1 - let committee = committee::committee_for_testing(1); - let epoch_accounts = system::next_epoch(&mut system, committee, 1000, 3); - sa::burn_for_testing(epoch_accounts); - - // Advance epoch -- to epoch 2 - system::set_done_for_testing(&mut system); - let committee = committee::committee_for_testing(2); - let epoch_accounts = system::next_epoch(&mut system, committee, 1000, 3); - sa::burn_for_testing(epoch_accounts); - - // Advance epoch -- to epoch 3 - system::set_done_for_testing(&mut system); - let committee = committee::committee_for_testing(3); - let epoch_accounts = system::next_epoch(&mut system, committee, 1000, 3); - sa::burn_for_testing(epoch_accounts); - - // Set certify -- EPOCH BEYOND RESOURCE BOUND - let certify_message = blob::certified_blob_message_for_testing(3, blob_id); - blob::certify_with_certified_msg(&system, certify_message, &mut blob1); - - coin::burn_for_testing(fake_coin); - blob::drop_for_testing(blob1); - system - } - - #[test] - public fun test_blob_happy_destroy(): system::System { - let mut ctx = tx_context::dummy(); - - // A test coin. 
- let fake_coin = coin::mint_for_testing(100000000, &mut ctx); - - // Create a new committee - let committee = committee::committee_for_testing(0); - - // Create a new system object - let mut system: system::System = system::new(committee, 1000000000, 5, &mut ctx); - - // Get some space for a few epochs - let (storage, fake_coin) = system::reserve_space( - &mut system, - 1_000_000, - 3, - fake_coin, - &mut ctx, - ); - - // Register a Blob - let blob_id = blob::derive_blob_id(0xABC, RED_STUFF, 5000); - let mut blob1 = blob::register(&system, storage, blob_id, 0xABC, 5000, RED_STUFF, &mut ctx); - - // Set certify - let certify_message = blob::certified_blob_message_for_testing(0, blob_id); - blob::certify_with_certified_msg(&system, certify_message, &mut blob1); - - // Advance epoch -- to epoch 1 - let committee = committee::committee_for_testing(1); - let epoch_accounts = system::next_epoch(&mut system, committee, 1000, 3); - sa::burn_for_testing(epoch_accounts); - - // Advance epoch -- to epoch 2 - system::set_done_for_testing(&mut system); - let committee = committee::committee_for_testing(2); - let epoch_accounts = system::next_epoch(&mut system, committee, 1000, 3); - sa::burn_for_testing(epoch_accounts); - - // Advance epoch -- to epoch 3 - system::set_done_for_testing(&mut system); - let committee = committee::committee_for_testing(3); - let epoch_accounts = system::next_epoch(&mut system, committee, 1000, 3); - sa::burn_for_testing(epoch_accounts); - - // Destroy the blob - blob::destroy_blob(&system, blob1); - - coin::burn_for_testing(fake_coin); - system - } - - #[test, expected_failure(abort_code=blob::EResourceBounds)] - public fun test_blob_unhappy_destroy(): system::System { - let mut ctx = tx_context::dummy(); - - // A test coin. 
- let fake_coin = coin::mint_for_testing(100000000, &mut ctx); - - // Create a new committee - let committee = committee::committee_for_testing(0); - - // Create a new system object - let mut system: system::System = system::new(committee, 1000000000, 5, &mut ctx); - - // Get some space for a few epochs - let (storage, fake_coin) = system::reserve_space( - &mut system, - 1_000_000, - 3, - fake_coin, - &mut ctx, - ); - - // Register a Blob - let blob_id = blob::derive_blob_id(0xABC, RED_STUFF, 5000); - let blob1 = blob::register(&system, storage, blob_id, 0xABC, 5000, RED_STUFF, &mut ctx); - - // Destroy the blob - blob::destroy_blob(&system, blob1); - - coin::burn_for_testing(fake_coin); - system - } - - #[test] - public fun test_certified_blob_message() { - let msg = committee::certified_message_for_testing( - 1, 0, 10, 100, vector[ - 0xAA, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - ] - ); - - let message = blob::certify_blob_message(msg); - assert!(blob::message_blob_id(&message) == 0xAA, 0); - } - - #[test, expected_failure] - public fun test_certified_blob_message_too_short() { - let msg = committee::certified_message_for_testing( - 1, 0, 10, 100, vector[ - 0xAA, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, - ], - ); - - let message = blob::certify_blob_message(msg); - assert!(blob::message_blob_id(&message) == 0xAA, 0); - } - - #[test] - public fun test_blob_extend_happy_path(): system::System { - let mut ctx = tx_context::dummy(); - - // A test coin. 
- let fake_coin = coin::mint_for_testing(100000000, &mut ctx); - - // Create a new committee - let committee = committee::committee_for_testing(0); - - // Create a new system object - let mut system: system::System = system::new(committee, 1000000000, 5, &mut ctx); - - // Get some space for a few epochs - let (storage, fake_coin) = system::reserve_space( - &mut system, - 1_000_000, - 3, - fake_coin, - &mut ctx, - ); - - // Get a longer storage period - let (mut storage_long, fake_coin) = system::reserve_space( - &mut system, - 1_000_000, - 5, - fake_coin, - &mut ctx, - ); - - // Split by period - let trailing_storage = split_by_epoch(&mut storage_long, 3, &mut ctx); - - // Register a Blob - let blob_id = blob::derive_blob_id(0xABC, RED_STUFF, 5000); - let mut blob1 = blob::register(&system, storage, blob_id, 0xABC, 5000, RED_STUFF, &mut ctx); - let certify_message = blob::certified_blob_message_for_testing(0, blob_id); - - // Set certify - blob::certify_with_certified_msg(&system, certify_message, &mut blob1); - - // Now extend the blob - blob::extend(&system, &mut blob1, trailing_storage); - - // Assert certified - assert!(option::is_some(blob::certified_epoch(&blob1)), 0); - - destroy(storage_long); - coin::burn_for_testing(fake_coin); - blob::drop_for_testing(blob1); - system - } - - #[test, expected_failure] - public fun test_blob_extend_bad_period(): system::System { - let mut ctx = tx_context::dummy(); - - // A test coin. 
- let fake_coin = coin::mint_for_testing(100000000, &mut ctx); - - // Create a new committee - let committee = committee::committee_for_testing(0); - - // Create a new system object - let mut system: system::System = system::new(committee, 1000000000, 5, &mut ctx); - - // Get some space for a few epochs - let (storage, fake_coin) = system::reserve_space( - &mut system, - 1_000_000, - 3, - fake_coin, - &mut ctx, - ); - - // Get a longer storage period - let (mut storage_long, fake_coin) = system::reserve_space( - &mut system, - 1_000_000, - 5, - fake_coin, - &mut ctx, - ); - - // Split by period - let trailing_storage = split_by_epoch(&mut storage_long, 4, &mut ctx); - - // Register a Blob - let blob_id = blob::derive_blob_id(0xABC, RED_STUFF, 5000); - let mut blob1 = blob::register(&system, storage, blob_id, 0xABC, 5000, RED_STUFF, &mut ctx); - let certify_message = blob::certified_blob_message_for_testing(0, 0xABC); - - // Set certify - blob::certify_with_certified_msg(&system, certify_message, &mut blob1); - - // Now extend the blob // ITS THE WRONG PERIOD - blob::extend(&system, &mut blob1, trailing_storage); - - destroy(storage_long); - coin::burn_for_testing(fake_coin); - blob::drop_for_testing(blob1); - system - } - - #[test,expected_failure(abort_code=blob::EResourceBounds)] - public fun test_blob_unhappy_extend(): system::System { - let mut ctx = tx_context::dummy(); - - // A test coin. 
- let fake_coin = coin::mint_for_testing(100000000, &mut ctx); - - // Create a new committee - let committee = committee::committee_for_testing(0); - - // Create a new system object - let mut system: system::System = system::new(committee, 1000000000, 5, &mut ctx); - - // Get some space for a few epochs - let (storage, fake_coin) = system::reserve_space( - &mut system, - 1_000_000, - 3, - fake_coin, - &mut ctx, - ); - - // Get a longer storage period - let (mut storage_long, fake_coin) = system::reserve_space( - &mut system, - 1_000_000, - 5, - fake_coin, - &mut ctx, - ); - - // Split by period - let trailing_storage = split_by_epoch(&mut storage_long, 3, &mut ctx); - - // Register a Blob - let blob_id = blob::derive_blob_id(0xABC, RED_STUFF, 5000); - let mut blob1 = blob::register(&system, storage, blob_id, 0xABC, 5000, RED_STUFF, &mut ctx); - - // Set certify - let certify_message = blob::certified_blob_message_for_testing(0, blob_id); - blob::certify_with_certified_msg(&system, certify_message, &mut blob1); - - // Advance epoch -- to epoch 1 - let committee = committee::committee_for_testing(1); - let epoch_accounts = system::next_epoch(&mut system, committee, 1000, 3); - sa::burn_for_testing(epoch_accounts); - - // Advance epoch -- to epoch 2 - system::set_done_for_testing(&mut system); - let committee = committee::committee_for_testing(2); - let epoch_accounts = system::next_epoch(&mut system, committee, 1000, 3); - sa::burn_for_testing(epoch_accounts); - - // Advance epoch -- to epoch 3 - system::set_done_for_testing(&mut system); - let committee = committee::committee_for_testing(3); - let epoch_accounts = system::next_epoch(&mut system, committee, 1000, 3); - sa::burn_for_testing(epoch_accounts); - - // Try to extend after expiry. 
- - // Now extend the blo - blob::extend(&system, &mut blob1, trailing_storage); - - // Destroy the blob - blob::destroy_blob(&system, blob1); - - destroy(storage_long); - coin::burn_for_testing(fake_coin); - system - } -} diff --git a/contracts/blob_store/sources/tests/bls_tests.move b/contracts/blob_store/sources/tests/bls_tests.move deleted file mode 100644 index 27a39aa5..00000000 --- a/contracts/blob_store/sources/tests/bls_tests.move +++ /dev/null @@ -1,205 +0,0 @@ -// editorconfig-checker-disable-file -// Data here autogenerated by python file - -// Copyright (c) Mysten Labs, Inc. -// SPDX-License-Identifier: Apache-2.0 - -#[test_only] -module blob_store::bls_tests { - - use sui::bls12381::bls12381_min_pk_verify; - - use blob_store::bls_aggregate::{Self, BlsCommittee, new_bls_committee, verify_certificate}; - use blob_store::storage_node; - - #[test] - public fun test_basic_compatibility(){ - - // Check the basic python compatibility - - let pub_key_bytes = vector[142, 78, 70, 3, 179, 142, 145, 75, 170, 36, 5, 232, 153, 164, 205, 57, 24, 216, 208, 34, 87, 213, 225, 76, 5, 157, 212, 88, 161, 34, 75, 145, 206, 144, 85, 11, 197, 110, 75, 175, 215, 194, 78, 51, 192, 196, 59, 204]; - let message = vector[104, 101, 108, 108, 111]; - let signature = vector[167, 32, 44, 82, 208, 22, 233, 67, 235, 217, 254, 68, 183, 43, 226, 203, 148, 213, 13, 105, 152, 28, 1, 169, 159, 62, 217, 47, 175, 237, 162, 94, 2, 38, 239, 56, 181, 123, 19, 123, 93, 253, 16, 64, 9, 109, 42, 3, 14, 11, 80, 109, 92, 8, 61, 88, 246, 66, 65, 15, 235, 232, 216, 240, 96, 192, 77, 134, 179, 40, 232, 125, 35, 136, 196, 16, 24, 52, 145, 128, 9, 42, 206, 191, 49, 91, 139, 252, 25, 5, 167, 199, 132, 203, 25, 154]; - - assert!(bls12381_min_pk_verify( - &signature, - &pub_key_bytes, - &message), 0); - - } - - #[test] - public fun test_check_aggregate(): BlsCommittee { - let pk0 = vector[166, 14, 117, 25, 14, 98, 182, 165, 65, 66, 209, 71, 40, 154, 115, 92, 76, 225, 26, 157, 153, 117, 67, 218, 83, 154, 61, 
181, 125, 239, 94, 216, 59, 164, 11, 116, 229, 80, 101, 240, 43, 53, 170, 29, 80, 76, 64, 75]; - let pk1 = vector[174, 18, 3, 148, 89, 198, 4, 145, 103, 43, 106, 98, 130, 53, 93, 135, 101, 186, 98, 114, 56, 127, 185, 26, 62, 150, 4, 250, 42, 129, 69, 12, 241, 107, 135, 11, 180, 70, 252, 58, 62, 10, 24, 127, 255, 111, 137, 69]; - let pk2 = vector[148, 123, 50, 124, 138, 21, 179, 150, 52, 164, 38, 175, 112, 192, 98, 181, 6, 50, 167, 68, 237, 221, 65, 181, 164, 104, 100, 20, 239, 76, 217, 116, 107, 177, 29, 10, 83, 198, 194, 255, 33, 187, 207, 51, 30, 7, 172, 146]; - let pk3 = vector[133, 252, 74, 229, 67, 202, 22, 36, 116, 88, 110, 118, 215, 44, 71, 208, 21, 28, 60, 183, 183, 126, 130, 200, 126, 85, 74, 191, 114, 84, 142, 46, 116, 107, 198, 117, 128, 91, 104, 139, 80, 22, 38, 158, 24, 255, 66, 80]; - let pk4 = vector[140, 170, 13, 232, 98, 121, 62, 86, 124, 96, 80, 170, 130, 45, 178, 214, 203, 43, 82, 11, 198, 43, 109, 188, 186, 126, 119, 48, 103, 237, 9, 199, 186, 2, 130, 215, 194, 14, 1, 80, 12, 108, 47, 167, 100, 8, 173, 237]; - let pk5 = vector[170, 39, 63, 208, 83, 35, 225, 56, 30, 16, 233, 62, 104, 60, 52, 100, 115, 40, 18, 112, 32, 179, 80, 127, 200, 205, 220, 51, 112, 56, 227, 63, 189, 122, 153, 239, 13, 44, 123, 106, 39, 141, 127, 129, 22, 22, 37, 96]; - let pk6 = vector[143, 206, 207, 249, 174, 4, 144, 247, 35, 18, 56, 34, 198, 111, 54, 153, 109, 35, 116, 144, 214, 118, 158, 230, 143, 159, 122, 125, 161, 198, 186, 200, 181, 195, 208, 196, 52, 142, 140, 232, 252, 61, 81, 89, 248, 51, 52, 132]; - let pk7 = vector[143, 79, 254, 129, 165, 12, 241, 23, 6, 156, 154, 102, 173, 159, 39, 118, 238, 234, 233, 79, 224, 43, 162, 160, 249, 89, 108, 183, 152, 249, 229, 189, 244, 113, 159, 206, 170, 97, 116, 111, 254, 36, 8, 242, 91, 86, 217, 110]; - let pk8 = vector[135, 133, 64, 95, 39, 94, 226, 253, 147, 78, 131, 131, 90, 121, 186, 101, 31, 128, 176, 244, 50, 223, 27, 128, 99, 80, 220, 148, 156, 22, 156, 96, 230, 7, 103, 228, 31, 174, 216, 234, 172, 94, 208, 233, 226, 
16, 120, 124]; - let pk9 = vector[128, 173, 226, 9, 19, 120, 41, 58, 99, 213, 83, 40, 206, 242, 55, 54, 244, 219, 220, 73, 189, 60, 7, 135, 184, 193, 140, 214, 168, 221, 194, 212, 42, 39, 146, 66, 232, 123, 34, 209, 144, 159, 63, 29, 85, 229, 218, 102]; - let message = vector[104, 101, 108, 108, 111]; - - // This is the aggregate sig for keys 0, 1, 2, 3, 4, 5, 6 - let agg_sig = vector[134, 145, 54, 247, 223, 68, 1, 65, 112, 10, 160, 125, 172, 100, 93, 62, 192, 216, 7, 129, 27, 180, 99, 101, 45, 248, 123, 114, 102, 97, 180, 101, 8, 246, 118, 94, 149, 82, 158, 181, 134, 28, 177, 85, 241, 53, 152, 176, 22, 227, 147, 88, 180, 160, 138, 174, 97, 9, 70, 172, 29, 128, 192, 254, 252, 43, 131, 182, 120, 126, 203, 191, 202, 186, 23, 179, 170, 184, 146, 236, 83, 21, 7, 2, 177, 103, 103, 138, 13, 41, 47, 180, 1, 156, 29, 162]; - - // Make a new committee - let committee = new_bls_committee( - vector[ - storage_node::new_for_testing(pk0, 1), storage_node::new_for_testing(pk1, 1), storage_node::new_for_testing(pk2, 1), storage_node::new_for_testing(pk3, 1), storage_node::new_for_testing(pk4, 1), storage_node::new_for_testing(pk5, 1), storage_node::new_for_testing(pk6, 1), storage_node::new_for_testing(pk7, 1), storage_node::new_for_testing(pk8, 1), storage_node::new_for_testing(pk9, 1) - ] - ); - - // Verify the aggregate signature - verify_certificate( - &committee, - &agg_sig, - &vector[0, 1, 2, 3, 4, 5, 6], - &message - ); - - committee - - } - - #[test, expected_failure(abort_code = bls_aggregate::ESigVerification) ] - public fun test_add_members_error(): BlsCommittee { - let pk0 = vector[166, 14, 117, 25, 14, 98, 182, 165, 65, 66, 209, 71, 40, 154, 115, 92, 76, 225, 26, 157, 153, 117, 67, 218, 83, 154, 61, 181, 125, 239, 94, 216, 59, 164, 11, 116, 229, 80, 101, 240, 43, 53, 170, 29, 80, 76, 64, 75]; - let pk1 = vector[174, 18, 3, 148, 89, 198, 4, 145, 103, 43, 106, 98, 130, 53, 93, 135, 101, 186, 98, 114, 56, 127, 185, 26, 62, 150, 4, 250, 42, 129, 69, 12, 241, 107, 135, 
11, 180, 70, 252, 58, 62, 10, 24, 127, 255, 111, 137, 69]; - let pk2 = vector[148, 123, 50, 124, 138, 21, 179, 150, 52, 164, 38, 175, 112, 192, 98, 181, 6, 50, 167, 68, 237, 221, 65, 181, 164, 104, 100, 20, 239, 76, 217, 116, 107, 177, 29, 10, 83, 198, 194, 255, 33, 187, 207, 51, 30, 7, 172, 146]; - let pk3 = vector[133, 252, 74, 229, 67, 202, 22, 36, 116, 88, 110, 118, 215, 44, 71, 208, 21, 28, 60, 183, 183, 126, 130, 200, 126, 85, 74, 191, 114, 84, 142, 46, 116, 107, 198, 117, 128, 91, 104, 139, 80, 22, 38, 158, 24, 255, 66, 80]; - let pk4 = vector[140, 170, 13, 232, 98, 121, 62, 86, 124, 96, 80, 170, 130, 45, 178, 214, 203, 43, 82, 11, 198, 43, 109, 188, 186, 126, 119, 48, 103, 237, 9, 199, 186, 2, 130, 215, 194, 14, 1, 80, 12, 108, 47, 167, 100, 8, 173, 237]; - let pk5 = vector[170, 39, 63, 208, 83, 35, 225, 56, 30, 16, 233, 62, 104, 60, 52, 100, 115, 40, 18, 112, 32, 179, 80, 127, 200, 205, 220, 51, 112, 56, 227, 63, 189, 122, 153, 239, 13, 44, 123, 106, 39, 141, 127, 129, 22, 22, 37, 96]; - let pk6 = vector[143, 206, 207, 249, 174, 4, 144, 247, 35, 18, 56, 34, 198, 111, 54, 153, 109, 35, 116, 144, 214, 118, 158, 230, 143, 159, 122, 125, 161, 198, 186, 200, 181, 195, 208, 196, 52, 142, 140, 232, 252, 61, 81, 89, 248, 51, 52, 132]; - let pk7 = vector[143, 79, 254, 129, 165, 12, 241, 23, 6, 156, 154, 102, 173, 159, 39, 118, 238, 234, 233, 79, 224, 43, 162, 160, 249, 89, 108, 183, 152, 249, 229, 189, 244, 113, 159, 206, 170, 97, 116, 111, 254, 36, 8, 242, 91, 86, 217, 110]; - let pk8 = vector[135, 133, 64, 95, 39, 94, 226, 253, 147, 78, 131, 131, 90, 121, 186, 101, 31, 128, 176, 244, 50, 223, 27, 128, 99, 80, 220, 148, 156, 22, 156, 96, 230, 7, 103, 228, 31, 174, 216, 234, 172, 94, 208, 233, 226, 16, 120, 124]; - let pk9 = vector[128, 173, 226, 9, 19, 120, 41, 58, 99, 213, 83, 40, 206, 242, 55, 54, 244, 219, 220, 73, 189, 60, 7, 135, 184, 193, 140, 214, 168, 221, 194, 212, 42, 39, 146, 66, 232, 123, 34, 209, 144, 159, 63, 29, 85, 229, 218, 102]; - let message = 
vector[104, 101, 108, 108, 111]; - let agg_sig = vector[134, 145, 54, 247, 223, 68, 1, 65, 112, 10, 160, 125, 172, 100, 93, 62, 192, 216, 7, 129, 27, 180, 99, 101, 45, 248, 123, 114, 102, 97, 180, 101, 8, 246, 118, 94, 149, 82, 158, 181, 134, 28, 177, 85, 241, 53, 152, 176, 22, 227, 147, 88, 180, 160, 138, 174, 97, 9, 70, 172, 29, 128, 192, 254, 252, 43, 131, 182, 120, 126, 203, 191, 202, 186, 23, 179, 170, 184, 146, 236, 83, 21, 7, 2, 177, 103, 103, 138, 13, 41, 47, 180, 1, 156, 29, 162]; - - // Make a new committee - let committee = new_bls_committee( - vector[ - storage_node::new_for_testing(pk0, 1), storage_node::new_for_testing(pk1, 1), storage_node::new_for_testing(pk2, 1), storage_node::new_for_testing(pk3, 1), storage_node::new_for_testing(pk4, 1), storage_node::new_for_testing(pk5, 1), storage_node::new_for_testing(pk6, 1), storage_node::new_for_testing(pk7, 1), storage_node::new_for_testing(pk8, 1), storage_node::new_for_testing(pk9, 1) - ] - ); - - // Verify the aggregate signature - verify_certificate( - &committee, - &agg_sig, - &vector[0, 1, 2, 3, 4, 5, 6, 7], - &message - ); - - committee - - } - - #[test, expected_failure(abort_code = bls_aggregate::ESigVerification) ] - public fun test_incorrect_signature_error(): BlsCommittee { - let pk0 = vector[166, 14, 117, 25, 14, 98, 182, 165, 65, 66, 209, 71, 40, 154, 115, 92, 76, 225, 26, 157, 153, 117, 67, 218, 83, 154, 61, 181, 125, 239, 94, 216, 59, 164, 11, 116, 229, 80, 101, 240, 43, 53, 170, 29, 80, 76, 64, 75]; - let pk1 = vector[174, 18, 3, 148, 89, 198, 4, 145, 103, 43, 106, 98, 130, 53, 93, 135, 101, 186, 98, 114, 56, 127, 185, 26, 62, 150, 4, 250, 42, 129, 69, 12, 241, 107, 135, 11, 180, 70, 252, 58, 62, 10, 24, 127, 255, 111, 137, 69]; - let pk2 = vector[148, 123, 50, 124, 138, 21, 179, 150, 52, 164, 38, 175, 112, 192, 98, 181, 6, 50, 167, 68, 237, 221, 65, 181, 164, 104, 100, 20, 239, 76, 217, 116, 107, 177, 29, 10, 83, 198, 194, 255, 33, 187, 207, 51, 30, 7, 172, 146]; - let pk3 = vector[133, 
252, 74, 229, 67, 202, 22, 36, 116, 88, 110, 118, 215, 44, 71, 208, 21, 28, 60, 183, 183, 126, 130, 200, 126, 85, 74, 191, 114, 84, 142, 46, 116, 107, 198, 117, 128, 91, 104, 139, 80, 22, 38, 158, 24, 255, 66, 80]; - let pk4 = vector[140, 170, 13, 232, 98, 121, 62, 86, 124, 96, 80, 170, 130, 45, 178, 214, 203, 43, 82, 11, 198, 43, 109, 188, 186, 126, 119, 48, 103, 237, 9, 199, 186, 2, 130, 215, 194, 14, 1, 80, 12, 108, 47, 167, 100, 8, 173, 237]; - let pk5 = vector[170, 39, 63, 208, 83, 35, 225, 56, 30, 16, 233, 62, 104, 60, 52, 100, 115, 40, 18, 112, 32, 179, 80, 127, 200, 205, 220, 51, 112, 56, 227, 63, 189, 122, 153, 239, 13, 44, 123, 106, 39, 141, 127, 129, 22, 22, 37, 96]; - let pk6 = vector[143, 206, 207, 249, 174, 4, 144, 247, 35, 18, 56, 34, 198, 111, 54, 153, 109, 35, 116, 144, 214, 118, 158, 230, 143, 159, 122, 125, 161, 198, 186, 200, 181, 195, 208, 196, 52, 142, 140, 232, 252, 61, 81, 89, 248, 51, 52, 132]; - let pk7 = vector[143, 79, 254, 129, 165, 12, 241, 23, 6, 156, 154, 102, 173, 159, 39, 118, 238, 234, 233, 79, 224, 43, 162, 160, 249, 89, 108, 183, 152, 249, 229, 189, 244, 113, 159, 206, 170, 97, 116, 111, 254, 36, 8, 242, 91, 86, 217, 110]; - let pk8 = vector[135, 133, 64, 95, 39, 94, 226, 253, 147, 78, 131, 131, 90, 121, 186, 101, 31, 128, 176, 244, 50, 223, 27, 128, 99, 80, 220, 148, 156, 22, 156, 96, 230, 7, 103, 228, 31, 174, 216, 234, 172, 94, 208, 233, 226, 16, 120, 124]; - let pk9 = vector[128, 173, 226, 9, 19, 120, 41, 58, 99, 213, 83, 40, 206, 242, 55, 54, 244, 219, 220, 73, 189, 60, 7, 135, 184, 193, 140, 214, 168, 221, 194, 212, 42, 39, 146, 66, 232, 123, 34, 209, 144, 159, 63, 29, 85, 229, 218, 102]; - let message = vector[104, 101, 108, 108, 111]; - // BAD SIGNATURE - let agg_sig = vector[133, 145, 54, 247, 223, 68, 1, 65, 112, 10, 160, 125, 172, 100, 93, 62, 192, 216, 7, 129, 27, 180, 99, 101, 45, 248, 123, 114, 102, 97, 180, 101, 8, 246, 118, 94, 149, 82, 158, 181, 134, 28, 177, 85, 241, 53, 152, 176, 22, 227, 147, 88, 180, 160, 
138, 174, 97, 9, 70, 172, 29, 128, 192, 254, 252, 43, 131, 182, 120, 126, 203, 191, 202, 186, 23, 179, 170, 184, 146, 236, 83, 21, 7, 2, 177, 103, 103, 138, 13, 41, 47, 180, 1, 156, 29, 162]; - - // Make a new committee - let committee = new_bls_committee( - vector[ - storage_node::new_for_testing(pk0, 1), storage_node::new_for_testing(pk1, 1), storage_node::new_for_testing(pk2, 1), storage_node::new_for_testing(pk3, 1), storage_node::new_for_testing(pk4, 1), storage_node::new_for_testing(pk5, 1), storage_node::new_for_testing(pk6, 1), storage_node::new_for_testing(pk7, 1), storage_node::new_for_testing(pk8, 1), storage_node::new_for_testing(pk9, 1) - ] - ); - - // Verify the aggregate signature - verify_certificate( - &committee, - &agg_sig, - &vector[0, 1, 2, 3, 4, 5, 6], - &message - ); - - committee - - } - - #[test, expected_failure(abort_code = bls_aggregate::ETotalMemberOrder) ] - public fun test_duplicate_member_error(): BlsCommittee { - let pk0 = vector[166, 14, 117, 25, 14, 98, 182, 165, 65, 66, 209, 71, 40, 154, 115, 92, 76, 225, 26, 157, 153, 117, 67, 218, 83, 154, 61, 181, 125, 239, 94, 216, 59, 164, 11, 116, 229, 80, 101, 240, 43, 53, 170, 29, 80, 76, 64, 75]; - let pk1 = vector[174, 18, 3, 148, 89, 198, 4, 145, 103, 43, 106, 98, 130, 53, 93, 135, 101, 186, 98, 114, 56, 127, 185, 26, 62, 150, 4, 250, 42, 129, 69, 12, 241, 107, 135, 11, 180, 70, 252, 58, 62, 10, 24, 127, 255, 111, 137, 69]; - let pk2 = vector[148, 123, 50, 124, 138, 21, 179, 150, 52, 164, 38, 175, 112, 192, 98, 181, 6, 50, 167, 68, 237, 221, 65, 181, 164, 104, 100, 20, 239, 76, 217, 116, 107, 177, 29, 10, 83, 198, 194, 255, 33, 187, 207, 51, 30, 7, 172, 146]; - let pk3 = vector[133, 252, 74, 229, 67, 202, 22, 36, 116, 88, 110, 118, 215, 44, 71, 208, 21, 28, 60, 183, 183, 126, 130, 200, 126, 85, 74, 191, 114, 84, 142, 46, 116, 107, 198, 117, 128, 91, 104, 139, 80, 22, 38, 158, 24, 255, 66, 80]; - let pk4 = vector[140, 170, 13, 232, 98, 121, 62, 86, 124, 96, 80, 170, 130, 45, 178, 214, 
203, 43, 82, 11, 198, 43, 109, 188, 186, 126, 119, 48, 103, 237, 9, 199, 186, 2, 130, 215, 194, 14, 1, 80, 12, 108, 47, 167, 100, 8, 173, 237]; - let pk5 = vector[170, 39, 63, 208, 83, 35, 225, 56, 30, 16, 233, 62, 104, 60, 52, 100, 115, 40, 18, 112, 32, 179, 80, 127, 200, 205, 220, 51, 112, 56, 227, 63, 189, 122, 153, 239, 13, 44, 123, 106, 39, 141, 127, 129, 22, 22, 37, 96]; - let pk6 = vector[143, 206, 207, 249, 174, 4, 144, 247, 35, 18, 56, 34, 198, 111, 54, 153, 109, 35, 116, 144, 214, 118, 158, 230, 143, 159, 122, 125, 161, 198, 186, 200, 181, 195, 208, 196, 52, 142, 140, 232, 252, 61, 81, 89, 248, 51, 52, 132]; - let pk7 = vector[143, 79, 254, 129, 165, 12, 241, 23, 6, 156, 154, 102, 173, 159, 39, 118, 238, 234, 233, 79, 224, 43, 162, 160, 249, 89, 108, 183, 152, 249, 229, 189, 244, 113, 159, 206, 170, 97, 116, 111, 254, 36, 8, 242, 91, 86, 217, 110]; - let pk8 = vector[135, 133, 64, 95, 39, 94, 226, 253, 147, 78, 131, 131, 90, 121, 186, 101, 31, 128, 176, 244, 50, 223, 27, 128, 99, 80, 220, 148, 156, 22, 156, 96, 230, 7, 103, 228, 31, 174, 216, 234, 172, 94, 208, 233, 226, 16, 120, 124]; - let pk9 = vector[128, 173, 226, 9, 19, 120, 41, 58, 99, 213, 83, 40, 206, 242, 55, 54, 244, 219, 220, 73, 189, 60, 7, 135, 184, 193, 140, 214, 168, 221, 194, 212, 42, 39, 146, 66, 232, 123, 34, 209, 144, 159, 63, 29, 85, 229, 218, 102]; - let message = vector[104, 101, 108, 108, 111]; - // BAD SIGNATURE - let agg_sig = vector[134, 145, 54, 247, 223, 68, 1, 65, 112, 10, 160, 125, 172, 100, 93, 62, 192, 216, 7, 129, 27, 180, 99, 101, 45, 248, 123, 114, 102, 97, 180, 101, 8, 246, 118, 94, 149, 82, 158, 181, 134, 28, 177, 85, 241, 53, 152, 176, 22, 227, 147, 88, 180, 160, 138, 174, 97, 9, 70, 172, 29, 128, 192, 254, 252, 43, 131, 182, 120, 126, 203, 191, 202, 186, 23, 179, 170, 184, 146, 236, 83, 21, 7, 2, 177, 103, 103, 138, 13, 41, 47, 180, 1, 156, 29, 162]; - - // Make a new committee - let committee = new_bls_committee( - vector[ - storage_node::new_for_testing(pk0, 1), 
storage_node::new_for_testing(pk1, 1), storage_node::new_for_testing(pk2, 1), storage_node::new_for_testing(pk3, 1), storage_node::new_for_testing(pk4, 1), storage_node::new_for_testing(pk5, 1), storage_node::new_for_testing(pk6, 1), storage_node::new_for_testing(pk7, 1), storage_node::new_for_testing(pk8, 1), storage_node::new_for_testing(pk9, 1) - ] - ); - - // Verify the aggregate signature - verify_certificate( - &committee, - &agg_sig, - &vector[0, 1, 2, 3, 3, 5, 6], - &message - ); - - committee - - } - - #[test, expected_failure(abort_code = bls_aggregate::ENotEnoughStake) ] - public fun test_incorrect_stake_error(): BlsCommittee { - let pk0 = vector[166, 14, 117, 25, 14, 98, 182, 165, 65, 66, 209, 71, 40, 154, 115, 92, 76, 225, 26, 157, 153, 117, 67, 218, 83, 154, 61, 181, 125, 239, 94, 216, 59, 164, 11, 116, 229, 80, 101, 240, 43, 53, 170, 29, 80, 76, 64, 75]; - let pk1 = vector[174, 18, 3, 148, 89, 198, 4, 145, 103, 43, 106, 98, 130, 53, 93, 135, 101, 186, 98, 114, 56, 127, 185, 26, 62, 150, 4, 250, 42, 129, 69, 12, 241, 107, 135, 11, 180, 70, 252, 58, 62, 10, 24, 127, 255, 111, 137, 69]; - let pk2 = vector[148, 123, 50, 124, 138, 21, 179, 150, 52, 164, 38, 175, 112, 192, 98, 181, 6, 50, 167, 68, 237, 221, 65, 181, 164, 104, 100, 20, 239, 76, 217, 116, 107, 177, 29, 10, 83, 198, 194, 255, 33, 187, 207, 51, 30, 7, 172, 146]; - let pk3 = vector[133, 252, 74, 229, 67, 202, 22, 36, 116, 88, 110, 118, 215, 44, 71, 208, 21, 28, 60, 183, 183, 126, 130, 200, 126, 85, 74, 191, 114, 84, 142, 46, 116, 107, 198, 117, 128, 91, 104, 139, 80, 22, 38, 158, 24, 255, 66, 80]; - let pk4 = vector[140, 170, 13, 232, 98, 121, 62, 86, 124, 96, 80, 170, 130, 45, 178, 214, 203, 43, 82, 11, 198, 43, 109, 188, 186, 126, 119, 48, 103, 237, 9, 199, 186, 2, 130, 215, 194, 14, 1, 80, 12, 108, 47, 167, 100, 8, 173, 237]; - let pk5 = vector[170, 39, 63, 208, 83, 35, 225, 56, 30, 16, 233, 62, 104, 60, 52, 100, 115, 40, 18, 112, 32, 179, 80, 127, 200, 205, 220, 51, 112, 56, 227, 63, 189, 
122, 153, 239, 13, 44, 123, 106, 39, 141, 127, 129, 22, 22, 37, 96]; - let pk6 = vector[143, 206, 207, 249, 174, 4, 144, 247, 35, 18, 56, 34, 198, 111, 54, 153, 109, 35, 116, 144, 214, 118, 158, 230, 143, 159, 122, 125, 161, 198, 186, 200, 181, 195, 208, 196, 52, 142, 140, 232, 252, 61, 81, 89, 248, 51, 52, 132]; - let pk7 = vector[143, 79, 254, 129, 165, 12, 241, 23, 6, 156, 154, 102, 173, 159, 39, 118, 238, 234, 233, 79, 224, 43, 162, 160, 249, 89, 108, 183, 152, 249, 229, 189, 244, 113, 159, 206, 170, 97, 116, 111, 254, 36, 8, 242, 91, 86, 217, 110]; - let pk8 = vector[135, 133, 64, 95, 39, 94, 226, 253, 147, 78, 131, 131, 90, 121, 186, 101, 31, 128, 176, 244, 50, 223, 27, 128, 99, 80, 220, 148, 156, 22, 156, 96, 230, 7, 103, 228, 31, 174, 216, 234, 172, 94, 208, 233, 226, 16, 120, 124]; - let pk9 = vector[128, 173, 226, 9, 19, 120, 41, 58, 99, 213, 83, 40, 206, 242, 55, 54, 244, 219, 220, 73, 189, 60, 7, 135, 184, 193, 140, 214, 168, 221, 194, 212, 42, 39, 146, 66, 232, 123, 34, 209, 144, 159, 63, 29, 85, 229, 218, 102]; - let message = vector[104, 101, 108, 108, 111]; - // BAD SIGNATURE - let agg_sig = vector[134, 145, 54, 247, 223, 68, 1, 65, 112, 10, 160, 125, 172, 100, 93, 62, 192, 216, 7, 129, 27, 180, 99, 101, 45, 248, 123, 114, 102, 97, 180, 101, 8, 246, 118, 94, 149, 82, 158, 181, 134, 28, 177, 85, 241, 53, 152, 176, 22, 227, 147, 88, 180, 160, 138, 174, 97, 9, 70, 172, 29, 128, 192, 254, 252, 43, 131, 182, 120, 126, 203, 191, 202, 186, 23, 179, 170, 184, 146, 236, 83, 21, 7, 2, 177, 103, 103, 138, 13, 41, 47, 180, 1, 156, 29, 162]; - - // Make a new committee - let committee = new_bls_committee( - vector[ - storage_node::new_for_testing(pk0, 1), storage_node::new_for_testing(pk1, 2), storage_node::new_for_testing(pk2, 2), storage_node::new_for_testing(pk3, 2), storage_node::new_for_testing(pk4, 2), storage_node::new_for_testing(pk5, 2), storage_node::new_for_testing(pk6, 2), storage_node::new_for_testing(pk7, 2), storage_node::new_for_testing(pk8, 2), 
storage_node::new_for_testing(pk9, 3) - ] - ); - - // Verify the aggregate signature - verify_certificate( - &committee, - &agg_sig, - &vector[0, 1, 2, 3, 4, 5, 6], - &message - ); - - committee - - } -} diff --git a/contracts/blob_store/sources/tests/committee_cert_tests.move b/contracts/blob_store/sources/tests/committee_cert_tests.move deleted file mode 100644 index 17f84f13..00000000 --- a/contracts/blob_store/sources/tests/committee_cert_tests.move +++ /dev/null @@ -1,75 +0,0 @@ -// editorconfig-checker-disable-file -// Data here autogenerated by python file - -// Copyright (c) Mysten Labs, Inc. -// SPDX-License-Identifier: Apache-2.0 - -#[test_only] -module blob_store::committee_cert_tests { - - use sui::bls12381::{bls12381_min_pk_verify}; - - use blob_store::bls_aggregate::new_bls_committee; - use blob_store::committee::{Self, Committee, committee_for_testing_with_bls , verify_quorum_in_epoch}; - use blob_store::storage_node; - - #[test] - public fun test_basic_correct() : Committee { - - let pub_key_bytes = vector[142, 78, 70, 3, 179, 142, 145, 75, 170, 36, 5, 232, 153, 164, 205, 57, 24, 216, 208, 34, 87, 213, 225, 76, 5, 157, 212, 88, 161, 34, 75, 145, 206, 144, 85, 11, 197, 110, 75, 175, 215, 194, 78, 51, 192, 196, 59, 204]; - let message = vector[1, 0, 3, 5, 0, 0, 0, 0, 0, 0, 0, 104, 101, 108, 108, 111]; - let signature = vector[173, 231, 27, 143, 41, 154, 49, 14, 85, 88, 187, 65, 86, 190, 161, 255, 219, 210, 78, 88, 179, 53, 11, 104, 168, 220, 40, 13, 91, 254, 191, 116, 161, 252, 196, 19, 24, 153, 126, 248, 68, 136, 245, 85, 144, 17, 163, 161, 10, 195, 145, 26, 88, 205, 255, 211, 19, 42, 132, 34, 230, 155, 148, 10, 173, 151, 182, 93, 50, 73, 126, 112, 119, 153, 116, 80, 198, 215, 82, 228, 9, 186, 90, 83, 85, 143, 155, 191, 109, 190, 84, 129, 178, 100, 228, 118]; - - assert!(bls12381_min_pk_verify( - &signature, - &pub_key_bytes, - &message), 0); - - // Make a new committee - let bls_committee = new_bls_committee( - vector[ - 
storage_node::new_for_testing(pub_key_bytes, 10) - ] - ); - - // actual committee - - let committee_at_5 : Committee = committee_for_testing_with_bls(5, bls_committee); - let cert = verify_quorum_in_epoch(&committee_at_5, signature, vector[0], message); - - assert!(committee::intent_type(&cert) == 1, 0); - - committee_at_5 - } - - #[test, expected_failure] - public fun test_incorrect_epoch() : Committee { - - let pub_key_bytes = vector[142, 78, 70, 3, 179, 142, 145, 75, 170, 36, 5, 232, 153, 164, 205, 57, 24, 216, 208, 34, 87, 213, 225, 76, 5, 157, 212, 88, 161, 34, 75, 145, 206, 144, 85, 11, 197, 110, 75, 175, 215, 194, 78, 51, 192, 196, 59, 204]; - let message = vector[1, 0, 3, 5, 0, 0, 0, 0, 0, 0, 0, 104, 101, 108, 108, 111]; - let signature = vector[173, 231, 27, 143, 41, 154, 49, 14, 85, 88, 187, 65, 86, 190, 161, 255, 219, 210, 78, 88, 179, 53, 11, 104, 168, 220, 40, 13, 91, 254, 191, 116, 161, 252, 196, 19, 24, 153, 126, 248, 68, 136, 245, 85, 144, 17, 163, 161, 10, 195, 145, 26, 88, 205, 255, 211, 19, 42, 132, 34, 230, 155, 148, 10, 173, 151, 182, 93, 50, 73, 126, 112, 119, 153, 116, 80, 198, 215, 82, 228, 9, 186, 90, 83, 85, 143, 155, 191, 109, 190, 84, 129, 178, 100, 228, 118]; - - assert!(bls12381_min_pk_verify( - &signature, - &pub_key_bytes, - &message), 0); - - // Make a new committee - let bls_committee = new_bls_committee( - vector[ - storage_node::new_for_testing(pub_key_bytes, 10), - ] - ); - - // actual committee - - // INCORRECT EPOCH - let committee_at_6 : Committee = committee_for_testing_with_bls(6, bls_committee); - let cert = verify_quorum_in_epoch(&committee_at_6, signature, vector[0], message); - - assert!(committee::intent_type(&cert) == 1, 0); - - committee_at_6 - } - -} diff --git a/contracts/blob_store/sources/tests/epoch_change_tests.move b/contracts/blob_store/sources/tests/epoch_change_tests.move deleted file mode 100644 index c74d577c..00000000 --- a/contracts/blob_store/sources/tests/epoch_change_tests.move +++ /dev/null @@ 
-1,288 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. -// SPDX-License-Identifier: Apache-2.0 - -#[test_only] -module blob_store::epoch_change_tests { - use sui::coin; - use sui::balance; - - use blob_store::committee; - use blob_store::system; - use blob_store::storage_accounting as sa; - use blob_store::storage_resource as sr; - - // Keep in sync with the same constant in `blob_store::system` - const BYTES_PER_UNIT_SIZE: u64 = 1_024; - - public struct TESTWAL has store, drop {} - - // ------------- TESTS -------------------- - - #[test] - public fun test_use_system(): system::System { - let mut ctx = tx_context::dummy(); - - // A test coin. - let fake_coin = coin::mint_for_testing(100, &mut ctx); - - // Create a new committee - let committee = committee::committee_for_testing(0); - - // Create a new system object - let mut system: system::System = system::new( - committee, - 1_000 * BYTES_PER_UNIT_SIZE, - 2, - &mut ctx, - ); - - // Get some space for a few epochs - let (storage, fake_coin) = system::reserve_space( - &mut system, - 10 * BYTES_PER_UNIT_SIZE, - 3, - fake_coin, - &mut ctx, - ); - sr::destroy(storage); - - // Check things about the system - assert!(system::epoch(&system) == 0, 0); - - // The value of the coin should be 100 - 60 - assert!(coin::value(&fake_coin) == 40, 0); - - // Space is reduced by 10 - assert!(system::used_capacity_size(&system) == 10 * BYTES_PER_UNIT_SIZE, 0); - - // Advance epoch -- to epoch 1 - let committee = committee::committee_for_testing(1); - let mut epoch_accounts = system::next_epoch( - &mut system, - committee, - 1_000 * BYTES_PER_UNIT_SIZE, - 3, - ); - assert!(balance::value(sa::rewards_to_distribute(&mut epoch_accounts)) == 20, 0); - - // Get some space for a few epochs - let (storage, fake_coin) = system::reserve_space( - &mut system, - 5 * BYTES_PER_UNIT_SIZE, - 1, - fake_coin, - &mut ctx, - ); - sr::destroy(storage); - // The value of the coin should be 40 - 3 x 5 - assert!(coin::value(&fake_coin) == 25, 0); - 
sa::burn_for_testing(epoch_accounts); - - assert!(system::used_capacity_size(&system) == 15 * BYTES_PER_UNIT_SIZE, 0); - - // Advance epoch -- to epoch 2 - system::set_done_for_testing(&mut system); - let committee = committee::committee_for_testing(2); - let mut epoch_accounts = system::next_epoch( - &mut system, - committee, - 1_000 * BYTES_PER_UNIT_SIZE, - 3, - ); - assert!(balance::value(sa::rewards_to_distribute(&mut epoch_accounts)) == 35, 0); - sa::burn_for_testing(epoch_accounts); - - assert!(system::used_capacity_size(&system) == 10 * BYTES_PER_UNIT_SIZE, 0); - - // Advance epoch -- to epoch 3 - system::set_done_for_testing(&mut system); - let committee = committee::committee_for_testing(3); - let mut epoch_accounts = system::next_epoch( - &mut system, - committee, - 1_000 * BYTES_PER_UNIT_SIZE, - 3, - ); - assert!(balance::value(sa::rewards_to_distribute(&mut epoch_accounts)) == 20, 0); - sa::burn_for_testing(epoch_accounts); - - // check all space is reclaimed - assert!(system::used_capacity_size(&system) == 0, 0); - - // Advance epoch -- to epoch 4 - system::set_done_for_testing(&mut system); - let committee = committee::committee_for_testing(4); - let mut epoch_accounts = system::next_epoch( - &mut system, - committee, - 1_000 * BYTES_PER_UNIT_SIZE, - 3, - ); - assert!(balance::value(sa::rewards_to_distribute(&mut epoch_accounts)) == 0, 0); - sa::burn_for_testing(epoch_accounts); - - // check all space is reclaimed - assert!(system::used_capacity_size(&system) == 0, 0); - - coin::burn_for_testing(fake_coin); - - system - } - - #[test, expected_failure(abort_code=system::ESyncEpochChange)] - public fun test_move_sync_err_system(): system::System { - let mut ctx = tx_context::dummy(); - - // A test coin. 
- let fake_coin = coin::mint_for_testing(100, &mut ctx); - - // Create a new committee - let committee = committee::committee_for_testing(0); - - // Create a new system object - let mut system: system::System = system::new(committee, 1000, 2, &mut ctx); - - // Advance epoch -- to epoch 1 - let committee = committee::committee_for_testing(1); - let epoch_accounts1 = system::next_epoch(&mut system, committee, 1000, 3); - - // Advance epoch -- to epoch 2 - let committee = committee::committee_for_testing(2); - // FAIL HERE BECAUSE WE ARE IN SYNC MODE NOT DONE! - let epoch_accounts2 = system::next_epoch(&mut system, committee, 1000, 3); - - coin::burn_for_testing(fake_coin); - sa::burn_for_testing(epoch_accounts1); - sa::burn_for_testing(epoch_accounts2); - - system - } - - #[test, expected_failure(abort_code=system::EStorageExceeded)] - public fun test_fail_capacity_system(): system::System { - let mut ctx = tx_context::dummy(); - - // A test coin. - let fake_coin = coin::mint_for_testing(100, &mut ctx); - - // Create a new committee - let committee = committee::committee_for_testing(0); - - // Create a new system object - let mut system: system::System = system::new(committee, 1000, 2, &mut ctx); - - // Get some space for a few epochs - let (storage, fake_coin) = system::reserve_space(&mut system, 10, 3, fake_coin, &mut ctx); - sr::destroy(storage); - - // Advance epoch -- to epoch 1 - let committee = committee::committee_for_testing(1); - let epoch_accounts = system::next_epoch(&mut system, committee, 1000, 3); - - // Get some space for a few epochs - let (storage, fake_coin) = system::reserve_space(&mut system, 995, 1, fake_coin, &mut ctx); - sr::destroy(storage); - // The value of the coin should be 40 - 3 x 5 - sa::burn_for_testing(epoch_accounts); - - coin::burn_for_testing(fake_coin); - - system - } - - #[test] - public fun test_sync_done_happy(): system::System { - let mut ctx = tx_context::dummy(); - - // A test coin. 
- let fake_coin = coin::mint_for_testing(100, &mut ctx); - - // Create a new committee - let committee = committee::committee_for_testing(0); - - // Create a new system object - let mut system: system::System = system::new(committee, 1000, 2, &mut ctx); - - // Advance epoch -- to epoch 1 - let committee = committee::committee_for_testing(1); - let epoch_accounts1 = system::next_epoch(&mut system, committee, 1000, 3); - - // Construct a test sync_done test message - let test_sync_done_msg = system::make_sync_done_message_for_testing(1); - - // Feed it into the logic to advance state - system::sync_done_for_epoch(&mut system, test_sync_done_msg); - - // Advance epoch -- to epoch 2 - let committee = committee::committee_for_testing(2); - // We are in done state and this works - let epoch_accounts2 = system::next_epoch(&mut system, committee, 1000, 3); - - coin::burn_for_testing(fake_coin); - sa::burn_for_testing(epoch_accounts1); - sa::burn_for_testing(epoch_accounts2); - - system - } - - #[test, expected_failure(abort_code=system::ESyncEpochChange)] - public fun test_sync_done_unhappy(): system::System { - let mut ctx = tx_context::dummy(); - - // A test coin. 
- let fake_coin = coin::mint_for_testing(100, &mut ctx); - - // Create a new committee - let committee = committee::committee_for_testing(0); - - // Create a new system object - let mut system: system::System = system::new(committee, 1000, 2, &mut ctx); - - // Advance epoch -- to epoch 1 - let committee = committee::committee_for_testing(1); - let epoch_accounts1 = system::next_epoch(&mut system, committee, 1000, 3); - - // Construct a test sync_done test message -- INCORRECT EPOCH - let test_sync_done_msg = system::make_sync_done_message_for_testing(4); - - // Feed it into the logic to advance state - system::sync_done_for_epoch(&mut system, test_sync_done_msg); - - coin::burn_for_testing(fake_coin); - sa::burn_for_testing(epoch_accounts1); - - system - } - - #[test, expected_failure(abort_code=system::ESyncEpochChange)] - public fun test_twice_unhappy(): system::System { - let mut ctx = tx_context::dummy(); - - // A test coin. - let fake_coin = coin::mint_for_testing(100, &mut ctx); - - // Create a new committee - let committee = committee::committee_for_testing(0); - - // Create a new system object - let mut system: system::System = system::new(committee, 1000, 2, &mut ctx); - - // Advance epoch -- to epoch 1 - let committee = committee::committee_for_testing(1); - let epoch_accounts1 = system::next_epoch(&mut system, committee, 1000, 3); - - // Construct a test sync_done test message - // Feed it into the logic to advance state - let test_sync_done_msg = system::make_sync_done_message_for_testing(1); - system::sync_done_for_epoch(&mut system, test_sync_done_msg); - - // SECOND TIME -- FAILS - let test_sync_done_msg = system::make_sync_done_message_for_testing(1); - system::sync_done_for_epoch(&mut system, test_sync_done_msg); - - coin::burn_for_testing(fake_coin); - sa::burn_for_testing(epoch_accounts1); - - system - } -} diff --git a/contracts/blob_store/sources/tests/invalid_tests.move b/contracts/blob_store/sources/tests/invalid_tests.move deleted file mode 
100644 index 90765d54..00000000 --- a/contracts/blob_store/sources/tests/invalid_tests.move +++ /dev/null @@ -1,303 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. -// SPDX-License-Identifier: Apache-2.0 - -#[test_only] -module blob_store::invalid_tests { - - use std::string; - - use blob_store::committee; - use blob_store::system; - use blob_store::storage_node; - use blob_store::storage_accounting as sa; - - const NETWORK_PUBLIC_KEY: vector = - x"820e2b273530a00de66c9727c40f48be985da684286983f398ef7695b8a44677"; - public struct TESTWAL has store, drop {} - - - #[test] - public fun test_invalid_blob_ok() : committee::Committee { - - let blob_id : u256 = 0xabababababababababababababababababababababababababababababababab; - - // BCS confirmation message for epoch 0 and blob id `blob_id` with intents - let invalid_message : vector = vector[2, 0, 3, 5, 0, 0, 0, 0, 0, 0, 0, - 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, - 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171]; - - // Signature from private key scalar(117) on `invalid message` - let message_signature : vector = vector[ - 143, 92, 248, 128, 87, 79, 148, 183, 217, 204, 80, 23, 165, 20, 177, 244, 195, 58, 211, - 68, 96, 54, 23, 17, 187, 131, 69, 35, 243, 61, 209, 23, 11, 75, 236, 235, 199, 245, 53, - 10, 120, 47, 152, 39, 205, 152, 188, 230, 12, 213, 35, 133, 121, 27, 238, 80, 93, 35, - 241, 26, 55, 151, 38, 190, 131, 149, 149, 89, 134, 115, 85, 8, 133, 11, 220, 82, 100, - 14, 214, 146, 147, 200, 192, 155, 181, 143, 199, 38, 202, 125, 25, 22, 246, 117, 30, 82 - ]; - - // Create storage node - // Pk corresponding to secret key scalar(117) - let public_key : vector = vector[ - 149, 234, 204, 58, 220, 9, 200, 39, 89, 63, 88, 30, 142, 45, - 224, 104, 191, 76, 245, 208, 192, 235, 41, 229, 55, 47, 13, 35, 54, 71, 136, 238, 15, - 155, 235, 17, 44, 138, 126, 156, 47, 12, 114, 4, 51, 112, 92, 240]; - let storage_node = storage_node::create_storage_node_info( - 
string::utf8(b"node"), - string::utf8(b"127.0.0.1"), - public_key, - NETWORK_PUBLIC_KEY, - vector[0, 1, 2, 3, 4, 5] - ); - - // Create a new committee - let cap = committee::create_committee_cap_for_tests(); - let committee = committee::create_committee(&cap, 5, vector[storage_node]); - - let certified_message = committee::verify_quorum_in_epoch( - &committee, - message_signature, - vector[0], - invalid_message,); - - // Now check this is a invalid blob message - let invalid_blob = system::invalid_blob_id_message(certified_message); - assert!(system::invalid_blob_id(&invalid_blob) == blob_id, 0); - - committee - } - - #[test] - public fun test_system_invalid_id_happy() : system::System { - - let mut ctx = tx_context::dummy(); - - // Create storage node - // Pk corresponding to secret key scalar(117) - let public_key : vector = vector[ - 149, 234, 204, 58, 220, 9, 200, 39, 89, 63, 88, 30, 142, 45, - 224, 104, 191, 76, 245, 208, 192, 235, 41, 229, 55, 47, 13, 35, 54, 71, 136, 238, 15, - 155, 235, 17, 44, 138, 126, 156, 47, 12, 114, 4, 51, 112, 92, 240]; - let storage_node = storage_node::create_storage_node_info( - string::utf8(b"node"), - string::utf8(b"127.0.0.1"), - public_key, - NETWORK_PUBLIC_KEY, - vector[0, 1, 2, 3, 4, 5] - ); - - // Create a new committee - let cap = committee::create_committee_cap_for_tests(); - let committee = committee::create_committee(&cap, 0, vector[storage_node]); - - // Create a new system object - let mut system : system::System = system::new(committee, - 1000000000, 5, &mut ctx); - - let mut epoch = 0; - - loop { - - epoch = epoch + 1; - - let storage_node = storage_node::create_storage_node_info( - string::utf8(b"node"), - string::utf8(b"127.0.0.1"), - public_key, - NETWORK_PUBLIC_KEY, - vector[0, 1, 2, 3, 4, 5] - ); - let committee = committee::create_committee(&cap, epoch, vector[storage_node]); - let epoch_accounts = system::next_epoch(&mut system, committee, 1000000000, 5); - system::set_done_for_testing(&mut system); - 
sa::burn_for_testing(epoch_accounts); - - if (epoch == 5) { - break - } - - }; - - let certified_message = committee::certified_message_for_testing( - 2, // Intent type - 0, // Intent version - 5, // Epoch - 6, // Stake support - // Data - vector[171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, - 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171], - - ); - - let blob_id : u256 = 0xabababababababababababababababababababababababababababababababab; - - // Now check this is a invalid blob message - let invalid_blob = system::invalid_blob_id_message(certified_message); - assert!(system::invalid_blob_id(&invalid_blob) == blob_id, 0); - - // Now use the system to check the invalid blob - system::inner_declare_invalid_blob_id(&system, invalid_blob); - - system - } - - #[test] - public fun test_invalidate_happy() : system::System { - - let mut ctx = tx_context::dummy(); - - // Create storage node - // Pk corresponding to secret key scalar(117) - let public_key : vector = vector[ - 149, 234, 204, 58, 220, 9, 200, 39, 89, 63, 88, 30, 142, 45, - 224, 104, 191, 76, 245, 208, 192, 235, 41, 229, 55, 47, 13, 35, 54, 71, 136, 238, 15, - 155, 235, 17, 44, 138, 126, 156, 47, 12, 114, 4, 51, 112, 92, 240]; - let storage_node = storage_node::create_storage_node_info( - string::utf8(b"node"), - string::utf8(b"127.0.0.1"), - public_key, - NETWORK_PUBLIC_KEY, - vector[0, 1, 2, 3, 4, 5] - ); - - // Create a new committee - let cap = committee::create_committee_cap_for_tests(); - let committee = committee::create_committee(&cap, 0, vector[storage_node]); - - // Create a new system object - let mut system : system::System = system::new(committee, - 1000000000, 5, &mut ctx); - - let mut epoch = 0; - - loop { - - epoch = epoch + 1; - - let storage_node = storage_node::create_storage_node_info( - string::utf8(b"node"), - string::utf8(b"127.0.0.1"), - public_key, - NETWORK_PUBLIC_KEY, - vector[0, 1, 2, 3, 4, 5] - ); - let committee = 
committee::create_committee(&cap, epoch, vector[storage_node]); - let epoch_accounts = system::next_epoch(&mut system, committee, 1000000000, 5); - system::set_done_for_testing(&mut system); - sa::burn_for_testing(epoch_accounts); - - if (epoch == 5) { - break - } - - }; - - // BCS confirmation message for epoch 0 and blob id `blob_id` with intents - let invalid_message : vector = vector[2, 0, 3, 5, 0, 0, 0, 0, 0, 0, 0, - 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, - 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171]; - - // Signature from private key scalar(117) on `invalid message` - let message_signature : vector = vector[ - 143, 92, 248, 128, 87, 79, 148, 183, 217, 204, 80, 23, 165, 20, 177, 244, 195, 58, 211, - 68, 96, 54, 23, 17, 187, 131, 69, 35, 243, 61, 209, 23, 11, 75, 236, 235, 199, 245, 53, - 10, 120, 47, 152, 39, 205, 152, 188, 230, 12, 213, 35, 133, 121, 27, 238, 80, 93, 35, - 241, 26, 55, 151, 38, 190, 131, 149, 149, 89, 134, 115, 85, 8, 133, 11, 220, 82, 100, - 14, 214, 146, 147, 200, 192, 155, 181, 143, 199, 38, 202, 125, 25, 22, 246, 117, 30, 82 - ]; - - - - let expected_blob_id : u256 - = 0xabababababababababababababababababababababababababababababababab; - - // Now check this is a invalid blob message - let blob_id = system::invalidate_blob_id( - &system, - message_signature, - vector[0], - invalid_message); - - assert!(blob_id == expected_blob_id, 0); - - system - } - - - #[test, expected_failure(abort_code=system::EInvalidIdEpoch)] - public fun test_system_invalid_id_wrong_epoch() : system::System { - - let mut ctx = tx_context::dummy(); - - // Create storage node - // Pk corresponding to secret key scalar(117) - let public_key : vector = vector[ - 149, 234, 204, 58, 220, 9, 200, 39, 89, 63, 88, 30, 142, 45, - 224, 104, 191, 76, 245, 208, 192, 235, 41, 229, 55, 47, 13, 35, 54, 71, 136, 238, 15, - 155, 235, 17, 44, 138, 126, 156, 47, 12, 114, 4, 51, 112, 92, 240]; - let 
storage_node = storage_node::create_storage_node_info( - string::utf8(b"node"), - string::utf8(b"127.0.0.1"), - public_key, - NETWORK_PUBLIC_KEY, - vector[0, 1, 2, 3, 4, 5] - ); - - // Create a new committee - let cap = committee::create_committee_cap_for_tests(); - let committee = committee::create_committee(&cap, 0, vector[storage_node]); - - // Create a new system object - let mut system : system::System = system::new(committee, - 1000000000, 5, &mut ctx); - - let mut epoch = 0; - - loop { - - epoch = epoch + 1; - - let storage_node = storage_node::create_storage_node_info( - string::utf8(b"node"), - string::utf8(b"127.0.0.1"), - public_key, - NETWORK_PUBLIC_KEY, - vector[0, 1, 2, 3, 4, 5] - ); - let committee = committee::create_committee(&cap, epoch, vector[storage_node]); - let epoch_accounts = system::next_epoch(&mut system, committee, 1000000000, 5); - system::set_done_for_testing(&mut system); - sa::burn_for_testing(epoch_accounts); - - if (epoch == 5) { - break - } - - }; - - let certified_message = committee::certified_message_for_testing( - 2, // Intent type - 0, // Intent version - 50, // Epoch WRONG - 6, // Stake support - // Data - vector[171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, - 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171], - - ); - - let blob_id : u256 = 0xabababababababababababababababababababababababababababababababab; - - // Now check this is a invalid blob message - let invalid_blob = system::invalid_blob_id_message(certified_message); - assert!(system::invalid_blob_id(&invalid_blob) == blob_id, 0); - - // Now use the system to check the invalid blob - // BLOWS UP HERE DUE TO WRONG EPOCH - system::inner_declare_invalid_blob_id(&system, invalid_blob); - - system - } - - - -} diff --git a/contracts/blob_store/sources/tests/ringbuffer_tests.move b/contracts/blob_store/sources/tests/ringbuffer_tests.move deleted file mode 100644 index 6e820d19..00000000 --- 
a/contracts/blob_store/sources/tests/ringbuffer_tests.move +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. -// SPDX-License-Identifier: Apache-2.0 - -#[test_only] -module blob_store::ringbuffer_tests { - public struct TESTCOIN has store, drop {} - - use blob_store::storage_accounting as sa; - use blob_store::storage_accounting::FutureAccountingRingBuffer; - - // ------------- TESTS -------------------- - - #[test] - public fun test_basic_ring_buffer(): FutureAccountingRingBuffer { - let mut buffer: FutureAccountingRingBuffer = sa::ring_new(3); - - assert!(sa::epoch(sa::ring_lookup_mut(&mut buffer, 0)) == 0, 100); - assert!(sa::epoch(sa::ring_lookup_mut(&mut buffer, 1)) == 1, 100); - assert!(sa::epoch(sa::ring_lookup_mut(&mut buffer, 2)) == 2, 100); - - let entry = sa::ring_pop_expand(&mut buffer); - assert!(sa::epoch(&entry) == 0, 100); - sa::delete_empty_future_accounting(entry); - - let entry = sa::ring_pop_expand(&mut buffer); - assert!(sa::epoch(&entry) == 1, 100); - sa::delete_empty_future_accounting(entry); - - assert!(sa::epoch(sa::ring_lookup_mut(&mut buffer, 0)) == 2, 100); - assert!(sa::epoch(sa::ring_lookup_mut(&mut buffer, 1)) == 3, 100); - assert!(sa::epoch(sa::ring_lookup_mut(&mut buffer, 2)) == 4, 100); - - buffer - } - - #[test, expected_failure] - public fun test_oob_fail_ring_buffer(): FutureAccountingRingBuffer { - let mut buffer: FutureAccountingRingBuffer = sa::ring_new(3); - - sa::epoch(sa::ring_lookup_mut(&mut buffer, 3)); - - buffer - } -} diff --git a/contracts/blob_store/sources/tests/storage_resource_tests.move b/contracts/blob_store/sources/tests/storage_resource_tests.move deleted file mode 100644 index 5a0b64cd..00000000 --- a/contracts/blob_store/sources/tests/storage_resource_tests.move +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -#[test_only] -module blob_store::storage_resource_tests { - use blob_store::storage_resource::{ - fuse, - split_by_epoch, - split_by_size, - create_for_test, - destroy, - start_epoch, - end_epoch, - storage_size, - EInvalidEpoch, - EIncompatibleAmount, - EIncompatibleEpochs, - }; - - #[test] - public fun test_split_epoch() { - let ctx = &mut tx_context::dummy(); - let storage_amount = 5_000_000; - let mut storage = create_for_test(0, 10, storage_amount, ctx); - let new_storage = split_by_epoch(&mut storage, 7, ctx); - assert!( - start_epoch(&storage) == 0 && end_epoch(&storage) == 7 && - start_epoch(&new_storage) == 7 && - end_epoch(&new_storage) == 10, - 0, - ); - assert!( - storage_size(&storage) == storage_amount && - storage_size(&new_storage) == storage_amount, - 0, - ); - destroy(storage); - destroy(new_storage); - } - - #[test] - public fun test_split_size() { - let ctx = &mut tx_context::dummy(); - let mut storage = create_for_test(0, 10, 5_000_000, ctx); - let new_storage = split_by_size(&mut storage, 1_000_000, ctx); - assert!( - start_epoch(&storage) == 0 && end_epoch(&storage) == 10 && - start_epoch(&new_storage) == 0 && - end_epoch(&new_storage) == 10, - 0, - ); - assert!(storage_size(&storage) == 1_000_000 && storage_size(&new_storage) == 4_000_000, 0); - destroy(storage); - destroy(new_storage); - } - - #[test] - #[expected_failure(abort_code=EInvalidEpoch)] - public fun test_split_epoch_invalid_end() { - let ctx = &mut tx_context::dummy(); - let mut storage = create_for_test(0, 10, 5_000_000, ctx); - let new_storage = split_by_epoch(&mut storage, 11, ctx); - destroy(storage); - destroy(new_storage); - } - - #[test] - #[expected_failure(abort_code=EInvalidEpoch)] - public fun test_split_epoch_invalid_start() { - let ctx = &mut tx_context::dummy(); - let mut storage = create_for_test(1, 10, 5_000_000, ctx); - let new_storage = split_by_epoch(&mut storage, 0, ctx); - destroy(storage); - destroy(new_storage); - 
} - - #[test] - public fun test_fuse_size() { - let ctx = &mut tx_context::dummy(); - let mut first = create_for_test(0, 10, 1_000_000, ctx); - let second = create_for_test(0, 10, 2_000_000, ctx); - fuse(&mut first, second); - assert!(start_epoch(&first) == 0 && end_epoch(&first) == 10, 0); - assert!(storage_size(&first) == 3_000_000, 0); - destroy(first); - } - - #[test] - public fun test_fuse_epochs() { - let ctx = &mut tx_context::dummy(); - let mut first = create_for_test(0, 5, 1_000_000, ctx); - let second = create_for_test(5, 10, 1_000_000, ctx); - // list the `earlier` resource first - fuse(&mut first, second); - assert!(start_epoch(&first) == 0 && end_epoch(&first) == 10, 0); - assert!(storage_size(&first) == 1_000_000, 0); - - let mut second = create_for_test(10, 15, 1_000_000, ctx); - // list the `latter` resource first - fuse(&mut second, first); - assert!(start_epoch(&second) == 0 && end_epoch(&second) == 15, 0); - assert!(storage_size(&second) == 1_000_000, 0); - destroy(second); - } - - #[test] - #[expected_failure(abort_code=EIncompatibleAmount)] - public fun test_fuse_incompatible_size() { - let ctx = &mut tx_context::dummy(); - let mut first = create_for_test(0, 5, 1_000_000, ctx); - let second = create_for_test(5, 10, 2_000_000, ctx); - fuse(&mut first, second); - destroy(first); - } - - #[test] - #[expected_failure(abort_code=EIncompatibleEpochs)] - public fun test_fuse_incompatible_epochs() { - let ctx = &mut tx_context::dummy(); - let mut first = create_for_test(0, 6, 1_000_000, ctx); - let second = create_for_test(5, 10, 1_000_000, ctx); - fuse(&mut first, second); - destroy(first); - } -} diff --git a/contracts/walrus/Move.lock b/contracts/walrus/Move.lock new file mode 100644 index 00000000..bc645eec --- /dev/null +++ b/contracts/walrus/Move.lock @@ -0,0 +1,45 @@ +# @generated by Move, please check-in and do not edit manually. 
+ +[move] +version = 3 +manifest_digest = "A8705FC4DA1640884D71727CDCE9E5C95451481B9AEAEFF0DF4EBC83042B042A" +deps_digest = "060AD7E57DFB13104F21BE5F5C3759D03F0553FC3229247D9A7A6B45F50D03A3" +dependencies = [ + { id = "Sui", name = "Sui" }, + { id = "WAL", name = "WAL" }, + { id = "WAL_exchange", name = "WAL_exchange" }, +] + +[[move.package]] +id = "MoveStdlib" +source = { git = "https://github.com/MystenLabs/sui.git", rev = "testnet-v1.35.0", subdir = "crates/sui-framework/packages/move-stdlib" } + +[[move.package]] +id = "Sui" +source = { git = "https://github.com/MystenLabs/sui.git", rev = "testnet-v1.35.0", subdir = "crates/sui-framework/packages/sui-framework" } + +dependencies = [ + { id = "MoveStdlib", name = "MoveStdlib" }, +] + +[[move.package]] +id = "WAL" +source = { local = "../wal" } + +dependencies = [ + { id = "Sui", name = "Sui" }, +] + +[[move.package]] +id = "WAL_exchange" +source = { local = "../wal_exchange" } + +dependencies = [ + { id = "Sui", name = "Sui" }, + { id = "WAL", name = "WAL" }, +] + +[move.toolchain-version] +compiler-version = "1.35.0" +edition = "2024.beta" +flavor = "sui" diff --git a/contracts/blob_store/Move.toml b/contracts/walrus/Move.toml similarity index 50% rename from contracts/blob_store/Move.toml rename to contracts/walrus/Move.toml index 4855e94f..aec968ed 100644 --- a/contracts/blob_store/Move.toml +++ b/contracts/walrus/Move.toml @@ -1,10 +1,13 @@ [package] -name = "blob_store" +name = "Walrus" +license = "Apache-2.0" +authors = ["Mysten Labs "] edition = "2024.beta" [dependencies] -Sui = { git = "https://github.com/MystenLabs/sui.git", subdir = "crates/sui-framework/packages/sui-framework", rev = "testnet-v1.31.1" } +Sui = { git = "https://github.com/MystenLabs/sui.git", subdir = "crates/sui-framework/packages/sui-framework", rev = "testnet-v1.35.0" } +WAL = { local = "../wal" } +WAL_exchange = { local = "../wal_exchange" } [addresses] -blob_store = "0x0" -sui = "0x2" +walrus = "0x0" diff --git 
a/contracts/blob_store/README.md b/contracts/walrus/README.md similarity index 62% rename from contracts/blob_store/README.md rename to contracts/walrus/README.md index 3ee9bae6..f80eaa07 100644 --- a/contracts/blob_store/README.md +++ b/contracts/walrus/README.md @@ -1,11 +1,11 @@ # Walrus Testnet Move contracts -> TODO: directory still contains the Devnet contracts, these and the package ID below need to be -> updated for Testnet. + This is the Move source code for the Walrus Testnet instance. We provide this so developers can -experiment with building Walrus apps that require Move extensions. This code is published on Sui -Testnet at package ID `0x7e12d67a52106ddd5f26c6ff4fe740ba5dea7cfc138d5b1d33863ba9098aa6fe`. +experiment with building Walrus apps that require Move extensions. A slightly different version of +these contracts is deployed on Sui Testnet as package +`0x668fb342c7ea45a3a8d645efefbb41d6b732a5fd4ead552f58df7fabe443c12e`. **A word of caution:** Walrus Mainnet will use new Move packages with struct layouts and function signatures that may not be compatible with this package. Move code that builds against this package diff --git a/contracts/blob_store/docs/msg_formats.txt b/contracts/walrus/docs/msg_formats.txt similarity index 82% rename from contracts/blob_store/docs/msg_formats.txt rename to contracts/walrus/docs/msg_formats.txt index 5a4bacfa..48db5879 100644 --- a/contracts/blob_store/docs/msg_formats.txt +++ b/contracts/walrus/docs/msg_formats.txt @@ -16,9 +16,9 @@ Signatures are 96 byte vector. Signed Message Header --------------------- -All messages MUST start with a header of 3 + 8 bytes: +All messages MUST start with a header of 3 + 4 bytes: - (Intent_type, Intent_version, Intent_app_id) : (u8, u8, u8) -- epoch: u64 +- epoch: u32 - body: remaining vec, no length prefix The intent types are enumerated below, the version is as of now 0, @@ -27,14 +27,15 @@ signing storage node is in when signing the message. 
Intent types (add here): -const SYNC_DONE_MSG_TYPE: u8 = 0; +const PROOF_OF_POSSESSION_MSG_TYPE: u8 = 0; const BLOB_CERT_MSG_TYPE: u8 = 1; const INVALID_BLOB_ID_MSG_TYPE : u8 = 2; -SYNC_DONE message +PROOF_OF_POSSESSION message ----------------- -The body is empty, and the epoch for which sync is done is the epoch in the header. +The body contains the sui address followed by the bls public key (encoded as 48 byte fixed +size array) of the signer. BLOB_CERT message diff --git a/contracts/walrus/sources/init.move b/contracts/walrus/sources/init.move new file mode 100644 index 00000000..a73665d1 --- /dev/null +++ b/contracts/walrus/sources/init.move @@ -0,0 +1,49 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +module walrus::init; + +use sui::clock::Clock; +use walrus::{staking, system}; + +/// Must only be created by `init`. +public struct InitCap has key, store { + id: UID, +} + +/// Init function, creates an init cap and transfers it to the sender. +/// This allows the sender to call the function to actually initialize the system +/// with the corresponding parameters. Once that function is called, the cap is destroyed. +fun init(ctx: &mut TxContext) { + let id = object::new(ctx); + let init_cap = InitCap { id }; + transfer::transfer(init_cap, ctx.sender()); +} + +/// Function to initialize walrus and share the system and staking objects. +/// This can only be called once, after which the `InitCap` is destroyed. 
+public fun initialize_walrus( + cap: InitCap, + epoch_zero_duration: u64, + epoch_duration: u64, + n_shards: u16, + max_epochs_ahead: u32, + clock: &Clock, + ctx: &mut TxContext, +) { + system::create_empty(max_epochs_ahead, ctx); + staking::create(epoch_zero_duration, epoch_duration, n_shards, clock, ctx); + cap.destroy(); +} + +fun destroy(cap: InitCap) { + let InitCap { id } = cap; + id.delete(); +} + +// === Test only === + +#[test_only] +public fun init_for_testing(ctx: &mut TxContext) { + init(ctx); +} diff --git a/contracts/walrus/sources/staking.move b/contracts/walrus/sources/staking.move new file mode 100644 index 00000000..69e99336 --- /dev/null +++ b/contracts/walrus/sources/staking.move @@ -0,0 +1,280 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +#[allow(unused_variable, unused_function, unused_field, unused_mut_parameter)] +/// Module: staking +module walrus::staking; + +use std::string::String; +use sui::{clock::Clock, coin::Coin, dynamic_object_field as df}; +use wal::wal::WAL; +use walrus::{ + staked_wal::StakedWal, + staking_inner::{Self, StakingInnerV1}, + storage_node::{Self, StorageNodeCap}, + system::System +}; + +/// Flag to indicate the version of the Walrus system. +const VERSION: u64 = 0; + +/// The one and only staking object. +public struct Staking has key { + id: UID, + version: u64, +} + +/// Creates and shares a new staking object. +/// Must only be called by the initialization function. 
+public(package) fun create( + epoch_zero_duration: u64, + epoch_duration: u64, + n_shards: u16, + clock: &Clock, + ctx: &mut TxContext, +) { + let mut staking = Staking { id: object::new(ctx), version: VERSION }; + df::add( + &mut staking.id, + VERSION, + staking_inner::new( + epoch_zero_duration, + epoch_duration, + n_shards, + clock, + ctx, + ), + ); + transfer::share_object(staking) +} + +// === Public API: Storage Node === + +/// Creates a staking pool for the candidate, registers the candidate as a storage node. +public fun register_candidate( + staking: &mut Staking, + // node info + name: String, + network_address: String, + public_key: vector, + network_public_key: vector, + proof_of_possession: vector, + // voting parameters + commission_rate: u64, + storage_price: u64, + write_price: u64, + node_capacity: u64, + ctx: &mut TxContext, +): StorageNodeCap { + // use the Pool Object ID as the identifier of the storage node + let node_id = staking + .inner_mut() + .create_pool( + name, + network_address, + public_key, + network_public_key, + proof_of_possession, + commission_rate, + storage_price, + write_price, + node_capacity, + ctx, + ); + + storage_node::new_cap(node_id, ctx) +} + +/// Blocks staking for the nodes staking pool +/// Marks node as "withdrawing", +/// - excludes it from the next committee selection +/// - still has to remain active while it is part of the committee and until all shards have +/// been transferred to its successor +/// - The staking pool is deleted once the last funds have been withdrawn from it by its stakers +public fun withdraw_node(staking: &mut Staking, cap: &mut StorageNodeCap) { + staking.inner_mut().set_withdrawing(cap.node_id()); + staking.inner_mut().withdraw_node(cap); +} + +/// Sets next_commission in the staking pool, which will then take effect as commission rate +/// one epoch after setting the value (to allow stakers to react to setting this). 
+public fun set_next_commission(staking: &mut Staking, cap: &StorageNodeCap, commission_rate: u64) { + staking.inner_mut().set_next_commission(cap, commission_rate); +} + +/// Returns the accumulated commission for the storage node. +public fun collect_commission(staking: &mut Staking, cap: &StorageNodeCap): Coin { + staking.inner_mut().collect_commission(cap) +} + +// === Voting === + +/// Sets the storage price vote for the pool. +public fun set_storage_price_vote(self: &mut Staking, cap: &StorageNodeCap, storage_price: u64) { + self.inner_mut().set_storage_price_vote(cap, storage_price); +} + +/// Sets the write price vote for the pool. +public fun set_write_price_vote(self: &mut Staking, cap: &StorageNodeCap, write_price: u64) { + self.inner_mut().set_write_price_vote(cap, write_price); +} + +/// Sets the node capacity vote for the pool. +public fun set_node_capacity_vote(self: &mut Staking, cap: &StorageNodeCap, node_capacity: u64) { + self.inner_mut().set_node_capacity_vote(cap, node_capacity); +} + +// === Update Node Parameters === + +/// Sets the public key of a node to be used starting from the next epoch for which the node is +/// selected. +public fun set_next_public_key( + self: &mut Staking, + cap: &StorageNodeCap, + public_key: vector, + proof_of_possession: vector, + ctx: &mut TxContext, +) { + self.inner_mut().set_next_public_key(cap, public_key, proof_of_possession, ctx); +} + +/// Sets the name of a storage node. +public fun set_name(self: &mut Staking, cap: &StorageNodeCap, name: String) { + self.inner_mut().set_name(cap, name); +} + +/// Sets the network address or host of a storage node. +public fun set_network_address(self: &mut Staking, cap: &StorageNodeCap, network_address: String) { + self.inner_mut().set_network_address(cap, network_address); +} + +/// Sets the public key used for TLS communication for a node. 
+public fun set_network_public_key( + self: &mut Staking, + cap: &StorageNodeCap, + network_public_key: vector, +) { + self.inner_mut().set_network_public_key(cap, network_public_key); +} + +// === Epoch Change === + +/// Ends the voting period and runs the apportionment if the current time allows. +/// Permissionless, can be called by anyone. +/// Emits: `EpochParametersSelected` event. +public fun voting_end(staking: &mut Staking, clock: &Clock) { + staking.inner_mut().voting_end(clock) +} + +/// Initiates the epoch change if the current time allows. +/// Emits: `EpochChangeStart` event. +public fun initiate_epoch_change(staking: &mut Staking, system: &mut System, clock: &Clock) { + let staking_inner = staking.inner_mut(); + let rewards = system.advance_epoch( + staking_inner.next_bls_committee(), + staking_inner.next_epoch_params(), + ); + + staking_inner.initiate_epoch_change(clock, rewards); +} + +/// Checks if the node should either have received the specified shards from the specified node +/// or vice-versa. +/// +/// - also checks that for the provided shards, this function has not been called before +/// - if so, slashes both nodes and emits an event that allows the receiving node to start +/// shard recovery +public fun shard_transfer_failed( + staking: &mut Staking, + cap: &StorageNodeCap, + other_node_id: ID, + shard_ids: vector, +) { + staking.inner_mut().shard_transfer_failed(cap, other_node_id, shard_ids); +} + +/// Signals to the contract that the node has received all its shards for the new epoch. +public fun epoch_sync_done( + staking: &mut Staking, + cap: &mut StorageNodeCap, + epoch: u32, + clock: &Clock, +) { + staking.inner_mut().epoch_sync_done(cap, epoch, clock); +} + +// === Public API: Staking === + +/// Stake `Coin` with the staking pool. 
+public fun stake_with_pool( + staking: &mut Staking, + to_stake: Coin, + node_id: ID, + ctx: &mut TxContext, +): StakedWal { + staking.inner_mut().stake_with_pool(to_stake, node_id, ctx) +} + +/// Marks the amount as a withdrawal to be processed and removes it from the stake weight of the +/// node. Allows the user to call withdraw_stake after the epoch change to the next epoch and +/// shard transfer is done. +public fun request_withdraw_stake( + staking: &mut Staking, + staked_wal: &mut StakedWal, + ctx: &mut TxContext, +) { + staking.inner_mut().request_withdraw_stake(staked_wal, ctx); +} + +#[allow(lint(self_transfer))] +/// Withdraws the staked amount from the staking pool. +public fun withdraw_stake( + staking: &mut Staking, + staked_wal: StakedWal, + ctx: &mut TxContext, +): Coin { + staking.inner_mut().withdraw_stake(staked_wal, ctx) +} + +// === Internals === + +/// Get a mutable reference to `StakingInner` from the `Staking`. +fun inner_mut(staking: &mut Staking): &mut StakingInnerV1 { + assert!(staking.version == VERSION); + df::borrow_mut(&mut staking.id, VERSION) +} + +/// Get an immutable reference to `StakingInner` from the `Staking`. 
+fun inner(staking: &Staking): &StakingInnerV1 { + assert!(staking.version == VERSION); + df::borrow(&staking.id, VERSION) +} + +// === Tests === + +#[test_only] +use sui::clock; + +#[test_only] +public(package) fun inner_for_testing(staking: &Staking): &StakingInnerV1 { + staking.inner() +} + +#[test_only] +public(package) fun new_for_testing(ctx: &mut TxContext): Staking { + let clock = clock::create_for_testing(ctx); + let mut staking = Staking { id: object::new(ctx), version: VERSION }; + df::add(&mut staking.id, VERSION, staking_inner::new(0, 10, 1000, &clock, ctx)); + clock.destroy_for_testing(); + staking +} + +#[test_only] +public(package) fun is_epoch_sync_done(self: &Staking): bool { + self.inner().is_epoch_sync_done() +} + +#[test_only] +fun new_id(ctx: &mut TxContext): ID { + ctx.fresh_object_address().to_id() +} diff --git a/contracts/walrus/sources/staking/active_set.move b/contracts/walrus/sources/staking/active_set.move new file mode 100644 index 00000000..206ae80a --- /dev/null +++ b/contracts/walrus/sources/staking/active_set.move @@ -0,0 +1,281 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +/// Contains an active set of storage nodes. The active set is a smart collection +/// that only stores up to a 1000 nodes. The nodes are sorted by the amount of +/// staked WAL. Additionally, the active set tracks the total amount of staked +/// WAL to make the calculation of the rewards and voting power distribution easier. +module walrus::active_set; + +public struct ActiveSetEntry has store, copy, drop { + node_id: ID, + staked_amount: u64, +} + +/// The active set of storage nodes, a smart collection that only stores up +/// to a 1000 nodes. The nodes are sorted by the amount of staked WAL. +/// Additionally, the active set tracks the total amount of staked WAL to make +/// the calculation of the rewards and voting power distribution easier. 
public struct ActiveSet has store, copy, drop {
    /// The maximum number of storage nodes in the active set.
    /// Potentially remove this field.
    max_size: u16,
    /// The minimum amount of staked WAL needed to enter the active set. This is used to
    /// determine if a storage node can be added to the active set.
    threshold_stake: u64,
    /// The list of storage nodes in the active set and their stake.
    nodes: vector<ActiveSetEntry>,
    /// The total amount of staked WAL in the active set.
    total_stake: u64,
}

/// Creates a new active set with the given `max_size` and `threshold_stake`. The
/// latter is used to filter out storage nodes that do not have enough staked
/// WAL to be included in the active set initially.
public(package) fun new(max_size: u16, threshold_stake: u64): ActiveSet {
    // An active set of size 0 could never accept a node; reject it early.
    assert!(max_size > 0);
    ActiveSet {
        max_size,
        threshold_stake,
        nodes: vector[],
        total_stake: 0,
    }
}

/// Inserts the node if it is not already in the active set, otherwise updates its stake.
/// Returns true if the node is in the set after the operation, false otherwise.
public(package) fun insert_or_update(set: &mut ActiveSet, node_id: ID, staked_amount: u64): bool {
    if (set.update(node_id, staked_amount)) {
        return true
    };
    set.insert(node_id, staked_amount)
}

/// Updates the staked amount of the storage node with the given `node_id` in
/// the active set. Returns true if the node is in the set.
public(package) fun update(set: &mut ActiveSet, node_id: ID, staked_amount: u64): bool {
    let index = set.nodes.find_index!(|entry| entry.node_id == node_id);
    if (index.is_none()) {
        return false
    };
    index.do!(|idx| {
        // Keep the running total in sync with the stake delta of this node.
        set.total_stake = set.total_stake - set.nodes[idx].staked_amount + staked_amount;
        set.nodes[idx].staked_amount = staked_amount;
    });
    true
}

/// Inserts a storage node with the given `node_id` and `staked_amount` into the
/// active set. The node is only added if it has enough staked WAL to be included
/// in the active set. If the active set is full, the node with the smallest
/// staked WAL is removed to make space for the new node.
/// Returns true if the node was inserted, false otherwise.
public(package) fun insert(set: &mut ActiveSet, node_id: ID, staked_amount: u64): bool {
    // Existing nodes must go through `update` (or `insert_or_update`).
    assert!(set.nodes.find_index!(|entry| entry.node_id == node_id).is_none());

    // Check if the staked amount is enough to be included in the active set.
    if (staked_amount < set.threshold_stake) return false;

    // If the nodes are less than the max size, insert the node.
    if (set.nodes.length() as u16 < set.max_size) {
        set.total_stake = set.total_stake + staked_amount;
        set.nodes.push_back(ActiveSetEntry { node_id, staked_amount });
        true
    } else {
        // Find the node with the smallest amount of stake and less than the new node.
        let mut min_stake = staked_amount;
        let mut min_idx = option::none();
        set.nodes.length().do!(|i| {
            if (set.nodes[i].staked_amount < min_stake) {
                min_idx = option::some(i);
                min_stake = set.nodes[i].staked_amount;
            }
        });
        // If there is such a node, replace it in the list.
        if (min_idx.is_some()) {
            let min_idx = min_idx.extract();
            set.total_stake = set.total_stake - min_stake + staked_amount;
            *&mut set.nodes[min_idx] = ActiveSetEntry { node_id, staked_amount };
            true
        } else {
            false
        }
    }
}

/// Removes the storage node with the given `node_id` from the active set.
public(package) fun remove(set: &mut ActiveSet, node_id: ID) {
    let index = set.nodes.find_index!(|entry| entry.node_id == node_id);
    index.do!(|idx| {
        // `swap_remove` is O(1) and does not preserve the order of `nodes`.
        let entry = set.nodes.swap_remove(idx);
        set.total_stake = set.total_stake - entry.staked_amount;
    });
}

/// The maximum size of the active set.
public(package) fun max_size(set: &ActiveSet): u16 { set.max_size }

/// The current size of the active set.
public(package) fun size(set: &ActiveSet): u16 { set.nodes.length() as u16 }

/// The IDs of the nodes in the active set.
public(package) fun active_ids(set: &ActiveSet): vector<ID> {
    set.nodes.map_ref!(|node| node.node_id)
}

/// The IDs and stake of the nodes in the active set, as two parallel vectors.
public(package) fun active_ids_and_stake(set: &ActiveSet): (vector<ID>, vector<u64>) {
    let mut active_ids = vector[];
    let mut stake = vector[];
    set.nodes.do_ref!(|entry| {
        active_ids.push_back(entry.node_id);
        stake.push_back(entry.staked_amount);
    });
    (active_ids, stake)
}

/// The minimum amount of staked WAL in the active set.
public(package) fun threshold_stake(set: &ActiveSet): u64 { set.threshold_stake }

/// The total amount of staked WAL in the active set.
public(package) fun total_stake(set: &ActiveSet): u64 { set.total_stake }

/// Current minimum stake needed to be in the active set.
/// If the active set is full, the minimum stake is the stake of the node with the smallest stake.
/// Otherwise, the minimum stake is the threshold stake.
/// Test only to discourage using this since it iterates over all nodes. When the `min_stake` is
/// needed within [`ActiveSet`], prefer inlining/integrating it in other loops.
#[test_only]
public(package) fun cur_min_stake(set: &ActiveSet): u64 {
    if (set.nodes.length() == set.max_size as u64) {
        // Set is full: scan for the smallest stake currently held.
        let mut min_stake = std::u64::max_value!();
        set.nodes.length().do!(|i| {
            if (set.nodes[i].staked_amount < min_stake) {
                min_stake = set.nodes[i].staked_amount;
            }
        });
        min_stake
    } else {
        set.threshold_stake
    }
}

/// Test-only lookup of a node's stake; returns 0 when the node is not in the set.
#[test_only]
public fun stake_for_node(set: &ActiveSet, node_id: ID): u64 {
    set.nodes
        .find_index!(|entry| entry.node_id == node_id)
        .map!(|index| set.nodes[index].staked_amount)
        .destroy_with_default(0)
}

// === Test ===

#[test]
fun test_evict_correct_node_simple() {
    let mut set = new(5, 0);
    set.insert_or_update(object::id_from_address(@1), 10);
    set.insert_or_update(object::id_from_address(@2), 9);
    set.insert_or_update(object::id_from_address(@3), 8);
    set.insert_or_update(object::id_from_address(@4), 7);
    set.insert_or_update(object::id_from_address(@5), 6);

    let mut total_stake = 10 + 9 + 8 + 7 + 6;

    assert!(set.total_stake == total_stake);

    // insert another node which should eject node 5
    set.insert_or_update(object::id_from_address(@6), 11);

    // check if total stake was updated correctly
    total_stake = total_stake - 6 + 11;
    assert!(set.total_stake == total_stake);

    let active_ids = set.active_ids();

    // node 5 should not be part of the set
    assert!(!active_ids.contains(&object::id_from_address(@5)));

    // all other nodes should be
    assert!(active_ids.contains(&object::id_from_address(@1)));
    assert!(active_ids.contains(&object::id_from_address(@2)));
    assert!(active_ids.contains(&object::id_from_address(@3)));
    assert!(active_ids.contains(&object::id_from_address(@4)));
    assert!(active_ids.contains(&object::id_from_address(@6)));
}

#[test]
fun test_evict_correct_node_with_updates() {
    let nodes = vector[
        object::id_from_address(@1),
        object::id_from_address(@2),
        object::id_from_address(@3),
        object::id_from_address(@4),
        object::id_from_address(@5),
        object::id_from_address(@6),
    ];

    let mut set = new(5, 0);
    set.insert_or_update(nodes[3], 7);
    set.insert_or_update(nodes[0], 10);
    set.insert_or_update(nodes[2], 8);
    set.insert_or_update(nodes[1], 9);
    set.insert_or_update(nodes[4], 6);

    let mut total_stake = 10 + 9 + 8 + 7 + 6;

    assert!(set.total_stake == total_stake);

    // update nodes again
    set.insert_or_update(nodes[0], 12);
    // check if total stake was updated correctly
    total_stake = total_stake - 10 + 12;
    assert!(set.total_stake == total_stake);
    // check if the stake of the node was updated correctly
    assert!(set.stake_for_node(nodes[0]) == 12);

    set.insert_or_update(nodes[2], 13);
    // check if total stake was updated correctly
    total_stake = total_stake - 8 + 13;
    assert!(set.total_stake == total_stake);
    // check if the stake of the node was updated correctly
    assert!(set.stake_for_node(nodes[2]) == 13);

    set.insert_or_update(nodes[3], 9);
    // check if total stake was updated correctly
    total_stake = total_stake - 7 + 9;
    assert!(set.total_stake == total_stake);
    // check if the stake of the node was updated correctly
    assert!(set.stake_for_node(nodes[3]) == 9);

    set.insert_or_update(nodes[1], 10);
    // check if total stake was updated correctly
    total_stake = total_stake - 9 + 10;
    assert!(set.total_stake == total_stake);
    // check if the stake of the node was updated correctly
    assert!(set.stake_for_node(nodes[1]) == 10);

    set.insert_or_update(nodes[4], 7);
    // check if total stake was updated correctly
    total_stake = total_stake - 6 + 7;
    assert!(set.total_stake == total_stake);
    // check if the stake of the node was updated correctly
    assert!(set.stake_for_node(nodes[4]) == 7);

    // insert another node which should eject nodes[4] (address @5)
    set.insert_or_update(nodes[5], 11);
    // check if total stake was updated correctly
    total_stake = total_stake - 7 + 11;
    assert!(set.total_stake == total_stake);
    // check if the stake of the node was updated correctly
    assert!(set.stake_for_node(nodes[5]) == 11);

    let active_ids = set.active_ids();

    // node 5 should not be part of the set
    assert!(!active_ids.contains(&nodes[4]));

    // all other nodes should be
    assert!(active_ids.contains(&nodes[0]));
    assert!(active_ids.contains(&nodes[1]));
    assert!(active_ids.contains(&nodes[2]));
    assert!(active_ids.contains(&nodes[3]));
    assert!(active_ids.contains(&nodes[5]));
}
diff --git a/contracts/walrus/sources/staking/committee.move b/contracts/walrus/sources/staking/committee.move
new file mode 100644
index 00000000..cf61f6ce
--- /dev/null
+++ b/contracts/walrus/sources/staking/committee.move
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

/// This module defines the `Committee` struct which stores the current
/// committee with shard assignments. Additionally, it manages transitions /
/// transfers of shards between committees with the least amount of changes.
module walrus::committee;

use sui::vec_map::{Self, VecMap};

/// Represents the current committee in the system. Each node in the committee
/// has assigned shard IDs.
public struct Committee(VecMap<ID, vector<u16>>) has store, copy, drop;

/// Creates an empty committee. Only relevant for epoch 0, when no nodes are
/// assigned any shards.
public(package) fun empty(): Committee { Committee(vec_map::empty()) }

/// Initializes the committee with the given `assigned_number` of shards per
/// node. Shards are assigned sequentially to each node.
public(package) fun initialize(assigned_number: VecMap<ID, u16>): Committee {
    let mut shard_idx: u16 = 0;
    let (keys, values) = assigned_number.into_keys_values();
    let cmt = vec_map::from_keys_values(
        keys,
        // Each node receives a consecutive run of shard IDs, continuing from
        // where the previous node's run ended.
        values.map!(|v| vector::tabulate!(v as u64, |_| {
            let res = shard_idx;
            shard_idx = shard_idx + 1;
            res
        })),
    );

    Committee(cmt)
}

/// Transitions the current committee to the new committee with the given shard
/// assignments. The function tries to minimize the number of changes by keeping
/// as many shards in place as possible.
///
/// This assumes that the number of shards in the new committee is equal to the
/// number of shards in the current committee. Check for this is not performed.
public(package) fun transition(cmt: &Committee, mut new_assignments: VecMap<ID, u16>): Committee {
    let mut new_cmt = vec_map::empty();
    let mut to_move = vector[];
    let size = cmt.0.size();

    size.do!(|idx| {
        let (node_id, prev_shards) = cmt.0.get_entry_by_idx(idx);
        let node_id = *node_id;
        let assigned_len = new_assignments.get_idx_opt(&node_id).map!(|idx| {
            let (_, value) = new_assignments.remove_entry_by_idx(idx);
            value as u64
        });

        // if the node is not in the new committee, remove all shards, make
        // them available for reassignment
        if (assigned_len.is_none() || assigned_len.borrow() == &0) {
            let shards = cmt.0.get(&node_id);
            to_move.append(*shards);
            return
        };

        let curr_len = prev_shards.length();
        let assigned_len = assigned_len.destroy_some();

        // node stays the same, we copy the shards over, best scenario
        if (curr_len == assigned_len) {
            new_cmt.insert(node_id, *prev_shards);
        };

        // if the node is in the new committee, check if the number of shards
        // assigned to the node has decreased. If so, remove the extra shards,
        // and move the node to the new committee
        if (curr_len > assigned_len) {
            let mut node_shards = *prev_shards;
            (curr_len - assigned_len).do!(|_| to_move.push_back(node_shards.pop_back()));
            new_cmt.insert(node_id, node_shards);
        };

        // if the node is in the new committee, and we already freed enough
        // shards from other nodes, perform the reassignment. Alternatively,
        // mark the node as needing more shards, so when we free up enough
        // shards, we can assign them to this node
        if (curr_len < assigned_len) {
            let diff = assigned_len - curr_len;
            if (to_move.length() >= diff) {
                let mut node_shards = *prev_shards;
                diff.do!(|_| node_shards.push_back(to_move.pop_back()));
                new_cmt.insert(node_id, node_shards);
            } else {
                // insert it back, we didn't have enough shards to assign
                new_assignments.insert(node_id, assigned_len as u16);
            };
        };
    });

    // Now the `new_assignments` only contains nodes for which we didn't have
    // enough shards to assign, and the nodes that were not part of the old
    // committee.
    let (keys, values) = new_assignments.into_keys_values();
    keys.zip_do!(values, |key, value| {
        if (value == 0) return; // ignore nodes with 0 shards

        let mut current_shards = cmt.0.try_get(&key).destroy_or!(vector[]);
        current_shards
            .length()
            .diff(value as u64)
            .do!(|_| current_shards.push_back(to_move.pop_back()));

        new_cmt.insert(key, current_shards);
    });

    Committee(new_cmt)
}

#[syntax(index)]
/// Get the shards assigned to the given `node_id`.
public(package) fun shards(cmt: &Committee, node_id: &ID): &vector<u16> {
    cmt.0.get(node_id)
}

/// Get the number of nodes in the committee.
public(package) fun size(cmt: &Committee): u64 {
    cmt.0.size()
}

/// Get the inner representation of the committee.
public(package) fun inner(cmt: &Committee): &VecMap<ID, vector<u16>> {
    &cmt.0
}

/// Copy the inner representation of the committee.
public(package) fun to_inner(cmt: &Committee): VecMap<ID, vector<u16>> {
    cmt.0
}
diff --git a/contracts/walrus/sources/staking/exchange_rate.move b/contracts/walrus/sources/staking/exchange_rate.move
new file mode 100644
index 00000000..57b0e3c9
--- /dev/null
+++ b/contracts/walrus/sources/staking/exchange_rate.move
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

/// A utility module which implements an `ExchangeRate` struct and its methods.
/// It stores a fixed point exchange rate between the Wal token and pool token.
module walrus::pool_exchange_rate;

/// Represents the exchange rate for the staking pool.
public struct PoolExchangeRate has store, copy, drop {
    /// Amount of staked WAL tokens this epoch.
    wal_amount: u128,
    /// Amount of total tokens in the pool this epoch.
    pool_token_amount: u128,
}

/// Create an empty exchange rate.
public(package) fun empty(): PoolExchangeRate {
    PoolExchangeRate {
        wal_amount: 0,
        pool_token_amount: 0,
    }
}

/// Create a new exchange rate with the given amounts.
public(package) fun new(wal_amount: u64, pool_token_amount: u64): PoolExchangeRate {
    PoolExchangeRate {
        // Widened to u128 so the products in the conversions below cannot overflow.
        wal_amount: (wal_amount as u128),
        pool_token_amount: (pool_token_amount as u128),
    }
}

/// Convert the given amount of pool tokens into the corresponding WAL amount
/// at this exchange rate.
public(package) fun get_wal_amount(exchange_rate: &PoolExchangeRate, token_amount: u64): u64 {
    // When either amount is 0, that means we have no stakes with this pool.
    // The other amount might be non-zero when there's dust left in the pool.
    if (exchange_rate.wal_amount == 0 || exchange_rate.pool_token_amount == 0) {
        return token_amount
    };

    let token_amount = (token_amount as u128);
    let res = token_amount * exchange_rate.wal_amount / exchange_rate.pool_token_amount;

    res as u64
}

/// Convert the given WAL amount into the corresponding amount of pool tokens
/// at this exchange rate.
public(package) fun get_token_amount(exchange_rate: &PoolExchangeRate, wal_amount: u64): u64 {
    // When either amount is 0, that means we have no stakes with this pool.
    // The other amount might be non-zero when there's dust left in the pool.
    if (exchange_rate.wal_amount == 0 || exchange_rate.pool_token_amount == 0) {
        return wal_amount
    };

    let wal_amount = (wal_amount as u128);
    let res = wal_amount * exchange_rate.pool_token_amount / exchange_rate.wal_amount;

    res as u64
}
diff --git a/contracts/walrus/sources/staking/pending_values.move b/contracts/walrus/sources/staking/pending_values.move
new file mode 100644
index 00000000..b9327304
--- /dev/null
+++ b/contracts/walrus/sources/staking/pending_values.move
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

module walrus::pending_values;

use sui::vec_map::{Self, VecMap};

/// Represents a map of pending values. The key is the epoch when the value is
/// pending, and the value is the amount of WALs or pool tokens.
public struct PendingValues(VecMap<u32, u64>) has store, drop, copy;

/// Create a new empty `PendingValues` instance.
public(package) fun empty(): PendingValues { PendingValues(vec_map::empty()) }

/// Add `value` at `epoch`, creating the entry if it does not exist yet.
public(package) fun insert_or_add(self: &mut PendingValues, epoch: u32, value: u64) {
    let map = &mut self.0;
    if (!map.contains(&epoch)) {
        map.insert(epoch, value);
    } else {
        let curr = map[&epoch];
        *&mut map[&epoch] = curr + value;
    };
}

/// Get the total value of the pending values up to the given epoch.
public(package) fun value_at(self: &PendingValues, epoch: u32): u64 {
    self.0.keys().fold!(0, |mut value, e| {
        if (e <= epoch) value = value + self.0[&e];
        value
    })
}

/// Reduce the pending values to the given epoch. This method removes all the
/// values that are pending for epochs less than or equal to the given epoch.
public(package) fun flush(self: &mut PendingValues, to_epoch: u32): u64 {
    let mut value = 0;
    self.0.keys().do!(|epoch| if (epoch <= to_epoch) {
        let (_, epoch_value) = self.0.remove(&epoch);
        value = value + epoch_value;
    });
    value
}

/// Unwrap the `PendingValues` into a `VecMap`.
public(package) fun unwrap(self: PendingValues): VecMap<u32, u64> {
    let PendingValues(map) = self;
    map
}

/// Check if the `PendingValues` is empty.
public(package) fun is_empty(self: &PendingValues): bool { self.0.is_empty() }

#[test]
fun test_pending_values() {
    use std::unit_test::assert_eq;

    let mut pending = empty();
    assert!(pending.is_empty());

    pending.insert_or_add(0, 10);
    pending.insert_or_add(0, 10);
    pending.insert_or_add(1, 20);

    // test reads
    assert_eq!(pending.value_at(0), 20);
    assert_eq!(pending.value_at(1), 40);

    // test flushing, and reads after flushing
    assert_eq!(pending.flush(0), 20);
    assert_eq!(pending.value_at(0), 0);

    // flush the rest of the values and check if the map is empty
    assert_eq!(pending.value_at(1), 20);
    assert_eq!(pending.flush(1), 20);
    assert!(pending.is_empty());

    // unwrap the pending values
    let _ = pending.unwrap();
}
diff --git a/contracts/walrus/sources/staking/staked_wal.move b/contracts/walrus/sources/staking/staked_wal.move
new file mode 100644
index 00000000..42b8bb09
--- /dev/null
+++ b/contracts/walrus/sources/staking/staked_wal.move
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

/// Module: `staked_wal`
///
/// Implements the `StakedWal` functionality - a staked WAL is an object that
/// represents a staked amount of WALs in a staking pool. It is created in the
/// `staking_pool` on staking and can be split, joined, and burned. The burning
/// is performed via the `withdraw_stake` method in the `staking_pool`.
module walrus::staked_wal;

use sui::balance::Balance;
use wal::wal::WAL;

// Keep errors in `walrus-sui/types/move_errors.rs` up to date with changes here.
const ENotWithdrawing: u64 = 0;
const EMetadataMismatch: u64 = 1;
const EInvalidAmount: u64 = 2;
const ENonZeroPrincipal: u64 = 3;
const ECantJoinWithdrawing: u64 = 4;
const ECantSplitWithdrawing: u64 = 5;

/// The state of the staked WAL. It can be either `Staked` or `Withdrawing`.
/// The `Withdrawing` state contains the epoch when the staked WAL can be
/// withdrawn.
public enum StakedWalState has store, copy, drop {
    // Default state of the staked WAL - it is staked in the staking pool.
    Staked,
    // The staked WAL is in the process of withdrawing. The value inside the
    // variant is the epoch when the staked WAL can be withdrawn.
    Withdrawing { withdraw_epoch: u32, pool_token_amount: u64 },
}

/// Represents a staked WAL, does not store the `Balance` inside, but uses
/// `u64` to represent the staked amount. Behaves similarly to `Balance` and
/// `Coin` providing methods to `split` and `join`.
public struct StakedWal has key, store {
    id: UID,
    /// Whether the staked WAL is active or withdrawing.
    state: StakedWalState,
    /// ID of the staking pool.
    node_id: ID,
    /// The staked amount.
    principal: Balance<WAL>,
    /// The Walrus epoch when the staked WAL was activated.
    activation_epoch: u32,
}

/// Protected method to create a new staked WAL.
public(package) fun mint(
    node_id: ID,
    principal: Balance<WAL>,
    activation_epoch: u32,
    ctx: &mut TxContext,
): StakedWal {
    StakedWal {
        id: object::new(ctx),
        state: StakedWalState::Staked,
        node_id,
        principal,
        activation_epoch,
    }
}

/// Burns the staked WAL and returns the `principal`.
public(package) fun into_balance(sw: StakedWal): Balance<WAL> {
    let StakedWal { id, principal, .. } = sw;
    id.delete();
    principal
}

/// Sets the staked WAL state to `Withdrawing`
public(package) fun set_withdrawing(
    sw: &mut StakedWal,
    withdraw_epoch: u32,
    pool_token_amount: u64,
) {
    sw.state = StakedWalState::Withdrawing { withdraw_epoch, pool_token_amount };
}

// === Accessors ===

/// Returns the `node_id` of the staked WAL.
public fun node_id(sw: &StakedWal): ID { sw.node_id }

/// Returns the `principal` of the staked WAL. Called `value` to be consistent
/// with `Coin`.
public fun value(sw: &StakedWal): u64 { sw.principal.value() }

/// Returns the `activation_epoch` of the staked WAL.
public fun activation_epoch(sw: &StakedWal): u32 { sw.activation_epoch }

/// Returns true if the staked WAL is in the `Staked` state.
public fun is_staked(sw: &StakedWal): bool { sw.state == StakedWalState::Staked }

/// Checks whether the staked WAL is in the `Withdrawing` state.
public fun is_withdrawing(sw: &StakedWal): bool {
    match (sw.state) {
        StakedWalState::Withdrawing { .. } => true,
        _ => false,
    }
}

/// Returns the `withdraw_epoch` of the staked WAL if it is in the `Withdrawing`.
/// Aborts otherwise.
public fun withdraw_epoch(sw: &StakedWal): u32 {
    match (sw.state) {
        StakedWalState::Withdrawing { withdraw_epoch, .. } => withdraw_epoch,
        _ => abort ENotWithdrawing,
    }
}

/// Return the `pool_token_amount` of the staked WAL if it is in the `Withdrawing`.
/// Aborts otherwise.
public fun pool_token_amount(sw: &StakedWal): u64 {
    match (sw.state) {
        StakedWalState::Withdrawing { pool_token_amount, .. } => pool_token_amount,
        _ => abort ENotWithdrawing,
    }
}

// === Public APIs ===

/// Joins the staked WAL with another staked WAL, adding the `principal` of the
/// `other` staked WAL to the current staked WAL.
///
/// Aborts if the `node_id` or `activation_epoch` of the staked WALs do not match.
public fun join(sw: &mut StakedWal, other: StakedWal) {
    let StakedWal { id, state, node_id, activation_epoch, principal } = other;
    assert!(sw.state == state, EMetadataMismatch);
    assert!(sw.node_id == node_id, EMetadataMismatch);
    assert!(!sw.is_withdrawing(), ECantJoinWithdrawing);
    assert!(sw.activation_epoch == activation_epoch, EMetadataMismatch);

    id.delete();

    sw.principal.join(principal);
}

/// Splits the staked WAL into two parts, one with the `amount` and the other
/// with the remaining `principal`. The `node_id`, `activation_epoch` are the
/// same for both the staked WALs.
///
/// Aborts if the `amount` is greater than the `principal` of the staked WAL.
public fun split(sw: &mut StakedWal, amount: u64, ctx: &mut TxContext): StakedWal {
    assert!(sw.principal.value() >= amount, EInvalidAmount);
    assert!(!sw.is_withdrawing(), ECantSplitWithdrawing);

    StakedWal {
        id: object::new(ctx),
        state: sw.state, // state is preserved
        node_id: sw.node_id,
        principal: sw.principal.split(amount),
        activation_epoch: sw.activation_epoch,
    }
}

/// Destroys the staked WAL if the `principal` is zero. Ignores the `node_id`
/// and `activation_epoch` of the staked WAL given that it is zero.
public fun destroy_zero(sw: StakedWal) {
    assert!(sw.principal.value() == 0, ENonZeroPrincipal);
    let StakedWal { id, principal, .. } = sw;
    principal.destroy_zero();
    id.delete();
}

#[test_only]
public fun destroy_for_testing(sw: StakedWal) {
    let StakedWal { id, principal, .. } = sw;
    principal.destroy_for_testing();
    id.delete();
}
diff --git a/contracts/walrus/sources/staking/staking_inner.move b/contracts/walrus/sources/staking/staking_inner.move
new file mode 100644
index 00000000..26131df4
--- /dev/null
+++ b/contracts/walrus/sources/staking/staking_inner.move
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

module walrus::staking_inner;

use std::string::String;
use sui::{
    balance::{Self, Balance},
    clock::Clock,
    coin::Coin,
    object_table::{Self, ObjectTable},
    priority_queue::{Self, PriorityQueue},
    vec_map
};
use wal::wal::WAL;
use walrus::{
    active_set::{Self, ActiveSet},
    bls_aggregate::{Self, BlsCommittee},
    committee::{Self, Committee},
    epoch_parameters::{Self, EpochParams},
    events,
    staked_wal::StakedWal,
    staking_pool::{Self, StakingPool},
    storage_node::StorageNodeCap,
    walrus_context::{Self, WalrusContext}
};

/// The minimum amount of staked WAL required to be included in the active set.
const MIN_STAKE: u64 = 0;

/// Temporary upper limit for the number of storage nodes.
const TEMP_ACTIVE_SET_SIZE_LIMIT: u16 = 100;

// Keep errors in `walrus-sui/types/move_errors.rs` up to date with changes here.
const EWrongEpochState: u64 = 0;
const EInvalidSyncEpoch: u64 = 1;
const EDuplicateSyncDone: u64 = 2;
const ENoStake: u64 = 3;
const ENotInCommittee: u64 = 4;
const ENotImplemented: u64 = 5;

/// The epoch state.
public enum EpochState has store, copy, drop {
    // Epoch change is currently in progress. Contains the weight of the nodes that
    // have already attested that they finished the sync.
    EpochChangeSync(u16),
    // Epoch change has been completed at the contained timestamp.
    EpochChangeDone(u64),
    // The parameters for the next epoch have been selected.
    // The contained timestamp is the start of the current epoch.
    NextParamsSelected(u64),
}

/// The inner object for the staking part of the system.
public struct StakingInnerV1 has store, key {
    /// The object ID
    id: UID,
    /// The number of shards in the system.
    n_shards: u16,
    /// The duration of an epoch in ms. Does not affect the first (zero) epoch.
    epoch_duration: u64,
    /// Special parameter, used only for the first epoch. The timestamp when the
    /// first epoch can be started.
    first_epoch_start: u64,
    /// Stored staking pools, each identified by a unique `ID` and contains
    /// the `StakingPool` object. Uses `ObjectTable` to make the pool discovery
    /// easier by avoiding wrapping.
    ///
    /// The key is the ID of the staking pool.
    pools: ObjectTable<ID, StakingPool>,
    /// The current epoch of the Walrus system. The epochs are not the same as
    /// the Sui epochs, not to be mistaken with `ctx.epoch()`.
    epoch: u32,
    /// Stores the active set of storage nodes. Provides automatic sorting and
    /// tracks the total amount of staked WAL.
    active_set: ActiveSet,
    /// The next committee in the system.
    next_committee: Option<Committee>,
    /// The current committee in the system.
    committee: Committee,
    /// The previous committee in the system.
    previous_committee: Committee,
    /// The next epoch parameters.
    next_epoch_params: Option<EpochParams>,
    /// The state of the current epoch.
    epoch_state: EpochState,
    /// Rewards left over from the previous epoch that couldn't be distributed due to rounding.
    leftover_rewards: Balance<WAL>,
}

/// Creates a new `StakingInnerV1` object with default values.
public(package) fun new(
    epoch_zero_duration: u64,
    epoch_duration: u64,
    n_shards: u16,
    clock: &Clock,
    ctx: &mut TxContext,
): StakingInnerV1 {
    StakingInnerV1 {
        id: object::new(ctx),
        n_shards,
        epoch_duration,
        first_epoch_start: epoch_zero_duration + clock.timestamp_ms(),
        pools: object_table::new(ctx),
        epoch: 0,
        active_set: active_set::new(TEMP_ACTIVE_SET_SIZE_LIMIT, MIN_STAKE),
        next_committee: option::none(),
        committee: committee::empty(),
        previous_committee: committee::empty(),
        next_epoch_params: option::none(),
        epoch_state: EpochState::EpochChangeDone(clock.timestamp_ms()),
        leftover_rewards: balance::zero(),
    }
}

// === Staking Pool / Storage Node ===

/// Creates a new staking pool with the given `commission_rate`.
public(package) fun create_pool(
    self: &mut StakingInnerV1,
    name: String,
    network_address: String,
    public_key: vector<u8>,
    network_public_key: vector<u8>,
    proof_of_possession: vector<u8>,
    commission_rate: u64,
    storage_price: u64,
    write_price: u64,
    node_capacity: u64,
    ctx: &mut TxContext,
): ID {
    let pool = staking_pool::new(
        name,
        network_address,
        public_key,
        network_public_key,
        proof_of_possession,
        commission_rate,
        storage_price,
        write_price,
        node_capacity,
        &self.new_walrus_context(),
        ctx,
    );

    let node_id = object::id(&pool);
    self.pools.add(node_id, pool);
    node_id
}

/// Blocks staking for the pool, marks it as "withdrawing".
#[allow(unused_mut_parameter)]
public(package) fun withdraw_node(self: &mut StakingInnerV1, cap: &mut StorageNodeCap) {
    let wctx = &self.new_walrus_context();
    self.pools[cap.node_id()].set_withdrawing(wctx);
}

/// Commission collection is not implemented yet; always aborts.
public(package) fun collect_commission(_: &mut StakingInnerV1, _: &StorageNodeCap): Coin<WAL> {
    abort ENotImplemented
}

/// Ends the voting period: selects the committee for the next epoch and
/// computes the next epoch parameters from the nodes' votes.
public(package) fun voting_end(self: &mut StakingInnerV1, clock: &Clock) {
    // Check if it's time to end the voting.
    let last_epoch_change = match (self.epoch_state) {
        EpochState::EpochChangeDone(last_epoch_change) => last_epoch_change,
        _ => abort EWrongEpochState,
    };

    let now = clock.timestamp_ms();
    let param_selection_delta = self.epoch_duration / 2;

    // We don't need a delay for the epoch zero.
    if (self.epoch != 0) {
        assert!(now >= last_epoch_change + param_selection_delta, EWrongEpochState);
    } else {
        assert!(now >= self.first_epoch_start, EWrongEpochState);
    };

    // Assign the next epoch committee.
    self.select_committee();
    self.next_epoch_params = option::some(self.calculate_votes());

    // Set the new epoch state.
    self.epoch_state = EpochState::NextParamsSelected(last_epoch_change);

    // Emit event that parameters have been selected.
    events::emit_epoch_parameters_selected(self.epoch + 1);
}

/// Calculates the votes for the next epoch parameters. The function sorts the
/// write and storage prices and picks the value that satisfies a quorum of the weight.
public(package) fun calculate_votes(self: &StakingInnerV1): EpochParams {
    assert!(self.next_committee.is_some());

    let size = self.next_committee.borrow().size();
    let inner = self.next_committee.borrow().inner();
    let mut write_prices = priority_queue::new(vector[]);
    let mut storage_prices = priority_queue::new(vector[]);
    let mut capacity_votes = priority_queue::new(vector[]);

    size.do!(|i| {
        let (node_id, shards) = inner.get_entry_by_idx(i);
        let pool = &self.pools[*node_id];
        let weight = shards.length();
        write_prices.insert(pool.write_price(), weight);
        storage_prices.insert(pool.storage_price(), weight);
        // The vote for capacity is determined by the node capacity and number of assigned shards.
        let capacity_vote = pool.node_capacity() / weight * (self.n_shards as u64);
        capacity_votes.insert(capacity_vote, weight);
    });

    epoch_parameters::new(
        quorum_above(&mut capacity_votes, self.n_shards),
        quorum_below(&mut storage_prices, self.n_shards),
        quorum_below(&mut write_prices, self.n_shards),
    )
}

/// Take the highest value, s.t. a quorum (2f + 1) voted for a value larger or equal to this.
fun quorum_above(vote_queue: &mut PriorityQueue<u64>, n_shards: u16): u64 {
    let threshold_weight = (n_shards - (n_shards - 1) / 3) as u64;
    take_threshold_value(vote_queue, threshold_weight)
}

/// Take the lowest value, s.t. a quorum (2f + 1) voted for a value lower or equal to this.
fun quorum_below(vote_queue: &mut PriorityQueue<u64>, n_shards: u16): u64 {
    let threshold_weight = ((n_shards - 1) / 3 + 1) as u64;
    take_threshold_value(vote_queue, threshold_weight)
}

/// Pops values in descending order, accumulating their weights until the
/// accumulated weight reaches `threshold_weight`; returns the value at which
/// the threshold is crossed.
fun take_threshold_value(vote_queue: &mut PriorityQueue<u64>, threshold_weight: u64): u64 {
    let mut sum_weight = 0;
    // The loop will always succeed if `threshold_weight` is smaller than the total weight.
    loop {
        let (value, weight) = vote_queue.pop_max();
        sum_weight = sum_weight + weight;
        if (sum_weight >= threshold_weight) {
            return value
        };
    }
}

// === Voting ===

/// Sets the next commission rate for the pool.
public(package) fun set_next_commission(
    self: &mut StakingInnerV1,
    cap: &StorageNodeCap,
    commission_rate: u64,
) {
    self.pools[cap.node_id()].set_next_commission(commission_rate);
}

/// Sets the storage price vote for the pool.
public(package) fun set_storage_price_vote(
    self: &mut StakingInnerV1,
    cap: &StorageNodeCap,
    storage_price: u64,
) {
    self.pools[cap.node_id()].set_next_storage_price(storage_price);
}

/// Sets the write price vote for the pool.
public(package) fun set_write_price_vote(
    self: &mut StakingInnerV1,
    cap: &StorageNodeCap,
    write_price: u64,
) {
    self.pools[cap.node_id()].set_next_write_price(write_price);
}

/// Sets the node capacity vote for the pool.
public(package) fun set_node_capacity_vote(
    self: &mut StakingInnerV1,
    cap: &StorageNodeCap,
    node_capacity: u64,
) {
    self.pools[cap.node_id()].set_next_node_capacity(node_capacity);
}

// === Update Node Parameters ===

/// Sets the public key of a node to be used starting from the next epoch for which the node is
/// selected.
+public(package) fun set_next_public_key( + self: &mut StakingInnerV1, + cap: &StorageNodeCap, + public_key: vector, + proof_of_possession: vector, + ctx: &TxContext, +) { + let wctx = &self.new_walrus_context(); + self.pools[cap.node_id()].set_next_public_key(public_key, proof_of_possession, wctx, ctx); +} + +/// Sets the name of a storage node. +public(package) fun set_name(self: &mut StakingInnerV1, cap: &StorageNodeCap, name: String) { + self.pools[cap.node_id()].set_name(name); +} + +/// Sets the network address or host of a storage node. +public(package) fun set_network_address( + self: &mut StakingInnerV1, + cap: &StorageNodeCap, + network_address: String, +) { + self.pools[cap.node_id()].set_network_address(network_address); +} + +/// Sets the public key used for TLS communication for a node. +public(package) fun set_network_public_key( + self: &mut StakingInnerV1, + cap: &StorageNodeCap, + network_public_key: vector, +) { + self.pools[cap.node_id()].set_network_public_key(network_public_key); +} + +// === Staking === + +/// Blocks staking for the pool, marks it as "withdrawing". +public(package) fun set_withdrawing(self: &mut StakingInnerV1, node_id: ID) { + let wctx = &self.new_walrus_context(); + self.pools[node_id].set_withdrawing(wctx); +} + +/// Destroys the pool if it is empty, after the last stake has been withdrawn. +public(package) fun destroy_empty_pool( + self: &mut StakingInnerV1, + node_id: ID, + _ctx: &mut TxContext, +) { + self.pools.remove(node_id).destroy_empty() +} + +/// Stakes the given amount of `T` with the pool, returning the `StakedWal`. 
+public(package) fun stake_with_pool( + self: &mut StakingInnerV1, + to_stake: Coin, + node_id: ID, + ctx: &mut TxContext, +): StakedWal { + let wctx = &self.new_walrus_context(); + let pool = &mut self.pools[node_id]; + let staked_wal = pool.stake(to_stake.into_balance(), wctx, ctx); + + // Active set only tracks the stake for the next vote, which either happens for the committee + // in wctx.epoch() + 1, or in wctx.epoch() + 2, depending on whether the vote already happened. + let balance = match (self.epoch_state) { + EpochState::NextParamsSelected(_) => pool.wal_balance_at_epoch(wctx.epoch() + 2), + _ => pool.wal_balance_at_epoch(wctx.epoch() + 1), + }; + self.active_set.insert_or_update(node_id, balance); + staked_wal +} + +/// Requests withdrawal of the given amount from the `StakedWAL`, marking it as +/// `Withdrawing`. Once the epoch is greater than the `withdraw_epoch`, the +/// withdrawal can be performed. +public(package) fun request_withdraw_stake( + self: &mut StakingInnerV1, + staked_wal: &mut StakedWal, + _ctx: &mut TxContext, +) { + let wctx = &self.new_walrus_context(); + self.pools[staked_wal.node_id()].request_withdraw_stake(staked_wal, wctx); +} + +/// Perform the withdrawal of the staked WAL, returning the amount to the caller. +/// The `StakedWal` must be in the `Withdrawing` state, and the epoch must be +/// greater than the `withdraw_epoch`. +public(package) fun withdraw_stake( + self: &mut StakingInnerV1, + staked_wal: StakedWal, + ctx: &mut TxContext, +): Coin { + let wctx = &self.new_walrus_context(); + self.pools[staked_wal.node_id()].withdraw_stake(staked_wal, wctx).into_coin(ctx) +} + +// === System === + +/// Selects the committee for the next epoch. 
+public(package) fun select_committee(self: &mut StakingInnerV1) { + assert!(self.next_committee.is_none()); + + let (active_ids, shards) = self.apportionment(); + let distribution = vec_map::from_keys_values(active_ids, shards); + + // if we're dealing with the first epoch, we need to assign the shards to the + // nodes in a sequential manner. Assuming there's at least 1 node in the set. + let committee = if (self.committee.size() == 0) committee::initialize(distribution) + else self.committee.transition(distribution); + + self.next_committee = option::some(committee); +} + +fun apportionment(self: &StakingInnerV1): (vector, vector) { + let (active_ids, stake) = self.active_set.active_ids_and_stake(); + let n_nodes = stake.length(); + let priorities = vector::tabulate!(n_nodes, |i| n_nodes - i); + let shards = dhondt(priorities, self.n_shards, stake); + (active_ids, shards) +} + +const DHONDT_TOTAL_STAKE_MAX: u64 = 0xFFFF_FFFF; + +// Implementation of the D'Hondt method (aka Jefferson method) for apportionment. +fun dhondt( + // Priorities for the nodes for tie-breaking. Nodes with a higher priority value + // have a higher precedence. + node_priorities: vector, + n_shards: u16, + stake: vector, +): vector { + use std::fixed_point32::{create_from_rational as from_rational, get_raw_value as to_raw}; + + let total_stake = stake.fold!(0, |acc, x| acc + x); + + let scaling = DHONDT_TOTAL_STAKE_MAX + .max(total_stake) + .divide_and_round_up(DHONDT_TOTAL_STAKE_MAX); + let total_stake = total_stake / scaling; + let stake = stake.map!(|s| s / scaling); + + let n_nodes = stake.length(); + let n_shards = n_shards as u64; + assert!(total_stake > 0, ENoStake); + + // Initial assignment following Hagenbach-Bischoff. + // This assigns an initial number of shards to each node, s.t. this does not exceed the final + // assignment. 
+ // The denominator (`total_stake/(n_shards + 1) + 1`) is called "distribution number" and + // is the amount of stake that guarantees receiving a shard with the d'Hondt method. By + // dividing the stake per node by this distribution number and rounding down (integer + // division), we therefore get a lower bound for the number of shards assigned to the node. + let mut shards = stake.map_ref!(|s| *s / (total_stake/(n_shards + 1) + 1)); + // Set up quotients priority queue. + let mut quotients = priority_queue::new(vector[]); + n_nodes.do!(|index| { + let quotient = from_rational(stake[index], shards[index] + 1); + quotients.insert(quotient.to_raw(), index); + }); + + // Set up a priority queue for the ranking of nodes with equal quotient. + let mut equal_quotient_ranking = priority_queue::new(vector[]); + // Priority_queue currently doesn't allow peeking at the head or checking the length. + let mut equal_quotient_ranking_len = 0; + + if (n_nodes == 0) return vector[]; + let mut n_shards_distributed = shards.fold!(0, |acc, x| acc + x); + // loop until all shards are distributed + while (n_shards_distributed != n_shards) { + let index = if (equal_quotient_ranking_len > 0) { + let (_priority, index) = equal_quotient_ranking.pop_max(); + equal_quotient_ranking_len = equal_quotient_ranking_len - 1; + index + } else { + let (quotient, index) = quotients.pop_max(); + equal_quotient_ranking.insert(node_priorities[index], index); + equal_quotient_ranking_len = equal_quotient_ranking_len + 1; + // Condition ensures that `quotients` is not empty. 
+ while (n_nodes > equal_quotient_ranking_len) { + let (next_quotient, next_index) = quotients.pop_max(); + if (next_quotient == quotient) { + equal_quotient_ranking.insert(node_priorities[next_index], next_index); + equal_quotient_ranking_len = equal_quotient_ranking_len + 1; + } else { + quotients.insert(next_quotient, next_index); + break + } + }; + let (_priority, index) = equal_quotient_ranking.pop_max(); + equal_quotient_ranking_len = equal_quotient_ranking_len - 1; + index + }; + *&mut shards[index] = shards[index] + 1; + let quotient = from_rational(stake[index], shards[index] + 1); + quotients.insert(quotient.to_raw(), index); + n_shards_distributed = n_shards_distributed + 1; + }; + shards.map!(|s| s as u16) +} + +/// Initiates the epoch change if the current time allows. +public(package) fun initiate_epoch_change( + self: &mut StakingInnerV1, + clock: &Clock, + rewards: Balance, +) { + let last_epoch_change = match (self.epoch_state) { + EpochState::NextParamsSelected(last_epoch_change) => last_epoch_change, + _ => abort EWrongEpochState, + }; + + let now = clock.timestamp_ms(); + + if (self.epoch == 0) assert!(now >= self.first_epoch_start, EWrongEpochState) + else assert!(now >= last_epoch_change + self.epoch_duration, EWrongEpochState); + + self.advance_epoch(rewards); +} + +/// Sets the next epoch of the system and emits the epoch change start event. +public(package) fun advance_epoch(self: &mut StakingInnerV1, mut rewards: Balance) { + assert!(self.next_committee.is_some(), EWrongEpochState); + + self.epoch = self.epoch + 1; + self.previous_committee = self.committee; + self.committee = self.next_committee.extract(); // overwrites the current committee + self.epoch_state = EpochState::EpochChangeSync(0); + + let wctx = &self.new_walrus_context(); + + // Distribute the rewards. + + // Add any leftover rewards to the rewards to distribute. 
+ let leftover_value = self.leftover_rewards.value(); + rewards.join(self.leftover_rewards.split(leftover_value)); + let rewards_per_shard = rewards.value() / (self.n_shards as u64); + + // Add any nodes that are new in the committee to the previous shard assignments + // without any shards, s.t. we call advance_epoch on them and update the active set. + let mut prev_shard_assignments = *self.previous_committee.inner(); + self.committee.inner().keys().do!(|node_id| if (!prev_shard_assignments.contains(&node_id)) { + prev_shard_assignments.insert(node_id, vector[]); + }); + let (node_ids, shard_assignments) = prev_shard_assignments.into_keys_values(); + + node_ids.zip_do!(shard_assignments, |node_id, shards| { + self.pools[node_id].advance_epoch(rewards.split(rewards_per_shard * shards.length()), wctx); + self + .active_set + .update(node_id, self.pools[node_id].wal_balance_at_epoch(wctx.epoch() + 1)); + }); + + // Save any leftover rewards due to rounding. + self.leftover_rewards.join(rewards); + + // Emit epoch change start event. + events::emit_epoch_change_start(self.epoch); +} + +/// Signals to the contract that the node has received all its shards for the new epoch. +public(package) fun epoch_sync_done( + self: &mut StakingInnerV1, + cap: &mut StorageNodeCap, + epoch: u32, + clock: &Clock, +) { + // Make sure the node hasn't attested yet, and set the new epoch as the last sync done epoch. 
+ assert!(epoch == self.epoch, EInvalidSyncEpoch); + assert!(cap.last_epoch_sync_done() < self.epoch, EDuplicateSyncDone); + cap.set_last_epoch_sync_done(self.epoch); + + assert!(self.committee.inner().contains(&cap.node_id()), ENotInCommittee); + let node_shards = self.committee.shards(&cap.node_id()); + match (self.epoch_state) { + EpochState::EpochChangeSync(weight) => { + let weight = weight + (node_shards.length() as u16); + if (is_quorum(weight, self.n_shards)) { + self.epoch_state = EpochState::EpochChangeDone(clock.timestamp_ms()); + events::emit_epoch_change_done(self.epoch); + } else { + self.epoch_state = EpochState::EpochChangeSync(weight); + } + }, + _ => {}, + }; + // Emit the event that the node has received all shards. + events::emit_shards_received(self.epoch, *node_shards); +} + +/// Checks if the node should either have received the specified shards from the specified node +/// or vice-versa. +/// +/// - also checks that for the provided shards, this function has not been called before +/// - if so, slashes both nodes and emits an event that allows the receiving node to start +/// shard recovery +public fun shard_transfer_failed( + _staking: &mut StakingInnerV1, + _cap: &StorageNodeCap, + _other_node_id: ID, + _shard_ids: vector, +) { + abort ENotImplemented +} + +// === Accessors === + +/// Returns the Option with next committee. +public(package) fun next_committee(self: &StakingInnerV1): &Option { + &self.next_committee +} + +/// Returns the next epoch parameters if set, otherwise aborts with an error. +public(package) fun next_epoch_params(self: &StakingInnerV1): EpochParams { + *self.next_epoch_params.borrow() +} + +/// Get the current epoch. +public(package) fun epoch(self: &StakingInnerV1): u32 { + self.epoch +} + +/// Get the current committee. +public(package) fun committee(self: &StakingInnerV1): &Committee { + &self.committee +} + +/// Get the previous committee. 
+public(package) fun previous_committee(self: &StakingInnerV1): &Committee { + &self.previous_committee +} + +/// Construct the BLS committee for the next epoch. +public(package) fun next_bls_committee(self: &StakingInnerV1): BlsCommittee { + let (ids, shard_assignments) = (*self.next_committee.borrow().inner()).into_keys_values(); + let members = ids.zip_map!(shard_assignments, |id, shards| { + let pk = self.pools.borrow(id).node_info().next_epoch_public_key(); + bls_aggregate::new_bls_committee_member(*pk, shards.length() as u16, id) + }); + bls_aggregate::new_bls_committee(self.epoch + 1, members) +} + +/// Check if a node with the given `ID` exists in the staking pools. +public(package) fun has_pool(self: &StakingInnerV1, node_id: ID): bool { + self.pools.contains(node_id) +} + +// === Internal === + +fun new_walrus_context(self: &StakingInnerV1): WalrusContext { + walrus_context::new( + self.epoch, + self.next_committee.is_some(), + self.committee.to_inner(), + ) +} + +fun is_quorum(weight: u16, n_shards: u16): bool { + 3 * (weight as u64) >= 2 * (n_shards as u64) + 1 +} + +// ==== Tests === +#[test_only] +use walrus::test_utils::assert_eq; + +#[test_only] +public(package) fun is_epoch_sync_done(self: &StakingInnerV1): bool { + match (self.epoch_state) { + EpochState::EpochChangeDone(_) => true, + _ => false, + } +} + +#[test_only] +public(package) fun active_set(self: &mut StakingInnerV1): &mut ActiveSet { + &mut self.active_set +} + +#[test_only] +#[syntax(index)] +/// Get the pool with the given `ID`. +public(package) fun borrow(self: &StakingInnerV1, node_id: ID): &StakingPool { + &self.pools[node_id] +} + +#[test_only] +#[syntax(index)] +/// Get mutable reference to the pool with the given `ID`. 
+public(package) fun borrow_mut(self: &mut StakingInnerV1, node_id: ID): &mut StakingPool { + &mut self.pools[node_id] +} + +#[test_only] +public(package) fun pub_dhondt(n_shards: u16, stake: vector): vector { + let n_nodes = stake.length(); + let priorities = vector::tabulate!(n_nodes, |i| n_nodes - i); + dhondt(priorities, n_shards, stake) +} + +#[test] +fun test_quorum_above() { + let mut queue = priority_queue::new(vector[]); + let votes = vector[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + let weights = vector[5, 5, 4, 6, 3, 7, 2, 8, 1, 9]; + votes.zip_do!(weights, |vote, weight| queue.insert(vote, weight)); + assert_eq!(quorum_above(&mut queue, 50), 4); +} + +#[test] +fun test_quorum_above_all_above() { + let mut queue = priority_queue::new(vector[]); + let votes = vector[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + let weights = vector[17, 1, 1, 1, 3, 7, 2, 8, 1, 9]; + votes.zip_do!(weights, |vote, weight| queue.insert(vote, weight)); + assert_eq!(quorum_above(&mut queue, 50), 1); +} + +#[test] +fun test_quorum_above_one_value() { + let mut queue = priority_queue::new(vector[]); + queue.insert(1, 50); + assert_eq!(quorum_above(&mut queue, 50), 1); +} + +#[test] +fun test_quorum_below() { + let mut queue = priority_queue::new(vector[]); + let votes = vector[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + let weights = vector[5, 5, 4, 6, 3, 7, 4, 6, 1, 9]; + votes.zip_do!(weights, |vote, weight| queue.insert(vote, weight)); + assert_eq!(quorum_below(&mut queue, 50), 7); +} + +#[test] +fun test_quorum_below_all_below() { + let mut queue = priority_queue::new(vector[]); + let votes = vector[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + let weights = vector[5, 5, 4, 6, 3, 7, 1, 1, 1, 17]; + votes.zip_do!(weights, |vote, weight| queue.insert(vote, weight)); + assert_eq!(quorum_below(&mut queue, 50), 10); +} + +#[test] +fun test_quorum_below_one_value() { + let mut queue = priority_queue::new(vector[]); + queue.insert(1, 50); + assert_eq!(quorum_below(&mut queue, 50), 1); +} diff --git 
a/contracts/walrus/sources/staking/staking_pool.move b/contracts/walrus/sources/staking/staking_pool.move new file mode 100644 index 00000000..fe0c3018 --- /dev/null +++ b/contracts/walrus/sources/staking/staking_pool.move @@ -0,0 +1,480 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +/// Module: staking_pool +module walrus::staking_pool; + +use std::string::String; +use sui::{balance::{Self, Balance}, table::{Self, Table}}; +use wal::wal::WAL; +use walrus::{ + messages, + pending_values::{Self, PendingValues}, + pool_exchange_rate::{Self, PoolExchangeRate}, + staked_wal::{Self, StakedWal}, + storage_node::{Self, StorageNodeInfo}, + walrus_context::WalrusContext +}; + +// Keep errors in `walrus-sui/types/move_errors.rs` up to date with changes here. +const EPoolAlreadyUpdated: u64 = 0; +const ECalculationError: u64 = 1; +const EIncorrectEpochAdvance: u64 = 2; +const EPoolNotEmpty: u64 = 3; +const EInvalidProofOfPossession: u64 = 4; + +/// Represents the state of the staking pool. +public enum PoolState has store, copy, drop { + // The pool is new and awaits the stake to be added. + New, + // The pool is active and can accept stakes. + Active, + // The pool awaits the stake to be withdrawn. The value inside the + // variant is the epoch in which the pool will be withdrawn. + Withdrawing(u32), + // The pool is empty and can be destroyed. + Withdrawn, +} + +/// The parameters for the staking pool. Stored for the next epoch. +public struct VotingParams has store, copy, drop { + /// Voting: storage price for the next epoch. + storage_price: u64, + /// Voting: write price for the next epoch. + write_price: u64, + /// Voting: node capacity for the next epoch. + node_capacity: u64, +} + +/// Represents a single staking pool for a token. Even though it is never +/// transferred or shared, the `key` ability is added for discoverability +/// in the `ObjectTable`. 
+public struct StakingPool has key, store { + id: UID, + /// The current state of the pool. + state: PoolState, + /// Current epoch's pool parameters. + voting_params: VotingParams, + /// The storage node info for the pool. + node_info: StorageNodeInfo, + /// The epoch when the pool is / will be activated. + /// Serves information purposes only, the checks are performed in the `state` + /// property. + activation_epoch: u32, + /// Epoch when the pool was last updated. + latest_epoch: u32, + /// Currently staked WAL in the pool + rewards pool. + wal_balance: u64, + /// Balance of the pool token in the pool in the current epoch. + pool_token_balance: u64, + /// The amount of the pool token that will be withdrawn in E+1 or E+2. + /// We use this amount to calculate the WAL withdrawal in the + /// `process_pending_stake`. + pending_pool_token_withdraw: PendingValues, + /// The commission rate for the pool. + commission_rate: u64, + /// Historical exchange rates for the pool. The key is the epoch when the + /// exchange rate was set, and the value is the exchange rate (the ratio of + /// the amount of WAL tokens for the pool token). + exchange_rates: Table, + /// The amount of stake that will be added to the `wal_balance`. Can hold + /// up to two keys: E+1 and E+2, due to the differences in the activation + /// epoch. + /// + /// ``` + /// E+1 -> Balance + /// E+2 -> Balance + /// ``` + /// + /// Single key is cleared in the `advance_epoch` function, leaving only the + /// next epoch's stake. + pending_stake: PendingValues, + /// The rewards that the pool has received from being in the committee. + rewards_pool: Balance, +} + +/// Create a new `StakingPool` object. +/// If committee is selected, the pool will be activated in the next epoch. +/// Otherwise, it will be activated in the current epoch. 
+public(package) fun new( + name: String, + network_address: String, + public_key: vector, + network_public_key: vector, + proof_of_possession: vector, + commission_rate: u64, + storage_price: u64, + write_price: u64, + node_capacity: u64, + wctx: &WalrusContext, + ctx: &mut TxContext, +): StakingPool { + let id = object::new(ctx); + let node_id = id.to_inner(); + + // Verify proof of possession + assert!( + messages::new_proof_of_possession_msg( + wctx.epoch(), + ctx.sender(), + public_key, + ).verify_proof_of_possession(proof_of_possession), + EInvalidProofOfPossession, + ); + + let (activation_epoch, state) = if (wctx.committee_selected()) { + (wctx.epoch() + 1, PoolState::New) + } else { + (wctx.epoch(), PoolState::Active) + }; + + let mut exchange_rates = table::new(ctx); + exchange_rates.add(activation_epoch, pool_exchange_rate::empty()); + + StakingPool { + id, + state, + exchange_rates, + voting_params: VotingParams { + storage_price, + write_price, + node_capacity, + }, + node_info: storage_node::new( + name, + node_id, + network_address, + public_key, + network_public_key, + ), + commission_rate, + activation_epoch, + latest_epoch: wctx.epoch(), + pending_stake: pending_values::empty(), + pending_pool_token_withdraw: pending_values::empty(), + wal_balance: 0, + pool_token_balance: 0, + rewards_pool: balance::zero(), + } +} + +/// Set the state of the pool to `Withdrawing`. +public(package) fun set_withdrawing(pool: &mut StakingPool, wctx: &WalrusContext) { + assert!(!pool.is_withdrawing()); + pool.state = PoolState::Withdrawing(wctx.epoch() + 1); +} + +/// Stake the given amount of WAL in the pool. 
+public(package) fun stake( + pool: &mut StakingPool, + to_stake: Balance, + wctx: &WalrusContext, + ctx: &mut TxContext, +): StakedWal { + assert!(pool.is_active() || pool.is_new()); + assert!(to_stake.value() > 0); + + let current_epoch = wctx.epoch(); + let activation_epoch = if (wctx.committee_selected()) { + current_epoch + 2 + } else { + current_epoch + 1 + }; + + let staked_amount = to_stake.value(); + let staked_wal = staked_wal::mint( + pool.id.to_inner(), + to_stake, + activation_epoch, + ctx, + ); + + // Add the stake to the pending stake either for E+1 or E+2. + pool.pending_stake.insert_or_add(activation_epoch, staked_amount); + staked_wal +} + +/// Request withdrawal of the given amount from the staked WAL. +/// Marks the `StakedWal` as withdrawing and updates the activation epoch. +public(package) fun request_withdraw_stake( + pool: &mut StakingPool, + staked_wal: &mut StakedWal, + wctx: &WalrusContext, +) { + assert!(!pool.is_new()); + assert!(staked_wal.value() > 0); + assert!(staked_wal.node_id() == pool.id.to_inner()); + assert!(staked_wal.activation_epoch() <= wctx.epoch()); + + // If the node is in the committee, the stake will be withdrawn in E+2, + // otherwise in E+1. + let withdraw_epoch = if (wctx.committee_selected()) { + wctx.epoch() + 2 + } else { + wctx.epoch() + 1 + }; + + let principal_amount = staked_wal.value(); + let token_amount = pool + .exchange_rate_at_epoch(staked_wal.activation_epoch()) + .get_token_amount(principal_amount); + + pool.pending_pool_token_withdraw.insert_or_add(withdraw_epoch, token_amount); + staked_wal.set_withdrawing(withdraw_epoch, token_amount); +} + +/// Perform the withdrawal of the staked WAL, returning the amount to the caller. 
+public(package) fun withdraw_stake( + pool: &mut StakingPool, + staked_wal: StakedWal, + wctx: &WalrusContext, +): Balance { + assert!(!pool.is_new()); + assert!(staked_wal.value() > 0); + assert!(staked_wal.node_id() == pool.id.to_inner()); + assert!(staked_wal.withdraw_epoch() <= wctx.epoch()); + assert!(staked_wal.activation_epoch() <= wctx.epoch()); + assert!(staked_wal.is_withdrawing()); + + // withdraw epoch and pool token amount are stored in the `StakedWal` + let token_amount = staked_wal.pool_token_amount(); + let withdraw_epoch = staked_wal.withdraw_epoch(); + + // calculate the total amount to withdraw by converting token amount via the exchange rate + let total_amount = pool.exchange_rate_at_epoch(withdraw_epoch).get_wal_amount(token_amount); + let principal = staked_wal.into_balance(); + let rewards_amount = if (total_amount >= principal.value()) { + total_amount - principal.value() + } else 0; + + // withdraw rewards. due to rounding errors, there's a chance that the + // rewards amount is higher than the rewards pool, in this case, we + // withdraw the maximum amount possible + let rewards_amount = rewards_amount.min(pool.rewards_pool.value()); + let mut to_withdraw = pool.rewards_pool.split(rewards_amount); + to_withdraw.join(principal); + to_withdraw +} + +/// Advance epoch for the `StakingPool`. +public(package) fun advance_epoch( + pool: &mut StakingPool, + rewards: Balance, + wctx: &WalrusContext, +) { + // process the pending and withdrawal amounts + let current_epoch = wctx.epoch(); + + assert!(current_epoch > pool.latest_epoch, EPoolAlreadyUpdated); + assert!(rewards.value() == 0 || pool.wal_balance > 0, EIncorrectEpochAdvance); + + // if rewards are calculated only for full epochs, rewards addition should + // happen prior to pool token calculation. 
Otherwise we can add then to the + // final rate instead of the + let rewards_amount = rewards.value(); + pool.rewards_pool.join(rewards); + pool.wal_balance = pool.wal_balance + rewards_amount; + pool.latest_epoch = current_epoch; + pool.node_info.rotate_public_key(); + + process_pending_stake(pool, wctx) +} + +/// Process the pending stake and withdrawal requests for the pool. Called in the +/// `advance_epoch` function in case the pool is in the committee and receives the +/// rewards. And may be called in user-facing functions to update the pool state, +/// if the pool is not in the committee. +/// +/// Additions: +/// - `WAL` is added to the `wal_balance` directly. +/// - Pool Token is added to the `pool_token_balance` via the exchange rate. +/// +/// Withdrawals: +/// - `WAL` withdrawal is processed via the exchange rate and pool token. +/// - Pool Token withdrawal is processed directly. +public(package) fun process_pending_stake(pool: &mut StakingPool, wctx: &WalrusContext) { + let current_epoch = wctx.epoch(); + + // do the withdrawals reduction for both + let token_withdraw = pool.pending_pool_token_withdraw.flush(wctx.epoch()); + let exchange_rate = pool_exchange_rate::new( + pool.wal_balance, + pool.pool_token_balance, + ); + + let pending_withdrawal = exchange_rate.get_wal_amount(token_withdraw); + pool.pool_token_balance = pool.pool_token_balance - token_withdraw; + + // check that the amount is not higher than the pool balance + assert!(pool.wal_balance >= pending_withdrawal, ECalculationError); + pool.wal_balance = pool.wal_balance - pending_withdrawal; + + // recalculate the additions + pool.wal_balance = pool.wal_balance + pool.pending_stake.flush(current_epoch); + pool.pool_token_balance = exchange_rate.get_token_amount(pool.wal_balance); + pool.exchange_rates.add(current_epoch, exchange_rate); +} + +// === Pool parameters === + +/// Sets the next commission rate for the pool. 
+public(package) fun set_next_commission(pool: &mut StakingPool, commission_rate: u64) { + pool.commission_rate = commission_rate; +} + +/// Sets the next storage price for the pool. +public(package) fun set_next_storage_price(pool: &mut StakingPool, storage_price: u64) { + pool.voting_params.storage_price = storage_price; +} + +/// Sets the next write price for the pool. +public(package) fun set_next_write_price(pool: &mut StakingPool, write_price: u64) { + pool.voting_params.write_price = write_price; +} + +/// Sets the next node capacity for the pool. +public(package) fun set_next_node_capacity(pool: &mut StakingPool, node_capacity: u64) { + pool.voting_params.node_capacity = node_capacity; +} + +/// Sets the public key to be used starting from the next epoch for which the node is selected. +public(package) fun set_next_public_key( + self: &mut StakingPool, + public_key: vector, + proof_of_possession: vector, + wctx: &WalrusContext, + ctx: &TxContext, +) { + // Verify proof of possession + assert!( + messages::new_proof_of_possession_msg( + wctx.epoch(), + ctx.sender(), + public_key, + ).verify_proof_of_possession(proof_of_possession), + EInvalidProofOfPossession, + ); + self.node_info.set_next_public_key(public_key); +} + +/// Sets the name of the storage node. +public(package) fun set_name(self: &mut StakingPool, name: String) { + self.node_info.set_name(name); +} + +/// Sets the network address or host of the storage node. +public(package) fun set_network_address(self: &mut StakingPool, network_address: String) { + self.node_info.set_network_address(network_address); +} + +/// Sets the public key used for TLS communication. +public(package) fun set_network_public_key(self: &mut StakingPool, network_public_key: vector) { + self.node_info.set_network_public_key(network_public_key); +} + +/// Destroy the pool if it is empty. 
+public(package) fun destroy_empty(pool: StakingPool) { + assert!(pool.is_empty(), EPoolNotEmpty); + + let StakingPool { + id, + pending_stake, + exchange_rates, + rewards_pool, + .., + } = pool; + + id.delete(); + exchange_rates.drop(); + rewards_pool.destroy_zero(); + + let (_epochs, pending_stakes) = pending_stake.unwrap().into_keys_values(); + pending_stakes.do!(|stake| assert!(stake == 0)); +} + +/// Set the state of the pool to `Active`. +public(package) fun set_is_active(pool: &mut StakingPool) { + assert!(pool.is_new()); + pool.state = PoolState::Active; +} + +/// Returns the exchange rate for the given current or future epoch. If there +/// isn't a value for the specified epoch, it will look for the most recent +/// value down to the pool activation epoch. +public(package) fun exchange_rate_at_epoch(pool: &StakingPool, mut epoch: u32): PoolExchangeRate { + let activation_epoch = pool.activation_epoch; + while (epoch >= activation_epoch) { + if (pool.exchange_rates.contains(epoch)) { + return pool.exchange_rates[epoch] + }; + epoch = epoch - 1; + }; + + pool_exchange_rate::empty() +} + +/// Returns the expected active stake for current or future epoch `E` for the pool. +/// It processes the pending stake and withdrawal requests from the current epoch +/// to `E`. +/// +/// Should be the main function to calculate the active stake for the pool at +/// the given epoch, due to the complexity of the pending stake and withdrawal +/// requests, and lack of immediate updates. 
+public(package) fun wal_balance_at_epoch(pool: &StakingPool, epoch: u32): u64 { + let mut expected = pool.wal_balance; + let exchange_rate = pool_exchange_rate::new(pool.wal_balance, pool.pool_token_balance); + let token_withdraw = pool.pending_pool_token_withdraw.value_at(epoch); + let pending_withdrawal = exchange_rate.get_wal_amount(token_withdraw); + + expected = expected + pool.pending_stake.value_at(epoch); + expected = expected - pending_withdrawal; + expected +} + +// === Accessors === + +/// Returns the commission rate for the pool. +public(package) fun commission_rate(pool: &StakingPool): u64 { pool.commission_rate } + +/// Returns the rewards amount for the pool. +public(package) fun rewards_amount(pool: &StakingPool): u64 { pool.rewards_pool.value() } + +/// Returns the rewards for the pool. +public(package) fun wal_balance(pool: &StakingPool): u64 { pool.wal_balance } + +/// Returns the storage price for the pool. +public(package) fun storage_price(pool: &StakingPool): u64 { pool.voting_params.storage_price } + +/// Returns the write price for the pool. +public(package) fun write_price(pool: &StakingPool): u64 { pool.voting_params.write_price } + +/// Returns the node capacity for the pool. +public(package) fun node_capacity(pool: &StakingPool): u64 { pool.voting_params.node_capacity } + +/// Returns the activation epoch for the pool. +public(package) fun activation_epoch(pool: &StakingPool): u32 { pool.activation_epoch } + +/// Returns the node info for the pool. +public(package) fun node_info(pool: &StakingPool): &StorageNodeInfo { &pool.node_info } + +/// Returns `true` if the pool is empty. +public(package) fun is_new(pool: &StakingPool): bool { pool.state == PoolState::New } + +/// Returns `true` if the pool is active. +public(package) fun is_active(pool: &StakingPool): bool { pool.state == PoolState::Active } + +/// Returns `true` if the pool is withdrawing. 
+public(package) fun is_withdrawing(pool: &StakingPool): bool { + match (pool.state) { + PoolState::Withdrawing(_) => true, + _ => false, + } +} + +/// Returns `true` if the pool is empty. +public(package) fun is_empty(pool: &StakingPool): bool { + let pending_stake = pool.pending_stake.unwrap(); + let non_empty = pending_stake.keys().count!(|epoch| pending_stake[epoch] != 0); + + pool.wal_balance == 0 && non_empty == 0 && pool.pool_token_balance == 0 +} diff --git a/contracts/walrus/sources/staking/storage_node.move b/contracts/walrus/sources/staking/storage_node.move new file mode 100644 index 00000000..2d8c6c69 --- /dev/null +++ b/contracts/walrus/sources/staking/storage_node.move @@ -0,0 +1,158 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +#[allow(unused_field, unused_function, unused_variable, unused_use)] +module walrus::storage_node; + +use std::string::String; +use sui::{bls12381::{G1, g1_from_bytes}, group_ops::Element}; +use walrus::event_blob::EventBlobAttestation; + +// Error codes +// Keep errors in `walrus-sui/types/move_errors.rs` up to date with changes here. +const EInvalidNetworkPublicKey: u64 = 0; + +/// Represents a storage node in the system. +public struct StorageNodeInfo has store, copy, drop { + name: String, + node_id: ID, + network_address: String, + public_key: Element, + next_epoch_public_key: Option>, + network_public_key: vector, +} + +/// A Capability which represents a storage node and authorizes the holder to +/// perform operations on the storage node. +public struct StorageNodeCap has key, store { + id: UID, + node_id: ID, + last_epoch_sync_done: u32, + last_event_blob_attestation: Option, +} + +/// A public constructor for the StorageNodeInfo. 
+public(package) fun new( + name: String, + node_id: ID, + network_address: String, + public_key: vector, + network_public_key: vector, +): StorageNodeInfo { + assert!(network_public_key.length() == 33, EInvalidNetworkPublicKey); + StorageNodeInfo { + node_id, + name, + network_address, + public_key: g1_from_bytes(&public_key), + next_epoch_public_key: option::none(), + network_public_key, + } +} + +/// Create a new storage node capability. +public(package) fun new_cap(node_id: ID, ctx: &mut TxContext): StorageNodeCap { + StorageNodeCap { + id: object::new(ctx), + node_id, + last_epoch_sync_done: 0, + last_event_blob_attestation: option::none(), + } +} + +// === Accessors === + +/// Return the public key of the storage node. +public(package) fun public_key(self: &StorageNodeInfo): &Element { + &self.public_key +} + +/// Return the public key of the storage node for the next epoch. +public(package) fun next_epoch_public_key(self: &StorageNodeInfo): &Element { + self.next_epoch_public_key.borrow_with_default(&self.public_key) +} + +/// Return the node ID of the storage node. +public fun id(cap: &StorageNodeInfo): ID { cap.node_id } + +/// Return the pool ID of the storage node. +public fun node_id(cap: &StorageNodeCap): ID { cap.node_id } + +/// Return the last epoch in which the storage node attested that it has +/// finished syncing. +public fun last_epoch_sync_done(cap: &StorageNodeCap): u32 { + cap.last_epoch_sync_done +} + +/// Return the latest event blob attestation. +public fun last_event_blob_attestation(cap: &mut StorageNodeCap): Option { + cap.last_event_blob_attestation +} + +// === Modifiers === + +/// Set the last epoch in which the storage node attested that it has finished syncing. +public(package) fun set_last_epoch_sync_done(self: &mut StorageNodeCap, epoch: u32) { + self.last_epoch_sync_done = epoch; +} + +/// Set the latest event blob attestation.
+public(package) fun set_last_event_blob_attestation( + self: &mut StorageNodeCap, + attestation: EventBlobAttestation, +) { + self.last_event_blob_attestation = option::some(attestation); +} + +/// Sets the public key to be used starting from the next epoch for which the node is selected. +public(package) fun set_next_public_key(self: &mut StorageNodeInfo, public_key: vector) { + self.next_epoch_public_key.swap_or_fill(g1_from_bytes(&public_key)); +} + +/// Sets the name of the storage node. +public(package) fun set_name(self: &mut StorageNodeInfo, name: String) { + self.name = name; +} + +/// Sets the network address or host of the storage node. +public(package) fun set_network_address(self: &mut StorageNodeInfo, network_address: String) { + self.network_address = network_address; +} + +/// Sets the public key used for TLS communication. +public(package) fun set_network_public_key( + self: &mut StorageNodeInfo, + network_public_key: vector, +) { + self.network_public_key = network_public_key; +} + +/// Set the public key to the next epoch's public key. +public(package) fun rotate_public_key(self: &mut StorageNodeInfo) { + if (self.next_epoch_public_key.is_some()) { + self.public_key = self.next_epoch_public_key.extract() + } +} + +// === Testing === + +#[test_only] +/// Create a storage node with dummy name & address +public fun new_for_testing(public_key: vector): StorageNodeInfo { + let ctx = &mut tx_context::dummy(); + let node_id = ctx.fresh_object_address().to_id(); + StorageNodeInfo { + node_id, + name: b"node".to_string(), + network_address: b"127.0.0.1".to_string(), + public_key: g1_from_bytes(&public_key), + next_epoch_public_key: option::none(), + network_public_key: x"820e2b273530a00de66c9727c40f48be985da684286983f398ef7695b8a44677ab", + } +} + +#[test_only] +public fun destroy_cap_for_testing(cap: StorageNodeCap) { + let StorageNodeCap { id, ..
} = cap; + id.delete(); +} diff --git a/contracts/walrus/sources/staking/walrus_context.move b/contracts/walrus/sources/staking/walrus_context.move new file mode 100644 index 00000000..03935228 --- /dev/null +++ b/contracts/walrus/sources/staking/walrus_context.move @@ -0,0 +1,42 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +/// Module: `walrus_context` +/// +/// Implements the `WalrusContext` struct which is used to store the current +/// state of the system. Improves testing and readability of signatures by +/// aggregating the parameters into a single struct. Context is used almost +/// everywhere in the system, so it is important to have a single source of +/// truth for the current state. +module walrus::walrus_context; + +use sui::vec_map::VecMap; + +/// Represents the current values in the Walrus system. Helps avoid passing +/// too many parameters to functions, and allows for easier testing. +public struct WalrusContext has drop { + /// Current Walrus epoch + epoch: u32, + /// Whether the committee has been selected for the next epoch. + committee_selected: bool, + /// The current committee in the system. + committee: VecMap>, +} + +/// Create a new `WalrusContext` object. +public(package) fun new( + epoch: u32, + committee_selected: bool, + committee: VecMap>, +): WalrusContext { + WalrusContext { epoch, committee_selected, committee } +} + +/// Read the current `epoch` from the context. +public(package) fun epoch(self: &WalrusContext): u32 { self.epoch } + +/// Read the current `committee_selected` from the context. +public(package) fun committee_selected(self: &WalrusContext): bool { self.committee_selected } + +/// Read the current `committee` from the context. 
+public(package) fun committee(self: &WalrusContext): &VecMap> { &self.committee } diff --git a/contracts/walrus/sources/system.move b/contracts/walrus/sources/system.move new file mode 100644 index 00000000..480d648b --- /dev/null +++ b/contracts/walrus/sources/system.move @@ -0,0 +1,223 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +#[allow(unused_variable, unused_function, unused_field, unused_mut_parameter)] +/// Module: system +module walrus::system; + +use sui::{balance::Balance, coin::Coin, dynamic_object_field}; +use wal::wal::WAL; +use walrus::{ + blob::Blob, + bls_aggregate::BlsCommittee, + epoch_parameters::EpochParams, + storage_node::StorageNodeCap, + storage_resource::Storage, + system_state_inner::{Self, SystemStateInnerV1} +}; + +/// Flag to indicate the version of the system. +const VERSION: u64 = 0; + +/// The one and only system object. +public struct System has key { + id: UID, + version: u64, +} + +/// Creates and shares an empty system object. +/// Must only be called by the initialization function. +public(package) fun create_empty(max_epochs_ahead: u32, ctx: &mut TxContext) { + let mut system = System { id: object::new(ctx), version: VERSION }; + let system_state_inner = system_state_inner::create_empty(max_epochs_ahead, ctx); + dynamic_object_field::add(&mut system.id, VERSION, system_state_inner); + transfer::share_object(system); +} + +/// Marks blob as invalid given an invalid blob certificate. +public fun invalidate_blob_id( + system: &System, + signature: vector, + members: vector, + message: vector, +): u256 { + system.inner().invalidate_blob_id(signature, members, message) +} + +/// Certifies a blob containing Walrus events. 
+public fun certify_event_blob( + system: &mut System, + cap: &mut StorageNodeCap, + blob_id: u256, + root_hash: u256, + size: u64, + encoding_type: u8, + ending_checkpoint_sequence_num: u64, + epoch: u32, + ctx: &mut TxContext, +) { + system + .inner_mut() + .certify_event_blob( + cap, + blob_id, + root_hash, + size, + encoding_type, + ending_checkpoint_sequence_num, + epoch, + ctx, + ) +} + +/// Allows buying a storage reservation for a given period of epochs. +public fun reserve_space( + self: &mut System, + storage_amount: u64, + epochs_ahead: u32, + payment: &mut Coin, + ctx: &mut TxContext, +): Storage { + self.inner_mut().reserve_space(storage_amount, epochs_ahead, payment, ctx) +} + +/// Registers a new blob in the system. +/// `size` is the size of the unencoded blob. The reserved space in `storage` must be at +/// least the size of the encoded blob. +public fun register_blob( + self: &mut System, + storage: Storage, + blob_id: u256, + root_hash: u256, + size: u64, + encoding_type: u8, + deletable: bool, + write_payment: &mut Coin, + ctx: &mut TxContext, +): Blob { + self + .inner_mut() + .register_blob( + storage, + blob_id, + root_hash, + size, + encoding_type, + deletable, + write_payment, + ctx, + ) +} + +/// Certify that a blob will be available in the storage system until the end epoch of the +/// storage associated with it. +public fun certify_blob( + self: &System, + blob: &mut Blob, + signature: vector, + signers: vector, + message: vector, +) { + self.inner().certify_blob(blob, signature, signers, message); +} + +/// Deletes a deletable blob and returns the contained storage resource. +public fun delete_blob(self: &System, blob: Blob): Storage { + self.inner().delete_blob(blob) +} + +/// Extend the period of validity of a blob with a new storage resource. +/// The new storage resource must be the same size as the storage resource +/// used in the blob, and have a longer period of validity. 
+public fun extend_blob_with_resource(self: &System, blob: &mut Blob, extension: Storage) { + self.inner().extend_blob_with_resource(blob, extension); +} + +/// Extend the period of validity of a blob by extending its contained storage resource. +public fun extend_blob( + self: &mut System, + blob: &mut Blob, + epochs_ahead: u32, + payment: &mut Coin, +) { + self.inner_mut().extend_blob(blob, epochs_ahead, payment); +} + +// === Public Accessors === + +/// Get epoch. Uses the committee to get the epoch. +public fun epoch(self: &System): u32 { + self.inner().epoch() +} + +/// Accessor for total capacity size. +public fun total_capacity_size(self: &System): u64 { + self.inner().total_capacity_size() +} + +/// Accessor for used capacity size. +public fun used_capacity_size(self: &System): u64 { + self.inner().used_capacity_size() +} + +/// Accessor for the number of shards. +public fun n_shards(self: &System): u16 { + self.inner().n_shards() +} + +// === Restricted to Package === + +/// Accessor for the current committee. +public(package) fun committee(self: &System): &BlsCommittee { + self.inner().committee() +} + +#[test_only] +public(package) fun committee_mut(self: &mut System): &mut BlsCommittee { + self.inner_mut().committee_mut() +} + +/// Update epoch to next epoch, and update the committee, price and capacity. +/// +/// Called by the epoch change function that connects `Staking` and `System`. Returns +/// the balance of the rewards from the previous epoch. +public(package) fun advance_epoch( + self: &mut System, + new_committee: BlsCommittee, + new_epoch_params: EpochParams, +): Balance { + self.inner_mut().advance_epoch(new_committee, new_epoch_params) +} + +// === Internals === + +/// Get a mutable reference to `SystemStateInner` from the `System`. 
+fun inner_mut(system: &mut System): &mut SystemStateInnerV1 { + assert!(system.version == VERSION); + dynamic_object_field::borrow_mut(&mut system.id, VERSION) +} + +/// Get an immutable reference to `SystemStateInner` from the `System`. +public(package) fun inner(system: &System): &SystemStateInnerV1 { + assert!(system.version == VERSION); + dynamic_object_field::borrow(&system.id, VERSION) +} + +// === Testing === + +#[test_only] +public(package) fun new_for_testing(): System { + let ctx = &mut tx_context::dummy(); + let mut system = System { id: object::new(ctx), version: VERSION }; + let system_state_inner = system_state_inner::new_for_testing(); + dynamic_object_field::add(&mut system.id, VERSION, system_state_inner); + system +} + +#[test_only] +public(package) fun new_for_testing_with_multiple_members(ctx: &mut TxContext): System { + let mut system = System { id: object::new(ctx), version: VERSION }; + let system_state_inner = system_state_inner::new_for_testing_with_multiple_members(ctx); + dynamic_object_field::add(&mut system.id, VERSION, system_state_inner); + system +} diff --git a/contracts/walrus/sources/system/blob.move b/contracts/walrus/sources/system/blob.move new file mode 100644 index 00000000..e5df5846 --- /dev/null +++ b/contracts/walrus/sources/system/blob.move @@ -0,0 +1,307 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +module walrus::blob; + +use sui::{bcs, dynamic_field, hash}; +use std::string::String; +use walrus::{ + encoding, + events::{emit_blob_registered, emit_blob_certified, emit_blob_deleted}, + messages::CertifiedBlobMessage, + metadata::Metadata, + storage_resource::Storage, +}; + +// Error codes +// Keep errors in `walrus-sui/types/move_errors.rs` up to date with changes here. 
+const ENotCertified: u64 = 0; +const EBlobNotDeletable: u64 = 1; +const EResourceBounds: u64 = 2; +const EResourceSize: u64 = 3; +const EWrongEpoch: u64 = 4; +const EAlreadyCertified: u64 = 5; +const EInvalidBlobId: u64 = 6; + +// The fixed dynamic field name for metadata +const METADATA_DF: vector = b"metadata"; + +// === Object definitions === + +/// The blob structure represents a blob that has been registered with some storage, +/// and then may eventually be certified as being available in the system. +public struct Blob has key, store { + id: UID, + registered_epoch: u32, + blob_id: u256, + size: u64, + encoding_type: u8, + // Stores the epoch first certified. + certified_epoch: option::Option, + storage: Storage, + // Marks if this blob can be deleted. + deletable: bool, +} + +// === Accessors === + +public fun registered_epoch(self: &Blob): u32 { + self.registered_epoch +} + +public fun blob_id(self: &Blob): u256 { + self.blob_id +} + +public fun size(self: &Blob): u64 { + self.size +} + +public fun encoding_type(self: &Blob): u8 { + self.encoding_type +} + +public fun certified_epoch(self: &Blob): &Option { + &self.certified_epoch +} + +public fun storage(self: &Blob): &Storage { + &self.storage +} + +public fun encoded_size(self: &Blob, n_shards: u16): u64 { + encoding::encoded_blob_length( + self.size, + self.encoding_type, + n_shards, + ) +} + +public(package) fun storage_mut(self: &mut Blob): &mut Storage { + &mut self.storage +} + +public fun end_epoch(self: &Blob): u32 { + self.storage.end_epoch() +} + +/// Aborts if the blob is not certified or already expired.
+public(package) fun assert_certified_not_expired(self: &Blob, current_epoch: u32) { + // Assert this is a certified blob + assert!(self.certified_epoch.is_some(), ENotCertified); + + // Check the blob is within its availability period + assert!(current_epoch < self.storage.end_epoch(), EResourceBounds); +} + +public struct BlobIdDerivation has drop { + encoding_type: u8, + size: u64, + root_hash: u256, +} + +/// Derives the blob_id for a blob given the root_hash, encoding_type and size. +public(package) fun derive_blob_id(root_hash: u256, encoding_type: u8, size: u64): u256 { + let blob_id_struct = BlobIdDerivation { + encoding_type, + size, + root_hash, + }; + + let serialized = bcs::to_bytes(&blob_id_struct); + let encoded = hash::blake2b256(&serialized); + let mut decoder = bcs::new(encoded); + let blob_id = decoder.peel_u256(); + blob_id +} + +/// Creates a new blob in `registered_epoch`. +/// `size` is the size of the unencoded blob. The reserved space in `storage` must be at +/// least the size of the encoded blob. +public(package) fun new( + storage: Storage, + blob_id: u256, + root_hash: u256, + size: u64, + encoding_type: u8, + deletable: bool, + registered_epoch: u32, + n_shards: u16, + ctx: &mut TxContext, +): Blob { + let id = object::new(ctx); + + // Check resource bounds. + assert!(registered_epoch >= storage.start_epoch(), EResourceBounds); + assert!(registered_epoch < storage.end_epoch(), EResourceBounds); + + // check that the encoded size is less than the storage size + let encoded_size = encoding::encoded_blob_length( + size, + encoding_type, + n_shards, + ); + assert!(encoded_size <= storage.storage_size(), EResourceSize); + + // Cryptographically verify that the Blob ID authenticates + // both the size and fe_type. 
+ assert!(derive_blob_id(root_hash, encoding_type, size) == blob_id, EInvalidBlobId); + + // Emit register event + emit_blob_registered( + registered_epoch, + blob_id, + size, + encoding_type, + storage.end_epoch(), + deletable, + id.to_inner(), + ); + + Blob { + id, + registered_epoch, + blob_id, + size, + encoding_type, + certified_epoch: option::none(), + storage, + deletable, + } +} + +/// Certifies that a blob will be available in the storage system until the end epoch of the +/// storage associated with it, given a [`CertifiedBlobMessage`]. +public(package) fun certify_with_certified_msg( + blob: &mut Blob, + current_epoch: u32, + message: CertifiedBlobMessage, +) { + // Check that the blob is registered in the system + assert!(blob_id(blob) == message.certified_blob_id(), EInvalidBlobId); + + // Check that the blob is not already certified + assert!(!blob.certified_epoch.is_some(), EAlreadyCertified); + + // Check that the message is from the current epoch + assert!(message.certified_epoch() == current_epoch, EWrongEpoch); + + // Check that the storage in the blob is still valid + assert!(message.certified_epoch() < blob.storage.end_epoch(), EResourceBounds); + + // Mark the blob as certified + blob.certified_epoch.fill(message.certified_epoch()); + + blob.emit_certified(false); +} + +/// Deletes a deletable blob and returns the contained storage. +/// +/// Emits a `BlobDeleted` event for the given epoch. +/// Aborts if the Blob is not deletable or already expired. 
+public(package) fun delete(self: Blob, epoch: u32): Storage { + let Blob { + id, + storage, + deletable, + blob_id, + certified_epoch, + .., + } = self; + assert!(deletable, EBlobNotDeletable); + assert!(storage.end_epoch() > epoch, EResourceBounds); + let object_id = id.to_inner(); + id.delete(); + emit_blob_deleted(epoch, blob_id, storage.end_epoch(), object_id, certified_epoch.is_some()); + storage +} + +/// Allows calling `.share()` on a `Blob` to wrap it into a shared `SharedBlob` whose lifetime can +/// be extended by anyone. +public use fun walrus::shared_blob::new as Blob.share; + +/// Allow the owner of a blob object to destroy it. +public fun burn(blob: Blob) { + let Blob { + id, + storage, + .., + } = blob; + + id.delete(); + storage.destroy(); +} + +/// Extend the period of validity of a blob with a new storage resource. +/// The new storage resource must be the same size as the storage resource +/// used in the blob, and have a longer period of validity. +public(package) fun extend_with_resource(blob: &mut Blob, extension: Storage, current_epoch: u32) { + // We only extend certified blobs within their period of validity + // with storage that extends this period. First we check for these + // conditions. + + blob.assert_certified_not_expired(current_epoch); + + // Check that the extension is valid, and the end + // period of the extension is after the current period. + assert!(extension.end_epoch() > blob.storage.end_epoch(), EResourceBounds); + + // Note: if the amounts do not match there will be an abort here. + blob.storage.fuse_periods(extension); + + blob.emit_certified(true); +} + +/// Emits a `BlobCertified` event for the given blob. +public(package) fun emit_certified(self: &Blob, is_extension: bool) { + // Emit certified event + // + // Note: We use the original certified period also for extensions since + // for the purposes of reconfiguration this is the committee that has a + // quorum that hold the resource. 
+ emit_blob_certified( + *self.certified_epoch.borrow(), + self.blob_id, + self.storage.end_epoch(), + self.deletable, + self.id.to_inner(), + is_extension, + ); +} + +// === Metadata === + +/// Adds the metadata dynamic field to the Blob. +/// +/// Aborts if the metadata is already present. +public fun add_metadata(self: &mut Blob, metadata: Metadata) { + dynamic_field::add(&mut self.id, METADATA_DF, metadata) +} + +/// Removes the metadata dynamic field from the Blob, returning the contained `Metadata`. +/// +/// Aborts if the metadata does not exist. +public fun take_metadata(self: &mut Blob): Metadata { + dynamic_field::remove(&mut self.id, METADATA_DF) +} + +/// Returns the metadata associated with the Blob. +/// +/// Aborts if the metadata does not exist. +fun metadata(self: &mut Blob): &mut Metadata { + dynamic_field::borrow_mut(&mut self.id, METADATA_DF) +} + +/// Inserts a key-value pair into the metadata. +/// +/// If the key is already present, the value is updated. Aborts if the metadata does not exist. +public fun insert_or_update_metadata_pair(self: &mut Blob, key: String, value: String) { + self.metadata().insert_or_update(key, value) +} + +/// Removes the metadata associated with the given key. +/// +/// Aborts if the metadata does not exist. +public fun remove_metadata_pair(self: &mut Blob, key: &String): (String, String) { + self.metadata().remove(key) +} diff --git a/contracts/walrus/sources/system/bls_aggregate.move b/contracts/walrus/sources/system/bls_aggregate.move new file mode 100644 index 00000000..b378a52e --- /dev/null +++ b/contracts/walrus/sources/system/bls_aggregate.move @@ -0,0 +1,199 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +module walrus::bls_aggregate; + +use sui::bls12381::{Self, bls12381_min_pk_verify, G1}; +use sui::group_ops::{Self, Element}; +use sui::vec_map::{Self, VecMap}; +use walrus::messages::{Self, CertifiedMessage}; + +// Error codes +const ETotalMemberOrder: u64 = 0; +const ESigVerification: u64 = 1; +const ENotEnoughStake: u64 = 2; +const EIncorrectCommittee: u64 = 3; + +public struct BlsCommitteeMember has store, copy, drop { + public_key: Element, + weight: u16, + node_id: ID, +} + +/// This represents a BLS signing committee for a given epoch. +public struct BlsCommittee has store, copy, drop { + /// A vector of committee members + members: vector, + /// The total number of shards held by the committee + n_shards: u16, + /// The epoch in which the committee is active. + epoch: u32, +} + +/// Constructor for committee. +public(package) fun new_bls_committee( + epoch: u32, + members: vector, +): BlsCommittee { + // Compute the total number of shards + let mut n_shards = 0; + members.do_ref!(|member| { + let weight = member.weight; + assert!(weight > 0, EIncorrectCommittee); + n_shards = n_shards + weight; + }); + + BlsCommittee { members, n_shards, epoch } +} + +/// Constructor for committee member. +public(package) fun new_bls_committee_member( + public_key: Element, + weight: u16, + node_id: ID, +): BlsCommitteeMember { + BlsCommitteeMember { + public_key, + weight, + node_id, + } +} + +// === Accessors for BlsCommitteeMember === + +/// Get the node id of the committee member. +public(package) fun node_id(self: &BlsCommitteeMember): sui::object::ID { + self.node_id +} + +// === Accessors for BlsCommittee === + +/// Get the epoch of the committee. +public(package) fun epoch(self: &BlsCommittee): u32 { + self.epoch +} + +/// Returns the number of shards held by the committee. 
+public(package) fun n_shards(self: &BlsCommittee): u16 { + self.n_shards +} + +/// Returns the member at given index +public(package) fun get_idx(self: &BlsCommittee, idx: u64): &BlsCommitteeMember { + self.members.borrow(idx) +} + +/// Checks if the committee contains a given node. +public(package) fun contains(self: &BlsCommittee, node_id: &ID): bool { + self.find_index(node_id).is_some() +} + +/// Returns the member weight if it is part of the committee or 0 otherwise +public(package) fun get_member_weight(self: &BlsCommittee, node_id: &ID): u16 { + self.find_index(node_id).and!(|idx| { + let member = &self.members[idx]; + option::some(member.weight) + }).get_with_default(0) +} + +/// Finds the index of the member by node_id +public(package) fun find_index(self: &BlsCommittee, node_id: &ID): std::option::Option { + self.members.find_index!(|member| &member.node_id == node_id) +} + +/// Returns the members of the committee with their weights. +public(package) fun to_vec_map(self: &BlsCommittee): VecMap { + let mut result = vec_map::empty(); + self.members.do_ref!(|member| { + result.insert(member.node_id, member.weight) + }); + result +} + +/// Verifies that a message is signed by a quorum of the members of a committee. +/// +/// The signers are listed as indices into the `members` vector of the committee +/// in increasing +/// order and with no repetitions. The total weight of the signers (i.e. total +/// number of shards) +/// is returned, but if a quorum is not reached the function aborts with an +/// error. +public(package) fun verify_quorum_in_epoch( + self: &BlsCommittee, + signature: vector, + signers: vector, + message: vector, +): CertifiedMessage { + let stake_support = self.verify_certificate( + &signature, + &signers, + &message, + ); + + messages::new_certified_message(message, self.epoch, stake_support) +} + +/// Returns true if the weight is more than the aggregate weight of quorum members of a committee. 
+public(package) fun verify_quorum(self: &BlsCommittee, weight: u16): bool { + 3 * (weight as u64) >= 2 * (self.n_shards as u64) + 1 +} + +/// Verify an aggregate BLS signature is a certificate in the epoch, and return +/// the type of +/// certificate and the bytes certified. The `signers` vector is an increasing +/// list of indexes +/// into the `members` vector of the committee. If there is a certificate, the +/// function +/// returns the total stake. Otherwise, it aborts. +public(package) fun verify_certificate( + self: &BlsCommittee, + signature: &vector, + signers: &vector, + message: &vector, +): u16 { + // Use the signers flags to construct the key and the weights. + + // Lower bound for the next `member_index` to ensure they are monotonically + // increasing + let mut min_next_member_index = 0; + let mut aggregate_key = bls12381::g1_identity(); + let mut aggregate_weight = 0; + + signers.do_ref!(|member_index| { + let member_index = *member_index as u64; + assert!(member_index >= min_next_member_index, ETotalMemberOrder); + min_next_member_index = member_index + 1; + + // Bounds check happens here + let member = &self.members[member_index]; + let key = &member.public_key; + let weight = member.weight; + + aggregate_key = bls12381::g1_add(&aggregate_key, key); + aggregate_weight = aggregate_weight + weight; + }); + + // The expression below is the solution to the inequality: + // n_shards = 3 f + 1 + // stake >= 2f + 1 + assert!(verify_quorum(self, aggregate_weight), ENotEnoughStake); + + // Verify the signature + let pub_key_bytes = group_ops::bytes(&aggregate_key); + assert!( + bls12381_min_pk_verify( + signature, + pub_key_bytes, + message, + ), + ESigVerification, + ); + + (aggregate_weight as u16) +} + +#[test_only] +/// Increments the committee epoch by one. 
+public fun increment_epoch_for_testing(self: &mut BlsCommittee) { + self.epoch = self.epoch + 1; +} diff --git a/contracts/walrus/sources/system/encoding.move b/contracts/walrus/sources/system/encoding.move new file mode 100644 index 00000000..4242c2ba --- /dev/null +++ b/contracts/walrus/sources/system/encoding.move @@ -0,0 +1,20 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +module walrus::encoding; + +use walrus::redstuff; + +// Supported Encoding Types +const RED_STUFF_ENCODING: u8 = 0; + +// Errors +const EInvalidEncoding: u64 = 0; + +/// Computes the encoded length of a blob given its unencoded length, encoding type +/// and number of shards `n_shards`. +public fun encoded_blob_length(unencoded_length: u64, encoding_type: u8, n_shards: u16): u64 { + // Currently only supports a single encoding type + assert!(encoding_type == RED_STUFF_ENCODING, EInvalidEncoding); + redstuff::encoded_blob_length(unencoded_length, n_shards) +} diff --git a/contracts/walrus/sources/system/epoch_parameters.move b/contracts/walrus/sources/system/epoch_parameters.move new file mode 100644 index 00000000..414b0d44 --- /dev/null +++ b/contracts/walrus/sources/system/epoch_parameters.move @@ -0,0 +1,55 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +module walrus::epoch_parameters; + +/// The epoch parameters for the system. +public struct EpochParams has store, copy, drop { + /// The storage capacity of the system. + total_capacity_size: u64, + /// The price per unit size of storage. + storage_price_per_unit_size: u64, + /// The write price per unit size. 
+ write_price_per_unit_size: u64, +} + +// === Constructor === + +public(package) fun new( + total_capacity_size: u64, + storage_price_per_unit_size: u64, + write_price_per_unit_size: u64, +): EpochParams { + EpochParams { + total_capacity_size, + storage_price_per_unit_size, + write_price_per_unit_size, + } +} + +// === Accessors === + +/// The storage capacity of the system. +public(package) fun capacity(self: &EpochParams): u64 { + self.total_capacity_size +} + +/// The price per unit size of storage. +public(package) fun storage_price(self: &EpochParams): u64 { + self.storage_price_per_unit_size +} + +/// The write price per unit size. +public(package) fun write_price(self: &EpochParams): u64 { + self.write_price_per_unit_size +} + +// === Test only === + +public fun epoch_params_for_testing(): EpochParams { + EpochParams { + total_capacity_size: 1_000_000_000, + storage_price_per_unit_size: 5, + write_price_per_unit_size: 1, + } +} diff --git a/contracts/walrus/sources/system/event_blob.move b/contracts/walrus/sources/system/event_blob.move new file mode 100644 index 00000000..e854166b --- /dev/null +++ b/contracts/walrus/sources/system/event_blob.move @@ -0,0 +1,159 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +/// Module to certify event blobs. +module walrus::event_blob; + +use sui::vec_map::VecMap; + +// === Definitions related to event blob certification === + +/// Event blob index which was attested by a storage node. +public struct EventBlobAttestation has store, copy, drop { + checkpoint_sequence_num: u64, + epoch: u32, +} + +/// State of a certified event blob. +public struct EventBlob has copy, store, drop { + /// Blob id of the certified event blob. + blob_id: u256, + /// Ending sui checkpoint of the certified event blob. + ending_checkpoint_sequence_number: u64, +} + +/// State of event blob stream. 
#[allow(unused_field)]
public struct EventBlobCertificationState has key, store {
    id: UID,
    /// Latest certified event blob.
    latest_certified_blob: Option<EventBlob>,
    /// Aggregate weight attested so far for each event blob currently being certified.
    aggregate_weight_per_blob: VecMap<u256, u16>,
}

// === Accessors related to event blob attestation ===

/// Creates a new attestation for the given checkpoint sequence number and epoch.
public(package) fun new_attestation(
    checkpoint_sequence_num: u64,
    epoch: u32,
): EventBlobAttestation {
    EventBlobAttestation {
        checkpoint_sequence_num,
        epoch,
    }
}

/// The checkpoint sequence number of the last attested event blob.
public(package) fun last_attested_event_blob_checkpoint_seq_num(self: &EventBlobAttestation): u64 {
    self.checkpoint_sequence_num
}

/// The epoch in which the last event blob was attested.
public(package) fun last_attested_event_blob_epoch(self: &EventBlobAttestation): u32 { self.epoch }

// === Accessors for EventBlob ===

/// Creates a new `EventBlob` for the given blob id and ending checkpoint.
public(package) fun new_event_blob(
    ending_checkpoint_sequence_number: u64,
    blob_id: u256,
): EventBlob {
    EventBlob {
        blob_id,
        ending_checkpoint_sequence_number,
    }
}

/// Returns the blob id of the event blob.
public(package) fun blob_id(self: &EventBlob): u256 {
    self.blob_id
}

/// Returns the ending checkpoint sequence number of the event blob.
public(package) fun ending_checkpoint_sequence_number(self: &EventBlob): u64 {
    self.ending_checkpoint_sequence_number
}

// === Accessors for EventBlobCertificationState ===

/// Creates a blob state with no signers and no last checkpoint sequence number.
public(package) fun create_with_empty_state(ctx: &mut TxContext): EventBlobCertificationState {
    let id = object::new(ctx);
    EventBlobCertificationState {
        id,
        latest_certified_blob: option::none(),
        aggregate_weight_per_blob: sui::vec_map::empty(),
    }
}

/// Returns the blob id of the latest certified event blob.
public(package) fun get_latest_certified_blob_id(
    self: &EventBlobCertificationState,
): Option<u256> {
    self.latest_certified_blob.map!(|state| state.blob_id())
}

/// Returns the checkpoint sequence number of the latest certified event
/// blob.
public(package) fun get_latest_certified_checkpoint_sequence_number(
    self: &EventBlobCertificationState,
): Option<u64> {
    self.latest_certified_blob.map!(|state| state.ending_checkpoint_sequence_number())
}

/// Returns true if a blob is already certified or false otherwise.
public(package) fun is_blob_already_certified(
    self: &EventBlobCertificationState,
    ending_checkpoint_sequence_num: u64,
): bool {
    self
        .get_latest_certified_checkpoint_sequence_number()
        .map!(|latest_certified_sequence_num| {
            latest_certified_sequence_num >= ending_checkpoint_sequence_num
        })
        .get_with_default(false)
}

/// Updates the latest certified event blob.
public(package) fun update_latest_certified_event_blob(
    self: &mut EventBlobCertificationState,
    checkpoint_sequence_number: u64,
    blob_id: u256,
) {
    self.get_latest_certified_checkpoint_sequence_number().do!(|latest_certified_sequence_num| {
        // Certified checkpoints must be strictly increasing.
        assert!(checkpoint_sequence_number > latest_certified_sequence_num);
    });
    self.latest_certified_blob =
        option::some(
            new_event_blob(checkpoint_sequence_number, blob_id),
        );
}

/// Update the aggregate weight of an event blob; returns the new aggregate weight.
public(package) fun update_aggregate_weight(
    self: &mut EventBlobCertificationState,
    blob_id: u256,
    weight: u16,
): u16 {
    let agg_weight = self.aggregate_weight_per_blob.get_mut(&blob_id);
    *agg_weight = *agg_weight + weight;
    *agg_weight
}

/// Start tracking which nodes are signing the event blob with given id for
/// event blob certification.
public(package) fun start_tracking_blob(self: &mut EventBlobCertificationState, blob_id: u256) {
    if (!self.aggregate_weight_per_blob.contains(&blob_id)) {
        self.aggregate_weight_per_blob.insert(blob_id, 0);
    };
}

/// Stop tracking nodes for the given blob id.
public(package) fun stop_tracking_blob(self: &mut EventBlobCertificationState, blob_id: u256) {
    if (self.aggregate_weight_per_blob.contains(&blob_id)) {
        self.aggregate_weight_per_blob.remove(&blob_id);
    };
}

/// Reset blob certification state upon epoch change.
public(package) fun reset(self: &mut EventBlobCertificationState) {
    self.aggregate_weight_per_blob = sui::vec_map::empty();
}
diff --git a/contracts/walrus/sources/system/events.move b/contracts/walrus/sources/system/events.move
new file mode 100644
index 00000000..7acccdae
--- /dev/null
+++ b/contracts/walrus/sources/system/events.move
@@ -0,0 +1,146 @@
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

/// Module to emit events. Used to allow filtering all events in the
/// rust client (as work-around for the lack of composable event filters).
module walrus::events;

use sui::event;

// === Event definitions ===

/// Signals that a blob with meta-data has been registered.
public struct BlobRegistered has copy, drop {
    epoch: u32,
    blob_id: u256,
    size: u64,
    encoding_type: u8,
    end_epoch: u32,
    deletable: bool,
    // The object id of the related `Blob` object
    object_id: ID,
}

/// Signals that a blob is certified.
public struct BlobCertified has copy, drop {
    epoch: u32,
    blob_id: u256,
    end_epoch: u32,
    deletable: bool,
    // The object id of the related `Blob` object
    object_id: ID,
    // Marks if this is an extension for explorers, etc.
    is_extension: bool,
}

/// Signals that a blob has been deleted.
public struct BlobDeleted has copy, drop {
    epoch: u32,
    blob_id: u256,
    end_epoch: u32,
    // The object ID of the related `Blob` object.
    object_id: ID,
    // If the blob object was previously certified.
    was_certified: bool,
}

/// Signals that a BlobID is invalid.
public struct InvalidBlobID has copy, drop {
    epoch: u32, // The epoch in which the blob ID is first registered as invalid
    blob_id: u256,
}

/// Signals that epoch `epoch` has started and the epoch change is in progress.
public struct EpochChangeStart has copy, drop {
    epoch: u32,
}

/// Signals that a set of storage nodes holding at least 2f+1 shards have finished the epoch
/// change, i.e., received all of their assigned shards.
public struct EpochChangeDone has copy, drop {
    epoch: u32,
}

/// Signals that a node has received the specified shards for the new epoch.
public struct ShardsReceived has copy, drop {
    epoch: u32,
    shards: vector<u16>,
}

/// Signals that the committee and the system parameters for `next_epoch` have been selected.
public struct EpochParametersSelected has copy, drop {
    next_epoch: u32,
}

/// Signals that the given shards can be recovered using the shard recovery endpoint.
public struct ShardRecoveryStart has copy, drop {
    epoch: u32,
    shards: vector<u16>,
}

// === Functions to emit the events from other modules ===

public(package) fun emit_blob_registered(
    epoch: u32,
    blob_id: u256,
    size: u64,
    encoding_type: u8,
    end_epoch: u32,
    deletable: bool,
    object_id: ID,
) {
    event::emit(BlobRegistered {
        epoch,
        blob_id,
        size,
        encoding_type,
        end_epoch,
        deletable,
        object_id,
    });
}

public(package) fun emit_blob_certified(
    epoch: u32,
    blob_id: u256,
    end_epoch: u32,
    deletable: bool,
    object_id: ID,
    is_extension: bool,
) {
    event::emit(BlobCertified { epoch, blob_id, end_epoch, deletable, object_id, is_extension });
}

public(package) fun emit_invalid_blob_id(epoch: u32, blob_id: u256) {
    event::emit(InvalidBlobID { epoch, blob_id });
}

public(package) fun emit_blob_deleted(
    epoch: u32,
    blob_id: u256,
    end_epoch: u32,
    object_id: ID,
    was_certified: bool,
) {
    event::emit(BlobDeleted { epoch, blob_id, end_epoch, object_id, was_certified });
}

public(package) fun emit_epoch_change_start(epoch: u32) {
    event::emit(EpochChangeStart { epoch })
}

public(package) fun emit_epoch_change_done(epoch: u32) {
    event::emit(EpochChangeDone { epoch })
}

public(package) fun emit_shards_received(epoch: u32, shards: vector<u16>) {
    event::emit(ShardsReceived { epoch, shards })
}

public(package) fun emit_epoch_parameters_selected(next_epoch: u32) {
    event::emit(EpochParametersSelected { next_epoch })
}

public(package) fun emit_shard_recovery_start(epoch: u32, shards: vector<u16>) {
    event::emit(ShardRecoveryStart { epoch, shards })
}
diff --git a/contracts/walrus/sources/system/messages.move b/contracts/walrus/sources/system/messages.move
new file mode 100644
index 00000000..bece4669
--- /dev/null
+++ b/contracts/walrus/sources/system/messages.move
@@ -0,0 +1,273 @@
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

module walrus::messages;

use sui::{bcs, bls12381::bls12381_min_pk_verify};

const APP_ID: u8 = 3;
const INTENT_VERSION: u8 = 0;

const BLS_KEY_LEN: u64 = 48;

// Message Types
const PROOF_OF_POSSESSION_MSG_TYPE: u8 = 0;
const BLOB_CERT_MSG_TYPE: u8 = 1;
const INVALID_BLOB_ID_MSG_TYPE: u8 = 2;

// Errors
const EIncorrectAppId: u64 = 0;
const EIncorrectEpoch: u64 = 1;
const EInvalidMsgType: u64 = 2;
const EIncorrectIntentVersion: u64 = 3;

#[error]
const EInvalidKeyLength: vector<u8> = b"The length of the provided bls key is incorrect.";

/// Message signed by a BLS key in the proof of possession.
public struct ProofOfPossessionMessage has drop {
    intent_type: u8,
    intent_version: u8,
    intent_app: u8,
    epoch: u32,
    sui_address: address,
    bls_key: vector<u8>,
}

/// Creates a new ProofOfPossessionMessage given the expected epoch, sui address and BLS key.
public(package) fun new_proof_of_possession_msg(
    epoch: u32,
    sui_address: address,
    bls_key: vector<u8>,
): ProofOfPossessionMessage {
    assert!(bls_key.length() == BLS_KEY_LEN, EInvalidKeyLength);
    ProofOfPossessionMessage {
        intent_type: PROOF_OF_POSSESSION_MSG_TYPE,
        intent_version: INTENT_VERSION,
        intent_app: APP_ID,
        epoch,
        sui_address,
        bls_key,
    }
}

/// BCS encodes a ProofOfPossessionMessage, considering the BLS key as a fixed-length byte
/// array with 48 bytes.
public(package) fun to_bcs(self: &ProofOfPossessionMessage): vector<u8> {
    let mut bcs = vector[];
    bcs.append(bcs::to_bytes(&self.intent_type));
    bcs.append(bcs::to_bytes(&self.intent_version));
    bcs.append(bcs::to_bytes(&self.intent_app));
    bcs.append(bcs::to_bytes(&self.epoch));
    bcs.append(bcs::to_bytes(&self.sui_address));
    // Encode the key byte-by-byte so it is serialized as a fixed-length array
    // (no ULEB length prefix, unlike `bcs::to_bytes(&self.bls_key)`).
    self.bls_key.do_ref!(|key_byte| bcs.append(bcs::to_bytes(key_byte)));
    bcs
}

/// Verify the provided proof of possession using the contained public key and the provided
/// signature.
public(package) fun verify_proof_of_possession(
    self: &ProofOfPossessionMessage,
    pop_signature: vector<u8>,
): bool {
    let message_bytes = self.to_bcs();
    bls12381_min_pk_verify(
        &pop_signature,
        &self.bls_key,
        &message_bytes,
    )
}

/// A message certified by nodes holding `stake_support` shards.
public struct CertifiedMessage has drop {
    intent_type: u8,
    intent_version: u8,
    cert_epoch: u32,
    stake_support: u16,
    message: vector<u8>,
}

/// Message type for certifying a blob.
///
/// Constructed from a `CertifiedMessage`, states that `blob_id` has been certified in `epoch`
/// by a quorum.
public struct CertifiedBlobMessage has drop {
    epoch: u32,
    blob_id: u256,
}

/// Message type for Invalid Blob Certificates.
///
/// Constructed from a `CertifiedMessage`, states that `blob_id` has been marked as invalid
/// in `epoch` by a quorum.
public struct CertifiedInvalidBlobId has drop {
    epoch: u32,
    blob_id: u256,
}

/// Creates a `CertifiedMessage` with support `stake_support` by parsing `message_bytes` and
/// verifying the intent and the message epoch.
public(package) fun new_certified_message(
    message_bytes: vector<u8>,
    committee_epoch: u32,
    stake_support: u16,
): CertifiedMessage {
    // Here we BCS decode the header of the message to check intents, epochs, etc.

    let mut bcs_message = bcs::new(message_bytes);
    let intent_type = bcs_message.peel_u8();
    let intent_version = bcs_message.peel_u8();
    assert!(intent_version == INTENT_VERSION, EIncorrectIntentVersion);

    let intent_app = bcs_message.peel_u8();
    assert!(intent_app == APP_ID, EIncorrectAppId);

    let cert_epoch = bcs_message.peel_u32();
    assert!(cert_epoch == committee_epoch, EIncorrectEpoch);

    let message = bcs_message.into_remainder_bytes();

    CertifiedMessage { intent_type, intent_version, cert_epoch, stake_support, message }
}

/// Constructs the certified blob message, note that constructing
/// implies a certified message, that is already checked.
public(package) fun certify_blob_message(message: CertifiedMessage): CertifiedBlobMessage {
    // Assert type is correct.
    assert!(message.intent_type() == BLOB_CERT_MSG_TYPE, EInvalidMsgType);

    // The certified blob message contains a blob_id : u256
    let epoch = message.cert_epoch();
    let message_body = message.into_message();

    let mut bcs_body = bcs::new(message_body);
    let blob_id = bcs_body.peel_u256();

    // On purpose we do not check that nothing is left in the message
    // to allow in the future for extensibility.

    CertifiedBlobMessage { epoch, blob_id }
}

/// Constructs the certified blob message, note this is only
/// used for event blobs.
public(package) fun certified_event_blob_message(epoch: u32, blob_id: u256): CertifiedBlobMessage {
    CertifiedBlobMessage { epoch, blob_id }
}

/// Construct the certified invalid Blob ID message, note that constructing
/// implies a certified message, that is already checked.
public(package) fun invalid_blob_id_message(message: CertifiedMessage): CertifiedInvalidBlobId {
    // Assert type is correct.
    assert!(message.intent_type() == INVALID_BLOB_ID_MSG_TYPE, EInvalidMsgType);

    // The InvalidBlobID message has no payload besides the blob_id.
    // The certified blob message contains a blob_id : u256
    let epoch = message.cert_epoch();
    let message_body = message.into_message();

    let mut bcs_body = bcs::new(message_body);
    let blob_id = bcs_body.peel_u256();

    // This output is provided as a service in case anything else needs to rely on
    // certified invalid blob ID information in the future. But our base design only
    // uses the event emitted here.
    CertifiedInvalidBlobId { epoch, blob_id }
}

// === Accessors for CertifiedMessage ===

public(package) fun intent_type(self: &CertifiedMessage): u8 {
    self.intent_type
}

public(package) fun intent_version(self: &CertifiedMessage): u8 {
    self.intent_version
}

public(package) fun cert_epoch(self: &CertifiedMessage): u32 {
    self.cert_epoch
}

public(package) fun stake_support(self: &CertifiedMessage): u16 {
    self.stake_support
}

public(package) fun message(self: &CertifiedMessage): &vector<u8> {
    &self.message
}

// Deconstruct into the vector of message bytes
public(package) fun into_message(self: CertifiedMessage): vector<u8> {
    self.message
}

// === Accessors for CertifiedBlobMessage ===

public(package) fun certified_epoch(self: &CertifiedBlobMessage): u32 {
    self.epoch
}

public(package) fun certified_blob_id(self: &CertifiedBlobMessage): u256 {
    self.blob_id
}

// === Accessors for CertifiedInvalidBlobId ===

public(package) fun certified_invalid_epoch(self: &CertifiedInvalidBlobId): u32 {
    self.epoch
}

public(package) fun invalid_blob_id(self: &CertifiedInvalidBlobId): u256 {
    self.blob_id
}

// === Test only functions ===

#[test_only]
public fun certified_message_for_testing(
    intent_type: u8,
    intent_version: u8,
    cert_epoch: u32,
    stake_support: u16,
    message: vector<u8>,
): CertifiedMessage {
    CertifiedMessage { intent_type, intent_version, cert_epoch, stake_support, message }
}

#[test_only]
public fun certified_blob_message_for_testing(epoch: u32, blob_id: u256): CertifiedBlobMessage {
    CertifiedBlobMessage { epoch, blob_id }
}

#[test_only]
public fun certified_message_bytes(epoch: u32, blob_id: u256): vector<u8> {
    let mut message = vector[];
    message.push_back(BLOB_CERT_MSG_TYPE);
    message.push_back(INTENT_VERSION);
    message.push_back(APP_ID);
    message.append(bcs::to_bytes(&epoch));
    message.append(bcs::to_bytes(&blob_id));
    message
}

#[test_only]
public fun invalid_message_bytes(epoch: u32, blob_id: u256): vector<u8> {
    let mut message = vector[];
    message.push_back(INVALID_BLOB_ID_MSG_TYPE);
    message.push_back(INTENT_VERSION);
    message.push_back(APP_ID);
    message.append(bcs::to_bytes(&epoch));
    message.append(bcs::to_bytes(&blob_id));
    message
}

#[test]
fun test_message_creation() {
    let epoch = 42;
    let blob_id = 0xdeadbeefdeadbeefdeadbeefdeadbeef;
    let msg = certified_message_bytes(epoch, blob_id);
    let cert_msg = new_certified_message(msg, epoch, 1).certify_blob_message();
    assert!(cert_msg.blob_id == blob_id);
    assert!(cert_msg.epoch == epoch);
}
diff --git a/contracts/walrus/sources/system/metadata.move b/contracts/walrus/sources/system/metadata.move
new file mode 100644
index 00000000..2ef61e70
--- /dev/null
+++ b/contracts/walrus/sources/system/metadata.move
@@ -0,0 +1,37 @@
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

/// Contains the metadata for Blobs on Walrus.
module walrus::metadata;

use std::string::String;
use sui::vec_map::{Self, VecMap};

/// The metadata struct for Blob objects.
public struct Metadata has store, drop {
    metadata: VecMap<String, String>,
}

/// Creates a new instance of Metadata.
public fun new(): Metadata {
    Metadata {
        metadata: vec_map::empty(),
    }
}

/// Inserts a key-value pair into the metadata.
///
/// If the key is already present, the value is updated.
public fun insert_or_update(self: &mut Metadata, key: String, value: String) {
    if (self.metadata.contains(&key)) {
        self.metadata.remove(&key);
    };
    self.metadata.insert(key, value);
}

/// Removes the metadata associated with the given key.
public fun remove(self: &mut Metadata, key: &String): (String, String) {
    self.metadata.remove(key)
}
diff --git a/contracts/walrus/sources/system/redstuff.move b/contracts/walrus/sources/system/redstuff.move
new file mode 100644
index 00000000..2c3e99d3
--- /dev/null
+++ b/contracts/walrus/sources/system/redstuff.move
@@ -0,0 +1,104 @@
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

module walrus::redstuff;

// The length of a hash used for the Red Stuff metadata
const DIGEST_LEN: u64 = 32;

// The length of a blob id in the stored metadata
const BLOB_ID_LEN: u64 = 32;

/// Computes the encoded length of a blob for the Red Stuff encoding, given its
/// unencoded size and the number of shards. The output length includes the
/// size of the metadata hashes and the blob ID.
/// This computation is the same as done by the function of the same name in
/// `crates/walrus_core/encoding/config.rs` and should be kept in sync.
public(package) fun encoded_blob_length(unencoded_length: u64, n_shards: u16): u64 {
    let slivers_size =
        ((source_symbols_primary(n_shards) as u64) +
        (source_symbols_secondary(n_shards) as u64)) *
        (symbol_size(unencoded_length, n_shards) as u64);

    (n_shards as u64) * (slivers_size + metadata_size(n_shards))
}

/// The number of primary source symbols per sliver given `n_shards`.
fun source_symbols_primary(n_shards: u16): u16 {
    n_shards - max_byzantine(n_shards) - decoding_safety_limit(n_shards)
}

/// The number of secondary source symbols per sliver given `n_shards`.
fun source_symbols_secondary(n_shards: u16): u16 {
    n_shards - 2 * max_byzantine(n_shards) - decoding_safety_limit(n_shards)
}

/// The total number of source symbols given `n_shards`.
fun n_source_symbols(n_shards: u16): u64 {
    (source_symbols_primary(n_shards) as u64) * (source_symbols_secondary(n_shards) as u64)
}

/// Computes the symbol size given the `unencoded_length` and number of shards
/// `n_shards`. If the resulting symbols would be larger than a `u16`, this
/// results in an Error.
fun symbol_size(mut unencoded_length: u64, n_shards: u16): u16 {
    // Zero-length blobs are encoded as if they contained a single byte.
    if (unencoded_length == 0) {
        unencoded_length = 1;
    };
    let n_symbols = n_source_symbols(n_shards);
    // Ceiling division; the cast aborts if the symbol does not fit in a u16.
    ((unencoded_length - 1) / n_symbols + 1) as u16
}

/// The size of the metadata, i.e. sliver root hashes and blob_id.
fun metadata_size(n_shards: u16): u64 {
    (n_shards as u64) * DIGEST_LEN * 2 + BLOB_ID_LEN
}

/// Returns the decoding safety limit. See `crates/walrus-core/src/encoding/config.rs`
/// for a description.
fun decoding_safety_limit(n_shards: u16): u16 {
    // These ranges are chosen to ensure that the safety limit is at most 20% of f,
    // up to a safety limit of 5.
    (max_byzantine(n_shards) / 5).min(5)
}

/// Maximum number of byzantine shards, given `n_shards`.
fun max_byzantine(n_shards: u16): u16 {
    (n_shards - 1) / 3
}

// Tests

#[test_only]
fun assert_encoded_size(unencoded_length: u64, n_shards: u16, encoded_size: u64) {
    assert!(encoded_blob_length(unencoded_length, n_shards) == encoded_size, 0);
}

#[test]
/// These tests replicate the tests for `encoded_blob_length` in
/// `crates/walrus_core/encoding/config.rs` and should be kept in sync.
fun test_encoded_size() {
    assert_encoded_size(1, 10, 10 * ((4 + 7) + 10 * 2 * 32 + 32));
    assert_encoded_size(1, 1000, 1000 * ((329 + 662) + 1000 * 2 * 32 + 32));
    assert_encoded_size((4 * 7) * 100, 10, 10 * ((4 + 7) * 100 + 10 * 2 * 32 + 32));
    assert_encoded_size(
        (329 * 662) * 100,
        1000,
        1000 * ((329 + 662) * 100 + 1000 * 2 * 32 + 32),
    );
}

#[test]
fun test_zero_size() {
    // Must not abort: `symbol_size` clamps a zero-length blob to one byte,
    // so a zero-size blob is encoded like a one-byte blob.
    encoded_blob_length(0, 10);
}

#[test, expected_failure]
fun test_symbol_too_large() {
    let n_shards = 100;
    // Create an unencoded length for which each symbol must be larger than the maximum size
    let unencoded_length = (0xffff + 1) * n_source_symbols(n_shards);
    // Test should fail here
    let _ = symbol_size(unencoded_length, n_shards);
}
diff --git a/contracts/walrus/sources/system/shared_blob.move b/contracts/walrus/sources/system/shared_blob.move
new file mode 100644
index 00000000..e3e24120
--- /dev/null
+++ b/contracts/walrus/sources/system/shared_blob.move
@@ -0,0 +1,43 @@
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

module walrus::shared_blob;

use sui::{balance::{Self, Balance}, coin::Coin};
use wal::wal::WAL;
use walrus::{blob::Blob, system::System};

/// A wrapper around `Blob` that acts as a "tip jar" that can be funded by anyone and allows
/// keeping the wrapped `Blob` alive indefinitely.
public struct SharedBlob has key, store {
    id: UID,
    blob: Blob,
    funds: Balance<WAL>,
}

/// Shares the provided `blob` as a `SharedBlob` with zero funds.
public fun new(blob: Blob, ctx: &mut TxContext) {
    transfer::share_object(SharedBlob {
        id: object::new(ctx),
        blob,
        funds: balance::zero(),
    })
}

/// Adds the provided `Coin` to the stored funds.
public fun fund(self: &mut SharedBlob, added_funds: Coin<WAL>) {
    self.funds.join(added_funds.into_balance());
}

/// Extends the lifetime of the wrapped `Blob` by `epochs_ahead` epochs if the stored funds are
/// sufficient and the new lifetime does not exceed the maximum lifetime.
public fun extend(
    self: &mut SharedBlob,
    system: &mut System,
    epochs_ahead: u32,
    ctx: &mut TxContext,
) {
    // Withdraw everything, pay for the extension, and return the change to the tip jar.
    let mut coin = self.funds.withdraw_all().into_coin(ctx);
    system.extend_blob(&mut self.blob, epochs_ahead, &mut coin);
    self.funds.join(coin.into_balance());
}
diff --git a/contracts/walrus/sources/system/storage_accounting.move b/contracts/walrus/sources/system/storage_accounting.move
new file mode 100644
index 00000000..85b17b89
--- /dev/null
+++ b/contracts/walrus/sources/system/storage_accounting.move
@@ -0,0 +1,137 @@
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

module walrus::storage_accounting;

use sui::balance::{Self, Balance};
use wal::wal::WAL;

// Errors
// Keep errors in `walrus-sui/types/move_errors.rs` up to date with changes here.
const ETooFarInFuture: u64 = 0;

/// Holds information about a future epoch, namely how much
/// storage needs to be reclaimed and the rewards to be distributed.
public struct FutureAccounting has store {
    epoch: u32,
    storage_to_reclaim: u64,
    rewards_to_distribute: Balance<WAL>,
}

/// Constructor for FutureAccounting
public(package) fun new_future_accounting(
    epoch: u32,
    storage_to_reclaim: u64,
    rewards_to_distribute: Balance<WAL>,
): FutureAccounting {
    FutureAccounting { epoch, storage_to_reclaim, rewards_to_distribute }
}

/// Accessor for epoch, read-only.
public(package) fun epoch(accounting: &FutureAccounting): u32 {
    accounting.epoch
}

/// Accessor for storage_to_reclaim, read-only.
public(package) fun storage_to_reclaim(accounting: &FutureAccounting): u64 {
    accounting.storage_to_reclaim
}

/// Increase storage to reclaim.
public(package) fun increase_storage_to_reclaim(accounting: &mut FutureAccounting, amount: u64) {
    accounting.storage_to_reclaim = accounting.storage_to_reclaim + amount;
}

/// Decrease storage to reclaim.
public(package) fun decrease_storage_to_reclaim(accounting: &mut FutureAccounting, amount: u64) {
    accounting.storage_to_reclaim = accounting.storage_to_reclaim - amount;
}

/// Accessor for rewards_to_distribute, mutable.
public(package) fun rewards_balance(accounting: &mut FutureAccounting): &mut Balance<WAL> {
    &mut accounting.rewards_to_distribute
}

/// Destructor for FutureAccounting, when empty.
public(package) fun delete_empty_future_accounting(self: FutureAccounting) {
    self.unwrap_balance().destroy_zero()
}

/// Destroys the accounting entry and returns the contained rewards balance.
public(package) fun unwrap_balance(self: FutureAccounting): Balance<WAL> {
    let FutureAccounting {
        rewards_to_distribute,
        ..,
    } = self;
    rewards_to_distribute
}

#[test_only]
public(package) fun burn_for_testing(self: FutureAccounting) {
    let FutureAccounting {
        rewards_to_distribute,
        ..,
    } = self;

    rewards_to_distribute.destroy_for_testing();
}

/// A ring buffer holding future accounts for a continuous range of epochs.
public struct FutureAccountingRingBuffer has store {
    current_index: u32,
    length: u32,
    ring_buffer: vector<FutureAccounting>,
}

/// Constructor for FutureAccountingRingBuffer
public(package) fun ring_new(length: u32): FutureAccountingRingBuffer {
    let ring_buffer = vector::tabulate!(
        length as u64,
        |epoch| FutureAccounting {
            epoch: epoch as u32,
            storage_to_reclaim: 0,
            rewards_to_distribute: balance::zero(),
        },
    );

    FutureAccountingRingBuffer { current_index: 0, length, ring_buffer }
}

/// Lookup an entry a number of epochs in the future.
public(package) fun ring_lookup_mut(
    self: &mut FutureAccountingRingBuffer,
    epochs_in_future: u32,
): &mut FutureAccounting {
    // Check for out-of-bounds access.
    assert!(epochs_in_future < self.length, ETooFarInFuture);

    let actual_index = (epochs_in_future + self.current_index) % self.length;
    &mut self.ring_buffer[actual_index as u64]
}

/// Pops the entry for the current epoch and appends a fresh entry for the
/// epoch `length` epochs ahead, advancing the ring by one epoch.
public(package) fun ring_pop_expand(self: &mut FutureAccountingRingBuffer): FutureAccounting {
    // Get current epoch
    let current_index = self.current_index;
    let current_epoch = self.ring_buffer[current_index as u64].epoch;

    // Expand the ring buffer
    self
        .ring_buffer
        .push_back(FutureAccounting {
            epoch: current_epoch + self.length,
            storage_to_reclaim: 0,
            rewards_to_distribute: balance::zero(),
        });

    // Now swap remove the current element and increment the current_index
    let accounting = self.ring_buffer.swap_remove(current_index as u64);
    self.current_index = (current_index + 1) % self.length;

    accounting
}

// === Accessors ===

/// The maximum number of epochs for which we can use `self`.
public(package) fun max_epochs_ahead(self: &FutureAccountingRingBuffer): u32 {
    self.length
}
diff --git a/contracts/walrus/sources/system/storage_resource.move b/contracts/walrus/sources/system/storage_resource.move
new file mode 100644
index 00000000..7842895e
--- /dev/null
+++ b/contracts/walrus/sources/system/storage_resource.move
@@ -0,0 +1,144 @@
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

module walrus::storage_resource;

const EInvalidEpoch: u64 = 0;
const EIncompatibleEpochs: u64 = 1;
const EIncompatibleAmount: u64 = 2;

/// Reservation for storage for a given period, which is inclusive start, exclusive end.
public struct Storage has key, store {
    id: UID,
    start_epoch: u32,
    end_epoch: u32,
    storage_size: u64,
}

// === Accessors ===

public fun start_epoch(self: &Storage): u32 {
    self.start_epoch
}

public fun end_epoch(self: &Storage): u32 {
    self.end_epoch
}

public fun storage_size(self: &Storage): u64 {
    self.storage_size
}

/// Constructor for [Storage] objects.
/// Necessary to allow `walrus::system` to create storage objects.
/// Cannot be called outside of the current module and [walrus::system].
public(package) fun create_storage(
    start_epoch: u32,
    end_epoch: u32,
    storage_size: u64,
    ctx: &mut TxContext,
): Storage {
    Storage { id: object::new(ctx), start_epoch, end_epoch, storage_size }
}

/// Extends the end epoch by `extension_epochs` epochs.
public(package) fun extend_end_epoch(self: &mut Storage, extension_epochs: u32) {
    self.end_epoch = self.end_epoch + extension_epochs;
}

/// Split the storage object into two based on `split_epoch`
///
/// `storage` is modified to cover the period from `start_epoch` to `split_epoch`
/// and a new storage object covering `split_epoch` to `end_epoch` is returned.
public fun split_by_epoch(storage: &mut Storage, split_epoch: u32, ctx: &mut TxContext): Storage {
    assert!(split_epoch >= storage.start_epoch && split_epoch <= storage.end_epoch, EInvalidEpoch);
    let end_epoch = storage.end_epoch;
    storage.end_epoch = split_epoch;
    Storage {
        id: object::new(ctx),
        start_epoch: split_epoch,
        end_epoch,
        storage_size: storage.storage_size,
    }
}

/// Split the storage object into two based on `split_size`
///
/// `storage` is modified to cover `split_size` and a new object covering
/// `storage.storage_size - split_size` is created.
public fun split_by_size(storage: &mut Storage, split_size: u64, ctx: &mut TxContext): Storage {
    let storage_size = storage.storage_size - split_size;
    storage.storage_size = split_size;
    Storage {
        id: object::new(ctx),
        start_epoch: storage.start_epoch,
        end_epoch: storage.end_epoch,
        storage_size,
    }
}

/// Fuse two storage objects that cover adjacent periods with the same storage size.
public fun fuse_periods(first: &mut Storage, second: Storage) {
    let Storage {
        id,
        start_epoch: second_start,
        end_epoch: second_end,
        storage_size: second_size,
    } = second;
    id.delete();
    assert!(first.storage_size == second_size, EIncompatibleAmount);
    if (first.end_epoch == second_start) {
        first.end_epoch = second_end;
    } else {
        assert!(first.start_epoch == second_end, EIncompatibleEpochs);
        first.start_epoch = second_start;
    }
}

/// Fuse two storage objects that cover the same period
public fun fuse_amount(first: &mut Storage, second: Storage) {
    let Storage {
        id,
        start_epoch: second_start,
        end_epoch: second_end,
        storage_size: second_size,
    } = second;
    id.delete();
    assert!(
        first.start_epoch == second_start && first.end_epoch == second_end,
        EIncompatibleEpochs,
    );
    first.storage_size = first.storage_size + second_size;
}

/// Fuse two storage objects that either cover the same period
/// or adjacent periods with the same storage size.
public fun fuse(first: &mut Storage, second: Storage) {
    if (first.start_epoch == second.start_epoch) {
        // Fuse by storage_size
        fuse_amount(first, second);
    } else {
        // Fuse by period
        fuse_periods(first, second);
    }
}

#[test_only]
/// Constructor for [Storage] objects for tests
public fun create_for_test(
    start_epoch: u32,
    end_epoch: u32,
    storage_size: u64,
    ctx: &mut TxContext,
): Storage {
    Storage { id: object::new(ctx), start_epoch, end_epoch, storage_size }
}

/// Destructor for [Storage] objects
public fun destroy(storage: Storage) {
    let Storage {
        id,
        ..,
    } = storage;
    id.delete();
}
diff --git a/contracts/walrus/sources/system/system_state_inner.move b/contracts/walrus/sources/system/system_state_inner.move
new file mode 100644
index 00000000..4707eb90
--- /dev/null
+++ b/contracts/walrus/sources/system/system_state_inner.move
@@ -0,0 +1,528 @@
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

#[allow(unused_variable, unused_mut_parameter, unused_field)]
module walrus::system_state_inner;

use sui::{balance::Balance, coin::Coin};
use wal::wal::WAL;
use walrus::{
    blob::{Self, Blob},
    bls_aggregate::{Self, BlsCommittee},
    encoding::encoded_blob_length,
    epoch_parameters::EpochParams,
    event_blob::{Self, EventBlobCertificationState, new_attestation},
    events::emit_invalid_blob_id,
    messages,
    storage_accounting::{Self, FutureAccountingRingBuffer},
    storage_node::StorageNodeCap,
    storage_resource::{Self, Storage}
};

/// An upper limit for the maximum number of epochs ahead for which a blob can be registered.
/// Needed to bound the size of the `future_accounting`.
const MAX_MAX_EPOCHS_AHEAD: u32 = 1000;

// Keep in sync with the same constant in `crates/walrus-sui/utils.rs`.
const BYTES_PER_UNIT_SIZE: u64 = 1_024 * 1_024; // 1 MiB

// Errors
// Keep errors in `walrus-sui/types/move_errors.rs` up to date with changes here.
+const EInvalidMaxEpochsAhead: u64 = 0; +const EStorageExceeded: u64 = 1; +const EInvalidEpochsAhead: u64 = 2; +const EInvalidIdEpoch: u64 = 3; +const EIncorrectCommittee: u64 = 4; +const EInvalidAccountingEpoch: u64 = 5; +const EIncorrectAttestation: u64 = 6; +const ERepeatedAttestation: u64 = 7; +const ENotCommitteeMember: u64 = 8; + +/// The inner object that is not present in signatures and can be versioned. +#[allow(unused_field)] +public struct SystemStateInnerV1 has key, store { + id: UID, + /// The current committee, with the current epoch. + committee: BlsCommittee, + // Some accounting + total_capacity_size: u64, + used_capacity_size: u64, + /// The price per unit size of storage. + storage_price_per_unit_size: u64, + /// The write price per unit size. + write_price_per_unit_size: u64, + /// Accounting ring buffer for future epochs. + future_accounting: FutureAccountingRingBuffer, + /// Event blob certification state + event_blob_certification_state: EventBlobCertificationState, +} + +/// Creates an empty system state with a capacity of zero and an empty +/// committee. +public(package) fun create_empty(max_epochs_ahead: u32, ctx: &mut TxContext): SystemStateInnerV1 { + let committee = bls_aggregate::new_bls_committee(0, vector[]); + assert!(max_epochs_ahead <= MAX_MAX_EPOCHS_AHEAD, EInvalidMaxEpochsAhead); + let future_accounting = storage_accounting::ring_new(max_epochs_ahead); + let event_blob_certification_state = event_blob::create_with_empty_state( + ctx, + ); + let id = object::new(ctx); + SystemStateInnerV1 { + id, + committee, + total_capacity_size: 0, + used_capacity_size: 0, + storage_price_per_unit_size: 0, + write_price_per_unit_size: 0, + future_accounting, + event_blob_certification_state, + } +} + +/// Update epoch to next epoch, and update the committee, price and capacity. +/// +/// Called by the epoch change function that connects `Staking` and `System`. +/// Returns +/// the balance of the rewards from the previous epoch. 
public(package) fun advance_epoch(
    self: &mut SystemStateInnerV1,
    new_committee: BlsCommittee,
    new_epoch_params: EpochParams,
): Balance<WAL> {
    // Check new committee is valid, the existence of a committee for the next
    // epoch is proof that the time has come to move epochs.
    let old_epoch = self.epoch();
    let new_epoch = old_epoch + 1;

    assert!(new_committee.epoch() == new_epoch, EIncorrectCommittee);
    self.committee = new_committee;

    // Update the system object; capacity never shrinks below what is in use.
    self.total_capacity_size = new_epoch_params.capacity().max(self.used_capacity_size);
    self.storage_price_per_unit_size = new_epoch_params.storage_price();
    self.write_price_per_unit_size = new_epoch_params.write_price();

    let accounts_old_epoch = self.future_accounting.ring_pop_expand();

    // Make sure that we have the correct epoch.
    assert!(accounts_old_epoch.epoch() == old_epoch, EInvalidAccountingEpoch);

    // Stop tracking all event blobs.
    self.event_blob_certification_state.reset();

    // Reclaim the storage expiring with the old epoch and hand back the
    // rewards accumulated for it.
    self.used_capacity_size = self.used_capacity_size - accounts_old_epoch.storage_to_reclaim();
    accounts_old_epoch.unwrap_balance()
}

/// Allow buying a storage reservation for a given period of epochs.
public(package) fun reserve_space(
    self: &mut SystemStateInnerV1,
    storage_amount: u64,
    epochs_ahead: u32,
    payment: &mut Coin<WAL>,
    ctx: &mut TxContext,
): Storage {
    // Check the period is within the allowed range.
    assert!(epochs_ahead > 0, EInvalidEpochsAhead);
    assert!(epochs_ahead <= self.future_accounting.max_epochs_ahead(), EInvalidEpochsAhead);

    // Check capacity is available.
    assert!(
        self.used_capacity_size + storage_amount <= self.total_capacity_size,
        EStorageExceeded,
    );

    // Pay rewards for each future epoch into the future accounting.
    self.process_storage_payments(storage_amount, 0, epochs_ahead, payment);

    self.reserve_space_without_payment(storage_amount, epochs_ahead, ctx)
}

/// Allow buying a storage reservation for a given period of epochs without
/// payment.
/// Only to be used for event blobs.
fun reserve_space_without_payment(
    self: &mut SystemStateInnerV1,
    storage_amount: u64,
    epochs_ahead: u32,
    ctx: &mut TxContext,
): Storage {
    // Check the period is within the allowed range.
    assert!(epochs_ahead > 0, EInvalidEpochsAhead);
    assert!(epochs_ahead <= self.future_accounting.max_epochs_ahead(), EInvalidEpochsAhead);

    // Update the storage accounting.
    self.used_capacity_size = self.used_capacity_size + storage_amount;

    // Account the space to reclaim in the future, in the slot of the last
    // covered epoch.
    let final_account = self.future_accounting.ring_lookup_mut(epochs_ahead - 1);
    final_account.increase_storage_to_reclaim(storage_amount);

    let self_epoch = epoch(self);

    storage_resource::create_storage(
        self_epoch,
        self_epoch + epochs_ahead,
        storage_amount,
        ctx,
    )
}

/// Processes invalid blob id message. Checks the certificate in the current
/// committee and ensures
/// that the epoch is correct before emitting an event.
public(package) fun invalidate_blob_id(
    self: &SystemStateInnerV1,
    signature: vector<u8>,
    members: vector<u16>,
    message: vector<u8>,
): u256 {
    let certified_message = self
        .committee
        .verify_quorum_in_epoch(
            signature,
            members,
            message,
        );

    let invalid_blob_message = certified_message.invalid_blob_id_message();
    let blob_id = invalid_blob_message.invalid_blob_id();
    // Assert the epoch is correct.
    let epoch = invalid_blob_message.certified_invalid_epoch();
    assert!(epoch == self.epoch(), EInvalidIdEpoch);

    // Emit the event about a blob id being invalid here.
    emit_invalid_blob_id(
        epoch,
        blob_id,
    );
    blob_id
}

/// Registers a new blob in the system.
/// `size` is the size of the unencoded blob.
/// The reserved space in `storage` must be at
/// least the size of the encoded blob.
public(package) fun register_blob(
    self: &mut SystemStateInnerV1,
    storage: Storage,
    blob_id: u256,
    root_hash: u256,
    size: u64,
    encoding_type: u8,
    deletable: bool,
    write_payment_coin: &mut Coin<WAL>,
    ctx: &mut TxContext,
): Blob {
    let blob = blob::new(
        storage,
        blob_id,
        root_hash,
        size,
        encoding_type,
        deletable,
        self.epoch(),
        self.n_shards(),
        ctx,
    );
    // Charge the write price for the *encoded* size and credit it to the
    // rewards of the current epoch (ring slot 0). `join` returns the new
    // total, which is intentionally discarded.
    let write_price = self.write_price(blob.encoded_size(self.n_shards()));
    let payment = write_payment_coin.split(write_price, ctx).into_balance();
    let _ = self.future_accounting.ring_lookup_mut(0).rewards_balance().join(payment);
    blob
}

/// Certify that a blob will be available in the storage system until the end
/// epoch of the
/// storage associated with it.
public(package) fun certify_blob(
    self: &SystemStateInnerV1,
    blob: &mut Blob,
    signature: vector<u8>,
    signers: vector<u16>,
    message: vector<u8>,
) {
    let certified_msg = self
        .committee()
        .verify_quorum_in_epoch(
            signature,
            signers,
            message,
        );
    let certified_blob_msg = certified_msg.certify_blob_message();
    blob.certify_with_certified_msg(self.epoch(), certified_blob_msg);
}

/// Deletes a deletable blob and returns the contained storage resource.
public(package) fun delete_blob(self: &SystemStateInnerV1, blob: Blob): Storage {
    blob.delete(self.epoch())
}

/// Extend the period of validity of a blob with a new storage resource.
/// The new storage resource must be the same size as the storage resource
/// used in the blob, and have a longer period of validity.
public(package) fun extend_blob_with_resource(
    self: &SystemStateInnerV1,
    blob: &mut Blob,
    extension: Storage,
) {
    blob.extend_with_resource(extension, self.epoch());
}

/// Extend the period of validity of a blob by extending its contained storage
/// resource.
public(package) fun extend_blob(
    self: &mut SystemStateInnerV1,
    blob: &mut Blob,
    epochs_ahead: u32,
    payment: &mut Coin<WAL>,
) {
    // Check that the blob is certified and not expired.
    blob.assert_certified_not_expired(self.epoch());

    // Offsets into the future-accounting ring: the blob's current end epoch
    // and its new end epoch, both relative to the current epoch.
    let start_offset = blob.storage().end_epoch() - self.epoch();
    let end_offset = start_offset + epochs_ahead;

    // Check the period is within the allowed range.
    assert!(epochs_ahead > 0, EInvalidEpochsAhead);
    assert!(end_offset <= self.future_accounting.max_epochs_ahead(), EInvalidEpochsAhead);

    // Pay rewards for each future epoch into the future accounting.
    let storage_size = blob.storage().storage_size();
    self.process_storage_payments(
        storage_size,
        start_offset,
        end_offset,
        payment,
    );

    // Account the space to reclaim in the future.

    // First account for the space not being freed in the original end epoch.
    self
        .future_accounting
        .ring_lookup_mut(start_offset - 1)
        .decrease_storage_to_reclaim(storage_size);

    // Then account for the space being freed in the new end epoch.
    self
        .future_accounting
        .ring_lookup_mut(end_offset - 1)
        .increase_storage_to_reclaim(storage_size);

    blob.storage_mut().extend_end_epoch(epochs_ahead);

    blob.emit_certified(true);
}

/// Pays the storage price for `storage_size` into the rewards of every
/// future-accounting slot in `[start_offset, end_offset)`.
fun process_storage_payments(
    self: &mut SystemStateInnerV1,
    storage_size: u64,
    start_offset: u32,
    end_offset: u32,
    payment: &mut Coin<WAL>,
) {
    let storage_units = storage_units_from_size(storage_size);
    let period_payment_due = self.storage_price_per_unit_size * storage_units;
    let coin_balance = payment.balance_mut();

    start_offset.range_do!(end_offset, |i| {
        let accounts = self.future_accounting.ring_lookup_mut(i);

        // Distribute rewards
        let rewards_balance = accounts.rewards_balance();
        // Note this will abort if the balance is not enough.
        let epoch_payment = coin_balance.split(period_payment_due);
        rewards_balance.join(epoch_payment);
    });
}

/// Records an event-blob attestation from `cap`'s node; once a quorum of the
/// committee (by weight) has attested the same blob, certifies it, records it
/// as the latest certified event blob, and burns the blob object.
public(package) fun certify_event_blob(
    self: &mut SystemStateInnerV1,
    cap: &mut StorageNodeCap,
    blob_id: u256,
    root_hash: u256,
    size: u64,
    encoding_type: u8,
    ending_checkpoint_sequence_num: u64,
    epoch: u32,
    ctx: &mut TxContext,
) {
    assert!(self.committee().contains(&cap.node_id()), ENotCommitteeMember);
    assert!(epoch == self.epoch(), EInvalidIdEpoch);

    let cap_attestation = cap.last_event_blob_attestation();
    if (cap_attestation.is_some()) {
        let attestation = cap_attestation.destroy_some();
        // Within one epoch a node may only attest strictly increasing
        // checkpoint sequence numbers.
        assert!(
            attestation.last_attested_event_blob_epoch() < self.epoch() ||
            ending_checkpoint_sequence_num >
            attestation.last_attested_event_blob_checkpoint_seq_num(),
            ERepeatedAttestation,
        );
        let latest_certified_checkpoint_seq_num = self
            .event_blob_certification_state
            .get_latest_certified_checkpoint_sequence_number();
        if (latest_certified_checkpoint_seq_num.is_some()) {
            let certified_checkpoint_seq_num = latest_certified_checkpoint_seq_num.destroy_some();
            // The node's previous attestation this epoch must already be
            // covered by a certified blob before attesting a new one.
            assert!(
                attestation.last_attested_event_blob_epoch() < self.epoch() ||
                attestation.last_attested_event_blob_checkpoint_seq_num()
                <= certified_checkpoint_seq_num,
                EIncorrectAttestation,
            );
        } else {
            // Nothing certified yet: any previous attestation must be from a
            // past epoch.
            assert!(
                attestation.last_attested_event_blob_epoch() < self.epoch(),
                EIncorrectAttestation,
            );
        }
    };

    // Record this attestation on the node's capability.
    let attestation = new_attestation(ending_checkpoint_sequence_num, epoch);
    cap.set_last_event_blob_attestation(attestation);

    // Nothing more to do if a blob covering this checkpoint is already
    // certified.
    let blob_certified = self
        .event_blob_certification_state
        .is_blob_already_certified(
            ending_checkpoint_sequence_num,
        );
    if (blob_certified) {
        return
    };

    self.event_blob_certification_state.start_tracking_blob(blob_id);
    let weight = self.committee().get_member_weight(&cap.node_id());
    let agg_weight = self.event_blob_certification_state.update_aggregate_weight(blob_id, weight);
    let certified = self.committee().verify_quorum(agg_weight);
    if (!certified) {
        return
    };

    // Quorum reached: reserve unpaid storage for the maximum period, create
    // and certify the blob, record it, and burn the blob object.
    let num_shards = self.n_shards();
    let epochs_ahead = self.future_accounting.max_epochs_ahead();
    let storage = self.reserve_space_without_payment(
        encoded_blob_length(
            size,
            encoding_type,
            num_shards,
        ),
        epochs_ahead,
        ctx,
    );
    let mut blob = blob::new(
        storage,
        blob_id,
        root_hash,
        size,
        encoding_type,
        false,
        self.epoch(),
        self.n_shards(),
        ctx,
    );
    let certified_blob_msg = messages::certified_event_blob_message(
        self.epoch(),
        blob_id,
    );
    blob.certify_with_certified_msg(self.epoch(), certified_blob_msg);
    self
        .event_blob_certification_state
        .update_latest_certified_event_blob(
            ending_checkpoint_sequence_num,
            blob_id,
        );
    self.event_blob_certification_state.stop_tracking_blob(blob_id);
    blob.burn();
}

// === Accessors ===

/// Get epoch. Uses the committee to get the epoch.
public(package) fun epoch(self: &SystemStateInnerV1): u32 {
    self.committee.epoch()
}

/// Accessor for total capacity size.
public(package) fun total_capacity_size(self: &SystemStateInnerV1): u64 {
    self.total_capacity_size
}

/// Accessor for used capacity size.
public(package) fun used_capacity_size(self: &SystemStateInnerV1): u64 {
    self.used_capacity_size
}

/// An accessor for the current committee.
/// Read-only access to the current committee.
public(package) fun committee(self: &SystemStateInnerV1): &BlsCommittee {
    &self.committee
}

#[test_only]
public(package) fun committee_mut(self: &mut SystemStateInnerV1): &mut BlsCommittee {
    &mut self.committee
}

/// Number of shards managed by the current committee.
public(package) fun n_shards(self: &SystemStateInnerV1): u16 {
    self.committee.n_shards()
}

/// Price to write `write_size` bytes, charged per (rounded-up) storage unit.
public(package) fun write_price(self: &SystemStateInnerV1, write_size: u64): u64 {
    storage_units_from_size(write_size) * self.write_price_per_unit_size
}

/// Converts a size in bytes to storage units, rounding up.
fun storage_units_from_size(size: u64): u64 {
    (size + BYTES_PER_UNIT_SIZE - 1) / BYTES_PER_UNIT_SIZE
}

// === Testing ===

#[test_only]
use walrus::{test_utils};

#[test_only]
public(package) fun new_for_testing(): SystemStateInnerV1 {
    let ctx = &mut tx_context::dummy();
    SystemStateInnerV1 {
        id: object::new(ctx),
        committee: test_utils::new_bls_committee_for_testing(0),
        total_capacity_size: 1_000_000_000,
        used_capacity_size: 0,
        storage_price_per_unit_size: 5,
        write_price_per_unit_size: 1,
        future_accounting: storage_accounting::ring_new(104),
        event_blob_certification_state: event_blob::create_with_empty_state(ctx),
    }
}

#[test_only]
public(package) fun new_for_testing_with_multiple_members(ctx: &mut TxContext): SystemStateInnerV1 {
    SystemStateInnerV1 {
        id: object::new(ctx),
        committee: test_utils::new_bls_committee_with_multiple_members_for_testing(0, ctx),
        total_capacity_size: 1_000_000_000,
        used_capacity_size: 0,
        storage_price_per_unit_size: 5,
        write_price_per_unit_size: 1,
        future_accounting: storage_accounting::ring_new(104),
        event_blob_certification_state: event_blob::create_with_empty_state(ctx),
    }
}

#[test_only]
public(package) fun get_event_blob_certification_state(
    system: &SystemStateInnerV1,
): &EventBlobCertificationState {
    &system.event_blob_certification_state
}

diff --git
a/docs/dev-guide/sui-struct.md b/docs/dev-guide/sui-struct.md index 92a64997..060ce09c 100644 --- a/docs/dev-guide/sui-struct.md +++ b/docs/dev-guide/sui-struct.md @@ -9,7 +9,7 @@ on Sui. The Move code of the Walrus Testnet contracts is available at -. An example package using +. An example package using the Walrus contracts is available at . diff --git a/examples/move/walrus_dep/Move.toml b/examples/move/walrus_dep/Move.toml index ca68f03c..7eb61fe4 100644 --- a/examples/move/walrus_dep/Move.toml +++ b/examples/move/walrus_dep/Move.toml @@ -4,7 +4,7 @@ edition = "2024.beta" [dependencies] Sui = { git = "https://github.com/MystenLabs/sui.git", subdir = "crates/sui-framework/packages/sui-framework", rev = "testnet-v1.29.2" } -blob_store = { git = "https://github.com/MystenLabs/walrus-docs.git", rev = "main", subdir = "contracts/blob_store" } +blob_store = { git = "https://github.com/MystenLabs/walrus-docs.git", rev = "main", subdir = "contracts/walrus" } [addresses] walrus_dep = "0x0"