diff --git a/.github/workflows/contracts.yml b/.github/workflows/contracts.yml index 0ef0037..ecee33c 100644 --- a/.github/workflows/contracts.yml +++ b/.github/workflows/contracts.yml @@ -79,7 +79,7 @@ jobs: run : forge coverage --evm-version cancun --report lcov - name : Prune coverage - run : lcov --rc branch_coverage=1 --remove ./lcov.info -o ./lcov.info.pruned 'src/mocks/*' 'src/test/*' 'scripts/*' 'node_modules/*' 'lib/*' + run : lcov --rc branch_coverage=1 --remove ./lcov.info -o ./lcov.info.pruned 'src/mocks/*' 'src/test/*' 'scripts/*' 'node_modules/*' 'lib/*' --ignore-errors unused,unused - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v3 diff --git a/src/L1/rollup/IScrollChain.sol b/src/L1/rollup/IScrollChain.sol index 637b006..81f89d7 100644 --- a/src/L1/rollup/IScrollChain.sol +++ b/src/L1/rollup/IScrollChain.sol @@ -68,19 +68,6 @@ interface IScrollChain { * Public Mutating Functions * *****************************/ - /// @notice Commit a batch of transactions on layer 1. - /// - /// @param version The version of current batch. - /// @param parentBatchHeader The header of parent batch, see the comments of `BatchHeaderV0Codec`. - /// @param chunks The list of encoded chunks, see the comments of `ChunkCodec`. - /// @param skippedL1MessageBitmap The bitmap indicates whether each L1 message is skipped or not. - function commitBatch( - uint8 version, - bytes calldata parentBatchHeader, - bytes[] memory chunks, - bytes calldata skippedL1MessageBitmap - ) external; - /// @notice Commit a batch of transactions on layer 1 with blob data proof. /// /// @dev Memory layout of `blobDataProof`: @@ -107,28 +94,6 @@ interface IScrollChain { /// @param lastBatchHeader The header of last batch to revert, see the encoding in comments of `commitBatch`. function revertBatch(bytes calldata firstBatchHeader, bytes calldata lastBatchHeader) external; - /// @notice Finalize a committed batch (with blob) on layer 1. - /// - /// @dev Memory layout of `blobDataProof`: - /// | z | y | kzg_commitment | kzg_proof | - /// |---------|---------|----------------|-----------| - /// | bytes32 | bytes32 | bytes48 | bytes48 | - /// - /// @param batchHeader The header of current batch, see the encoding in comments of `commitBatch. - /// @param prevStateRoot The state root of parent batch. - /// @param postStateRoot The state root of current batch. - /// @param withdrawRoot The withdraw trie root of current batch. - /// @param blobDataProof The proof for blob data. - /// @param aggrProof The aggregation proof for current batch. - function finalizeBatchWithProof4844( - bytes calldata batchHeader, - bytes32 prevStateRoot, - bytes32 postStateRoot, - bytes32 withdrawRoot, - bytes calldata blobDataProof, - bytes calldata aggrProof - ) external; - /// @notice Finalize a list of committed batches (i.e. bundle) on layer 1. /// @param batchHeader The header of last batch in current bundle, see the encoding in comments of `commitBatch. /// @param postStateRoot The state root after current bundle. @@ -140,4 +105,8 @@ interface IScrollChain { bytes32 withdrawRoot, bytes calldata aggrProof ) external; + + /// @notice Finalize the initial Euclid batch. + /// @param postStateRoot The state root after current batch. 
+ function finalizeEuclidInitialBatch(bytes32 postStateRoot) external; } diff --git a/src/L1/rollup/ScrollChain.sol b/src/L1/rollup/ScrollChain.sol index 0cf493d..7c5beac 100644 --- a/src/L1/rollup/ScrollChain.sol +++ b/src/L1/rollup/ScrollChain.sol @@ -105,6 +105,24 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain { /// @dev Thrown when the given address is `address(0)`. error ErrorZeroAddress(); + /// @dev Thrown when committing an old batch after the Euclid fork is enabled. + error ErrorEuclidForkEnabled(); + + /// @dev Thrown when the security council finalizes the v5 batch before all v4 batches are finalized. + error ErrorNotAllV4BatchFinalized(); + + /// @dev Thrown when the committed v5 batch does not contain exactly one chunk. + error ErrorV5BatchNotContainsOnlyOneChunk(); + + /// @dev Thrown when the committed v5 batch does not contain exactly one block. + error ErrorV5BatchNotContainsOnlyOneBlock(); + + /// @dev Thrown when the committed v5 batch contains any transactions (L1 or L2). + error ErrorV5BatchContainsTransactions(); + + /// @dev Thrown when finalizing v4/v5, v5/v6, or v4/v5/v6 batches in the same bundle. + error ErrorFinalizePreAndPostEuclidBatchInOneBundle(); + /************* * Constants * *************/ @@ -157,6 +175,9 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain { /// @inheritdoc IScrollChain mapping(uint256 => bytes32) public override withdrawRoots; + /// @notice The index of the first Euclid batch. + uint256 public initialEuclidBatchIndex; + /********************** * Function Modifiers * **********************/ @@ -261,72 +282,15 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain { emit FinalizeBatch(0, _batchHash, _stateRoot, bytes32(0)); } - /// @inheritdoc IScrollChain - function commitBatch( - uint8 _version, - bytes calldata _parentBatchHeader, - bytes[] memory _chunks, - bytes calldata _skippedL1MessageBitmap - ) external override OnlySequencer whenNotPaused { - (bytes32 _parentBatchHash, uint256 _batchIndex, uint256 _totalL1MessagesPoppedOverall) = _beforeCommitBatch( - _parentBatchHeader, - _chunks - ); - - bytes32 _batchHash; - uint256 batchPtr; - bytes32 _dataHash; - uint256 _totalL1MessagesPoppedInBatch; - if (1 <= _version && _version <= 2) { - // versions 1 and 2 both use ChunkCodecV1 and BatchHeaderV1Codec, - // but they use different blob encoding and different verifiers. - (_dataHash, _totalL1MessagesPoppedInBatch) = _commitChunksV1( - _totalL1MessagesPoppedOverall, - _chunks, - _skippedL1MessageBitmap - ); - assembly { - batchPtr := mload(0x40) - _totalL1MessagesPoppedOverall := add(_totalL1MessagesPoppedOverall, _totalL1MessagesPoppedInBatch) - } - - // store entries, the order matters - // Some are using `BatchHeaderV0Codec`, see comments of `BatchHeaderV1Codec`.
- BatchHeaderV0Codec.storeVersion(batchPtr, _version); - BatchHeaderV0Codec.storeBatchIndex(batchPtr, _batchIndex); - BatchHeaderV0Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch); - BatchHeaderV0Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall); - BatchHeaderV0Codec.storeDataHash(batchPtr, _dataHash); - BatchHeaderV1Codec.storeBlobVersionedHash(batchPtr, _getBlobVersionedHash()); - BatchHeaderV1Codec.storeParentBatchHash(batchPtr, _parentBatchHash); - BatchHeaderV1Codec.storeSkippedBitmap(batchPtr, _skippedL1MessageBitmap); - // compute batch hash, V1 and V2 has same code as V0 - _batchHash = BatchHeaderV0Codec.computeBatchHash( - batchPtr, - BatchHeaderV1Codec.BATCH_HEADER_FIXED_LENGTH + _skippedL1MessageBitmap.length - ); - } else { - // we don't allow v0 and other versions - revert ErrorIncorrectBatchVersion(); - } - - // verify skippedL1MessageBitmap - _checkSkippedL1MessageBitmap( - _totalL1MessagesPoppedOverall, - _totalL1MessagesPoppedInBatch, - _skippedL1MessageBitmap, - false - ); - - _afterCommitBatch(_batchIndex, _batchHash); - } - /// @inheritdoc IScrollChain /// /// @dev This function will revert unless all V0/V1/V2 batches are finalized. This is because we start to /// pop L1 messages in `commitBatchWithBlobProof` but not in `commitBatch`. We also introduce `finalizedQueueIndex` /// in `L1MessageQueue`. If one of V0/V1/V2 batches not finalized, `L1MessageQueue.pendingQueueIndex` will not /// match `parentBatchHeader.totalL1MessagePopped` and thus revert. + /// + /// @dev This function now only accepts batches with version >= 4. And for `_version=5`, we should make sure the + /// batch contains only one empty block, since it is the Euclid initial batch for the zkt/mpt transition. function commitBatchWithBlobProof( uint8 _version, bytes calldata _parentBatchHeader, @@ -334,72 +298,33 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain { bytes calldata _skippedL1MessageBitmap, bytes calldata _blobDataProof ) external override OnlySequencer whenNotPaused { - if (_version <= 2) { + if (_version < 4) { + // only accept versions >= 4 revert ErrorIncorrectBatchVersion(); + } else if (_version == 5) { + // the Euclid initial batch can only be committed once + if (initialEuclidBatchIndex != 0) revert ErrorBatchIsAlreadyCommitted(); } + // @note We are supposed to check that v6 batches cannot be committed before the initial Euclid batch. + // However, that would introduce an extra sload (2000 gas), so we let the sequencer do this check offchain. + // Even if the sequencer commits v6 batches without the v5 batch, the security council can still revert them. - // allocate memory of batch header and store entries if necessary, the order matters - // @note why store entries if necessary, to avoid stack overflow problem. - // The codes for `version`, `batchIndex`, `l1MessagePopped`, `totalL1MessagePopped` and `dataHash` - // are the same as `BatchHeaderV0Codec`. - // The codes for `blobVersionedHash`, and `parentBatchHash` are the same as `BatchHeaderV1Codec`. - uint256 batchPtr; - assembly { - batchPtr := mload(0x40) - // This is `BatchHeaderV3Codec.BATCH_HEADER_FIXED_LENGTH`, use `193` here to reduce code - // complexity. Be careful that the length may changed in future versions.
- mstore(0x40, add(batchPtr, 193)) - } - BatchHeaderV0Codec.storeVersion(batchPtr, _version); - - (bytes32 _parentBatchHash, uint256 _batchIndex, uint256 _totalL1MessagesPoppedOverall) = _beforeCommitBatch( + uint256 batchIndex = _commitBatchFromV2ToV6( + _version, _parentBatchHeader, - _chunks - ); - BatchHeaderV0Codec.storeBatchIndex(batchPtr, _batchIndex); - - // versions 2 and 3 both use ChunkCodecV1 - (bytes32 _dataHash, uint256 _totalL1MessagesPoppedInBatch) = _commitChunksV1( - _totalL1MessagesPoppedOverall, + _chunks, - _skippedL1MessageBitmap - ); - unchecked { - _totalL1MessagesPoppedOverall += _totalL1MessagesPoppedInBatch; - } - - // verify skippedL1MessageBitmap - _checkSkippedL1MessageBitmap( - _totalL1MessagesPoppedOverall, - _totalL1MessagesPoppedInBatch, _skippedL1MessageBitmap, - true + _blobDataProof ); - BatchHeaderV0Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch); - BatchHeaderV0Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall); - BatchHeaderV0Codec.storeDataHash(batchPtr, _dataHash); - - // verify blob versioned hash - bytes32 _blobVersionedHash = _getBlobVersionedHash(); - _checkBlobVersionedHash(_blobVersionedHash, _blobDataProof); - BatchHeaderV1Codec.storeBlobVersionedHash(batchPtr, _blobVersionedHash); - BatchHeaderV1Codec.storeParentBatchHash(batchPtr, _parentBatchHash); - - uint256 lastBlockTimestamp; - { - bytes memory lastChunk = _chunks[_chunks.length - 1]; - lastBlockTimestamp = ChunkCodecV1.getLastBlockTimestamp(lastChunk); + // Don't allow committing version 4 batches after the Euclid upgrade. + // This check is to avoid the sequencer committing a wrong batch due to human error. + // And this check won't introduce much gas overhead (likely less than 100 gas). + if (_version == 4) { + uint256 euclidForkBatchIndex = initialEuclidBatchIndex; + if (euclidForkBatchIndex > 0 && batchIndex > euclidForkBatchIndex) revert ErrorEuclidForkEnabled(); + } else if (_version == 5) { + initialEuclidBatchIndex = batchIndex; } - BatchHeaderV3Codec.storeLastBlockTimestamp(batchPtr, lastBlockTimestamp); - BatchHeaderV3Codec.storeBlobDataProof(batchPtr, _blobDataProof); - - // compute batch hash, V3 has same code as V0 - bytes32 _batchHash = BatchHeaderV0Codec.computeBatchHash( - batchPtr, - BatchHeaderV3Codec.BATCH_HEADER_FIXED_LENGTH - ); - - _afterCommitBatch(_batchIndex, _batchHash); } /// @inheritdoc IScrollChain @@ -422,14 +347,20 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain { if (_firstBatchIndex <= lastFinalizedBatchIndex) revert ErrorRevertFinalizedBatch(); // actual revert + uint256 _initialEuclidBatchIndex = initialEuclidBatchIndex; for (uint256 _batchIndex = _lastBatchIndex; _batchIndex >= _firstBatchIndex; --_batchIndex) { bytes32 _batchHash = committedBatches[_batchIndex]; committedBatches[_batchIndex] = bytes32(0); + // also reset `initialEuclidBatchIndex` if the initial Euclid batch is reverted + if (_initialEuclidBatchIndex == _batchIndex) { + initialEuclidBatchIndex = 0; + } + emit RevertBatch(_batchIndex, _batchHash); } - // `getL1MessagePopped` codes are the same in V0, V1, V2, V3 + // `getL1MessagePopped` codes are the same in V0~V6 uint256 l1MessagePoppedFirstBatch = BatchHeaderV0Codec.getL1MessagePopped(firstBatchPtr); unchecked { IL1MessageQueue(messageQueue).resetPoppedCrossDomainMessage( } } - /* This function will never be used since we already upgrade to 4844. We comment out the codes for reference.
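(Reviewer note, not part of the patch.) The commit-side changes above amount to a small version gate keyed on `initialEuclidBatchIndex`. A minimal sketch of that rule follows; the helper name `_checkEuclidCommitVersion` is hypothetical, but the individual checks are taken from `commitBatchWithBlobProof` as modified in this diff:

/// Hypothetical helper mirroring the version gating in `commitBatchWithBlobProof`.
function _checkEuclidCommitVersion(uint8 version, uint256 batchIndex) internal view {
    uint256 euclidForkBatchIndex = initialEuclidBatchIndex;
    if (version < 4) {
        // v0~v3 batches can no longer be committed
        revert ErrorIncorrectBatchVersion();
    } else if (version == 4) {
        // v4 batches are rejected once they would land above the committed v5 batch
        if (euclidForkBatchIndex > 0 && batchIndex > euclidForkBatchIndex) revert ErrorEuclidForkEnabled();
    } else if (version == 5) {
        // the Euclid initial batch may be committed exactly once
        if (euclidForkBatchIndex != 0) revert ErrorBatchIsAlreadyCommitted();
    }
    // version >= 6: accepted here; the "v6 only after v5" ordering is enforced offchain by the
    // sequencer to save an sload, and a wrong ordering can be undone by the security council
    // through `revertBatch`.
}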
- /// @inheritdoc IScrollChain - function finalizeBatchWithProof( - bytes calldata _batchHeader, - bytes32 _prevStateRoot, - bytes32 _postStateRoot, - bytes32 _withdrawRoot, - bytes calldata _aggrProof - ) external override OnlyProver whenNotPaused { - (uint256 batchPtr, bytes32 _batchHash, uint256 _batchIndex) = _beforeFinalizeBatch( - _batchHeader, - _postStateRoot - ); - - // compute public input hash - bytes32 _publicInputHash; - { - bytes32 _dataHash = BatchHeaderV0Codec.getDataHash(batchPtr); - bytes32 _prevStateRoot = finalizedStateRoots[_batchIndex - 1]; - _publicInputHash = keccak256( - abi.encodePacked(layer2ChainId, _prevStateRoot, _postStateRoot, _withdrawRoot, _dataHash) - ); - } - // verify batch - IRollupVerifier(verifier).verifyAggregateProof(0, _batchIndex, _aggrProof, _publicInputHash); - - // Pop finalized and non-skipped message from L1MessageQueue. - uint256 _totalL1MessagesPoppedOverall = BatchHeaderV0Codec.getTotalL1MessagePopped(batchPtr); - _popL1MessagesMemory( - BatchHeaderV0Codec.getSkippedBitmapPtr(batchPtr), - _totalL1MessagesPoppedOverall, - BatchHeaderV0Codec.getL1MessagePopped(batchPtr) - ); - - _afterFinalizeBatch(_totalL1MessagesPoppedOverall, _batchIndex, _batchHash, _postStateRoot, _withdrawRoot); - } - */ - - /// @inheritdoc IScrollChain - /// @dev Memory layout of `_blobDataProof`: - /// ```text - /// | z | y | kzg_commitment | kzg_proof | - /// |---------|---------|----------------|-----------| - /// | bytes32 | bytes32 | bytes48 | bytes48 | - /// ``` - function finalizeBatchWithProof4844( - bytes calldata _batchHeader, - bytes32, /*_prevStateRoot*/ - bytes32 _postStateRoot, - bytes32 _withdrawRoot, - bytes calldata _blobDataProof, - bytes calldata _aggrProof - ) external override OnlyProver whenNotPaused { - (uint256 batchPtr, bytes32 _batchHash, uint256 _batchIndex) = _beforeFinalizeBatch( - _batchHeader, - _postStateRoot - ); - - // compute public input hash - bytes32 _publicInputHash; - { - bytes32 _dataHash = BatchHeaderV0Codec.getDataHash(batchPtr); - bytes32 _blobVersionedHash = BatchHeaderV1Codec.getBlobVersionedHash(batchPtr); - bytes32 _prevStateRoot = finalizedStateRoots[_batchIndex - 1]; - // verify blob versioned hash - _checkBlobVersionedHash(_blobVersionedHash, _blobDataProof); - _publicInputHash = keccak256( - abi.encodePacked( - layer2ChainId, - _prevStateRoot, - _postStateRoot, - _withdrawRoot, - _dataHash, - _blobDataProof[0:64], - _blobVersionedHash - ) - ); - } - - // load version from batch header, it is always the first byte. - uint256 batchVersion = BatchHeaderV0Codec.getVersion(batchPtr); - // verify batch - IRollupVerifier(verifier).verifyAggregateProof(batchVersion, _batchIndex, _aggrProof, _publicInputHash); - - // Pop finalized and non-skipped message from L1MessageQueue. - uint256 _totalL1MessagesPoppedOverall = BatchHeaderV0Codec.getTotalL1MessagePopped(batchPtr); - _popL1MessagesMemory( - BatchHeaderV1Codec.getSkippedBitmapPtr(batchPtr), - _totalL1MessagesPoppedOverall, - BatchHeaderV0Codec.getL1MessagePopped(batchPtr) - ); - - _afterFinalizeBatch(_totalL1MessagesPoppedOverall, _batchIndex, _batchHash, _postStateRoot, _withdrawRoot); - } - /// @inheritdoc IScrollChain + /// @dev All batches in the given bundle should have the same version and version <= 4 or version >= 6. 
function finalizeBundleWithProof( - bytes calldata _batchHeader, - bytes32 _postStateRoot, - bytes32 _withdrawRoot, - bytes calldata _aggrProof + bytes calldata batchHeader, + bytes32 postStateRoot, + bytes32 withdrawRoot, + bytes calldata aggrProof ) external override OnlyProver whenNotPaused { - if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero(); - - // compute pending batch hash and verify + // actions before verification ( - uint256 batchPtr, - bytes32 _batchHash, - uint256 _batchIndex, - uint256 _totalL1MessagesPoppedOverall - ) = _loadBatchHeader(_batchHeader); + uint256 version, + bytes32 batchHash, + uint256 batchIndex, + uint256 totalL1MessagesPoppedOverall, + uint256 prevBatchIndex + ) = _beforeFinalizeBatch(batchHeader, postStateRoot); - // retrieve finalized state root and batch hash from storage to construct the public input - uint256 _finalizedBatchIndex = lastFinalizedBatchIndex; - if (_batchIndex <= _finalizedBatchIndex) revert ErrorBatchIsAlreadyVerified(); + uint256 euclidForkBatchIndex = initialEuclidBatchIndex; + // Make sure we don't finalize v4, v5 and v6 batches in the same bundle, that + // means `batchIndex < euclidForkBatchIndex` or `prevBatchIndex >= euclidForkBatchIndex`. + if (prevBatchIndex < euclidForkBatchIndex && euclidForkBatchIndex <= batchIndex) { + revert ErrorFinalizePreAndPostEuclidBatchInOneBundle(); + } - bytes memory _publicInput = abi.encodePacked( + bytes memory publicInputs = abi.encodePacked( layer2ChainId, - uint32(_batchIndex - _finalizedBatchIndex), // numBatches - finalizedStateRoots[_finalizedBatchIndex], // _prevStateRoot - committedBatches[_finalizedBatchIndex], // _prevBatchHash - _postStateRoot, - _batchHash, - _withdrawRoot + uint32(batchIndex - prevBatchIndex), // numBatches + finalizedStateRoots[prevBatchIndex], // _prevStateRoot + committedBatches[prevBatchIndex], // _prevBatchHash + postStateRoot, + batchHash, + withdrawRoot ); - // load version from batch header, it is always the first byte. - uint256 batchVersion = BatchHeaderV0Codec.getVersion(batchPtr); - // verify bundle, choose the correct verifier based on the last batch // our off-chain service will make sure all unfinalized batches have the same batch version. - IRollupVerifier(verifier).verifyBundleProof(batchVersion, _batchIndex, _aggrProof, _publicInput); + IRollupVerifier(verifier).verifyBundleProof(version, batchIndex, aggrProof, publicInputs); - // store in state - // @note we do not store intermediate finalized roots - lastFinalizedBatchIndex = _batchIndex; - finalizedStateRoots[_batchIndex] = _postStateRoot; - withdrawRoots[_batchIndex] = _withdrawRoot; + // actions after verification + _afterFinalizeBatch(batchIndex, batchHash, totalL1MessagesPoppedOverall, postStateRoot, withdrawRoot); + } - // Pop finalized and non-skipped message from L1MessageQueue. - _finalizePoppedL1Messages(_totalL1MessagesPoppedOverall); + /// @inheritdoc IScrollChain + /// @dev This function will only allow security council to call once. 
+ function finalizeEuclidInitialBatch(bytes32 postStateRoot) external override onlyOwner { + if (postStateRoot == bytes32(0)) revert ErrorStateRootIsZero(); + + uint256 batchIndex = initialEuclidBatchIndex; + // make sure only finalize once + if (finalizedStateRoots[batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified(); + // all v4 batches should be finalized + if (lastFinalizedBatchIndex + 1 != batchIndex) revert ErrorNotAllV4BatchFinalized(); - emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot); + // update storage + lastFinalizedBatchIndex = batchIndex; + // batch is guaranteed to contain a single empty block, so withdraw root does not change + bytes32 withdrawRoot = withdrawRoots[batchIndex - 1]; + finalizedStateRoots[batchIndex] = postStateRoot; + withdrawRoots[batchIndex] = withdrawRoot; + + emit FinalizeBatch(batchIndex, committedBatches[batchIndex], postStateRoot, withdrawRoot); } /************************ @@ -673,7 +522,7 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain { if (committedBatches[_batchIndex] != 0) revert ErrorBatchIsAlreadyCommitted(); } - /// @dev Internal function to do common checks after actual batch committing. + /// @dev Internal function to do common actions after actual batch committing. /// @param _batchIndex The index of current batch. /// @param _batchHash The hash of current batch. function _afterCommitBatch(uint256 _batchIndex, bytes32 _batchHash) private { @@ -681,72 +530,48 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain { emit CommitBatch(_batchIndex, _batchHash); } - /// @dev Internal function to do common checks before actual batch finalization. - /// @param _batchHeader The current batch header in calldata. - /// @param _postStateRoot The state root after current batch. - /// @return batchPtr The start memory offset of current batch in memory. - /// @return _batchHash The hash of current batch. - /// @return _batchIndex The index of current batch. - function _beforeFinalizeBatch(bytes calldata _batchHeader, bytes32 _postStateRoot) + /// @dev Internal function to do common actions before actual batch finalization. + function _beforeFinalizeBatch(bytes calldata batchHeader, bytes32 postStateRoot) internal view returns ( - uint256 batchPtr, - bytes32 _batchHash, - uint256 _batchIndex + uint256 version, + bytes32 batchHash, + uint256 batchIndex, + uint256 totalL1MessagesPoppedOverall, + uint256 prevBatchIndex ) { - if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero(); + if (postStateRoot == bytes32(0)) revert ErrorStateRootIsZero(); + + uint256 batchPtr; + // compute pending batch hash and verify + (batchPtr, batchHash, batchIndex, totalL1MessagesPoppedOverall) = _loadBatchHeader(batchHeader); - // compute batch hash and verify - (batchPtr, _batchHash, _batchIndex, ) = _loadBatchHeader(_batchHeader); + // make sure don't finalize batch multiple times + prevBatchIndex = lastFinalizedBatchIndex; + if (batchIndex <= prevBatchIndex) revert ErrorBatchIsAlreadyVerified(); - // avoid duplicated verification - if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified(); + version = BatchHeaderV0Codec.getVersion(batchPtr); } - /// @dev Internal function to do common checks after actual batch finalization. - /// @param _totalL1MessagesPoppedOverall The total number of L1 messages popped after current batch. - /// @param _batchIndex The index of current batch. - /// @param _batchHash The hash of current batch. 
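(Reviewer note, not part of the patch.) The checks in `finalizeEuclidInitialBatch` above pin down the intended call sequence around the zkt/mpt transition. The outline below is a hedged summary of the checks already present in this diff, not additional behavior:

// 1. The sequencer commits the final v4 batches, then the single v5 batch
//    (one chunk, one empty block, no transactions); committing v5 records `initialEuclidBatchIndex`.
// 2. The prover finalizes the remaining v4 batches through `finalizeBundleWithProof`;
//    a bundle may not cross the v5 boundary (ErrorFinalizePreAndPostEuclidBatchInOneBundle).
// 3. The owner (security council) calls `finalizeEuclidInitialBatch(postStateRoot)`, which
//    requires `lastFinalizedBatchIndex + 1 == initialEuclidBatchIndex` (ErrorNotAllV4BatchFinalized),
//    refuses to run twice (ErrorBatchIsAlreadyVerified), and carries the previous withdraw root
//    forward because the v5 batch contains a single empty block.
// 4. v6 batches are then committed and finalized through the regular bundle path.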
- /// @param _postStateRoot The state root after current batch. - /// @param _withdrawRoot The withdraw trie root after current batch. + /// @dev Internal function to do common actions after actual batch finalization. function _afterFinalizeBatch( - uint256 _totalL1MessagesPoppedOverall, - uint256 _batchIndex, - bytes32 _batchHash, - bytes32 _postStateRoot, - bytes32 _withdrawRoot + uint256 batchIndex, + bytes32 batchHash, + uint256 totalL1MessagesPoppedOverall, + bytes32 postStateRoot, + bytes32 withdrawRoot ) internal { - // check and update lastFinalizedBatchIndex - unchecked { - if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex(); - lastFinalizedBatchIndex = _batchIndex; - } - - // record state root and withdraw root - finalizedStateRoots[_batchIndex] = _postStateRoot; - withdrawRoots[_batchIndex] = _withdrawRoot; + // @note we do not store intermediate finalized roots + lastFinalizedBatchIndex = batchIndex; + finalizedStateRoots[batchIndex] = postStateRoot; + withdrawRoots[batchIndex] = withdrawRoot; // Pop finalized and non-skipped message from L1MessageQueue. - _finalizePoppedL1Messages(_totalL1MessagesPoppedOverall); + _finalizePoppedL1Messages(totalL1MessagesPoppedOverall); - emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot); - } - - /// @dev Internal function to check blob versioned hash. - /// @param _blobVersionedHash The blob versioned hash to check. - /// @param _blobDataProof The blob data proof used to verify the blob versioned hash. - function _checkBlobVersionedHash(bytes32 _blobVersionedHash, bytes calldata _blobDataProof) internal view { - // Calls the point evaluation precompile and verifies the output - (bool success, bytes memory data) = POINT_EVALUATION_PRECOMPILE_ADDR.staticcall( - abi.encodePacked(_blobVersionedHash, _blobDataProof) - ); - // We verify that the point evaluation precompile call was successful by testing the latter 32 bytes of the - // response is equal to BLS_MODULUS as defined in https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile - if (!success) revert ErrorCallPointEvaluationPrecompileFailed(); - (, uint256 result) = abi.decode(data, (uint256, uint256)); - if (result != BLS_MODULUS) revert ErrorUnexpectedPointEvaluationPrecompileOutput(); + emit FinalizeBatch(batchIndex, batchHash, postStateRoot, withdrawRoot); } /// @dev Internal function to check the `SkippedL1MessageBitmap`. @@ -775,6 +600,26 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain { } } + /// @dev Internal function to get and check the blob versioned hash. + /// @param _blobDataProof The blob data proof passing to point evaluation precompile. + /// @return _blobVersionedHash The retrieved blob versioned hash. 
+ function _getAndCheckBlobVersionedHash(bytes calldata _blobDataProof) + internal + returns (bytes32 _blobVersionedHash) + { + _blobVersionedHash = _getBlobVersionedHash(); + + // Calls the point evaluation precompile and verifies the output + (bool success, bytes memory data) = POINT_EVALUATION_PRECOMPILE_ADDR.staticcall( + abi.encodePacked(_blobVersionedHash, _blobDataProof) + ); + // We verify that the point evaluation precompile call was successful by testing the latter 32 bytes of the + // response is equal to BLS_MODULUS as defined in https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile + if (!success) revert ErrorCallPointEvaluationPrecompileFailed(); + (, uint256 result) = abi.decode(data, (uint256, uint256)); + if (result != BLS_MODULUS) revert ErrorUnexpectedPointEvaluationPrecompileOutput(); + } + /// @dev Internal function to get the blob versioned hash. /// @return _blobVersionedHash The retrieved blob versioned hash. function _getBlobVersionedHash() internal virtual returns (bytes32 _blobVersionedHash) { @@ -788,50 +633,92 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain { if (_secondBlob != bytes32(0)) revert ErrorFoundMultipleBlobs(); } - /// @dev Internal function to commit chunks with version 0 - /// @param _totalL1MessagesPoppedOverall The number of L1 messages popped before the list of chunks. - /// @param _chunks The list of chunks to commit. - /// @param _skippedL1MessageBitmap The bitmap indicates whether each L1 message is skipped or not. - /// @return _batchDataHash The computed data hash for the list of chunks. - /// @return _totalL1MessagesPoppedInBatch The total number of L1 messages popped in this batch, including skipped one. - function _commitChunksV0( - uint256 _totalL1MessagesPoppedOverall, - bytes[] memory _chunks, - bytes calldata _skippedL1MessageBitmap - ) internal view returns (bytes32 _batchDataHash, uint256 _totalL1MessagesPoppedInBatch) { - uint256 _chunksLength = _chunks.length; - - // load `batchDataHashPtr` and reserve the memory region for chunk data hashes - uint256 batchDataHashPtr; + /// @dev We make sure v5 batch only contains one empty block here. 
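(Reviewer note, not part of the patch.) `_getAndCheckBlobVersionedHash` above forwards `versionedHash || blobDataProof` to the EIP-4844 point evaluation precompile. A compact sketch of the same check, reusing the contract's existing `POINT_EVALUATION_PRECOMPILE_ADDR` and `BLS_MODULUS` constants; the helper name is hypothetical:

// Precompile input is 192 bytes: versioned_hash (32) | z (32) | y (32) | kzg_commitment (48) | kzg_proof (48),
// i.e. exactly abi.encodePacked(versionedHash, blobDataProof) given the `blobDataProof` layout documented
// on `commitBatchWithBlobProof`. On success it returns 64 bytes: FIELD_ELEMENTS_PER_BLOB and BLS_MODULUS.
function _pointEvaluation(bytes32 versionedHash, bytes calldata blobDataProof) internal view returns (bool) {
    (bool success, bytes memory data) = POINT_EVALUATION_PRECOMPILE_ADDR.staticcall(
        abi.encodePacked(versionedHash, blobDataProof)
    );
    if (!success || data.length != 64) return false;
    (, uint256 result) = abi.decode(data, (uint256, uint256));
    return result == BLS_MODULUS; // only the second returned word is checked, as in the contract
}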
+ function _validateV5Batch(bytes[] memory chunks) internal pure { + if (chunks.length != 1) revert ErrorV5BatchNotContainsOnlyOneChunk(); + bytes memory chunk = chunks[0]; + uint256 chunkPtr; + uint256 blockPtr; assembly { - batchDataHashPtr := mload(0x40) - mstore(0x40, add(batchDataHashPtr, mul(_chunksLength, 32))) + chunkPtr := add(chunk, 0x20) // skip chunkLength + blockPtr := add(chunkPtr, 1) } - // compute the data hash for each chunk - for (uint256 i = 0; i < _chunksLength; i++) { - uint256 _totalNumL1MessagesInChunk; - bytes32 _chunkDataHash; - (_chunkDataHash, _totalNumL1MessagesInChunk) = _commitChunkV0( - _chunks[i], - _totalL1MessagesPoppedInBatch, - _totalL1MessagesPoppedOverall, - _skippedL1MessageBitmap - ); - unchecked { - _totalL1MessagesPoppedInBatch += _totalNumL1MessagesInChunk; - _totalL1MessagesPoppedOverall += _totalNumL1MessagesInChunk; - } - assembly { - mstore(batchDataHashPtr, _chunkDataHash) - batchDataHashPtr := add(batchDataHashPtr, 0x20) - } + uint256 numBlocks = ChunkCodecV1.validateChunkLength(chunkPtr, chunk.length); + if (numBlocks != 1) revert ErrorV5BatchNotContainsOnlyOneBlock(); + uint256 numTransactions = ChunkCodecV1.getNumTransactions(blockPtr); + if (numTransactions != 0) revert ErrorV5BatchContainsTransactions(); + } + + /// @dev Internal function to commit batches from V2 to V6 (except V5, since it is Euclid initial batch) + function _commitBatchFromV2ToV6( + uint8 _version, + bytes calldata _parentBatchHeader, + bytes[] memory _chunks, + bytes calldata _skippedL1MessageBitmap, + bytes calldata _blobDataProof + ) internal returns (uint256) { + // do extra checks for batch v5. + if (_version == 5) { + _validateV5Batch(_chunks); } - assembly { - let dataLen := mul(_chunksLength, 0x20) - _batchDataHash := keccak256(sub(batchDataHashPtr, dataLen), dataLen) + // allocate memory of batch header and store entries if necessary, the order matters + // @note why store entries if necessary, to avoid stack overflow problem. + // The codes for `version`, `batchIndex`, `l1MessagePopped`, `totalL1MessagePopped` and `dataHash` + // are the same as `BatchHeaderV0Codec`. + // The codes for `blobVersionedHash`, and `parentBatchHash` are the same as `BatchHeaderV1Codec`. 
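(Reviewer note, not part of the patch.) For `_validateV5Batch` above, the only well-formed input is a single chunk holding a single empty block. Under ChunkCodecV1 that is a 61-byte chunk, matching the fixtures used by the tests later in this diff; a minimal construction:

bytes[] memory chunks = new bytes[](1);
bytes memory chunk = new bytes(1 + 60); // 1-byte block count followed by one 60-byte block context
chunk[0] = bytes1(uint8(1));            // exactly one block; numTransactions stays zero, so no L1 or L2 txs
chunks[0] = chunk;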
+ uint256 batchPtr = BatchHeaderV3Codec.allocate(); + BatchHeaderV0Codec.storeVersion(batchPtr, _version); + + (bytes32 _parentBatchHash, uint256 _batchIndex, uint256 _totalL1MessagesPoppedOverall) = _beforeCommitBatch( + _parentBatchHeader, + _chunks + ); + BatchHeaderV0Codec.storeBatchIndex(batchPtr, _batchIndex); + + // versions 2 to 6 both use ChunkCodecV1 + (bytes32 _dataHash, uint256 _totalL1MessagesPoppedInBatch) = _commitChunksV1( + _totalL1MessagesPoppedOverall, + _chunks, + _skippedL1MessageBitmap + ); + unchecked { + _totalL1MessagesPoppedOverall += _totalL1MessagesPoppedInBatch; } + + // verify skippedL1MessageBitmap + _checkSkippedL1MessageBitmap( + _totalL1MessagesPoppedOverall, + _totalL1MessagesPoppedInBatch, + _skippedL1MessageBitmap, + true + ); + BatchHeaderV0Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch); + BatchHeaderV0Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall); + BatchHeaderV0Codec.storeDataHash(batchPtr, _dataHash); + + // verify blob versioned hash + BatchHeaderV1Codec.storeBlobVersionedHash(batchPtr, _getAndCheckBlobVersionedHash(_blobDataProof)); + BatchHeaderV1Codec.storeParentBatchHash(batchPtr, _parentBatchHash); + + uint256 lastBlockTimestamp; + { + bytes memory lastChunk = _chunks[_chunks.length - 1]; + lastBlockTimestamp = ChunkCodecV1.getLastBlockTimestamp(lastChunk); + } + BatchHeaderV3Codec.storeLastBlockTimestamp(batchPtr, lastBlockTimestamp); + BatchHeaderV3Codec.storeBlobDataProof(batchPtr, _blobDataProof); + + // compute batch hash, V2~V6 has same code as V0 + bytes32 _batchHash = BatchHeaderV0Codec.computeBatchHash( + batchPtr, + BatchHeaderV3Codec.BATCH_HEADER_FIXED_LENGTH + ); + + _afterCommitBatch(_batchIndex, _batchHash); + + return _batchIndex; } /// @dev Internal function to commit chunks with version 1 @@ -913,7 +800,7 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain { (batchPtr, _length) = BatchHeaderV3Codec.loadAndValidate(_batchHeader); } - // the code for compute batch hash is the same for V0, V1, V2, V3 + // the code for compute batch hash is the same for V0~V6 // also the `_batchIndex` and `_totalL1MessagesPoppedOverall`. _batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, _length); _batchIndex = BatchHeaderV0Codec.getBatchIndex(batchPtr); @@ -925,100 +812,6 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain { } } - /// @dev Internal function to commit a chunk with version 0. - /// @param _chunk The encoded chunk to commit. - /// @param _totalL1MessagesPoppedInBatch The total number of L1 messages popped in the current batch before this chunk. - /// @param _totalL1MessagesPoppedOverall The total number of L1 messages popped in all batches including the current batch, before this chunk. - /// @param _skippedL1MessageBitmap The bitmap indicates whether each L1 message is skipped or not. - /// @return _dataHash The computed data hash for this chunk. 
- /// @return _totalNumL1MessagesInChunk The total number of L1 message popped in current chunk - function _commitChunkV0( - bytes memory _chunk, - uint256 _totalL1MessagesPoppedInBatch, - uint256 _totalL1MessagesPoppedOverall, - bytes calldata _skippedL1MessageBitmap - ) internal view returns (bytes32 _dataHash, uint256 _totalNumL1MessagesInChunk) { - uint256 chunkPtr; - uint256 startDataPtr; - uint256 dataPtr; - - assembly { - dataPtr := mload(0x40) - startDataPtr := dataPtr - chunkPtr := add(_chunk, 0x20) // skip chunkLength - } - - uint256 _numBlocks = ChunkCodecV0.validateChunkLength(chunkPtr, _chunk.length); - - // concatenate block contexts, use scope to avoid stack too deep - { - uint256 _totalTransactionsInChunk; - for (uint256 i = 0; i < _numBlocks; i++) { - dataPtr = ChunkCodecV0.copyBlockContext(chunkPtr, dataPtr, i); - uint256 blockPtr = chunkPtr + 1 + i * ChunkCodecV0.BLOCK_CONTEXT_LENGTH; - uint256 _numTransactionsInBlock = ChunkCodecV0.getNumTransactions(blockPtr); - unchecked { - _totalTransactionsInChunk += _numTransactionsInBlock; - } - } - assembly { - mstore(0x40, add(dataPtr, mul(_totalTransactionsInChunk, 0x20))) // reserve memory for tx hashes - } - } - - // It is used to compute the actual number of transactions in chunk. - uint256 txHashStartDataPtr = dataPtr; - // concatenate tx hashes - uint256 l2TxPtr = ChunkCodecV0.getL2TxPtr(chunkPtr, _numBlocks); - chunkPtr += 1; - while (_numBlocks > 0) { - // concatenate l1 message hashes - uint256 _numL1MessagesInBlock = ChunkCodecV0.getNumL1Messages(chunkPtr); - dataPtr = _loadL1MessageHashes( - dataPtr, - _numL1MessagesInBlock, - _totalL1MessagesPoppedInBatch, - _totalL1MessagesPoppedOverall, - _skippedL1MessageBitmap - ); - - // concatenate l2 transaction hashes - uint256 _numTransactionsInBlock = ChunkCodecV0.getNumTransactions(chunkPtr); - if (_numTransactionsInBlock < _numL1MessagesInBlock) revert ErrorNumTxsLessThanNumL1Msgs(); - for (uint256 j = _numL1MessagesInBlock; j < _numTransactionsInBlock; j++) { - bytes32 txHash; - (txHash, l2TxPtr) = ChunkCodecV0.loadL2TxHash(l2TxPtr); - assembly { - mstore(dataPtr, txHash) - dataPtr := add(dataPtr, 0x20) - } - } - - unchecked { - _totalNumL1MessagesInChunk += _numL1MessagesInBlock; - _totalL1MessagesPoppedInBatch += _numL1MessagesInBlock; - _totalL1MessagesPoppedOverall += _numL1MessagesInBlock; - - _numBlocks -= 1; - chunkPtr += ChunkCodecV0.BLOCK_CONTEXT_LENGTH; - } - } - - // check the actual number of transactions in the chunk - if ((dataPtr - txHashStartDataPtr) / 32 > maxNumTxInChunk) revert ErrorTooManyTxsInOneChunk(); - - assembly { - chunkPtr := add(_chunk, 0x20) - } - // check chunk has correct length - if (l2TxPtr - chunkPtr != _chunk.length) revert ErrorIncompleteL2TransactionData(); - - // compute data hash and store to memory - assembly { - _dataHash := keccak256(startDataPtr, sub(dataPtr, startDataPtr)) - } - } - /// @dev Internal function to commit a chunk with version 1. /// @param _chunk The encoded chunk to commit. /// @param _totalL1MessagesPoppedInBatch The total number of L1 messages popped in current batch. @@ -1155,19 +948,6 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain { } } - /// @dev Internal function to pop l1 messages from `skippedL1MessageBitmap` in memory. - /// @param bitmapPtr The memory offset of `skippedL1MessageBitmap` in memory. - /// @param totalL1MessagesPoppedOverall The total number of L1 messages popped in all batches including current batch. 
- /// @param totalL1MessagesPoppedInBatch The number of L1 messages popped in current batch. - function _popL1MessagesMemory( - uint256 bitmapPtr, - uint256 totalL1MessagesPoppedOverall, - uint256 totalL1MessagesPoppedInBatch - ) internal { - if (totalL1MessagesPoppedInBatch == 0) return; - _popL1Messages(false, bitmapPtr, totalL1MessagesPoppedOverall, totalL1MessagesPoppedInBatch); - } - /// @dev Internal function to pop l1 messages from `skippedL1MessageBitmap` in calldata. /// @param skippedL1MessageBitmap The `skippedL1MessageBitmap` in calldata. /// @param totalL1MessagesPoppedOverall The total number of L1 messages popped in all batches including current batch. diff --git a/src/libraries/codec/BatchHeaderV3Codec.sol b/src/libraries/codec/BatchHeaderV3Codec.sol index c07014b..40b1d12 100644 --- a/src/libraries/codec/BatchHeaderV3Codec.sol +++ b/src/libraries/codec/BatchHeaderV3Codec.sol @@ -28,6 +28,15 @@ library BatchHeaderV3Codec { /// @dev The length of fixed parts of the batch header. uint256 internal constant BATCH_HEADER_FIXED_LENGTH = 193; + /// @notice Allocate memory for batch header. + function allocate() internal pure returns (uint256 batchPtr) { + assembly { + batchPtr := mload(0x40) + // This is `BatchHeaderV3Codec.BATCH_HEADER_FIXED_LENGTH`, use `193` here to reduce code complexity. + mstore(0x40, add(batchPtr, 193)) + } + } + /// @notice Load batch header in calldata to memory. /// @param _batchHeader The encoded batch header bytes in calldata. /// @return batchPtr The start memory offset of the batch header in memory. @@ -39,10 +48,9 @@ library BatchHeaderV3Codec { } // copy batch header to memory. + batchPtr = allocate(); assembly { - batchPtr := mload(0x40) calldatacopy(batchPtr, _batchHeader.offset, length) - mstore(0x40, add(batchPtr, length)) } } diff --git a/src/libraries/verifier/ZkEvmVerifierPostEuclid.sol b/src/libraries/verifier/ZkEvmVerifierPostEuclid.sol new file mode 100644 index 0000000..2ef2040 --- /dev/null +++ b/src/libraries/verifier/ZkEvmVerifierPostEuclid.sol @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: MIT + +pragma solidity =0.8.24; + +import {IZkEvmVerifierV2} from "./IZkEvmVerifier.sol"; + +// solhint-disable no-inline-assembly + +contract ZkEvmVerifierPostEuclid is IZkEvmVerifierV2 { + /********** + * Errors * + **********/ + + /// @dev Thrown when bundle recursion zk proof verification is failed. + error VerificationFailed(); + + /************* + * Constants * + *************/ + + /// @notice The address of highly optimized plonk verifier contract. + address public immutable plonkVerifier; + + /// @notice A predetermined digest for the `plonkVerifier`. + bytes32 public immutable verifierDigest1; + + /// @notice A predetermined digest for the `plonkVerifier`. + bytes32 public immutable verifierDigest2; + + /*************** + * Constructor * + ***************/ + + constructor( + address _verifier, + bytes32 _verifierDigest1, + bytes32 _verifierDigest2 + ) { + plonkVerifier = _verifier; + verifierDigest1 = _verifierDigest1; + verifierDigest2 = _verifierDigest2; + } + + /************************* + * Public View Functions * + *************************/ + + /// @inheritdoc IZkEvmVerifierV2 + /// + /// @dev Encoding for `publicInput`. And this is exactly the same as `ZkEvmVerifierV2`. 
+ /// ```text + /// | layer2ChainId | numBatches | prevStateRoot | prevBatchHash | postStateRoot | batchHash | withdrawRoot | + /// | 8 bytes | 4 bytes | 32 bytes | 32 bytes | 32 bytes | 32 bytes | 32 bytes | + /// ``` + function verify(bytes calldata bundleProof, bytes calldata publicInput) external view override { + address _verifier = plonkVerifier; + bytes32 _verifierDigest1 = verifierDigest1; + bytes32 _verifierDigest2 = verifierDigest2; + bytes32 publicInputHash = keccak256(publicInput); + bool success; + + // 1. the first 12 * 32 (0x180) bytes of `bundleProof` is `accumulator` + // 2. the rest bytes of `bundleProof` is the actual `bundle_proof` + // 3. Inserted between `accumulator` and `bundle_proof` are + // 32 * 34 (0x440) bytes, such that: + // | start | end | field | + // |---------------|---------------|-------------------------| + // | 0x00 | 0x180 | bundleProof[0x00:0x180] | + // | 0x180 | 0x180 + 0x20 | verifierDigest1 | + // | 0x180 + 0x20 | 0x180 + 0x40 | verifierDigest2 | + // | 0x180 + 0x40 | 0x180 + 0x60 | publicInputHash[0] | + // | 0x180 + 0x60 | 0x180 + 0x80 | publicInputHash[1] | + // ... + // | 0x180 + 0x420 | 0x180 + 0x440 | publicInputHash[31] | + // | 0x180 + 0x440 | dynamic | bundleProof[0x180:] | + assembly { + let p := mload(0x40) + // 1. copy the accumulator's 0x180 bytes + calldatacopy(p, bundleProof.offset, 0x180) + // 2. insert the public input's 0x440 bytes + mstore(add(p, 0x180), _verifierDigest1) // verifierDigest1 + mstore(add(p, 0x1a0), _verifierDigest2) // verifierDigest2 + for { + let i := 0 + } lt(i, 0x400) { + i := add(i, 0x20) + } { + mstore(add(p, sub(0x5a0, i)), and(publicInputHash, 0xff)) + publicInputHash := shr(8, publicInputHash) + } + // 3. copy all remaining bytes from bundleProof + calldatacopy(add(p, 0x5c0), add(bundleProof.offset, 0x180), sub(bundleProof.length, 0x180)) + // 4. call plonk verifier + success := staticcall(gas(), _verifier, p, add(bundleProof.length, 0x440), 0x00, 0x00) + } + if (!success) { + revert VerificationFailed(); + } + } +} diff --git a/src/mocks/ScrollChainMockBlob.sol b/src/mocks/ScrollChainMockBlob.sol index 74aa8fe..a384b63 100644 --- a/src/mocks/ScrollChainMockBlob.sol +++ b/src/mocks/ScrollChainMockBlob.sol @@ -72,35 +72,7 @@ contract ScrollChainMockBlob is ScrollChain { uint256 _totalL1MessagesPoppedOverall ) { - // load version from batch header, it is always the first byte. - uint256 version; - assembly { - version := shr(248, calldataload(_batchHeader.offset)) - } - - uint256 _length; - if (version == 0) { - (batchPtr, _length) = BatchHeaderV0Codec.loadAndValidate(_batchHeader); - } else if (version <= 2) { - (batchPtr, _length) = BatchHeaderV1Codec.loadAndValidate(_batchHeader); - } else if (version >= 3) { - (batchPtr, _length) = BatchHeaderV3Codec.loadAndValidate(_batchHeader); - } - - // the code for compute batch hash is the same for V0, V1, V2, V3 - // also the `_batchIndex` and `_totalL1MessagesPoppedOverall`. 
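(Reviewer note, not part of the patch.) For the `ZkEvmVerifierPostEuclid.verify` assembly above, a non-assembly equivalent of the calldata it builds may help review the layout. This is an illustrative sketch only and deliberately ignores gas cost:

bytes32 h = keccak256(publicInput);
// accumulator (first 0x180 bytes of the proof), then the two verifier digests
bytes memory input = bytes.concat(bundleProof[0:0x180], abi.encode(verifierDigest1, verifierDigest2));
// each byte of keccak(publicInput) becomes its own 32-byte word, most significant byte first
for (uint256 i = 0; i < 32; i++) {
    input = bytes.concat(input, abi.encode(uint256(uint8(h[i]))));
}
// remaining bundle proof; total length = bundleProof.length + 0x440, matching the staticcall above
input = bytes.concat(input, bundleProof[0x180:]);
(bool ok, ) = plonkVerifier.staticcall(input);
if (!ok) revert VerificationFailed();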
- _batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, _length); - _batchIndex = BatchHeaderV0Codec.getBatchIndex(batchPtr); - _totalL1MessagesPoppedOverall = BatchHeaderV0Codec.getTotalL1MessagePopped(batchPtr); - - // only check when genesis is imported - if ( - !overrideBatchHashCheck && - committedBatches[_batchIndex] != _batchHash && - finalizedStateRoots[0] != bytes32(0) - ) { - revert ErrorIncorrectBatchHash(); - } + (batchPtr, _batchHash, _batchIndex, _totalL1MessagesPoppedOverall) = ScrollChain._loadBatchHeader(_batchHeader); if (overrideBatchHashCheck) { _batchHash = committedBatches[_batchIndex]; diff --git a/src/mocks/ScrollChainMockFinalize.sol b/src/mocks/ScrollChainMockFinalize.sol index e4ff375..d181324 100644 --- a/src/mocks/ScrollChainMockFinalize.sol +++ b/src/mocks/ScrollChainMockFinalize.sol @@ -4,9 +4,6 @@ pragma solidity =0.8.24; import {ScrollChain} from "../L1/rollup/ScrollChain.sol"; -import {BatchHeaderV0Codec} from "../libraries/codec/BatchHeaderV0Codec.sol"; -import {BatchHeaderV1Codec} from "../libraries/codec/BatchHeaderV1Codec.sol"; - contract ScrollChainMockFinalize is ScrollChain { /*************** * Constructor * @@ -27,57 +24,19 @@ contract ScrollChainMockFinalize is ScrollChain { * Public Mutating Functions * *****************************/ - /// @notice Finalize 4844 batch without proof, See the comments of {ScrollChain-finalizeBatchWithProof4844}. - function finalizeBatch4844( - bytes calldata _batchHeader, - bytes32, /*_prevStateRoot*/ - bytes32 _postStateRoot, - bytes32 _withdrawRoot, - bytes calldata _blobDataProof - ) external OnlyProver whenNotPaused { - (uint256 batchPtr, bytes32 _batchHash, uint256 _batchIndex) = _beforeFinalizeBatch( - _batchHeader, - _postStateRoot - ); - - // verify blob versioned hash - bytes32 _blobVersionedHash = BatchHeaderV1Codec.getBlobVersionedHash(batchPtr); - _checkBlobVersionedHash(_blobVersionedHash, _blobDataProof); - - // Pop finalized and non-skipped message from L1MessageQueue. - uint256 _totalL1MessagesPoppedOverall = BatchHeaderV0Codec.getTotalL1MessagePopped(batchPtr); - _popL1MessagesMemory( - BatchHeaderV1Codec.getSkippedBitmapPtr(batchPtr), - _totalL1MessagesPoppedOverall, - BatchHeaderV0Codec.getL1MessagePopped(batchPtr) - ); - - _afterFinalizeBatch(_totalL1MessagesPoppedOverall, _batchIndex, _batchHash, _postStateRoot, _withdrawRoot); - } - /// @notice Finalize bundle without proof, See the comments of {ScrollChain-finalizeBundleWithProof}. function finalizeBundle( - bytes calldata _batchHeader, - bytes32 _postStateRoot, - bytes32 _withdrawRoot + bytes calldata batchHeader, + bytes32 postStateRoot, + bytes32 withdrawRoot ) external OnlyProver whenNotPaused { - if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero(); - - // compute pending batch hash and verify - (, bytes32 _batchHash, uint256 _batchIndex, uint256 _totalL1MessagesPoppedOverall) = _loadBatchHeader( - _batchHeader + // actions before verification + (, bytes32 batchHash, uint256 batchIndex, uint256 totalL1MessagesPoppedOverall, ) = _beforeFinalizeBatch( + batchHeader, + postStateRoot ); - if (_batchIndex <= lastFinalizedBatchIndex) revert ErrorBatchIsAlreadyVerified(); - - // store in state - // @note we do not store intermediate finalized roots - lastFinalizedBatchIndex = _batchIndex; - finalizedStateRoots[_batchIndex] = _postStateRoot; - withdrawRoots[_batchIndex] = _withdrawRoot; - - // Pop finalized and non-skipped message from L1MessageQueue. 
- _finalizePoppedL1Messages(_totalL1MessagesPoppedOverall); - emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot); + // actions after verification + _afterFinalizeBatch(batchIndex, batchHash, totalL1MessagesPoppedOverall, postStateRoot, withdrawRoot); } } diff --git a/src/test/L1GatewayTestBase.t.sol b/src/test/L1GatewayTestBase.t.sol index c588935..9f91b45 100644 --- a/src/test/L1GatewayTestBase.t.sol +++ b/src/test/L1GatewayTestBase.t.sol @@ -161,29 +161,24 @@ abstract contract L1GatewayTestBase is ScrollTestBase { chunk0[0] = bytes1(uint8(1)); // one block in this chunk chunks[0] = chunk0; hevm.startPrank(address(0)); - rollup.commitBatch(1, batchHeader0, chunks, new bytes(0)); + rollup.commitBatchWithBlobProof(4, batchHeader0, chunks, new bytes(0), blobDataProof); hevm.stopPrank(); - bytes memory batchHeader1 = new bytes(121); + bytes memory batchHeader1 = new bytes(193); assembly { - mstore8(add(batchHeader1, 0x20), 1) // version + mstore8(add(batchHeader1, 0x20), 4) // version mstore(add(batchHeader1, add(0x20, 1)), shl(192, 1)) // batchIndex - mstore(add(batchHeader1, add(0x20, 9)), 0) // l1MessagePopped - mstore(add(batchHeader1, add(0x20, 17)), 0) // totalL1MessagePopped + mstore(add(batchHeader1, add(0x20, 9)), shl(192, 0)) // l1MessagePopped + mstore(add(batchHeader1, add(0x20, 17)), shl(192, 0)) // totalL1MessagePopped mstore(add(batchHeader1, add(0x20, 25)), 0x246394445f4fe64ed5598554d55d1682d6fb3fe04bf58eb54ef81d1189fafb51) // dataHash - mstore(add(batchHeader1, add(0x20, 57)), blobVersionedHash) // blobVersionedHash + mstore(add(batchHeader1, add(0x20, 57)), 0x013590dc3544d56629ba81bb14d4d31248f825001653aa575eb8e3a719046757) // blobVersionedHash mstore(add(batchHeader1, add(0x20, 89)), batchHash0) // parentBatchHash + mstore(add(batchHeader1, add(0x20, 121)), 0) // lastBlockTimestamp + mcopy(add(batchHeader1, add(0x20, 129)), add(blobDataProof, 0x20), 64) // blobDataProof } hevm.startPrank(address(0)); - rollup.finalizeBatchWithProof4844( - batchHeader1, - bytes32(uint256(1)), - bytes32(uint256(2)), - messageHash, - blobDataProof, - new bytes(0) - ); + rollup.finalizeBundleWithProof(batchHeader1, bytes32(uint256(2)), messageHash, new bytes(0)); hevm.stopPrank(); } } diff --git a/src/test/ScrollChain.t.sol b/src/test/ScrollChain.t.sol index a5eb861..4ab04df 100644 --- a/src/test/ScrollChain.t.sol +++ b/src/test/ScrollChain.t.sol @@ -33,6 +33,11 @@ contract ScrollChainTest is DSTestPlus { event FinalizeBatch(uint256 indexed batchIndex, bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot); event RevertBatch(uint256 indexed batchIndex, bytes32 indexed batchHash); + // from L1MessageQueue + event DequeueTransaction(uint256 startIndex, uint256 count, uint256 skippedBitmap); + event ResetDequeuedTransaction(uint256 startIndex); + event FinalizedDequeuedTransaction(uint256 finalizedIndex); + ProxyAdmin internal admin; EmptyContract private placeholder; @@ -69,14 +74,13 @@ contract ScrollChainTest is DSTestPlus { rollup.initialize(address(messageQueue), address(0), 100); } - function testCommitBatchV1() external { + function testCommitBatchV3() external { bytes memory batchHeader0 = new bytes(89); // import 10 L1 messages for (uint256 i = 0; i < 10; i++) { messageQueue.appendCrossDomainMessage(address(this), 1000000, new bytes(0)); } - // import genesis batch first assembly { mstore(add(batchHeader0, add(0x20, 25)), 1) @@ -86,43 +90,40 @@ contract ScrollChainTest is DSTestPlus { // caller not sequencer, revert 
hevm.expectRevert(ScrollChain.ErrorCallerIsNotSequencer.selector); - rollup.commitBatch(1, batchHeader0, new bytes[](0), new bytes(0)); - + rollup.commitBatchWithBlobProof(4, batchHeader0, new bytes[](0), new bytes(0), new bytes(0)); rollup.addSequencer(address(0)); - // batch is empty, revert + // revert when ErrorIncorrectBatchVersion hevm.startPrank(address(0)); - hevm.expectRevert(ScrollChain.ErrorBatchIsEmpty.selector); - rollup.commitBatch(1, batchHeader0, new bytes[](0), new bytes(0)); + hevm.expectRevert(ScrollChain.ErrorIncorrectBatchVersion.selector); + rollup.commitBatchWithBlobProof(2, batchHeader0, new bytes[](0), new bytes(0), new bytes(0)); + hevm.expectRevert(ScrollChain.ErrorIncorrectBatchVersion.selector); + rollup.commitBatchWithBlobProof(3, batchHeader0, new bytes[](0), new bytes(0), new bytes(0)); hevm.stopPrank(); - // batch header length too small, revert - bytes memory header = new bytes(120); - assembly { - mstore8(add(header, 0x20), 1) // version - } + // revert when ErrorBatchIsEmpty hevm.startPrank(address(0)); - hevm.expectRevert(BatchHeaderV1Codec.ErrorBatchHeaderV1LengthTooSmall.selector); - rollup.commitBatch(1, header, new bytes[](1), new bytes(0)); + hevm.expectRevert(ScrollChain.ErrorBatchIsEmpty.selector); + rollup.commitBatchWithBlobProof(4, batchHeader0, new bytes[](0), new bytes(0), new bytes(0)); hevm.stopPrank(); - // wrong bitmap length, revert - header = new bytes(122); + // revert when ErrorBatchHeaderV3LengthMismatch + bytes memory header = new bytes(192); assembly { - mstore8(add(header, 0x20), 1) // version + mstore8(add(header, 0x20), 4) // version } hevm.startPrank(address(0)); - hevm.expectRevert(BatchHeaderV1Codec.ErrorIncorrectBitmapLengthV1.selector); - rollup.commitBatch(1, header, new bytes[](1), new bytes(0)); + hevm.expectRevert(BatchHeaderV3Codec.ErrorBatchHeaderV3LengthMismatch.selector); + rollup.commitBatchWithBlobProof(4, header, new bytes[](1), new bytes(0), new bytes(0)); hevm.stopPrank(); - // incorrect parent batch hash, revert + // revert when ErrorIncorrectBatchHash assembly { mstore(add(batchHeader0, add(0x20, 25)), 2) // change data hash for batch0 } hevm.startPrank(address(0)); hevm.expectRevert(ScrollChain.ErrorIncorrectBatchHash.selector); - rollup.commitBatch(1, batchHeader0, new bytes[](1), new bytes(0)); + rollup.commitBatchWithBlobProof(4, batchHeader0, new bytes[](1), new bytes(0), new bytes(0)); hevm.stopPrank(); assembly { mstore(add(batchHeader0, add(0x20, 25)), 1) // change back @@ -136,7 +137,7 @@ contract ScrollChainTest is DSTestPlus { chunks[0] = chunk0; hevm.startPrank(address(0)); hevm.expectRevert(ChunkCodecV1.ErrorNoBlockInChunkV1.selector); - rollup.commitBatch(1, batchHeader0, chunks, new bytes(0)); + rollup.commitBatchWithBlobProof(4, batchHeader0, chunks, new bytes(0), new bytes(0)); hevm.stopPrank(); // invalid chunk length, revert @@ -145,7 +146,7 @@ contract ScrollChainTest is DSTestPlus { chunks[0] = chunk0; hevm.startPrank(address(0)); hevm.expectRevert(ChunkCodecV1.ErrorIncorrectChunkLengthV1.selector); - rollup.commitBatch(1, batchHeader0, chunks, new bytes(0)); + rollup.commitBatchWithBlobProof(4, batchHeader0, chunks, new bytes(0), new bytes(0)); hevm.stopPrank(); // cannot skip last L1 message, revert @@ -158,7 +159,7 @@ contract ScrollChainTest is DSTestPlus { chunks[0] = chunk0; hevm.startPrank(address(0)); hevm.expectRevert(ScrollChain.ErrorLastL1MessageSkipped.selector); - rollup.commitBatch(1, batchHeader0, chunks, bitmap); + rollup.commitBatchWithBlobProof(4, batchHeader0, chunks, 
bitmap, new bytes(0)); hevm.stopPrank(); // num txs less than num L1 msgs, revert @@ -171,7 +172,7 @@ contract ScrollChainTest is DSTestPlus { chunks[0] = chunk0; hevm.startPrank(address(0)); hevm.expectRevert(ScrollChain.ErrorNumTxsLessThanNumL1Msgs.selector); - rollup.commitBatch(1, batchHeader0, chunks, bitmap); + rollup.commitBatchWithBlobProof(4, batchHeader0, chunks, bitmap, new bytes(0)); hevm.stopPrank(); // revert when ErrorNoBlobFound @@ -180,7 +181,7 @@ contract ScrollChainTest is DSTestPlus { chunks[0] = chunk0; hevm.startPrank(address(0)); hevm.expectRevert(ScrollChain.ErrorNoBlobFound.selector); - rollup.commitBatch(1, batchHeader0, chunks, new bytes(0)); + rollup.commitBatchWithBlobProof(4, batchHeader0, chunks, new bytes(0), new bytes(0)); hevm.stopPrank(); // @note we cannot check `ErrorFoundMultipleBlobs` here @@ -192,65 +193,65 @@ contract ScrollChainTest is DSTestPlus { rollup.verifier() ); admin.upgrade(ITransparentUpgradeableProxy(address(rollup)), address(impl)); - // this is keccak(""); - ScrollChainMockBlob(address(rollup)).setBlobVersionedHash( - 0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470 - ); + // from https://etherscan.io/blob/0x013590dc3544d56629ba81bb14d4d31248f825001653aa575eb8e3a719046757?bid=740652 + bytes32 blobVersionedHash = 0x013590dc3544d56629ba81bb14d4d31248f825001653aa575eb8e3a719046757; + bytes + memory blobDataProof = hex"2c9d777660f14ad49803a6442935c0d24a0d83551de5995890bf70a17d24e68753ab0fe6807c7081f0885fe7da741554d658a03730b1fa006f8319f8b993bcb0a5a0c9e8a145c5ef6e415c245690effa2914ec9393f58a7251d30c0657da1453d9ad906eae8b97dd60c9a216f81b4df7af34d01e214e1ec5865f0133ecc16d7459e49dab66087340677751e82097fbdd20551d66076f425775d1758a9dfd186b"; + ScrollChainMockBlob(address(rollup)).setBlobVersionedHash(blobVersionedHash); + + chunk0 = new bytes(1 + 60); + chunk0[0] = bytes1(uint8(1)); // one block in this chunk + chunks[0] = chunk0; + // revert when ErrorCallPointEvaluationPrecompileFailed + hevm.startPrank(address(0)); + hevm.expectRevert(ScrollChain.ErrorCallPointEvaluationPrecompileFailed.selector); + rollup.commitBatchWithBlobProof(4, batchHeader0, chunks, new bytes(0), new bytes(0)); + hevm.stopPrank(); bytes32 batchHash0 = rollup.committedBatches(0); - bytes memory batchHeader1 = new bytes(121); + bytes memory batchHeader1 = new bytes(193); assembly { - mstore8(add(batchHeader1, 0x20), 1) // version + mstore8(add(batchHeader1, 0x20), 4) // version mstore(add(batchHeader1, add(0x20, 1)), shl(192, 1)) // batchIndex mstore(add(batchHeader1, add(0x20, 9)), 0) // l1MessagePopped mstore(add(batchHeader1, add(0x20, 17)), 0) // totalL1MessagePopped mstore(add(batchHeader1, add(0x20, 25)), 0x246394445f4fe64ed5598554d55d1682d6fb3fe04bf58eb54ef81d1189fafb51) // dataHash - mstore(add(batchHeader1, add(0x20, 57)), 0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470) // blobVersionedHash + mstore(add(batchHeader1, add(0x20, 57)), blobVersionedHash) // blobVersionedHash mstore(add(batchHeader1, add(0x20, 89)), batchHash0) // parentBatchHash + mstore(add(batchHeader1, add(0x20, 121)), 0) // lastBlockTimestamp + mcopy(add(batchHeader1, add(0x20, 129)), add(blobDataProof, 0x20), 64) // blobDataProof } + // hash is ed32768c5f910a11edaf1c1ec0c0da847def9d24e0a24567c3c3d284061cf935 - // commit batch with one chunk, no tx, correctly - chunk0 = new bytes(1 + 60); - chunk0[0] = bytes1(uint8(1)); // one block in this chunk - chunks[0] = chunk0; + // succeed hevm.startPrank(address(0)); assertEq(rollup.committedBatches(1), bytes32(0)); 
- rollup.commitBatch(1, batchHeader0, chunks, new bytes(0)); + rollup.commitBatchWithBlobProof(4, batchHeader0, chunks, new bytes(0), blobDataProof); hevm.stopPrank(); assertEq(rollup.committedBatches(1), keccak256(batchHeader1)); - // batch is already committed, revert + // revert when ErrorBatchIsAlreadyCommitted hevm.startPrank(address(0)); hevm.expectRevert(ScrollChain.ErrorBatchIsAlreadyCommitted.selector); - rollup.commitBatch(1, batchHeader0, chunks, new bytes(0)); - hevm.stopPrank(); - - // revert when ErrorIncorrectBatchVersion - hevm.startPrank(address(0)); - hevm.expectRevert(ScrollChain.ErrorIncorrectBatchVersion.selector); - rollup.commitBatch(3, batchHeader1, chunks, new bytes(0)); + rollup.commitBatchWithBlobProof(4, batchHeader0, chunks, new bytes(0), blobDataProof); hevm.stopPrank(); } - function testFinalizeBatchWithProof4844() external { + function testFinalizeBundleWithProof() external { // caller not prover, revert hevm.expectRevert(ScrollChain.ErrorCallerIsNotProver.selector); - rollup.finalizeBatchWithProof4844(new bytes(0), bytes32(0), bytes32(0), bytes32(0), new bytes(0), new bytes(0)); + rollup.finalizeBundleWithProof(new bytes(0), bytes32(0), bytes32(0), new bytes(0)); rollup.addProver(address(0)); rollup.addSequencer(address(0)); - bytes memory batchHeader0 = new bytes(89); - // import genesis batch + bytes memory batchHeader0 = new bytes(89); assembly { mstore(add(batchHeader0, add(0x20, 25)), 1) } rollup.importGenesisBatch(batchHeader0, bytes32(uint256(1))); - bytes[] memory chunks = new bytes[](1); - bytes memory chunk0; - // upgrade to ScrollChainMockBlob ScrollChainMockBlob impl = new ScrollChainMockBlob( rollup.layer2ChainId(), @@ -264,119 +265,85 @@ contract ScrollChainTest is DSTestPlus { memory blobDataProof = hex"2c9d777660f14ad49803a6442935c0d24a0d83551de5995890bf70a17d24e68753ab0fe6807c7081f0885fe7da741554d658a03730b1fa006f8319f8b993bcb0a5a0c9e8a145c5ef6e415c245690effa2914ec9393f58a7251d30c0657da1453d9ad906eae8b97dd60c9a216f81b4df7af34d01e214e1ec5865f0133ecc16d7459e49dab66087340677751e82097fbdd20551d66076f425775d1758a9dfd186b"; ScrollChainMockBlob(address(rollup)).setBlobVersionedHash(blobVersionedHash); + bytes[] memory chunks = new bytes[](1); + bytes memory chunk0; + bytes32 batchHash0 = rollup.committedBatches(0); - bytes memory batchHeader1 = new bytes(121); + bytes memory batchHeader1 = new bytes(193); assembly { - mstore8(add(batchHeader1, 0x20), 1) // version + mstore8(add(batchHeader1, 0x20), 4) // version mstore(add(batchHeader1, add(0x20, 1)), shl(192, 1)) // batchIndex mstore(add(batchHeader1, add(0x20, 9)), 0) // l1MessagePopped mstore(add(batchHeader1, add(0x20, 17)), 0) // totalL1MessagePopped mstore(add(batchHeader1, add(0x20, 25)), 0x246394445f4fe64ed5598554d55d1682d6fb3fe04bf58eb54ef81d1189fafb51) // dataHash mstore(add(batchHeader1, add(0x20, 57)), blobVersionedHash) // blobVersionedHash mstore(add(batchHeader1, add(0x20, 89)), batchHash0) // parentBatchHash + mstore(add(batchHeader1, add(0x20, 121)), 0) // lastBlockTimestamp + mcopy(add(batchHeader1, add(0x20, 129)), add(blobDataProof, 0x20), 64) // blobDataProof } - // batch hash is 0xf7d9af8c2c8e1a84f1fa4b6af9425f85c50a61b24cdd28101a5f6d781906a5b9 + // hash is ed32768c5f910a11edaf1c1ec0c0da847def9d24e0a24567c3c3d284061cf935 // commit one batch chunk0 = new bytes(1 + 60); chunk0[0] = bytes1(uint8(1)); // one block in this chunk chunks[0] = chunk0; hevm.startPrank(address(0)); - rollup.commitBatch(1, batchHeader0, chunks, new bytes(0)); + assertEq(rollup.committedBatches(1), 
bytes32(0)); + rollup.commitBatchWithBlobProof(4, batchHeader0, chunks, new bytes(0), blobDataProof); hevm.stopPrank(); assertEq(rollup.committedBatches(1), keccak256(batchHeader1)); - // incorrect batch hash, revert - batchHeader1[1] = bytes1(uint8(1)); // change random byte + // revert when ErrorStateRootIsZero hevm.startPrank(address(0)); - hevm.expectRevert(ScrollChain.ErrorIncorrectBatchHash.selector); - rollup.finalizeBatchWithProof4844( - batchHeader1, - bytes32(uint256(1)), - bytes32(uint256(2)), - bytes32(0), - new bytes(0), - new bytes(0) - ); + hevm.expectRevert(ScrollChain.ErrorStateRootIsZero.selector); + rollup.finalizeBundleWithProof(batchHeader1, bytes32(0), bytes32(0), new bytes(0)); hevm.stopPrank(); - batchHeader1[1] = bytes1(uint8(0)); // change back - // batch header length too small, revert - bytes memory header = new bytes(120); + // revert when ErrorBatchHeaderV3LengthMismatch + bytes memory header = new bytes(192); assembly { - mstore8(add(header, 0x20), 1) // version + mstore8(add(header, 0x20), 4) // version } hevm.startPrank(address(0)); - hevm.expectRevert(BatchHeaderV1Codec.ErrorBatchHeaderV1LengthTooSmall.selector); - rollup.finalizeBatchWithProof4844( - header, - bytes32(uint256(1)), - bytes32(uint256(2)), - bytes32(0), - new bytes(0), - new bytes(0) - ); + hevm.expectRevert(BatchHeaderV3Codec.ErrorBatchHeaderV3LengthMismatch.selector); + rollup.finalizeBundleWithProof(header, bytes32(uint256(1)), bytes32(uint256(2)), new bytes(0)); hevm.stopPrank(); - // wrong bitmap length, revert - header = new bytes(122); - assembly { - mstore8(add(header, 0x20), 1) // version - } + // revert when ErrorIncorrectBatchHash + batchHeader1[1] = bytes1(uint8(1)); // change random byte hevm.startPrank(address(0)); - hevm.expectRevert(BatchHeaderV1Codec.ErrorIncorrectBitmapLengthV1.selector); - rollup.finalizeBatchWithProof4844( - header, - bytes32(uint256(1)), - bytes32(uint256(2)), - bytes32(0), - new bytes(0), - new bytes(0) - ); + hevm.expectRevert(ScrollChain.ErrorIncorrectBatchHash.selector); + rollup.finalizeBundleWithProof(batchHeader1, bytes32(uint256(1)), bytes32(uint256(2)), new bytes(0)); hevm.stopPrank(); + batchHeader1[1] = bytes1(uint8(0)); // change back // verify success assertBoolEq(rollup.isBatchFinalized(1), false); hevm.startPrank(address(0)); - rollup.finalizeBatchWithProof4844( - batchHeader1, - bytes32(uint256(1)), - bytes32(uint256(2)), - bytes32(uint256(3)), - blobDataProof, - new bytes(0) - ); + rollup.finalizeBundleWithProof(batchHeader1, bytes32(uint256(2)), bytes32(uint256(3)), new bytes(0)); hevm.stopPrank(); assertBoolEq(rollup.isBatchFinalized(1), true); assertEq(rollup.finalizedStateRoots(1), bytes32(uint256(2))); assertEq(rollup.withdrawRoots(1), bytes32(uint256(3))); assertEq(rollup.lastFinalizedBatchIndex(), 1); - // batch already verified, revert + // revert when ErrorBatchIsAlreadyVerified hevm.startPrank(address(0)); hevm.expectRevert(ScrollChain.ErrorBatchIsAlreadyVerified.selector); - rollup.finalizeBatchWithProof4844( - batchHeader1, - bytes32(uint256(1)), - bytes32(uint256(2)), - bytes32(uint256(3)), - blobDataProof, - new bytes(0) - ); + rollup.finalizeBundleWithProof(batchHeader1, bytes32(uint256(2)), bytes32(uint256(3)), new bytes(0)); hevm.stopPrank(); } - function testCommitAndFinalizeWithL1MessagesV1() external { - rollup.addSequencer(address(0)); - rollup.addProver(address(0)); - - // import 300 L1 messages - for (uint256 i = 0; i < 300; i++) { - messageQueue.appendCrossDomainMessage(address(this), 1000000, new bytes(0)); - } - 
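The 160-byte `blobDataProof` constant reused throughout these tests packs the evaluation point, claimed value, KZG commitment and KZG proof (32 + 32 + 48 + 48 bytes); only its first 64 bytes end up in the header. The sketch below shows, as an assumption about how such a proof would be checked, the 192-byte input expected by the EIP-4844 point evaluation precompile at address 0x0a; `verifyBlobProofSketch` is a hypothetical helper, not a function from this PR.

pragma solidity ^0.8.16;

/// @dev Illustrative sketch: assembling the point-evaluation precompile input
///      from a blob versioned hash and a 160-byte blobDataProof.
function verifyBlobProofSketch(bytes32 blobVersionedHash, bytes calldata blobDataProof)
    view
    returns (bool ok)
{
    // input = versioned_hash (32) | z (32) | y (32) | kzg_commitment (48) | kzg_proof (48)
    bytes memory input = abi.encodePacked(blobVersionedHash, blobDataProof[0:160]);
    (ok, ) = address(0x0a).staticcall(input);
    // an empty or malformed proof makes this call fail, which is what the
    // ErrorCallPointEvaluationPrecompileFailed cases above rely on
}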
+ function _commitBatchV3() + internal + returns ( + bytes memory batchHeader0, + bytes memory batchHeader1, + bytes memory batchHeader2 + ) + { // import genesis batch first - bytes memory batchHeader0 = new bytes(89); + batchHeader0 = new bytes(89); assembly { mstore(add(batchHeader0, add(0x20, 25)), 1) } @@ -404,40 +371,44 @@ contract ScrollChainTest is DSTestPlus { // commit batch1, one chunk with one block, 1 tx, 1 L1 message, no skip // => payload for data hash of chunk0 // 0000000000000000 - // 0000000000000000 + // 0000000000000123 // 0000000000000000000000000000000000000000000000000000000000000000 // 0000000000000000 // 0001 // a2277fd30bbbe74323309023b56035b376d7768ad237ae4fc46ead7dc9591ae1 // => data hash for chunk0 - // 9ef1e5694bdb014a1eea42be756a8f63bfd8781d6332e9ef3b5126d90c62f110 + // 5972b8fa626c873a97abb6db14fb0cb2085e050a6f80ec90b92bb0bbaa12eb5a // => data hash for all chunks - // d9cb6bf9264006fcea490d5c261f7453ab95b1b26033a3805996791b8e3a62f3 + // f6166fe668c1e6a04e3c75e864452bb02a31358f285efcb7a4e6603eb5750359 // => payload for batch header - // 01 + // 03 // 0000000000000001 // 0000000000000001 // 0000000000000001 - // d9cb6bf9264006fcea490d5c261f7453ab95b1b26033a3805996791b8e3a62f3 + // f6166fe668c1e6a04e3c75e864452bb02a31358f285efcb7a4e6603eb5750359 // 013590dc3544d56629ba81bb14d4d31248f825001653aa575eb8e3a719046757 // 119b828c2a2798d2c957228ebeaff7e10bb099ae0d4e224f3eeb779ff61cba61 - // 0000000000000000000000000000000000000000000000000000000000000000 + // 0000000000000123 + // 2c9d777660f14ad49803a6442935c0d24a0d83551de5995890bf70a17d24e687 + // 53ab0fe6807c7081f0885fe7da741554d658a03730b1fa006f8319f8b993bcb0 // => hash for batch header - // 66b68a5092940d88a8c6f203d2071303557c024275d8ceaa2e12662bc61c8d8f - bytes memory batchHeader1 = new bytes(121 + 32); + // 07e1bede8c5047cf8ca7ac84f5390837fb6224953af83d7e967488fa63a2065e + batchHeader1 = new bytes(193); assembly { - mstore8(add(batchHeader1, 0x20), 1) // version + mstore8(add(batchHeader1, 0x20), 4) // version mstore(add(batchHeader1, add(0x20, 1)), shl(192, 1)) // batchIndex = 1 mstore(add(batchHeader1, add(0x20, 9)), shl(192, 1)) // l1MessagePopped = 1 mstore(add(batchHeader1, add(0x20, 17)), shl(192, 1)) // totalL1MessagePopped = 1 - mstore(add(batchHeader1, add(0x20, 25)), 0xd9cb6bf9264006fcea490d5c261f7453ab95b1b26033a3805996791b8e3a62f3) // dataHash + mstore(add(batchHeader1, add(0x20, 25)), 0xf6166fe668c1e6a04e3c75e864452bb02a31358f285efcb7a4e6603eb5750359) // dataHash mstore(add(batchHeader1, add(0x20, 57)), blobVersionedHash) // blobVersionedHash mstore(add(batchHeader1, add(0x20, 89)), batchHash0) // parentBatchHash - mstore(add(batchHeader1, add(0x20, 121)), 0) // bitmap0 + mstore(add(batchHeader1, add(0x20, 121)), shl(192, 0x123)) // lastBlockTimestamp + mcopy(add(batchHeader1, add(0x20, 129)), add(blobDataProof, 0x20), 64) // blobDataProof } chunk0 = new bytes(1 + 60); assembly { mstore(add(chunk0, 0x20), shl(248, 1)) // numBlocks = 1 + mstore(add(chunk0, add(0x21, 8)), shl(192, 0x123)) // timestamp = 0x123 mstore(add(chunk0, add(0x21, 56)), shl(240, 1)) // numTransactions = 1 mstore(add(chunk0, add(0x21, 58)), shl(240, 1)) // numL1Messages = 1 } @@ -447,106 +418,92 @@ contract ScrollChainTest is DSTestPlus { hevm.startPrank(address(0)); hevm.expectEmit(true, true, false, true); emit CommitBatch(1, keccak256(batchHeader1)); - rollup.commitBatch(1, batchHeader0, chunks, bitmap); + rollup.commitBatchWithBlobProof(4, batchHeader0, chunks, bitmap, blobDataProof); hevm.stopPrank(); 
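The chunks committed here follow the ChunkCodecV1 layout: one `numBlocks` byte followed by a 60-byte context per block, with the timestamp at offset 8, numTransactions at offset 56 and numL1Messages at offset 58 of each context, as written by the assembly above. A minimal sketch of that encoding follows; the names of the fields the tests leave zeroed (blockNumber, baseFee, gasLimit) are assumptions for readability, and the helper itself is not part of this PR.

pragma solidity ^0.8.16;

/// @dev Illustrative sketch of a one-block ChunkCodecV1 chunk, equivalent to the
///      `new bytes(1 + 60)` chunks built with mstore above.
function encodeOneBlockChunkSketch(
    uint64 timestamp,
    uint16 numTransactions,
    uint16 numL1Messages
) pure returns (bytes memory chunk) {
    chunk = abi.encodePacked(
        uint8(1),        // numBlocks = 1
        uint64(0),       // block context offset 0:  blockNumber (assumed name), 8 bytes
        timestamp,       // block context offset 8:  timestamp, 8 bytes
        uint256(0),      // block context offset 16: baseFee (assumed name), 32 bytes
        uint64(0),       // block context offset 48: gasLimit (assumed name), 8 bytes
        numTransactions, // block context offset 56: numTransactions, 2 bytes
        numL1Messages    // block context offset 58: numL1Messages, 2 bytes
    ); // total 1 + 60 = 61 bytes
}

// e.g. encodeOneBlockChunkSketch(0x123, 1, 1) reproduces the chunk0 committed above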
assertBoolEq(rollup.isBatchFinalized(1), false); bytes32 batchHash1 = rollup.committedBatches(1); assertEq(batchHash1, keccak256(batchHeader1)); - - // finalize batch1 - hevm.startPrank(address(0)); - hevm.expectEmit(true, true, false, true); - emit FinalizeBatch(1, batchHash1, bytes32(uint256(2)), bytes32(uint256(3))); - rollup.finalizeBatchWithProof4844( - batchHeader1, - bytes32(uint256(1)), - bytes32(uint256(2)), - bytes32(uint256(3)), - blobDataProof, - new bytes(0) - ); - hevm.stopPrank(); - assertBoolEq(rollup.isBatchFinalized(1), true); - assertEq(rollup.finalizedStateRoots(1), bytes32(uint256(2))); - assertEq(rollup.withdrawRoots(1), bytes32(uint256(3))); - assertEq(rollup.lastFinalizedBatchIndex(), 1); + assertEq(1, messageQueue.pendingQueueIndex()); + assertEq(0, messageQueue.nextUnfinalizedQueueIndex()); assertBoolEq(messageQueue.isMessageSkipped(0), false); - assertEq(messageQueue.pendingQueueIndex(), 1); // commit batch2 with two chunks, correctly // 1. chunk0 has one block, 3 tx, no L1 messages // => payload for chunk0 // 0000000000000000 - // 0000000000000000 + // 0000000000000456 // 0000000000000000000000000000000000000000000000000000000000000000 // 0000000000000000 // 0003 // ... (some tx hashes) // => data hash for chunk0 - // c4e0d99a191bfcb1ba2edd2964a0f0a56c929b1ecdf149ba3ae4f045d6e6ef8b + // 1c7649f248aed8448fa7997e44db7b7028581deb119c6d6aa1a2d126d62564cf // 2. chunk1 has three blocks // 2.1 block0 has 5 tx, 3 L1 messages, no skips // 2.2 block1 has 10 tx, 5 L1 messages, even is skipped, last is not skipped // 2.2 block1 has 300 tx, 256 L1 messages, odd position is skipped, last is not skipped // => payload for chunk1 // 0000000000000000 - // 0000000000000000 + // 0000000000000789 // 0000000000000000000000000000000000000000000000000000000000000000 // 0000000000000000 // 0005 // 0000000000000000 - // 0000000000000000 + // 0000000000001234 // 0000000000000000000000000000000000000000000000000000000000000000 // 0000000000000000 // 000a // 0000000000000000 - // 0000000000000000 + // 0000000000005678 // 0000000000000000000000000000000000000000000000000000000000000000 // 0000000000000000 // 012c - // => data hash for chunk2 - // a84759a83bba5f73e3a748d138ae7b6c5a31a8a5273aeb0e578807bf1ef6ed4e + // => data hash for chunk1 + // 4e82cb576135a69a0ecc2b2070c432abfdeb20076594faaa1aeed77f48d7c856 // => data hash for all chunks - // dae89323bf398ca9f6f8e83b1b0d603334be063fa3920015b6aa9df77a0ccbcd + // 166e9d20206ae8cddcdf0f30093e3acc3866937172df5d7f69fb5567d9595239 // => payload for batch header - // 01 + // 03 // 0000000000000002 // 0000000000000108 // 0000000000000109 - // dae89323bf398ca9f6f8e83b1b0d603334be063fa3920015b6aa9df77a0ccbcd + // 166e9d20206ae8cddcdf0f30093e3acc3866937172df5d7f69fb5567d9595239 // 013590dc3544d56629ba81bb14d4d31248f825001653aa575eb8e3a719046757 - // 66b68a5092940d88a8c6f203d2071303557c024275d8ceaa2e12662bc61c8d8f - // aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa28000000000000000000000000000000000000000000000000000000000000002a + // 07e1bede8c5047cf8ca7ac84f5390837fb6224953af83d7e967488fa63a2065e + // 0000000000005678 + // 2c9d777660f14ad49803a6442935c0d24a0d83551de5995890bf70a17d24e687 + // 53ab0fe6807c7081f0885fe7da741554d658a03730b1fa006f8319f8b993bcb0 // => hash for batch header - // b9dff5d21381176a73b20a9294eb2703c803113f9559e358708c659fa1cf62eb - bytes memory batchHeader2 = new bytes(121 + 32 + 32); + // 8a59f0de6f1071c0f48d6a49d9b794008d28b63cc586da0f44f8b2b4e13cb231 + batchHeader2 = new bytes(193); assembly { - 
mstore8(add(batchHeader2, 0x20), 1) // version + mstore8(add(batchHeader2, 0x20), 4) // version mstore(add(batchHeader2, add(0x20, 1)), shl(192, 2)) // batchIndex = 2 mstore(add(batchHeader2, add(0x20, 9)), shl(192, 264)) // l1MessagePopped = 264 mstore(add(batchHeader2, add(0x20, 17)), shl(192, 265)) // totalL1MessagePopped = 265 - mstore(add(batchHeader2, add(0x20, 25)), 0xdae89323bf398ca9f6f8e83b1b0d603334be063fa3920015b6aa9df77a0ccbcd) // dataHash + mstore(add(batchHeader2, add(0x20, 25)), 0x166e9d20206ae8cddcdf0f30093e3acc3866937172df5d7f69fb5567d9595239) // dataHash mstore(add(batchHeader2, add(0x20, 57)), blobVersionedHash) // blobVersionedHash mstore(add(batchHeader2, add(0x20, 89)), batchHash1) // parentBatchHash - mstore( - add(batchHeader2, add(0x20, 121)), - 77194726158210796949047323339125271902179989777093709359638389338608753093160 - ) // bitmap0 - mstore(add(batchHeader2, add(0x20, 153)), 42) // bitmap1 + mstore(add(batchHeader2, add(0x20, 121)), shl(192, 0x5678)) // lastBlockTimestamp + mcopy(add(batchHeader2, add(0x20, 129)), add(blobDataProof, 0x20), 64) // blobDataProof } chunk0 = new bytes(1 + 60); assembly { mstore(add(chunk0, 0x20), shl(248, 1)) // numBlocks = 1 + mstore(add(chunk0, add(0x21, 8)), shl(192, 0x456)) // timestamp = 0x456 mstore(add(chunk0, add(0x21, 56)), shl(240, 3)) // numTransactions = 3 mstore(add(chunk0, add(0x21, 58)), shl(240, 0)) // numL1Messages = 0 } chunk1 = new bytes(1 + 60 * 3); assembly { mstore(add(chunk1, 0x20), shl(248, 3)) // numBlocks = 3 + mstore(add(chunk1, add(33, 8)), shl(192, 0x789)) // block0.timestamp = 0x789 mstore(add(chunk1, add(33, 56)), shl(240, 5)) // block0.numTransactions = 5 mstore(add(chunk1, add(33, 58)), shl(240, 3)) // block0.numL1Messages = 3 + mstore(add(chunk1, add(93, 8)), shl(192, 0x1234)) // block1.timestamp = 0x1234 mstore(add(chunk1, add(93, 56)), shl(240, 10)) // block1.numTransactions = 10 mstore(add(chunk1, add(93, 58)), shl(240, 5)) // block1.numL1Messages = 5 + mstore(add(chunk1, add(153, 8)), shl(192, 0x5678)) // block1.timestamp = 0x5678 mstore(add(chunk1, add(153, 56)), shl(240, 300)) // block1.numTransactions = 300 mstore(add(chunk1, add(153, 58)), shl(240, 256)) // block1.numL1Messages = 256 } @@ -566,42 +523,38 @@ contract ScrollChainTest is DSTestPlus { rollup.updateMaxNumTxInChunk(2); // 3 - 1 hevm.startPrank(address(0)); hevm.expectRevert(ScrollChain.ErrorTooManyTxsInOneChunk.selector); - rollup.commitBatch(1, batchHeader1, chunks, bitmap); // first chunk with too many txs + rollup.commitBatchWithBlobProof(4, batchHeader1, chunks, bitmap, blobDataProof); // first chunk with too many txs hevm.stopPrank(); rollup.updateMaxNumTxInChunk(185); // 5+10+300 - 2 - 127 hevm.startPrank(address(0)); hevm.expectRevert(ScrollChain.ErrorTooManyTxsInOneChunk.selector); - rollup.commitBatch(1, batchHeader1, chunks, bitmap); // second chunk with too many txs + rollup.commitBatchWithBlobProof(4, batchHeader1, chunks, bitmap, blobDataProof); // second chunk with too many txs hevm.stopPrank(); rollup.updateMaxNumTxInChunk(186); hevm.startPrank(address(0)); hevm.expectEmit(true, true, false, true); emit CommitBatch(2, keccak256(batchHeader2)); - rollup.commitBatch(1, batchHeader1, chunks, bitmap); + rollup.commitBatchWithBlobProof(4, batchHeader1, chunks, bitmap, blobDataProof); hevm.stopPrank(); assertBoolEq(rollup.isBatchFinalized(2), false); bytes32 batchHash2 = rollup.committedBatches(2); assertEq(batchHash2, keccak256(batchHeader2)); + assertEq(265, messageQueue.pendingQueueIndex()); + assertEq(0, 
messageQueue.nextUnfinalizedQueueIndex()); + } + + function testCommitAndFinalizeWithL1MessagesV3() external { + rollup.addSequencer(address(0)); + rollup.addProver(address(0)); + + // import 300 L1 messages + for (uint256 i = 0; i < 300; i++) { + messageQueue.appendCrossDomainMessage(address(this), 1000000, new bytes(0)); + } + + (, , bytes memory batchHeader2) = _commitBatchV3(); - // verify committed batch correctly - hevm.startPrank(address(0)); - hevm.expectEmit(true, true, false, true); - emit FinalizeBatch(2, batchHash2, bytes32(uint256(4)), bytes32(uint256(5))); - rollup.finalizeBatchWithProof4844( - batchHeader2, - bytes32(uint256(2)), - bytes32(uint256(4)), - bytes32(uint256(5)), - blobDataProof, - new bytes(0) - ); - hevm.stopPrank(); - assertBoolEq(rollup.isBatchFinalized(2), true); - assertEq(rollup.finalizedStateRoots(2), bytes32(uint256(4))); - assertEq(rollup.withdrawRoots(2), bytes32(uint256(5))); - assertEq(rollup.lastFinalizedBatchIndex(), 2); - assertEq(messageQueue.pendingQueueIndex(), 265); // 1 ~ 4, zero for (uint256 i = 1; i < 4; i++) { assertBoolEq(messageQueue.isMessageSkipped(i), false); @@ -622,901 +575,299 @@ contract ScrollChainTest is DSTestPlus { assertBoolEq(messageQueue.isMessageSkipped(i), true); } } - } - - function testCommitBatchV3() external { - bytes memory batchHeader0 = new bytes(89); - // import 10 L1 messages - for (uint256 i = 0; i < 10; i++) { - messageQueue.appendCrossDomainMessage(address(this), 1000000, new bytes(0)); - } - // import genesis batch first - assembly { - mstore(add(batchHeader0, add(0x20, 25)), 1) - } - rollup.importGenesisBatch(batchHeader0, bytes32(uint256(1))); - assertEq(rollup.committedBatches(0), keccak256(batchHeader0)); + // finalize batch1 and batch2 together + assertBoolEq(rollup.isBatchFinalized(1), false); + assertBoolEq(rollup.isBatchFinalized(2), false); + hevm.startPrank(address(0)); + rollup.finalizeBundleWithProof(batchHeader2, bytes32(uint256(2)), bytes32(uint256(3)), new bytes(0)); + hevm.stopPrank(); + assertBoolEq(rollup.isBatchFinalized(1), true); + assertBoolEq(rollup.isBatchFinalized(2), true); + assertEq(rollup.finalizedStateRoots(1), bytes32(0)); + assertEq(rollup.withdrawRoots(1), bytes32(0)); + assertEq(rollup.finalizedStateRoots(2), bytes32(uint256(2))); + assertEq(rollup.withdrawRoots(2), bytes32(uint256(3))); + assertEq(rollup.lastFinalizedBatchIndex(), 2); + assertEq(265, messageQueue.nextUnfinalizedQueueIndex()); + } - // caller not sequencer, revert - hevm.expectRevert(ScrollChain.ErrorCallerIsNotSequencer.selector); - rollup.commitBatchWithBlobProof(3, batchHeader0, new bytes[](0), new bytes(0), new bytes(0)); - rollup.addSequencer(address(0)); + function testCommitBatchV5() external { + bytes[] memory headers = _prepareFinalizeBundle(); - // revert when ErrorIncorrectBatchVersion + // revert when ErrorV5BatchNotContainsOnlyOneChunk hevm.startPrank(address(0)); - hevm.expectRevert(ScrollChain.ErrorIncorrectBatchVersion.selector); - rollup.commitBatchWithBlobProof(2, batchHeader0, new bytes[](0), new bytes(0), new bytes(0)); + hevm.expectRevert(ScrollChain.ErrorV5BatchNotContainsOnlyOneChunk.selector); // 0 chunk + rollup.commitBatchWithBlobProof(5, headers[10], new bytes[](0), new bytes(0), new bytes(0)); + hevm.expectRevert(ScrollChain.ErrorV5BatchNotContainsOnlyOneChunk.selector); // 2 chunks + rollup.commitBatchWithBlobProof(5, headers[10], new bytes[](2), new bytes(0), new bytes(0)); hevm.stopPrank(); - // revert when ErrorBatchIsEmpty + bytes[] memory chunks = new bytes[](1); + // revert 
when ErrorV5BatchNotContainsOnlyOneBlock hevm.startPrank(address(0)); - hevm.expectRevert(ScrollChain.ErrorBatchIsEmpty.selector); - rollup.commitBatchWithBlobProof(3, batchHeader0, new bytes[](0), new bytes(0), new bytes(0)); + chunks[0] = new bytes(1); + hevm.expectRevert(ChunkCodecV1.ErrorNoBlockInChunkV1.selector); // 1 chunk, 0 block + rollup.commitBatchWithBlobProof(5, headers[10], chunks, new bytes(0), new bytes(0)); + for (uint256 i = 2; i < 256; ++i) { + chunks[0] = new bytes(1 + 60 * i); + chunks[0][0] = bytes1(uint8(i)); + hevm.expectRevert(ScrollChain.ErrorV5BatchNotContainsOnlyOneBlock.selector); // 1 chunk, i block + rollup.commitBatchWithBlobProof(5, headers[10], chunks, new bytes(0), new bytes(0)); + } hevm.stopPrank(); - // revert when ErrorBatchHeaderV3LengthMismatch - bytes memory header = new bytes(192); - assembly { - mstore8(add(header, 0x20), 3) // version - } + // revert when ErrorV5BatchContainsTransactions hevm.startPrank(address(0)); - hevm.expectRevert(BatchHeaderV3Codec.ErrorBatchHeaderV3LengthMismatch.selector); - rollup.commitBatchWithBlobProof(3, header, new bytes[](1), new bytes(0), new bytes(0)); + for (uint256 x = 0; x < 5; ++x) { + for (uint256 y = 0; y < 5; ++y) { + if (x + y == 0) continue; + bytes memory chunk = new bytes(1 + 60); + chunk[0] = bytes1(uint8(1)); + uint256 blockPtr; + assembly { + blockPtr := add(chunk, 0x21) + mstore(add(blockPtr, 56), shl(240, add(x, y))) + mstore(add(blockPtr, 58), shl(240, y)) + } + assertEq(x + y, ChunkCodecV1.getNumTransactions(blockPtr)); + assertEq(y, ChunkCodecV1.getNumL1Messages(blockPtr)); + chunks[0] = chunk; + hevm.expectRevert(ScrollChain.ErrorV5BatchContainsTransactions.selector); // 1 chunk, 1 nonempty block + rollup.commitBatchWithBlobProof(5, headers[10], chunks, new bytes(0), new bytes(0)); + } + } hevm.stopPrank(); - // revert when ErrorIncorrectBatchHash - assembly { - mstore(add(batchHeader0, add(0x20, 25)), 2) // change data hash for batch0 - } + assertEq(rollup.initialEuclidBatchIndex(), 0); + bytes memory v5Header = _commitBatch(5, headers[10], 0, 0); + assertEq(rollup.initialEuclidBatchIndex(), 11); + + // revert when commit again hevm.startPrank(address(0)); - hevm.expectRevert(ScrollChain.ErrorIncorrectBatchHash.selector); - rollup.commitBatchWithBlobProof(3, batchHeader0, new bytes[](1), new bytes(0), new bytes(0)); + hevm.expectRevert(ScrollChain.ErrorBatchIsAlreadyCommitted.selector); + rollup.commitBatchWithBlobProof(5, v5Header, new bytes[](0), new bytes(0), new bytes(0)); hevm.stopPrank(); - assembly { - mstore(add(batchHeader0, add(0x20, 25)), 1) // change back - } + } - bytes[] memory chunks = new bytes[](1); - bytes memory chunk0; + function testFinalizeEuclidInitialBatch() external { + bytes[] memory headers = _prepareFinalizeBundle(); - // no block in chunk, revert - chunk0 = new bytes(1); - chunks[0] = chunk0; - hevm.startPrank(address(0)); - hevm.expectRevert(ChunkCodecV1.ErrorNoBlockInChunkV1.selector); - rollup.commitBatchWithBlobProof(3, batchHeader0, chunks, new bytes(0), new bytes(0)); + // commit v5 batch + assertEq(rollup.initialEuclidBatchIndex(), 0); + bytes memory v5Header = _commitBatch(5, headers[10], 0, 0); + assertEq(rollup.initialEuclidBatchIndex(), 11); + + // commit 3 v6 batches + bytes memory v6Header1 = _commitBatch(6, v5Header, 1, 1); + bytes memory v6Header2 = _commitBatch(6, v6Header1, 2, 1); + bytes memory v6Header3 = _commitBatch(6, v6Header2, 3, 1); + + // revert when caller is not owner + hevm.startPrank(address(1)); + hevm.expectRevert("Ownable: caller 
is not the owner"); + rollup.finalizeEuclidInitialBatch(bytes32(0)); hevm.stopPrank(); - // invalid chunk length, revert - chunk0 = new bytes(1); - chunk0[0] = bytes1(uint8(1)); // one block in this chunk - chunks[0] = chunk0; - hevm.startPrank(address(0)); - hevm.expectRevert(ChunkCodecV1.ErrorIncorrectChunkLengthV1.selector); - rollup.commitBatchWithBlobProof(3, batchHeader0, chunks, new bytes(0), new bytes(0)); - hevm.stopPrank(); - - // cannot skip last L1 message, revert - chunk0 = new bytes(1 + 60); - bytes memory bitmap = new bytes(32); - chunk0[0] = bytes1(uint8(1)); // one block in this chunk - chunk0[58] = bytes1(uint8(1)); // numTransactions = 1 - chunk0[60] = bytes1(uint8(1)); // numL1Messages = 1 - bitmap[31] = bytes1(uint8(1)); - chunks[0] = chunk0; - hevm.startPrank(address(0)); - hevm.expectRevert(ScrollChain.ErrorLastL1MessageSkipped.selector); - rollup.commitBatchWithBlobProof(3, batchHeader0, chunks, bitmap, new bytes(0)); - hevm.stopPrank(); - - // num txs less than num L1 msgs, revert - chunk0 = new bytes(1 + 60); - bitmap = new bytes(32); - chunk0[0] = bytes1(uint8(1)); // one block in this chunk - chunk0[58] = bytes1(uint8(1)); // numTransactions = 1 - chunk0[60] = bytes1(uint8(3)); // numL1Messages = 3 - bitmap[31] = bytes1(uint8(3)); - chunks[0] = chunk0; - hevm.startPrank(address(0)); - hevm.expectRevert(ScrollChain.ErrorNumTxsLessThanNumL1Msgs.selector); - rollup.commitBatchWithBlobProof(3, batchHeader0, chunks, bitmap, new bytes(0)); - hevm.stopPrank(); - - // revert when ErrorNoBlobFound - // revert when ErrorNoBlobFound - chunk0 = new bytes(1 + 60); - chunk0[0] = bytes1(uint8(1)); // one block in this chunk - chunks[0] = chunk0; - hevm.startPrank(address(0)); - hevm.expectRevert(ScrollChain.ErrorNoBlobFound.selector); - rollup.commitBatchWithBlobProof(3, batchHeader0, chunks, new bytes(0), new bytes(0)); - hevm.stopPrank(); - - // @note we cannot check `ErrorFoundMultipleBlobs` here - - // upgrade to ScrollChainMockBlob - ScrollChainMockBlob impl = new ScrollChainMockBlob( - rollup.layer2ChainId(), - rollup.messageQueue(), - rollup.verifier() - ); - admin.upgrade(ITransparentUpgradeableProxy(address(rollup)), address(impl)); - // from https://etherscan.io/blob/0x013590dc3544d56629ba81bb14d4d31248f825001653aa575eb8e3a719046757?bid=740652 - bytes32 blobVersionedHash = 0x013590dc3544d56629ba81bb14d4d31248f825001653aa575eb8e3a719046757; - bytes - memory blobDataProof = hex"2c9d777660f14ad49803a6442935c0d24a0d83551de5995890bf70a17d24e68753ab0fe6807c7081f0885fe7da741554d658a03730b1fa006f8319f8b993bcb0a5a0c9e8a145c5ef6e415c245690effa2914ec9393f58a7251d30c0657da1453d9ad906eae8b97dd60c9a216f81b4df7af34d01e214e1ec5865f0133ecc16d7459e49dab66087340677751e82097fbdd20551d66076f425775d1758a9dfd186b"; - ScrollChainMockBlob(address(rollup)).setBlobVersionedHash(blobVersionedHash); - - chunk0 = new bytes(1 + 60); - chunk0[0] = bytes1(uint8(1)); // one block in this chunk - chunks[0] = chunk0; - // revert when ErrorCallPointEvaluationPrecompileFailed - hevm.startPrank(address(0)); - hevm.expectRevert(ScrollChain.ErrorCallPointEvaluationPrecompileFailed.selector); - rollup.commitBatchWithBlobProof(3, batchHeader0, chunks, new bytes(0), new bytes(0)); - hevm.stopPrank(); - - bytes32 batchHash0 = rollup.committedBatches(0); - bytes memory batchHeader1 = new bytes(193); - assembly { - mstore8(add(batchHeader1, 0x20), 3) // version - mstore(add(batchHeader1, add(0x20, 1)), shl(192, 1)) // batchIndex - mstore(add(batchHeader1, add(0x20, 9)), 0) // l1MessagePopped - 
mstore(add(batchHeader1, add(0x20, 17)), 0) // totalL1MessagePopped - mstore(add(batchHeader1, add(0x20, 25)), 0x246394445f4fe64ed5598554d55d1682d6fb3fe04bf58eb54ef81d1189fafb51) // dataHash - mstore(add(batchHeader1, add(0x20, 57)), blobVersionedHash) // blobVersionedHash - mstore(add(batchHeader1, add(0x20, 89)), batchHash0) // parentBatchHash - mstore(add(batchHeader1, add(0x20, 121)), 0) // lastBlockTimestamp - mcopy(add(batchHeader1, add(0x20, 129)), add(blobDataProof, 0x20), 64) // blobDataProof - } - // hash is ed32768c5f910a11edaf1c1ec0c0da847def9d24e0a24567c3c3d284061cf935 - - // succeed - hevm.startPrank(address(0)); - assertEq(rollup.committedBatches(1), bytes32(0)); - rollup.commitBatchWithBlobProof(3, batchHeader0, chunks, new bytes(0), blobDataProof); - hevm.stopPrank(); - assertEq(rollup.committedBatches(1), keccak256(batchHeader1)); - - // revert when ErrorBatchIsAlreadyCommitted - hevm.startPrank(address(0)); - hevm.expectRevert(ScrollChain.ErrorBatchIsAlreadyCommitted.selector); - rollup.commitBatchWithBlobProof(3, batchHeader0, chunks, new bytes(0), blobDataProof); - hevm.stopPrank(); - } - - function testFinalizeBundleWithProof() external { - // caller not prover, revert - hevm.expectRevert(ScrollChain.ErrorCallerIsNotProver.selector); - rollup.finalizeBundleWithProof(new bytes(0), bytes32(0), bytes32(0), new bytes(0)); - - rollup.addProver(address(0)); - rollup.addSequencer(address(0)); - - // import genesis batch - bytes memory batchHeader0 = new bytes(89); - assembly { - mstore(add(batchHeader0, add(0x20, 25)), 1) - } - rollup.importGenesisBatch(batchHeader0, bytes32(uint256(1))); - - // upgrade to ScrollChainMockBlob - ScrollChainMockBlob impl = new ScrollChainMockBlob( - rollup.layer2ChainId(), - rollup.messageQueue(), - rollup.verifier() - ); - admin.upgrade(ITransparentUpgradeableProxy(address(rollup)), address(impl)); - // from https://etherscan.io/blob/0x013590dc3544d56629ba81bb14d4d31248f825001653aa575eb8e3a719046757?bid=740652 - bytes32 blobVersionedHash = 0x013590dc3544d56629ba81bb14d4d31248f825001653aa575eb8e3a719046757; - bytes - memory blobDataProof = hex"2c9d777660f14ad49803a6442935c0d24a0d83551de5995890bf70a17d24e68753ab0fe6807c7081f0885fe7da741554d658a03730b1fa006f8319f8b993bcb0a5a0c9e8a145c5ef6e415c245690effa2914ec9393f58a7251d30c0657da1453d9ad906eae8b97dd60c9a216f81b4df7af34d01e214e1ec5865f0133ecc16d7459e49dab66087340677751e82097fbdd20551d66076f425775d1758a9dfd186b"; - ScrollChainMockBlob(address(rollup)).setBlobVersionedHash(blobVersionedHash); - - bytes[] memory chunks = new bytes[](1); - bytes memory chunk0; - - bytes32 batchHash0 = rollup.committedBatches(0); - bytes memory batchHeader1 = new bytes(193); - assembly { - mstore8(add(batchHeader1, 0x20), 3) // version - mstore(add(batchHeader1, add(0x20, 1)), shl(192, 1)) // batchIndex - mstore(add(batchHeader1, add(0x20, 9)), 0) // l1MessagePopped - mstore(add(batchHeader1, add(0x20, 17)), 0) // totalL1MessagePopped - mstore(add(batchHeader1, add(0x20, 25)), 0x246394445f4fe64ed5598554d55d1682d6fb3fe04bf58eb54ef81d1189fafb51) // dataHash - mstore(add(batchHeader1, add(0x20, 57)), blobVersionedHash) // blobVersionedHash - mstore(add(batchHeader1, add(0x20, 89)), batchHash0) // parentBatchHash - mstore(add(batchHeader1, add(0x20, 121)), 0) // lastBlockTimestamp - mcopy(add(batchHeader1, add(0x20, 129)), add(blobDataProof, 0x20), 64) // blobDataProof - } - // hash is ed32768c5f910a11edaf1c1ec0c0da847def9d24e0a24567c3c3d284061cf935 - - // commit one batch - chunk0 = new bytes(1 + 60); - chunk0[0] = 
bytes1(uint8(1)); // one block in this chunk - chunks[0] = chunk0; - hevm.startPrank(address(0)); - assertEq(rollup.committedBatches(1), bytes32(0)); - rollup.commitBatchWithBlobProof(3, batchHeader0, chunks, new bytes(0), blobDataProof); - hevm.stopPrank(); - assertEq(rollup.committedBatches(1), keccak256(batchHeader1)); - // revert when ErrorStateRootIsZero - hevm.startPrank(address(0)); hevm.expectRevert(ScrollChain.ErrorStateRootIsZero.selector); - rollup.finalizeBundleWithProof(batchHeader1, bytes32(0), bytes32(0), new bytes(0)); - hevm.stopPrank(); - - // revert when ErrorBatchHeaderV3LengthMismatch - bytes memory header = new bytes(192); - assembly { - mstore8(add(header, 0x20), 3) // version - } - hevm.startPrank(address(0)); - hevm.expectRevert(BatchHeaderV3Codec.ErrorBatchHeaderV3LengthMismatch.selector); - rollup.finalizeBundleWithProof(header, bytes32(uint256(1)), bytes32(uint256(2)), new bytes(0)); - hevm.stopPrank(); - - // revert when ErrorIncorrectBatchHash - batchHeader1[1] = bytes1(uint8(1)); // change random byte - hevm.startPrank(address(0)); - hevm.expectRevert(ScrollChain.ErrorIncorrectBatchHash.selector); - rollup.finalizeBundleWithProof(batchHeader1, bytes32(uint256(1)), bytes32(uint256(2)), new bytes(0)); - hevm.stopPrank(); - batchHeader1[1] = bytes1(uint8(0)); // change back - - // verify success - assertBoolEq(rollup.isBatchFinalized(1), false); - hevm.startPrank(address(0)); - rollup.finalizeBundleWithProof(batchHeader1, bytes32(uint256(2)), bytes32(uint256(3)), new bytes(0)); - hevm.stopPrank(); - assertBoolEq(rollup.isBatchFinalized(1), true); - assertEq(rollup.finalizedStateRoots(1), bytes32(uint256(2))); - assertEq(rollup.withdrawRoots(1), bytes32(uint256(3))); - assertEq(rollup.lastFinalizedBatchIndex(), 1); + rollup.finalizeEuclidInitialBatch(bytes32(0)); - // revert when ErrorBatchIsAlreadyVerified + // finalize first 9 batches hevm.startPrank(address(0)); - hevm.expectRevert(ScrollChain.ErrorBatchIsAlreadyVerified.selector); - rollup.finalizeBundleWithProof(batchHeader1, bytes32(uint256(2)), bytes32(uint256(3)), new bytes(0)); + assertEq(rollup.lastFinalizedBatchIndex(), 0); + rollup.finalizeBundleWithProof(headers[9], keccak256("009"), keccak256("109"), new bytes(0)); + assertEq(rollup.lastFinalizedBatchIndex(), 9); hevm.stopPrank(); - } - - function _commitBatchV3() - internal - returns ( - bytes memory batchHeader0, - bytes memory batchHeader1, - bytes memory batchHeader2 - ) - { - // import genesis batch first - batchHeader0 = new bytes(89); - assembly { - mstore(add(batchHeader0, add(0x20, 25)), 1) - } - rollup.importGenesisBatch(batchHeader0, bytes32(uint256(1))); - bytes32 batchHash0 = rollup.committedBatches(0); - - // upgrade to ScrollChainMockBlob - ScrollChainMockBlob impl = new ScrollChainMockBlob( - rollup.layer2ChainId(), - rollup.messageQueue(), - rollup.verifier() - ); - admin.upgrade(ITransparentUpgradeableProxy(address(rollup)), address(impl)); - // from https://etherscan.io/blob/0x013590dc3544d56629ba81bb14d4d31248f825001653aa575eb8e3a719046757?bid=740652 - bytes32 blobVersionedHash = 0x013590dc3544d56629ba81bb14d4d31248f825001653aa575eb8e3a719046757; - bytes - memory blobDataProof = hex"2c9d777660f14ad49803a6442935c0d24a0d83551de5995890bf70a17d24e68753ab0fe6807c7081f0885fe7da741554d658a03730b1fa006f8319f8b993bcb0a5a0c9e8a145c5ef6e415c245690effa2914ec9393f58a7251d30c0657da1453d9ad906eae8b97dd60c9a216f81b4df7af34d01e214e1ec5865f0133ecc16d7459e49dab66087340677751e82097fbdd20551d66076f425775d1758a9dfd186b"; - 
ScrollChainMockBlob(address(rollup)).setBlobVersionedHash(blobVersionedHash); - - bytes memory bitmap; - bytes[] memory chunks; - bytes memory chunk0; - bytes memory chunk1; - // commit batch1, one chunk with one block, 1 tx, 1 L1 message, no skip - // => payload for data hash of chunk0 - // 0000000000000000 - // 0000000000000123 - // 0000000000000000000000000000000000000000000000000000000000000000 - // 0000000000000000 - // 0001 - // a2277fd30bbbe74323309023b56035b376d7768ad237ae4fc46ead7dc9591ae1 - // => data hash for chunk0 - // 5972b8fa626c873a97abb6db14fb0cb2085e050a6f80ec90b92bb0bbaa12eb5a - // => data hash for all chunks - // f6166fe668c1e6a04e3c75e864452bb02a31358f285efcb7a4e6603eb5750359 - // => payload for batch header - // 03 - // 0000000000000001 - // 0000000000000001 - // 0000000000000001 - // f6166fe668c1e6a04e3c75e864452bb02a31358f285efcb7a4e6603eb5750359 - // 013590dc3544d56629ba81bb14d4d31248f825001653aa575eb8e3a719046757 - // 119b828c2a2798d2c957228ebeaff7e10bb099ae0d4e224f3eeb779ff61cba61 - // 0000000000000123 - // 2c9d777660f14ad49803a6442935c0d24a0d83551de5995890bf70a17d24e687 - // 53ab0fe6807c7081f0885fe7da741554d658a03730b1fa006f8319f8b993bcb0 - // => hash for batch header - // 07e1bede8c5047cf8ca7ac84f5390837fb6224953af83d7e967488fa63a2065e - batchHeader1 = new bytes(193); - assembly { - mstore8(add(batchHeader1, 0x20), 3) // version - mstore(add(batchHeader1, add(0x20, 1)), shl(192, 1)) // batchIndex = 1 - mstore(add(batchHeader1, add(0x20, 9)), shl(192, 1)) // l1MessagePopped = 1 - mstore(add(batchHeader1, add(0x20, 17)), shl(192, 1)) // totalL1MessagePopped = 1 - mstore(add(batchHeader1, add(0x20, 25)), 0xf6166fe668c1e6a04e3c75e864452bb02a31358f285efcb7a4e6603eb5750359) // dataHash - mstore(add(batchHeader1, add(0x20, 57)), blobVersionedHash) // blobVersionedHash - mstore(add(batchHeader1, add(0x20, 89)), batchHash0) // parentBatchHash - mstore(add(batchHeader1, add(0x20, 121)), shl(192, 0x123)) // lastBlockTimestamp - mcopy(add(batchHeader1, add(0x20, 129)), add(blobDataProof, 0x20), 64) // blobDataProof - } - chunk0 = new bytes(1 + 60); - assembly { - mstore(add(chunk0, 0x20), shl(248, 1)) // numBlocks = 1 - mstore(add(chunk0, add(0x21, 8)), shl(192, 0x123)) // timestamp = 0x123 - mstore(add(chunk0, add(0x21, 56)), shl(240, 1)) // numTransactions = 1 - mstore(add(chunk0, add(0x21, 58)), shl(240, 1)) // numL1Messages = 1 - } - chunks = new bytes[](1); - chunks[0] = chunk0; - bitmap = new bytes(32); - hevm.startPrank(address(0)); - hevm.expectEmit(true, true, false, true); - emit CommitBatch(1, keccak256(batchHeader1)); - rollup.commitBatchWithBlobProof(3, batchHeader0, chunks, bitmap, blobDataProof); - hevm.stopPrank(); - assertBoolEq(rollup.isBatchFinalized(1), false); - bytes32 batchHash1 = rollup.committedBatches(1); - assertEq(batchHash1, keccak256(batchHeader1)); - assertEq(1, messageQueue.pendingQueueIndex()); - assertEq(0, messageQueue.nextUnfinalizedQueueIndex()); - assertBoolEq(messageQueue.isMessageSkipped(0), false); - - // commit batch2 with two chunks, correctly - // 1. chunk0 has one block, 3 tx, no L1 messages - // => payload for chunk0 - // 0000000000000000 - // 0000000000000456 - // 0000000000000000000000000000000000000000000000000000000000000000 - // 0000000000000000 - // 0003 - // ... (some tx hashes) - // => data hash for chunk0 - // 1c7649f248aed8448fa7997e44db7b7028581deb119c6d6aa1a2d126d62564cf - // 2. 
chunk1 has three blocks - // 2.1 block0 has 5 tx, 3 L1 messages, no skips - // 2.2 block1 has 10 tx, 5 L1 messages, even is skipped, last is not skipped - // 2.2 block1 has 300 tx, 256 L1 messages, odd position is skipped, last is not skipped - // => payload for chunk1 - // 0000000000000000 - // 0000000000000789 - // 0000000000000000000000000000000000000000000000000000000000000000 - // 0000000000000000 - // 0005 - // 0000000000000000 - // 0000000000001234 - // 0000000000000000000000000000000000000000000000000000000000000000 - // 0000000000000000 - // 000a - // 0000000000000000 - // 0000000000005678 - // 0000000000000000000000000000000000000000000000000000000000000000 - // 0000000000000000 - // 012c - // => data hash for chunk1 - // 4e82cb576135a69a0ecc2b2070c432abfdeb20076594faaa1aeed77f48d7c856 - // => data hash for all chunks - // 166e9d20206ae8cddcdf0f30093e3acc3866937172df5d7f69fb5567d9595239 - // => payload for batch header - // 03 - // 0000000000000002 - // 0000000000000108 - // 0000000000000109 - // 166e9d20206ae8cddcdf0f30093e3acc3866937172df5d7f69fb5567d9595239 - // 013590dc3544d56629ba81bb14d4d31248f825001653aa575eb8e3a719046757 - // 07e1bede8c5047cf8ca7ac84f5390837fb6224953af83d7e967488fa63a2065e - // 0000000000005678 - // 2c9d777660f14ad49803a6442935c0d24a0d83551de5995890bf70a17d24e687 - // 53ab0fe6807c7081f0885fe7da741554d658a03730b1fa006f8319f8b993bcb0 - // => hash for batch header - // 8a59f0de6f1071c0f48d6a49d9b794008d28b63cc586da0f44f8b2b4e13cb231 - batchHeader2 = new bytes(193); - assembly { - mstore8(add(batchHeader2, 0x20), 3) // version - mstore(add(batchHeader2, add(0x20, 1)), shl(192, 2)) // batchIndex = 2 - mstore(add(batchHeader2, add(0x20, 9)), shl(192, 264)) // l1MessagePopped = 264 - mstore(add(batchHeader2, add(0x20, 17)), shl(192, 265)) // totalL1MessagePopped = 265 - mstore(add(batchHeader2, add(0x20, 25)), 0x166e9d20206ae8cddcdf0f30093e3acc3866937172df5d7f69fb5567d9595239) // dataHash - mstore(add(batchHeader2, add(0x20, 57)), blobVersionedHash) // blobVersionedHash - mstore(add(batchHeader2, add(0x20, 89)), batchHash1) // parentBatchHash - mstore(add(batchHeader2, add(0x20, 121)), shl(192, 0x5678)) // lastBlockTimestamp - mcopy(add(batchHeader2, add(0x20, 129)), add(blobDataProof, 0x20), 64) // blobDataProof - } - chunk0 = new bytes(1 + 60); - assembly { - mstore(add(chunk0, 0x20), shl(248, 1)) // numBlocks = 1 - mstore(add(chunk0, add(0x21, 8)), shl(192, 0x456)) // timestamp = 0x456 - mstore(add(chunk0, add(0x21, 56)), shl(240, 3)) // numTransactions = 3 - mstore(add(chunk0, add(0x21, 58)), shl(240, 0)) // numL1Messages = 0 - } - chunk1 = new bytes(1 + 60 * 3); - assembly { - mstore(add(chunk1, 0x20), shl(248, 3)) // numBlocks = 3 - mstore(add(chunk1, add(33, 8)), shl(192, 0x789)) // block0.timestamp = 0x789 - mstore(add(chunk1, add(33, 56)), shl(240, 5)) // block0.numTransactions = 5 - mstore(add(chunk1, add(33, 58)), shl(240, 3)) // block0.numL1Messages = 3 - mstore(add(chunk1, add(93, 8)), shl(192, 0x1234)) // block1.timestamp = 0x1234 - mstore(add(chunk1, add(93, 56)), shl(240, 10)) // block1.numTransactions = 10 - mstore(add(chunk1, add(93, 58)), shl(240, 5)) // block1.numL1Messages = 5 - mstore(add(chunk1, add(153, 8)), shl(192, 0x5678)) // block1.timestamp = 0x5678 - mstore(add(chunk1, add(153, 56)), shl(240, 300)) // block1.numTransactions = 300 - mstore(add(chunk1, add(153, 58)), shl(240, 256)) // block1.numL1Messages = 256 - } - chunks = new bytes[](2); - chunks[0] = chunk0; - chunks[1] = chunk1; - bitmap = new bytes(64); - assembly { - mstore( - 
add(bitmap, add(0x20, 0)), - 77194726158210796949047323339125271902179989777093709359638389338608753093160 - ) // bitmap0 - mstore(add(bitmap, add(0x20, 32)), 42) // bitmap1 - } - - // too many txs in one chunk, revert - rollup.updateMaxNumTxInChunk(2); // 3 - 1 - hevm.startPrank(address(0)); - hevm.expectRevert(ScrollChain.ErrorTooManyTxsInOneChunk.selector); - rollup.commitBatchWithBlobProof(3, batchHeader1, chunks, bitmap, blobDataProof); // first chunk with too many txs - hevm.stopPrank(); - rollup.updateMaxNumTxInChunk(185); // 5+10+300 - 2 - 127 - hevm.startPrank(address(0)); - hevm.expectRevert(ScrollChain.ErrorTooManyTxsInOneChunk.selector); - rollup.commitBatchWithBlobProof(3, batchHeader1, chunks, bitmap, blobDataProof); // second chunk with too many txs - hevm.stopPrank(); - - rollup.updateMaxNumTxInChunk(186); - hevm.startPrank(address(0)); - hevm.expectEmit(true, true, false, true); - emit CommitBatch(2, keccak256(batchHeader2)); - rollup.commitBatchWithBlobProof(3, batchHeader1, chunks, bitmap, blobDataProof); - hevm.stopPrank(); - assertBoolEq(rollup.isBatchFinalized(2), false); - bytes32 batchHash2 = rollup.committedBatches(2); - assertEq(batchHash2, keccak256(batchHeader2)); - assertEq(265, messageQueue.pendingQueueIndex()); - assertEq(0, messageQueue.nextUnfinalizedQueueIndex()); - } - - function testCommitAndFinalizeWithL1MessagesV3() external { - rollup.addSequencer(address(0)); - rollup.addProver(address(0)); - - // import 300 L1 messages - for (uint256 i = 0; i < 300; i++) { - messageQueue.appendCrossDomainMessage(address(this), 1000000, new bytes(0)); - } - - (bytes memory batchHeader0, bytes memory batchHeader1, bytes memory batchHeader2) = _commitBatchV3(); - - // 1 ~ 4, zero - for (uint256 i = 1; i < 4; i++) { - assertBoolEq(messageQueue.isMessageSkipped(i), false); - } - // 4 ~ 9, even is nonzero, odd is zero - for (uint256 i = 4; i < 9; i++) { - if (i % 2 == 1 || i == 8) { - assertBoolEq(messageQueue.isMessageSkipped(i), false); - } else { - assertBoolEq(messageQueue.isMessageSkipped(i), true); - } - } - // 9 ~ 265, even is nonzero, odd is zero - for (uint256 i = 9; i < 265; i++) { - if (i % 2 == 1 || i == 264) { - assertBoolEq(messageQueue.isMessageSkipped(i), false); - } else { - assertBoolEq(messageQueue.isMessageSkipped(i), true); - } - } - - // finalize batch1 and batch2 together - assertBoolEq(rollup.isBatchFinalized(1), false); - assertBoolEq(rollup.isBatchFinalized(2), false); - hevm.startPrank(address(0)); - rollup.finalizeBundleWithProof(batchHeader2, bytes32(uint256(2)), bytes32(uint256(3)), new bytes(0)); - hevm.stopPrank(); - assertBoolEq(rollup.isBatchFinalized(1), true); - assertBoolEq(rollup.isBatchFinalized(2), true); - assertEq(rollup.finalizedStateRoots(1), bytes32(0)); - assertEq(rollup.withdrawRoots(1), bytes32(0)); - assertEq(rollup.finalizedStateRoots(2), bytes32(uint256(2))); - assertEq(rollup.withdrawRoots(2), bytes32(uint256(3))); - assertEq(rollup.lastFinalizedBatchIndex(), 2); - assertEq(265, messageQueue.nextUnfinalizedQueueIndex()); - } - - function testRevertBatchWithL1Messages() external { - rollup.addSequencer(address(0)); - rollup.addProver(address(0)); - - // import 300 L1 messages - for (uint256 i = 0; i < 300; i++) { - messageQueue.appendCrossDomainMessage(address(this), 1000000, new bytes(0)); - } - - (bytes memory batchHeader0, bytes memory batchHeader1, bytes memory batchHeader2) = _commitBatchV3(); - - // 1 ~ 4, zero - for (uint256 i = 1; i < 4; i++) { - assertBoolEq(messageQueue.isMessageSkipped(i), false); - } - // 4 ~ 
9, even is nonzero, odd is zero - for (uint256 i = 4; i < 9; i++) { - if (i % 2 == 1 || i == 8) { - assertBoolEq(messageQueue.isMessageSkipped(i), false); - } else { - assertBoolEq(messageQueue.isMessageSkipped(i), true); - } - } - // 9 ~ 265, even is nonzero, odd is zero - for (uint256 i = 9; i < 265; i++) { - if (i % 2 == 1 || i == 264) { - assertBoolEq(messageQueue.isMessageSkipped(i), false); - } else { - assertBoolEq(messageQueue.isMessageSkipped(i), true); - } - } - - // revert batch 1 and batch 2 - rollup.revertBatch(batchHeader1, batchHeader2); - assertEq(0, messageQueue.pendingQueueIndex()); - assertEq(0, messageQueue.nextUnfinalizedQueueIndex()); - for (uint256 i = 0; i < 265; i++) { - assertBoolEq(messageQueue.isMessageSkipped(i), false); - } - } - - function testSwitchBatchFromV1ToV3() external { - rollup.addSequencer(address(0)); - rollup.addProver(address(0)); - - // import 300 L1 messages - for (uint256 i = 0; i < 300; i++) { - messageQueue.appendCrossDomainMessage(address(this), 1000000, new bytes(0)); - } - - // import genesis batch first - bytes memory batchHeader0 = new bytes(89); - assembly { - mstore(add(batchHeader0, add(0x20, 25)), 1) - } - rollup.importGenesisBatch(batchHeader0, bytes32(uint256(1))); - bytes32 batchHash0 = rollup.committedBatches(0); - - // upgrade to ScrollChainMockBlob - ScrollChainMockBlob impl = new ScrollChainMockBlob( - rollup.layer2ChainId(), - rollup.messageQueue(), - rollup.verifier() - ); - admin.upgrade(ITransparentUpgradeableProxy(address(rollup)), address(impl)); - // from https://etherscan.io/blob/0x013590dc3544d56629ba81bb14d4d31248f825001653aa575eb8e3a719046757?bid=740652 - bytes32 blobVersionedHash = 0x013590dc3544d56629ba81bb14d4d31248f825001653aa575eb8e3a719046757; - bytes - memory blobDataProof = hex"2c9d777660f14ad49803a6442935c0d24a0d83551de5995890bf70a17d24e68753ab0fe6807c7081f0885fe7da741554d658a03730b1fa006f8319f8b993bcb0a5a0c9e8a145c5ef6e415c245690effa2914ec9393f58a7251d30c0657da1453d9ad906eae8b97dd60c9a216f81b4df7af34d01e214e1ec5865f0133ecc16d7459e49dab66087340677751e82097fbdd20551d66076f425775d1758a9dfd186b"; - ScrollChainMockBlob(address(rollup)).setBlobVersionedHash(blobVersionedHash); - - bytes memory bitmap; - bytes[] memory chunks; - bytes memory chunk0; - bytes memory chunk1; - - // commit batch1 with version v1, one chunk with one block, 1 tx, 1 L1 message, no skip - // => payload for data hash of chunk0 - // 0000000000000000 - // 0000000000000000 - // 0000000000000000000000000000000000000000000000000000000000000000 - // 0000000000000000 - // 0001 - // a2277fd30bbbe74323309023b56035b376d7768ad237ae4fc46ead7dc9591ae1 - // => data hash for chunk0 - // 9ef1e5694bdb014a1eea42be756a8f63bfd8781d6332e9ef3b5126d90c62f110 - // => data hash for all chunks - // d9cb6bf9264006fcea490d5c261f7453ab95b1b26033a3805996791b8e3a62f3 - // => payload for batch header - // 01 - // 0000000000000001 - // 0000000000000001 - // 0000000000000001 - // d9cb6bf9264006fcea490d5c261f7453ab95b1b26033a3805996791b8e3a62f3 - // 013590dc3544d56629ba81bb14d4d31248f825001653aa575eb8e3a719046757 - // 119b828c2a2798d2c957228ebeaff7e10bb099ae0d4e224f3eeb779ff61cba61 - // 0000000000000000000000000000000000000000000000000000000000000000 - // => hash for batch header - // 66b68a5092940d88a8c6f203d2071303557c024275d8ceaa2e12662bc61c8d8f - bytes memory batchHeader1 = new bytes(121 + 32); - assembly { - mstore8(add(batchHeader1, 0x20), 1) // version - mstore(add(batchHeader1, add(0x20, 1)), shl(192, 1)) // batchIndex = 1 - mstore(add(batchHeader1, add(0x20, 9)), 
shl(192, 1)) // l1MessagePopped = 1 - mstore(add(batchHeader1, add(0x20, 17)), shl(192, 1)) // totalL1MessagePopped = 1 - mstore(add(batchHeader1, add(0x20, 25)), 0xd9cb6bf9264006fcea490d5c261f7453ab95b1b26033a3805996791b8e3a62f3) // dataHash - mstore(add(batchHeader1, add(0x20, 57)), blobVersionedHash) // blobVersionedHash - mstore(add(batchHeader1, add(0x20, 89)), batchHash0) // parentBatchHash - mstore(add(batchHeader1, add(0x20, 121)), 0) // bitmap0 - } - chunk0 = new bytes(1 + 60); - assembly { - mstore(add(chunk0, 0x20), shl(248, 1)) // numBlocks = 1 - mstore(add(chunk0, add(0x21, 56)), shl(240, 1)) // numTransactions = 1 - mstore(add(chunk0, add(0x21, 58)), shl(240, 1)) // numL1Messages = 1 - } - chunks = new bytes[](1); - chunks[0] = chunk0; - bitmap = new bytes(32); - hevm.startPrank(address(0)); - hevm.expectEmit(true, true, false, true); - emit CommitBatch(1, keccak256(batchHeader1)); - rollup.commitBatch(1, batchHeader0, chunks, bitmap); - hevm.stopPrank(); - assertBoolEq(rollup.isBatchFinalized(1), false); - bytes32 batchHash1 = rollup.committedBatches(1); - assertEq(batchHash1, keccak256(batchHeader1)); + // revert when ErrorNotAllV4BatchFinalized + hevm.expectRevert(ScrollChain.ErrorNotAllV4BatchFinalized.selector); + rollup.finalizeEuclidInitialBatch(keccak256("011")); - // commit batch2 with version v2, with two chunks, correctly - // 1. chunk0 has one block, 3 tx, no L1 messages - // => payload for chunk0 - // 0000000000000000 - // 0000000000000456 - // 0000000000000000000000000000000000000000000000000000000000000000 - // 0000000000000000 - // 0003 - // ... (some tx hashes) - // => data hash for chunk0 - // 1c7649f248aed8448fa7997e44db7b7028581deb119c6d6aa1a2d126d62564cf - // 2. chunk1 has three blocks - // 2.1 block0 has 5 tx, 3 L1 messages, no skips - // 2.2 block1 has 10 tx, 5 L1 messages, even is skipped, last is not skipped - // 2.2 block1 has 300 tx, 256 L1 messages, odd position is skipped, last is not skipped - // => payload for chunk1 - // 0000000000000000 - // 0000000000000789 - // 0000000000000000000000000000000000000000000000000000000000000000 - // 0000000000000000 - // 0005 - // 0000000000000000 - // 0000000000001234 - // 0000000000000000000000000000000000000000000000000000000000000000 - // 0000000000000000 - // 000a - // 0000000000000000 - // 0000000000005678 - // 0000000000000000000000000000000000000000000000000000000000000000 - // 0000000000000000 - // 012c - // => data hash for chunk1 - // 4e82cb576135a69a0ecc2b2070c432abfdeb20076594faaa1aeed77f48d7c856 - // => data hash for all chunks - // 166e9d20206ae8cddcdf0f30093e3acc3866937172df5d7f69fb5567d9595239 - // => payload for batch header - // 03 - // 0000000000000002 - // 0000000000000108 - // 0000000000000109 - // 166e9d20206ae8cddcdf0f30093e3acc3866937172df5d7f69fb5567d9595239 - // 013590dc3544d56629ba81bb14d4d31248f825001653aa575eb8e3a719046757 - // 66b68a5092940d88a8c6f203d2071303557c024275d8ceaa2e12662bc61c8d8f - // 0000000000005678 - // 2c9d777660f14ad49803a6442935c0d24a0d83551de5995890bf70a17d24e687 - // 53ab0fe6807c7081f0885fe7da741554d658a03730b1fa006f8319f8b993bcb0 - // => hash for batch header - // f212a256744ca658dfc4eb32665aa0fe845eb757a030bd625cb2880055e3cc92 - bytes memory batchHeader2 = new bytes(193); - assembly { - mstore8(add(batchHeader2, 0x20), 3) // version - mstore(add(batchHeader2, add(0x20, 1)), shl(192, 2)) // batchIndex = 2 - mstore(add(batchHeader2, add(0x20, 9)), shl(192, 264)) // l1MessagePopped = 264 - mstore(add(batchHeader2, add(0x20, 17)), shl(192, 265)) // 
totalL1MessagePopped = 265 - mstore(add(batchHeader2, add(0x20, 25)), 0x166e9d20206ae8cddcdf0f30093e3acc3866937172df5d7f69fb5567d9595239) // dataHash - mstore(add(batchHeader2, add(0x20, 57)), blobVersionedHash) // blobVersionedHash - mstore(add(batchHeader2, add(0x20, 89)), batchHash1) // parentBatchHash - mstore(add(batchHeader2, add(0x20, 121)), shl(192, 0x5678)) // lastBlockTimestamp - mcopy(add(batchHeader2, add(0x20, 129)), add(blobDataProof, 0x20), 64) // blobDataProof - } - chunk0 = new bytes(1 + 60); - assembly { - mstore(add(chunk0, 0x20), shl(248, 1)) // numBlocks = 1 - mstore(add(chunk0, add(0x21, 8)), shl(192, 0x456)) // timestamp = 0x456 - mstore(add(chunk0, add(0x21, 56)), shl(240, 3)) // numTransactions = 3 - mstore(add(chunk0, add(0x21, 58)), shl(240, 0)) // numL1Messages = 0 - } - chunk1 = new bytes(1 + 60 * 3); - assembly { - mstore(add(chunk1, 0x20), shl(248, 3)) // numBlocks = 3 - mstore(add(chunk1, add(33, 8)), shl(192, 0x789)) // block0.timestamp = 0x789 - mstore(add(chunk1, add(33, 56)), shl(240, 5)) // block0.numTransactions = 5 - mstore(add(chunk1, add(33, 58)), shl(240, 3)) // block0.numL1Messages = 3 - mstore(add(chunk1, add(93, 8)), shl(192, 0x1234)) // block1.timestamp = 0x1234 - mstore(add(chunk1, add(93, 56)), shl(240, 10)) // block1.numTransactions = 10 - mstore(add(chunk1, add(93, 58)), shl(240, 5)) // block1.numL1Messages = 5 - mstore(add(chunk1, add(153, 8)), shl(192, 0x5678)) // block1.timestamp = 0x5678 - mstore(add(chunk1, add(153, 56)), shl(240, 300)) // block1.numTransactions = 300 - mstore(add(chunk1, add(153, 58)), shl(240, 256)) // block1.numL1Messages = 256 - } - chunks = new bytes[](2); - chunks[0] = chunk0; - chunks[1] = chunk1; - bitmap = new bytes(64); - assembly { - mstore( - add(bitmap, add(0x20, 0)), - 77194726158210796949047323339125271902179989777093709359638389338608753093160 - ) // bitmap0 - mstore(add(bitmap, add(0x20, 32)), 42) // bitmap1 - } + // revert when ErrorFinalizePreAndPostEuclidBatchInOneBundle, v4+v5 + hevm.startPrank(address(0)); + hevm.expectRevert(ScrollChain.ErrorFinalizePreAndPostEuclidBatchInOneBundle.selector); + rollup.finalizeBundleWithProof(v5Header, keccak256("011"), keccak256("111"), new bytes(0)); + hevm.stopPrank(); - rollup.updateMaxNumTxInChunk(186); - // should revert, when all v1 batch not finalized + // revert when ErrorFinalizePreAndPostEuclidBatchInOneBundle, v4+v5+v6 hevm.startPrank(address(0)); - hevm.expectRevert("start index mismatch"); - rollup.commitBatchWithBlobProof(3, batchHeader1, chunks, bitmap, blobDataProof); + hevm.expectRevert(ScrollChain.ErrorFinalizePreAndPostEuclidBatchInOneBundle.selector); + rollup.finalizeBundleWithProof(v6Header1, keccak256("011"), keccak256("111"), new bytes(0)); + hevm.expectRevert(ScrollChain.ErrorFinalizePreAndPostEuclidBatchInOneBundle.selector); + rollup.finalizeBundleWithProof(v6Header2, keccak256("011"), keccak256("111"), new bytes(0)); + hevm.expectRevert(ScrollChain.ErrorFinalizePreAndPostEuclidBatchInOneBundle.selector); + rollup.finalizeBundleWithProof(v6Header3, keccak256("011"), keccak256("111"), new bytes(0)); hevm.stopPrank(); - // finalize batch1 + // finalize batch 10 hevm.startPrank(address(0)); - hevm.expectEmit(true, true, false, true); - emit FinalizeBatch(1, batchHash1, bytes32(uint256(2)), bytes32(uint256(3))); - rollup.finalizeBatchWithProof4844( - batchHeader1, - bytes32(uint256(1)), - bytes32(uint256(2)), - bytes32(uint256(3)), - blobDataProof, - new bytes(0) - ); + rollup.finalizeBundleWithProof(headers[10], keccak256("010"), 
keccak256("110"), new bytes(0)); + assertEq(rollup.lastFinalizedBatchIndex(), 10); hevm.stopPrank(); - assertBoolEq(rollup.isBatchFinalized(1), true); - assertEq(rollup.finalizedStateRoots(1), bytes32(uint256(2))); - assertEq(rollup.withdrawRoots(1), bytes32(uint256(3))); - assertEq(rollup.lastFinalizedBatchIndex(), 1); - assertBoolEq(messageQueue.isMessageSkipped(0), false); - assertEq(messageQueue.pendingQueueIndex(), 1); - assertEq(messageQueue.nextUnfinalizedQueueIndex(), 1); + // revert when ErrorFinalizePreAndPostEuclidBatchInOneBundle, v5 hevm.startPrank(address(0)); - hevm.expectEmit(true, true, false, true); - emit CommitBatch(2, keccak256(batchHeader2)); - rollup.commitBatchWithBlobProof(3, batchHeader1, chunks, bitmap, blobDataProof); + hevm.expectRevert(ScrollChain.ErrorFinalizePreAndPostEuclidBatchInOneBundle.selector); + rollup.finalizeBundleWithProof(v5Header, keccak256("011"), keccak256("111"), new bytes(0)); hevm.stopPrank(); - bytes32 batchHash2 = rollup.committedBatches(2); - assertEq(batchHash2, keccak256(batchHeader2)); - assertEq(messageQueue.pendingQueueIndex(), 265); - assertEq(messageQueue.nextUnfinalizedQueueIndex(), 1); - // finalize batch2 - assertBoolEq(rollup.isBatchFinalized(2), false); + // revert when ErrorFinalizePreAndPostEuclidBatchInOneBundle, v5+v6 hevm.startPrank(address(0)); - rollup.finalizeBundleWithProof(batchHeader2, bytes32(uint256(2)), bytes32(uint256(3)), new bytes(0)); + hevm.expectRevert(ScrollChain.ErrorFinalizePreAndPostEuclidBatchInOneBundle.selector); + rollup.finalizeBundleWithProof(v6Header1, keccak256("011"), keccak256("111"), new bytes(0)); + hevm.expectRevert(ScrollChain.ErrorFinalizePreAndPostEuclidBatchInOneBundle.selector); + rollup.finalizeBundleWithProof(v6Header2, keccak256("011"), keccak256("111"), new bytes(0)); + hevm.expectRevert(ScrollChain.ErrorFinalizePreAndPostEuclidBatchInOneBundle.selector); + rollup.finalizeBundleWithProof(v6Header3, keccak256("011"), keccak256("111"), new bytes(0)); hevm.stopPrank(); - assertBoolEq(rollup.isBatchFinalized(2), true); - assertEq(rollup.finalizedStateRoots(2), bytes32(uint256(2))); - assertEq(rollup.withdrawRoots(2), bytes32(uint256(3))); - assertEq(rollup.lastFinalizedBatchIndex(), 2); - } - function testRevertBatch() external { - // upgrade to ScrollChainMockBlob - ScrollChainMockBlob impl = new ScrollChainMockBlob( - rollup.layer2ChainId(), - rollup.messageQueue(), - rollup.verifier() - ); - admin.upgrade(ITransparentUpgradeableProxy(address(rollup)), address(impl)); + // succeed, withdraw root should be same as batch 10 + assertEq(rollup.finalizedStateRoots(11), 0); + assertEq(rollup.withdrawRoots(11), 0); + assertEq(rollup.lastFinalizedBatchIndex(), 10); + hevm.expectEmit(true, true, true, true); + emit FinalizeBatch(11, keccak256(v5Header), keccak256("011"), keccak256("110")); + rollup.finalizeEuclidInitialBatch(keccak256("011")); + assertEq(rollup.finalizedStateRoots(11), keccak256("011")); + assertEq(rollup.withdrawRoots(11), keccak256("110")); - // from https://etherscan.io/blob/0x013590dc3544d56629ba81bb14d4d31248f825001653aa575eb8e3a719046757?bid=740652 - bytes32 blobVersionedHash = 0x013590dc3544d56629ba81bb14d4d31248f825001653aa575eb8e3a719046757; - bytes - memory blobDataProof = 
hex"2c9d777660f14ad49803a6442935c0d24a0d83551de5995890bf70a17d24e68753ab0fe6807c7081f0885fe7da741554d658a03730b1fa006f8319f8b993bcb0a5a0c9e8a145c5ef6e415c245690effa2914ec9393f58a7251d30c0657da1453d9ad906eae8b97dd60c9a216f81b4df7af34d01e214e1ec5865f0133ecc16d7459e49dab66087340677751e82097fbdd20551d66076f425775d1758a9dfd186b"; - ScrollChainMockBlob(address(rollup)).setBlobVersionedHash(blobVersionedHash); + // revert when ErrorStateRootIsZero + hevm.expectRevert(ScrollChain.ErrorBatchIsAlreadyVerified.selector); + rollup.finalizeEuclidInitialBatch(keccak256("011")); - // caller not owner, revert - hevm.startPrank(address(1)); - hevm.expectRevert("Ownable: caller is not the owner"); - rollup.revertBatch(new bytes(89), new bytes(89)); + // finalize 3 v6 batches + // revert when ErrorStateRootIsZero + hevm.startPrank(address(0)); + hevm.expectRevert(ScrollChain.ErrorStateRootIsZero.selector); + rollup.finalizeBundleWithProof(v6Header1, bytes32(0), bytes32(0), new bytes(0)); + + // finalize bundle with one batch + assertEq(rollup.finalizedStateRoots(12), 0); + assertEq(rollup.withdrawRoots(12), 0); + assertEq(rollup.lastFinalizedBatchIndex(), 11); + assertBoolEq(rollup.isBatchFinalized(12), false); + assertEq(messageQueue.nextUnfinalizedQueueIndex(), 10); + hevm.expectEmit(true, true, true, true); + emit FinalizeBatch(12, keccak256(v6Header1), keccak256("001"), keccak256("101")); + rollup.finalizeBundleWithProof(v6Header1, keccak256("001"), keccak256("101"), new bytes(0)); + assertEq(rollup.finalizedStateRoots(12), keccak256("001")); + assertEq(rollup.withdrawRoots(12), keccak256("101")); + assertEq(rollup.lastFinalizedBatchIndex(), 12); + assertBoolEq(rollup.isBatchFinalized(12), true); + assertEq(messageQueue.nextUnfinalizedQueueIndex(), 11); + + // revert when ErrorBatchIsAlreadyVerified + hevm.expectRevert(ScrollChain.ErrorBatchIsAlreadyVerified.selector); + rollup.finalizeBundleWithProof(v6Header1, keccak256("001"), keccak256("101"), new bytes(0)); + + // finalize bundle with two batch + assertEq(rollup.finalizedStateRoots(14), 0); + assertEq(rollup.withdrawRoots(14), 0); + assertEq(rollup.lastFinalizedBatchIndex(), 12); + assertEq(messageQueue.nextUnfinalizedQueueIndex(), 11); + hevm.expectEmit(true, true, true, true); + emit FinalizeBatch(14, keccak256(v6Header3), keccak256("003"), keccak256("103")); + rollup.finalizeBundleWithProof(v6Header3, keccak256("003"), keccak256("103"), new bytes(0)); + assertEq(rollup.finalizedStateRoots(14), keccak256("003")); + assertEq(rollup.withdrawRoots(14), keccak256("103")); + assertEq(rollup.lastFinalizedBatchIndex(), 14); + assertEq(messageQueue.nextUnfinalizedQueueIndex(), 16); hevm.stopPrank(); + } + function testRevertBatchWithL1Messages() external { rollup.addSequencer(address(0)); + rollup.addProver(address(0)); - bytes memory batchHeader0 = new bytes(89); - - // import genesis batch - assembly { - mstore(add(batchHeader0, add(0x20, 25)), 1) + // import 300 L1 messages + for (uint256 i = 0; i < 300; i++) { + messageQueue.appendCrossDomainMessage(address(this), 1000000, new bytes(0)); } - rollup.importGenesisBatch(batchHeader0, bytes32(uint256(1))); - bytes32 batchHash0 = rollup.committedBatches(0); - bytes[] memory chunks = new bytes[](1); - bytes memory chunk0; + (, bytes memory batchHeader1, bytes memory batchHeader2) = _commitBatchV3(); - // commit one batch - chunk0 = new bytes(1 + 60); - chunk0[0] = bytes1(uint8(1)); // one block in this chunk - chunks[0] = chunk0; - hevm.startPrank(address(0)); - rollup.commitBatch(1, batchHeader0, chunks, 
+    function testRevertBatchWithL1Messages() external {
         rollup.addSequencer(address(0));
+        rollup.addProver(address(0));
 
-        bytes memory batchHeader0 = new bytes(89);
-
-        // import genesis batch
-        assembly {
-            mstore(add(batchHeader0, add(0x20, 25)), 1)
+        // import 300 L1 messages
+        for (uint256 i = 0; i < 300; i++) {
+            messageQueue.appendCrossDomainMessage(address(this), 1000000, new bytes(0));
         }
-        rollup.importGenesisBatch(batchHeader0, bytes32(uint256(1)));
-        bytes32 batchHash0 = rollup.committedBatches(0);
 
-        bytes[] memory chunks = new bytes[](1);
-        bytes memory chunk0;
+        (, bytes memory batchHeader1, bytes memory batchHeader2) = _commitBatchV3();
 
-        // commit one batch
-        chunk0 = new bytes(1 + 60);
-        chunk0[0] = bytes1(uint8(1)); // one block in this chunk
-        chunks[0] = chunk0;
-        hevm.startPrank(address(0));
-        rollup.commitBatch(1, batchHeader0, chunks, new bytes(0));
-        bytes32 batchHash1 = rollup.committedBatches(1);
-        hevm.stopPrank();
+        // 1 ~ 4, zero
+        for (uint256 i = 1; i < 4; i++) {
+            assertBoolEq(messageQueue.isMessageSkipped(i), false);
+        }
+        // 4 ~ 9, even is nonzero, odd is zero
+        for (uint256 i = 4; i < 9; i++) {
+            if (i % 2 == 1 || i == 8) {
+                assertBoolEq(messageQueue.isMessageSkipped(i), false);
+            } else {
+                assertBoolEq(messageQueue.isMessageSkipped(i), true);
+            }
+        }
+        // 9 ~ 265, even is nonzero, odd is zero
+        for (uint256 i = 9; i < 265; i++) {
+            if (i % 2 == 1 || i == 264) {
+                assertBoolEq(messageQueue.isMessageSkipped(i), false);
+            } else {
+                assertBoolEq(messageQueue.isMessageSkipped(i), true);
+            }
+        }
 
-        bytes memory batchHeader1 = new bytes(121);
-        assembly {
-            mstore8(add(batchHeader1, 0x20), 1) // version
-            mstore(add(batchHeader1, add(0x20, 1)), shl(192, 1)) // batchIndex
-            mstore(add(batchHeader1, add(0x20, 9)), 0) // l1MessagePopped
-            mstore(add(batchHeader1, add(0x20, 17)), 0) // totalL1MessagePopped
-            mstore(add(batchHeader1, add(0x20, 25)), 0x246394445f4fe64ed5598554d55d1682d6fb3fe04bf58eb54ef81d1189fafb51) // dataHash
-            mstore(add(batchHeader1, add(0x20, 57)), blobVersionedHash) // blobVersionedHash
-            mstore(add(batchHeader1, add(0x20, 89)), batchHash0) // parentBatchHash
+        // revert batch 1 and batch 2
+        rollup.revertBatch(batchHeader1, batchHeader2);
+        assertEq(0, messageQueue.pendingQueueIndex());
+        assertEq(0, messageQueue.nextUnfinalizedQueueIndex());
+        for (uint256 i = 0; i < 265; i++) {
+            assertBoolEq(messageQueue.isMessageSkipped(i), false);
         }
+    }
-
-        // commit another batch
-        hevm.startPrank(address(0));
-        rollup.commitBatch(1, batchHeader1, chunks, new bytes(0));
-        hevm.stopPrank();
+    function testRevertBatch() external {
+        bytes[] memory headers = _prepareFinalizeBundle();
 
-        bytes memory batchHeader2 = new bytes(121);
-        assembly {
-            mstore8(add(batchHeader2, 0x20), 1) // version
-            mstore(add(batchHeader2, add(0x20, 1)), shl(192, 2)) // batchIndex
-            mstore(add(batchHeader2, add(0x20, 9)), 0) // l1MessagePopped
-            mstore(add(batchHeader2, add(0x20, 17)), 0) // totalL1MessagePopped
-            mstore(add(batchHeader2, add(0x20, 25)), 0x246394445f4fe64ed5598554d55d1682d6fb3fe04bf58eb54ef81d1189fafb51) // dataHash
-            mstore(add(batchHeader2, add(0x20, 57)), blobVersionedHash) // blobVersionedHash
-            mstore(add(batchHeader2, add(0x20, 89)), batchHash1) // parentBatchHash
-        }
+        // caller not owner, revert
+        hevm.startPrank(address(1));
+        hevm.expectRevert("Ownable: caller is not the owner");
+        rollup.revertBatch(new bytes(0), new bytes(0));
+        hevm.stopPrank();
 
         // incorrect batch hash of first header, revert
-        batchHeader1[1] = bytes1(uint8(1)); // change random byte
+        headers[2][1] = bytes1(uint8(1)); // change random byte
         hevm.expectRevert(ScrollChain.ErrorIncorrectBatchHash.selector);
-        rollup.revertBatch(batchHeader1, batchHeader0);
+        rollup.revertBatch(headers[2], headers[1]);
-        batchHeader1[1] = bytes1(uint8(0)); // change back
+        headers[2][1] = bytes1(uint8(0)); // change back
 
         // incorrect batch hash of second header, revert
-        batchHeader1[1] = bytes1(uint8(1)); // change random byte
+        headers[2][1] = bytes1(uint8(1)); // change random byte
         hevm.expectRevert(ScrollChain.ErrorIncorrectBatchHash.selector);
-        rollup.revertBatch(batchHeader0, batchHeader1);
+        rollup.revertBatch(headers[1], headers[2]);
-        batchHeader1[1] = bytes1(uint8(0)); // change back
+        headers[2][1] = bytes1(uint8(0)); // change back
 
         // count must be nonzero, revert
         hevm.expectRevert(ScrollChain.ErrorRevertZeroBatches.selector);
-        rollup.revertBatch(batchHeader1, batchHeader0);
+        rollup.revertBatch(headers[2], headers[1]);
 
         // revert middle batch, revert
         hevm.expectRevert(ScrollChain.ErrorRevertNotStartFromEnd.selector);
-        rollup.revertBatch(batchHeader1, batchHeader1);
+        rollup.revertBatch(headers[2], headers[3]);
 
         // can only revert unfinalized batch, revert
         hevm.expectRevert(ScrollChain.ErrorRevertFinalizedBatch.selector);
-        rollup.revertBatch(batchHeader0, batchHeader2);
-
-        // succeed to revert next two pending batches.
+        rollup.revertBatch(headers[0], headers[10]);
+
+        // succeed to revert batches 9 and 10
+        assertEq(rollup.committedBatches(9), keccak256(headers[9]));
+        assertEq(rollup.committedBatches(10), keccak256(headers[10]));
         hevm.expectEmit(true, true, false, true);
-        emit RevertBatch(2, rollup.committedBatches(2));
+        emit RevertBatch(10, rollup.committedBatches(10));
         hevm.expectEmit(true, true, false, true);
-        emit RevertBatch(1, rollup.committedBatches(1));
-
-        assertGt(uint256(rollup.committedBatches(1)), 0);
-        assertGt(uint256(rollup.committedBatches(2)), 0);
-        rollup.revertBatch(batchHeader1, batchHeader2);
-        assertEq(uint256(rollup.committedBatches(1)), 0);
-        assertEq(uint256(rollup.committedBatches(2)), 0);
+        emit RevertBatch(9, rollup.committedBatches(9));
+        rollup.revertBatch(headers[9], headers[10]);
+        assertEq(rollup.committedBatches(9), 0);
+        assertEq(rollup.committedBatches(10), 0);
+
+        // revert batches 6-8
+        rollup.revertBatch(headers[6], headers[8]);
+
+        // revert batches 4-5, with l1 messages
+        assertEq(10, messageQueue.pendingQueueIndex());
+        rollup.revertBatch(headers[4], headers[5]);
+        assertEq(6, messageQueue.pendingQueueIndex());
     }
 
     function testAddAndRemoveSequencer(address _sequencer) external {
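The three revert-path failures above (`ErrorRevertZeroBatches`, `ErrorRevertNotStartFromEnd`, `ErrorRevertFinalizedBatch`) together with the successful tail reverts correspond to one invariant: `revertBatch(first, last)` may only remove a non-empty suffix of the committed-but-unfinalized batches. Below is a self-contained sketch of that precondition stated over bare indices for readability; the parameter names are illustrative and the real checks in ScrollChain operate on the encoded batch headers, possibly in a different order.

    // SPDX-License-Identifier: MIT
    pragma solidity ^0.8.24;

    /// Illustrative precondition for revertBatch(first, last) as exercised by testRevertBatch
    /// (a review aid, not the actual implementation).
    library RevertBatchRulesSketch {
        function canRevert(
            uint256 firstIndex, // index of the first batch to revert
            uint256 lastIndex, // index of the last batch to revert
            uint256 lastCommittedIndex, // highest committed batch index
            uint256 lastFinalizedIndex // highest finalized batch index (genesis is finalized at import)
        ) internal pure returns (bool) {
            if (lastIndex < firstIndex) return false; // ErrorRevertZeroBatches
            if (lastIndex != lastCommittedIndex) return false; // ErrorRevertNotStartFromEnd
            if (firstIndex <= lastFinalizedIndex) return false; // ErrorRevertFinalizedBatch
            return true;
        }
    }

With the fixture above (genesis finalized, batches 1-10 committed), this admits the successful 9-10, 6-8 and 4-5 reverts in that order and rejects each of the three `expectRevert` calls.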
@@ -1589,11 +940,11 @@
         hevm.startPrank(address(0));
         hevm.expectRevert("Pausable: paused");
-        rollup.commitBatch(1, new bytes(0), new bytes[](0), new bytes(0));
+        rollup.commitBatchWithBlobProof(4, new bytes(0), new bytes[](0), new bytes(0), new bytes(0));
         hevm.expectRevert("Pausable: paused");
-        rollup.commitBatchWithBlobProof(3, new bytes(0), new bytes[](0), new bytes(0), new bytes(0));
+        rollup.commitBatchWithBlobProof(5, new bytes(0), new bytes[](0), new bytes(0), new bytes(0));
         hevm.expectRevert("Pausable: paused");
-        rollup.finalizeBatchWithProof4844(new bytes(0), bytes32(0), bytes32(0), bytes32(0), new bytes(0), new bytes(0));
+        rollup.commitBatchWithBlobProof(6, new bytes(0), new bytes[](0), new bytes(0), new bytes(0));
         hevm.expectRevert("Pausable: paused");
         rollup.finalizeBundleWithProof(new bytes(0), bytes32(0), bytes32(0), new bytes(0));
         hevm.stopPrank();
@@ -1693,4 +1044,161 @@ contract ScrollChainTest is DSTestPlus {
         TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(_logic, address(admin), new bytes(0));
         return address(proxy);
     }
+
+    function _upgradeToMockBlob() internal {
+        // upgrade to ScrollChainMockBlob for data mocking
+        ScrollChainMockBlob impl = new ScrollChainMockBlob(
+            rollup.layer2ChainId(),
+            rollup.messageQueue(),
+            rollup.verifier()
+        );
+        admin.upgrade(ITransparentUpgradeableProxy(address(rollup)), address(impl));
+        // from https://etherscan.io/blob/0x013590dc3544d56629ba81bb14d4d31248f825001653aa575eb8e3a719046757?bid=740652
+        bytes32 blobVersionedHash = 0x013590dc3544d56629ba81bb14d4d31248f825001653aa575eb8e3a719046757;
+        ScrollChainMockBlob(address(rollup)).setBlobVersionedHash(blobVersionedHash);
+    }
+
+    /// @dev Prepare 10 batches: each of the first 5 has 2 L1 messages, each of the last 5 has no L1 message.
+    function _prepareFinalizeBundle() internal returns (bytes[] memory headers) {
+        // grant roles
+        rollup.addProver(address(0));
+        rollup.addSequencer(address(0));
+        _upgradeToMockBlob();
+
+        headers = new bytes[](11);
+        // import 20 L1 messages
+        for (uint256 i = 0; i < 20; i++) {
+            messageQueue.appendCrossDomainMessage(address(this), 1000000, new bytes(0));
+        }
+        // commit genesis batch
+        headers[0] = _commitGenesisBatch();
+        // commit 5 batches, each has 2 l1 messages
+        for (uint256 i = 1; i <= 5; ++i) {
+            headers[i] = _commitBatch(4, headers[i - 1], 2, 1);
+        }
+        // commit 5 batches, each has 0 l1 message
+        for (uint256 i = 6; i <= 10; ++i) {
+            headers[i] = _commitBatch(4, headers[i - 1], 0, 1);
+        }
+    }
+
+    function _commitGenesisBatch() internal returns (bytes memory header) {
+        header = new bytes(89);
+        assembly {
+            mstore(add(header, add(0x20, 25)), 1)
+        }
+        rollup.importGenesisBatch(header, bytes32(uint256(1)));
+        assertEq(rollup.committedBatches(0), keccak256(header));
+    }
+
+    function _constructBatchStruct(
+        uint8 version,
+        bytes memory parentHeader,
+        uint256 numL1Message,
+        uint256 numL2Transaction
+    )
+        internal
+        view
+        returns (
+            bytes memory bitmap,
+            bytes[] memory chunks,
+            bytes memory blobDataProof,
+            uint256 index,
+            uint256 totalL1MessagePopped,
+            bytes memory header
+        )
+    {
+        uint256 batchPtr;
+        assembly {
+            batchPtr := add(parentHeader, 0x20)
+        }
+        index = BatchHeaderV0Codec.getBatchIndex(batchPtr) + 1;
+        totalL1MessagePopped = BatchHeaderV0Codec.getTotalL1MessagePopped(batchPtr) + numL1Message;
+        bytes32 parentHash = keccak256(parentHeader);
+        blobDataProof = hex"2c9d777660f14ad49803a6442935c0d24a0d83551de5995890bf70a17d24e68753ab0fe6807c7081f0885fe7da741554d658a03730b1fa006f8319f8b993bcb0a5a0c9e8a145c5ef6e415c245690effa2914ec9393f58a7251d30c0657da1453d9ad906eae8b97dd60c9a216f81b4df7af34d01e214e1ec5865f0133ecc16d7459e49dab66087340677751e82097fbdd20551d66076f425775d1758a9dfd186b";
+        bytes32[] memory hashes = new bytes32[](numL1Message);
+        for (uint256 i = 0; i < numL1Message; ++i) {
+            hashes[i] = messageQueue.getCrossDomainMessage(BatchHeaderV0Codec.getTotalL1MessagePopped(batchPtr) + i);
+        }
+        // commit batch, one chunk with one block, 1 + numL1Message tx, numL1Message L1 message
+        // payload for data hash of chunk0
+        //   hex(index)                                                         // block number
+        //   hex(index)                                                         // timestamp
+        //   0000000000000000000000000000000000000000000000000000000000000000   // baseFee
+        //   0000000000000000                                                   // gasLimit
+        //   hex(numL2Transaction + numL1Message)                               // numTransactions
+        //   ...                                                                // l1 messages
+        // data hash for chunk0
+        //   keccak256(chunk0)
+        // data hash for all chunks
+        //   keccak256(keccak256(chunk0))
+        // => payload for batch header
+        //   03                                                                 // version
+        //   hex(index)                                                         // batchIndex
+        //   hex(numL1Message)                                                  // l1MessagePopped
+        //   hex(totalL1MessagePopped)                                          // totalL1MessagePopped
+        //   keccak256(keccak256(chunk0))                                       // dataHash
+        //   013590dc3544d56629ba81bb14d4d31248f825001653aa575eb8e3a719046757   // blobVersionedHash
+        //   keccak256(parentHeader)                                            // parentBatchHash
+        //   hex(index)                                                         // lastBlockTimestamp
+        //   2c9d777660f14ad49803a6442935c0d24a0d83551de5995890bf70a17d24e687   // blobDataProof
+        //   53ab0fe6807c7081f0885fe7da741554d658a03730b1fa006f8319f8b993bcb0   // blobDataProof
+        if (numL1Message > 0) bitmap = new bytes(32);
+        chunks = new bytes[](1);
+        bytes memory chunk0;
+        chunk0 = new bytes(1 + 60);
+        assembly {
+            mstore(add(chunk0, 0x20), shl(248, 1)) // numBlocks = 1
+            mstore(add(chunk0, add(0x21, 8)), shl(192, index)) // timestamp = index
+            mstore(add(chunk0, add(0x21, 56)), shl(240, add(numL1Message, numL2Transaction))) // numTransactions = numL2Transaction + numL1Message
+            mstore(add(chunk0, add(0x21, 58)), shl(240, numL1Message)) // numL1Messages
+        }
+        chunks[0] = chunk0;
+        bytes memory chunkData = new bytes(58 + numL1Message * 32);
+        assembly {
+            mcopy(add(chunkData, 0x20), add(chunk0, 0x21), 58)
+            mcopy(add(chunkData, 0x5a), add(hashes, 0x20), mul(32, mload(hashes)))
+        }
+        bytes32 dataHash = keccak256(abi.encode(keccak256(chunkData)));
+        header = new bytes(193);
+        assembly {
+            mstore8(add(header, 0x20), version) // version
+            mstore(add(header, add(0x20, 1)), shl(192, index)) // batchIndex
+            mstore(add(header, add(0x20, 9)), shl(192, numL1Message)) // l1MessagePopped
+            mstore(add(header, add(0x20, 17)), shl(192, totalL1MessagePopped)) // totalL1MessagePopped
+            mstore(add(header, add(0x20, 25)), dataHash) // dataHash
+            mstore(add(header, add(0x20, 57)), 0x013590dc3544d56629ba81bb14d4d31248f825001653aa575eb8e3a719046757) // blobVersionedHash
+            mstore(add(header, add(0x20, 89)), parentHash) // parentBatchHash
+            mstore(add(header, add(0x20, 121)), shl(192, index)) // lastBlockTimestamp
+            mcopy(add(header, add(0x20, 129)), add(blobDataProof, 0x20), 64) // blobDataProof
+        }
+    }
+
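The 193-byte header assembled above follows the fixed layout described in the comment block: version (1 byte), batchIndex, l1MessagePopped and totalL1MessagePopped (8 bytes each), dataHash, blobVersionedHash and parentBatchHash (32 bytes each), lastBlockTimestamp (8 bytes), and a 64-byte blobDataProof tail. A stand-alone decoder sketch mirroring the `mstore` offsets used by `_constructBatchStruct` is given below; it is a review aid only, and production code should keep going through the BatchHeaderCodec libraries.

    // SPDX-License-Identifier: MIT
    pragma solidity ^0.8.24;

    /// Reads back the fixed fields of the 193-byte header built by _constructBatchStruct.
    /// Offsets mirror the mstore calls above; this is not the canonical codec.
    library BatchHeaderSketch {
        function decode(bytes memory header)
            internal
            pure
            returns (
                uint8 version,
                uint64 batchIndex,
                uint64 l1MessagePopped,
                uint64 totalL1MessagePopped,
                bytes32 dataHash,
                bytes32 blobVersionedHash,
                bytes32 parentBatchHash,
                uint64 lastBlockTimestamp
            )
        {
            require(header.length == 193, "unexpected header length");
            assembly {
                let ptr := add(header, 0x20)
                version := shr(248, mload(ptr)) // byte 0
                batchIndex := shr(192, mload(add(ptr, 1))) // bytes [1, 9)
                l1MessagePopped := shr(192, mload(add(ptr, 9))) // bytes [9, 17)
                totalL1MessagePopped := shr(192, mload(add(ptr, 17))) // bytes [17, 25)
                dataHash := mload(add(ptr, 25)) // bytes [25, 57)
                blobVersionedHash := mload(add(ptr, 57)) // bytes [57, 89)
                parentBatchHash := mload(add(ptr, 89)) // bytes [89, 121)
                lastBlockTimestamp := shr(192, mload(add(ptr, 121))) // bytes [121, 129)
                // bytes [129, 193) hold the 64-byte blobDataProof and are not decoded here
            }
        }
    }

For a header produced by `_commitBatch(4, headers[i - 1], 2, 1)`, `decode` returns version 4, the incremented batch index, and the mocked blob versioned hash 0x013590dc…6757.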
+    function _commitBatch(
+        uint8 version,
+        bytes memory parentHeader,
+        uint256 numL1Message,
+        uint256 numL2Transaction
+    ) internal returns (bytes memory) {
+        (
+            bytes memory bitmap,
+            bytes[] memory chunks,
+            bytes memory blobDataProof,
+            uint256 index,
+            uint256 totalL1MessagePopped,
+            bytes memory header
+        ) = _constructBatchStruct(version, parentHeader, numL1Message, numL2Transaction);
+        hevm.startPrank(address(0));
+        if (numL1Message > 0) {
+            hevm.expectEmit(false, false, false, true);
+            emit DequeueTransaction(totalL1MessagePopped - numL1Message, numL1Message, 0);
+        }
+        hevm.expectEmit(true, true, false, true);
+        emit CommitBatch(index, keccak256(header));
+        rollup.commitBatchWithBlobProof(version, parentHeader, chunks, bitmap, blobDataProof);
+        hevm.stopPrank();
+        assertEq(rollup.committedBatches(index), keccak256(header));
+        assertEq(messageQueue.pendingQueueIndex(), totalL1MessagePopped);
+        return header;
+    }
 }
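For context, a minimal sketch of how the new helpers compose in an additional test; it assumes the same mocked verifier and proxy setup the rest of this suite uses, lives inside the existing ScrollChainTest contract, and is not part of the patch.

    function testFinalizeWholeBundleSketch() external {
        bytes[] memory headers = _prepareFinalizeBundle(); // genesis + 10 committed v4 batches
        hevm.startPrank(address(0)); // the helper granted address(0) the prover role
        // with the mocked verifier an empty aggregation proof passes; roots only need to be nonzero
        rollup.finalizeBundleWithProof(headers[10], keccak256("state"), keccak256("withdraw"), new bytes(0));
        hevm.stopPrank();
        assertEq(rollup.lastFinalizedBatchIndex(), 10);
        assertBoolEq(rollup.isBatchFinalized(10), true);
    }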