From 3d92f68729167b6d7e2a087fcd583cb0ac0ab264 Mon Sep 17 00:00:00 2001 From: colin <102356659+colinlyguo@users.noreply.github.com> Date: Mon, 21 Oct 2024 22:24:38 +0800 Subject: [PATCH] refactor: use new da-codec interfaces (#1068) * refactor: use new da-codec interfaces * go mod tidy * tweak * add a tweak * tweak logs * update da-codec commit * refactor: use new da codec interfaces in syncing from L1 (#1078) * use new da-codec interface in syncing from l1 * delete unused * nit * uncomment * typo --------- Co-authored-by: colinlyguo * update da-codec * use IsL1MessageSkipped in da-codec repo * use canonical version * address AI's comments: add nil checks --------- Co-authored-by: Nazarii Denha --- core/rawdb/accessors_rollup_event.go | 41 --- core/rawdb/accessors_rollup_event_test.go | 64 ----- core/rawdb/schema.go | 6 - go.mod | 4 +- go.sum | 12 +- rollup/da_syncer/batch_queue.go | 2 +- rollup/da_syncer/da/calldata_blob_source.go | 27 +- rollup/da_syncer/da/commitV0.go | 27 +- rollup/da_syncer/da/commitV1.go | 30 +-- rollup/da_syncer/da/commitV2.go | 40 --- rollup/da_syncer/da/commitV4.go | 40 --- rollup/da_syncer/da/da.go | 8 +- .../rollup_sync_service.go | 245 +++--------------- .../rollup_sync_service_test.go | 225 ++++++++++------ 14 files changed, 234 insertions(+), 537 deletions(-) delete mode 100644 rollup/da_syncer/da/commitV2.go delete mode 100644 rollup/da_syncer/da/commitV4.go diff --git a/core/rawdb/accessors_rollup_event.go b/core/rawdb/accessors_rollup_event.go index 6670b4b7b85f..1b60f6e4f0d8 100644 --- a/core/rawdb/accessors_rollup_event.go +++ b/core/rawdb/accessors_rollup_event.go @@ -58,47 +58,6 @@ func ReadRollupEventSyncedL1BlockNumber(db ethdb.Reader) *uint64 { return &rollupEventSyncedL1BlockNumber } -// WriteBatchChunkRanges writes the block ranges for each chunk within a batch to the database. -// It serializes the chunk ranges using RLP and stores them under a key derived from the batch index. -// for backward compatibility, new info is also stored in CommittedBatchMeta. -func WriteBatchChunkRanges(db ethdb.KeyValueWriter, batchIndex uint64, chunkBlockRanges []*ChunkBlockRange) { - value, err := rlp.EncodeToBytes(chunkBlockRanges) - if err != nil { - log.Crit("failed to RLP encode batch chunk ranges", "batch index", batchIndex, "err", err) - } - if err := db.Put(batchChunkRangesKey(batchIndex), value); err != nil { - log.Crit("failed to store batch chunk ranges", "batch index", batchIndex, "value", value, "err", err) - } -} - -// DeleteBatchChunkRanges removes the block ranges of all chunks associated with a specific batch from the database. -// Note: Only non-finalized batches can be reverted. -// for backward compatibility, new info is also stored in CommittedBatchMeta. -func DeleteBatchChunkRanges(db ethdb.KeyValueWriter, batchIndex uint64) { - if err := db.Delete(batchChunkRangesKey(batchIndex)); err != nil { - log.Crit("failed to delete batch chunk ranges", "batch index", batchIndex, "err", err) - } -} - -// ReadBatchChunkRanges retrieves the block ranges of all chunks associated with a specific batch from the database. -// It returns a list of ChunkBlockRange pointers, or nil if no chunk ranges are found for the given batch index. -// for backward compatibility, new info is also stored in CommittedBatchMeta. 
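With these accessors gone, chunk block ranges are read out of CommittedBatchMeta instead. A minimal sketch of the replacement read path (not part of this diff), assuming ReadCommittedBatchMeta keeps the same ethdb.Reader signature as the deleted ReadBatchChunkRanges and the usual rawdb convention of returning nil for missing entries:

    package example

    import (
        "fmt"

        "github.com/scroll-tech/go-ethereum/core/rawdb"
        "github.com/scroll-tech/go-ethereum/ethdb"
    )

    // chunkRangesForBatch reads chunk block ranges from the committed batch
    // metadata, which replaces the dedicated R-bcr records deleted here.
    func chunkRangesForBatch(db ethdb.Reader, batchIndex uint64) ([]*rawdb.ChunkBlockRange, error) {
        meta := rawdb.ReadCommittedBatchMeta(db, batchIndex) // assumed: nil if not found
        if meta == nil {
            return nil, fmt.Errorf("no committed batch meta for batch index %d", batchIndex)
        }
        return meta.ChunkBlockRanges, nil
    }
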
-func ReadBatchChunkRanges(db ethdb.Reader, batchIndex uint64) []*ChunkBlockRange { - data, err := db.Get(batchChunkRangesKey(batchIndex)) - if err != nil && isNotFoundErr(err) { - return nil - } - if err != nil { - log.Crit("failed to read batch chunk ranges from database", "err", err) - } - - cr := new([]*ChunkBlockRange) - if err := rlp.Decode(bytes.NewReader(data), cr); err != nil { - log.Crit("Invalid ChunkBlockRange RLP", "batch index", batchIndex, "data", data, "err", err) - } - return *cr -} - // WriteFinalizedBatchMeta stores the metadata of a finalized batch in the database. func WriteFinalizedBatchMeta(db ethdb.KeyValueWriter, batchIndex uint64, finalizedBatchMeta *FinalizedBatchMeta) { value, err := rlp.EncodeToBytes(finalizedBatchMeta) diff --git a/core/rawdb/accessors_rollup_event_test.go b/core/rawdb/accessors_rollup_event_test.go index bbe82efde59a..5eb165dcb0c8 100644 --- a/core/rawdb/accessors_rollup_event_test.go +++ b/core/rawdb/accessors_rollup_event_test.go @@ -147,70 +147,6 @@ func TestFinalizedBatchMeta(t *testing.T) { } } -func TestBatchChunkRanges(t *testing.T) { - chunks := [][]*ChunkBlockRange{ - { - {StartBlockNumber: 1, EndBlockNumber: 100}, - {StartBlockNumber: 101, EndBlockNumber: 200}, - }, - { - {StartBlockNumber: 201, EndBlockNumber: 300}, - {StartBlockNumber: 301, EndBlockNumber: 400}, - }, - { - {StartBlockNumber: 401, EndBlockNumber: 500}, - }, - } - - db := NewMemoryDatabase() - - for i, chunkRange := range chunks { - batchIndex := uint64(i) - WriteBatchChunkRanges(db, batchIndex, chunkRange) - } - - for i, chunkRange := range chunks { - batchIndex := uint64(i) - readChunkRange := ReadBatchChunkRanges(db, batchIndex) - if len(readChunkRange) != len(chunkRange) { - t.Fatal("Mismatch in number of chunk ranges", "expected", len(chunkRange), "got", len(readChunkRange)) - } - - for j, cr := range readChunkRange { - if cr.StartBlockNumber != chunkRange[j].StartBlockNumber || cr.EndBlockNumber != chunkRange[j].EndBlockNumber { - t.Fatal("Mismatch in chunk range", "batch index", batchIndex, "expected", chunkRange[j], "got", cr) - } - } - } - - // over-write - newRange := []*ChunkBlockRange{{StartBlockNumber: 1001, EndBlockNumber: 1100}} - WriteBatchChunkRanges(db, 0, newRange) - readChunkRange := ReadBatchChunkRanges(db, 0) - if len(readChunkRange) != 1 || readChunkRange[0].StartBlockNumber != 1001 || readChunkRange[0].EndBlockNumber != 1100 { - t.Fatal("Over-write failed for chunk range", "expected", newRange, "got", readChunkRange) - } - - // read non-existing value - if readChunkRange = ReadBatchChunkRanges(db, uint64(len(chunks)+1)); readChunkRange != nil { - t.Fatal("Expected nil for non-existing value", "got", readChunkRange) - } - - // delete: revert batch - for i := range chunks { - batchIndex := uint64(i) - DeleteBatchChunkRanges(db, batchIndex) - - readChunkRange := ReadBatchChunkRanges(db, batchIndex) - if readChunkRange != nil { - t.Fatal("Chunk range was not deleted", "batch index", batchIndex) - } - } - - // delete non-existing value: ensure the delete operation handles non-existing values without errors. 
- DeleteBatchChunkRanges(db, uint64(len(chunks)+1)) -} - func TestWriteReadDeleteCommittedBatchMeta(t *testing.T) { db := NewMemoryDatabase() diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index b553045f0a40..aec2c365f661 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -149,7 +149,6 @@ var ( // Scroll rollup event store rollupEventSyncedL1BlockNumberKey = []byte("R-LastRollupEventSyncedL1BlockNumber") - batchChunkRangesPrefix = []byte("R-bcr") batchMetaPrefix = []byte("R-bm") finalizedL2BlockNumberKey = []byte("R-finalized") lastFinalizedBatchIndexKey = []byte("R-finalizedBatchIndex") @@ -410,11 +409,6 @@ func SkippedTransactionHashKey(index uint64) []byte { return append(skippedTransactionHashPrefix, encodeBigEndian(index)...) } -// batchChunkRangesKey = batchChunkRangesPrefix + batch index (uint64 big endian) -func batchChunkRangesKey(batchIndex uint64) []byte { - return append(batchChunkRangesPrefix, encodeBigEndian(batchIndex)...) -} - // batchMetaKey = batchMetaPrefix + batch index (uint64 big endian) func batchMetaKey(batchIndex uint64) []byte { return append(batchMetaPrefix, encodeBigEndian(batchIndex)...) diff --git a/go.mod b/go.mod index ab4da0ed3a09..eb5cbb594568 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.21 require ( github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 github.com/Microsoft/go-winio v0.6.1 - github.com/VictoriaMetrics/fastcache v1.12.1 + github.com/VictoriaMetrics/fastcache v1.12.2 github.com/aws/aws-sdk-go-v2 v1.21.2 github.com/aws/aws-sdk-go-v2/config v1.18.45 github.com/aws/aws-sdk-go-v2/credentials v1.13.43 @@ -57,7 +57,7 @@ require ( github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7 github.com/rs/cors v1.7.0 - github.com/scroll-tech/da-codec v0.1.1-0.20240902151734-41c648646967 + github.com/scroll-tech/da-codec v0.1.2 github.com/scroll-tech/zktrie v0.8.4 github.com/shirou/gopsutil v3.21.11+incompatible github.com/sourcegraph/conc v0.3.0 diff --git a/go.sum b/go.sum index e3c2b369d3f9..fa1a27460115 100644 --- a/go.sum +++ b/go.sum @@ -51,8 +51,10 @@ github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= -github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= +github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= +github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= +github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38= +github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -472,8 +474,8 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod 
h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/scroll-tech/da-codec v0.1.1-0.20240902151734-41c648646967 h1:FSM0l1n5KszBjPFOnMbSa4pg3zv07DYIU2VnH6BUH34= -github.com/scroll-tech/da-codec v0.1.1-0.20240902151734-41c648646967/go.mod h1:O9jsbQGNnTEfyfZg7idevq6jGGSQshX70elX+TRH8vU= +github.com/scroll-tech/da-codec v0.1.2 h1:QyJ+dQ4zWVVJwuqxNt4MiKyrymVc6rHe4YPtURkjiRc= +github.com/scroll-tech/da-codec v0.1.2/go.mod h1:odz1ck3umvYccCG03osaQBISAYGinZktZYbpk94fYRE= github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= @@ -693,9 +695,9 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= diff --git a/rollup/da_syncer/batch_queue.go b/rollup/da_syncer/batch_queue.go index 7a3d094f6322..a0172a86c077 100644 --- a/rollup/da_syncer/batch_queue.go +++ b/rollup/da_syncer/batch_queue.go @@ -41,7 +41,7 @@ func (bq *BatchQueue) NextBatch(ctx context.Context) (da.Entry, error) { return nil, err } switch daEntry.Type() { - case da.CommitBatchV0Type, da.CommitBatchV1Type, da.CommitBatchV2Type: + case da.CommitBatchV0Type, da.CommitBatchWithBlobType: bq.addBatch(daEntry) case da.RevertBatchType: bq.deleteBatch(daEntry) diff --git a/rollup/da_syncer/da/calldata_blob_source.go b/rollup/da_syncer/da/calldata_blob_source.go index 231cc4c1829e..47eabfceb65f 100644 --- a/rollup/da_syncer/da/calldata_blob_source.go +++ b/rollup/da_syncer/da/calldata_blob_source.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" + "github.com/scroll-tech/da-codec/encoding" "github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core/types" @@ -205,19 +206,21 @@ func (ds *CalldataBlobSource) getCommitBatchDA(batchIndex uint64, vLog *types.Lo if err != nil { return nil, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err) } - if method.Name == commitBatchMethodName { args, err := newCommitBatchArgs(method, values) if err != nil { return nil, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err) } + codecVersion := encoding.CodecVersion(args.Version) + codec, err := encoding.CodecFromVersion(codecVersion) + if err != nil { + return nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, 
err: %w", codecVersion, batchIndex, err) + } switch args.Version { case 0: - return NewCommitBatchDAV0(ds.db, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap, vLog.BlockNumber) - case 1: - return NewCommitBatchDAV1(ds.ctx, ds.db, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) - case 2: - return NewCommitBatchDAV2(ds.ctx, ds.db, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) + return NewCommitBatchDAV0(ds.db, codec, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap, vLog.BlockNumber) + case 1, 2: + return NewCommitBatchDAWithBlob(ds.ctx, ds.db, codec, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) default: return nil, fmt.Errorf("failed to decode DA, codec version is unknown: codec version: %d", args.Version) } @@ -226,12 +229,14 @@ func (ds *CalldataBlobSource) getCommitBatchDA(batchIndex uint64, vLog *types.Lo if err != nil { return nil, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err) } + codecVersion := encoding.CodecVersion(args.Version) + codec, err := encoding.CodecFromVersion(codecVersion) + if err != nil { + return nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", codecVersion, batchIndex, err) + } switch args.Version { - case 3: - // we can use V2 for version 3, because it's same - return NewCommitBatchDAV2(ds.ctx, ds.db, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) - case 4: - return NewCommitBatchDAV4(ds.ctx, ds.db, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) + case 3, 4: + return NewCommitBatchDAWithBlob(ds.ctx, ds.db, codec, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) default: return nil, fmt.Errorf("failed to decode DA, codec version is unknown: codec version: %d", args.Version) } diff --git a/rollup/da_syncer/da/commitV0.go b/rollup/da_syncer/da/commitV0.go index 66a13786c9cb..135a76d79518 100644 --- a/rollup/da_syncer/da/commitV0.go +++ b/rollup/da_syncer/da/commitV0.go @@ -5,7 +5,6 @@ import ( "fmt" "github.com/scroll-tech/da-codec/encoding" - "github.com/scroll-tech/da-codec/encoding/codecv0" "github.com/scroll-tech/go-ethereum/core/rawdb" "github.com/scroll-tech/go-ethereum/core/types" @@ -18,13 +17,14 @@ type CommitBatchDAV0 struct { batchIndex uint64 parentTotalL1MessagePopped uint64 skippedL1MessageBitmap []byte - chunks []*codecv0.DAChunkRawTx + chunks []*encoding.DAChunkRawTx l1Txs []*types.L1MessageTx l1BlockNumber uint64 } func NewCommitBatchDAV0(db ethdb.Database, + codec encoding.Codec, version uint8, batchIndex uint64, parentBatchHeader []byte, @@ -32,7 +32,7 @@ func NewCommitBatchDAV0(db ethdb.Database, skippedL1MessageBitmap []byte, l1BlockNumber uint64, ) (*CommitBatchDAV0, error) { - decodedChunks, err := codecv0.DecodeDAChunksRawTx(chunks) + decodedChunks, err := codec.DecodeDAChunksRawTx(chunks) if err != nil { return nil, fmt.Errorf("failed to unpack chunks: %d, err: %w", batchIndex, err) } @@ -44,7 +44,7 @@ func NewCommitBatchDAV0WithChunks(db ethdb.Database, version uint8, batchIndex uint64, parentBatchHeader []byte, - 
decodedChunks []*codecv0.DAChunkRawTx, + decodedChunks []*encoding.DAChunkRawTx, skippedL1MessageBitmap []byte, l1BlockNumber uint64, ) (*CommitBatchDAV0, error) { @@ -100,24 +100,24 @@ func (c *CommitBatchDAV0) Blocks() []*PartialBlock { for _, chunk := range c.chunks { for blockId, daBlock := range chunk.Blocks { // create txs - txs := make(types.Transactions, 0, daBlock.NumTransactions) + txs := make(types.Transactions, 0, daBlock.NumTransactions()) // insert l1 msgs - for l1TxPointer < len(c.l1Txs) && c.l1Txs[l1TxPointer].QueueIndex < curL1TxIndex+uint64(daBlock.NumL1Messages) { + for l1TxPointer < len(c.l1Txs) && c.l1Txs[l1TxPointer].QueueIndex < curL1TxIndex+uint64(daBlock.NumL1Messages()) { l1Tx := types.NewTx(c.l1Txs[l1TxPointer]) txs = append(txs, l1Tx) l1TxPointer++ } - curL1TxIndex += uint64(daBlock.NumL1Messages) + curL1TxIndex += uint64(daBlock.NumL1Messages()) // insert l2 txs txs = append(txs, chunk.Transactions[blockId]...) block := NewPartialBlock( &PartialHeader{ - Number: daBlock.BlockNumber, - Time: daBlock.Timestamp, - BaseFee: daBlock.BaseFee, - GasLimit: daBlock.GasLimit, + Number: daBlock.Number(), + Time: daBlock.Timestamp(), + BaseFee: daBlock.BaseFee(), + GasLimit: daBlock.GasLimit(), Difficulty: 10, // TODO: replace with real difficulty ExtraData: []byte{1, 2, 3, 4, 5, 6, 7, 8}, // TODO: replace with real extra data }, @@ -129,11 +129,11 @@ func (c *CommitBatchDAV0) Blocks() []*PartialBlock { return blocks } -func getTotalMessagesPoppedFromChunks(decodedChunks []*codecv0.DAChunkRawTx) int { +func getTotalMessagesPoppedFromChunks(decodedChunks []*encoding.DAChunkRawTx) int { totalL1MessagePopped := 0 for _, chunk := range decodedChunks { for _, block := range chunk.Blocks { - totalL1MessagePopped += int(block.NumL1Messages) + totalL1MessagePopped += int(block.NumL1Messages()) } } return totalL1MessagePopped @@ -141,7 +141,6 @@ func getTotalMessagesPoppedFromChunks(decodedChunks []*codecv0.DAChunkRawTx) int func getL1Messages(db ethdb.Database, parentTotalL1MessagePopped uint64, skippedBitmap []byte, totalL1MessagePopped int) ([]*types.L1MessageTx, error) { var txs []*types.L1MessageTx - decodedSkippedBitmap, err := encoding.DecodeBitmap(skippedBitmap, totalL1MessagePopped) if err != nil { return nil, fmt.Errorf("failed to decode skipped message bitmap: err: %w", err) diff --git a/rollup/da_syncer/da/commitV1.go b/rollup/da_syncer/da/commitV1.go index d94a046c81df..4670eec8bbcb 100644 --- a/rollup/da_syncer/da/commitV1.go +++ b/rollup/da_syncer/da/commitV1.go @@ -5,8 +5,7 @@ import ( "crypto/sha256" "fmt" - "github.com/scroll-tech/da-codec/encoding/codecv0" - "github.com/scroll-tech/da-codec/encoding/codecv1" + "github.com/scroll-tech/da-codec/encoding" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" @@ -21,7 +20,8 @@ type CommitBatchDAV1 struct { *CommitBatchDAV0 } -func NewCommitBatchDAV1(ctx context.Context, db ethdb.Database, +func NewCommitBatchDAWithBlob(ctx context.Context, db ethdb.Database, + codec encoding.Codec, l1Client *rollup_sync_service.L1Client, blobClient blob_client.BlobClient, vLog *types.Log, @@ -31,21 +31,7 @@ func NewCommitBatchDAV1(ctx context.Context, db ethdb.Database, chunks [][]byte, skippedL1MessageBitmap []byte, ) (*CommitBatchDAV1, error) { - return NewCommitBatchDAV1WithBlobDecodeFunc(ctx, db, l1Client, blobClient, vLog, version, batchIndex, parentBatchHeader, chunks, skippedL1MessageBitmap, codecv1.DecodeTxsFromBlob) -} - -func 
NewCommitBatchDAV1WithBlobDecodeFunc(ctx context.Context, db ethdb.Database, - l1Client *rollup_sync_service.L1Client, - blobClient blob_client.BlobClient, - vLog *types.Log, - version uint8, - batchIndex uint64, - parentBatchHeader []byte, - chunks [][]byte, - skippedL1MessageBitmap []byte, - decodeTxsFromBlobFunc func(*kzg4844.Blob, []*codecv0.DAChunkRawTx) error, -) (*CommitBatchDAV1, error) { - decodedChunks, err := codecv1.DecodeDAChunksRawTx(chunks) + decodedChunks, err := codec.DecodeDAChunksRawTx(chunks) if err != nil { return nil, fmt.Errorf("failed to unpack chunks: %v, err: %w", batchIndex, err) } @@ -74,11 +60,15 @@ func NewCommitBatchDAV1WithBlobDecodeFunc(ctx context.Context, db ethdb.Database } // decode txs from blob - err = decodeTxsFromBlobFunc(blob, decodedChunks) + err = codec.DecodeTxsFromBlob(blob, decodedChunks) if err != nil { return nil, fmt.Errorf("failed to decode txs from blob: %w", err) } + if decodedChunks == nil { + return nil, fmt.Errorf("decodedChunks is nil after decoding") + } + v0, err := NewCommitBatchDAV0WithChunks(db, version, batchIndex, parentBatchHeader, decodedChunks, skippedL1MessageBitmap, vLog.BlockNumber) if err != nil { return nil, err @@ -88,5 +78,5 @@ func NewCommitBatchDAV1WithBlobDecodeFunc(ctx context.Context, db ethdb.Database } func (c *CommitBatchDAV1) Type() Type { - return CommitBatchV1Type + return CommitBatchWithBlobType } diff --git a/rollup/da_syncer/da/commitV2.go b/rollup/da_syncer/da/commitV2.go deleted file mode 100644 index c1e6d353fc5b..000000000000 --- a/rollup/da_syncer/da/commitV2.go +++ /dev/null @@ -1,40 +0,0 @@ -package da - -import ( - "context" - - "github.com/scroll-tech/da-codec/encoding/codecv2" - - "github.com/scroll-tech/go-ethereum/ethdb" - "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" - "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" - - "github.com/scroll-tech/go-ethereum/core/types" -) - -type CommitBatchDAV2 struct { - *CommitBatchDAV1 -} - -func NewCommitBatchDAV2(ctx context.Context, db ethdb.Database, - l1Client *rollup_sync_service.L1Client, - blobClient blob_client.BlobClient, - vLog *types.Log, - version uint8, - batchIndex uint64, - parentBatchHeader []byte, - chunks [][]byte, - skippedL1MessageBitmap []byte, -) (*CommitBatchDAV2, error) { - - v1, err := NewCommitBatchDAV1WithBlobDecodeFunc(ctx, db, l1Client, blobClient, vLog, version, batchIndex, parentBatchHeader, chunks, skippedL1MessageBitmap, codecv2.DecodeTxsFromBlob) - if err != nil { - return nil, err - } - - return &CommitBatchDAV2{v1}, nil -} - -func (c *CommitBatchDAV2) Type() Type { - return CommitBatchV2Type -} diff --git a/rollup/da_syncer/da/commitV4.go b/rollup/da_syncer/da/commitV4.go deleted file mode 100644 index 9b590b2bfff5..000000000000 --- a/rollup/da_syncer/da/commitV4.go +++ /dev/null @@ -1,40 +0,0 @@ -package da - -import ( - "context" - - "github.com/scroll-tech/da-codec/encoding/codecv4" - - "github.com/scroll-tech/go-ethereum/ethdb" - "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" - "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" - - "github.com/scroll-tech/go-ethereum/core/types" -) - -type CommitBatchDAV4 struct { - *CommitBatchDAV1 -} - -func NewCommitBatchDAV4(ctx context.Context, db ethdb.Database, - l1Client *rollup_sync_service.L1Client, - blobClient blob_client.BlobClient, - vLog *types.Log, - version uint8, - batchIndex uint64, - parentBatchHeader []byte, - chunks [][]byte, - skippedL1MessageBitmap []byte, -) (*CommitBatchDAV2, error) { - - 
v1, err := NewCommitBatchDAV1WithBlobDecodeFunc(ctx, db, l1Client, blobClient, vLog, version, batchIndex, parentBatchHeader, chunks, skippedL1MessageBitmap, codecv4.DecodeTxsFromBlob) - if err != nil { - return nil, err - } - - return &CommitBatchDAV2{v1}, nil -} - -func (c *CommitBatchDAV4) Type() Type { - return CommitBatchV4Type -} diff --git a/rollup/da_syncer/da/da.go b/rollup/da_syncer/da/da.go index 5f00e86115a1..1ad618d7ba3d 100644 --- a/rollup/da_syncer/da/da.go +++ b/rollup/da_syncer/da/da.go @@ -11,12 +11,8 @@ type Type int const ( // CommitBatchV0Type contains data of event of CommitBatchV0Type CommitBatchV0Type Type = iota - // CommitBatchV1Type contains data of event of CommitBatchV1Type - CommitBatchV1Type - // CommitBatchV2Type contains data of event of CommitBatchV2Type - CommitBatchV2Type - // CommitBatchV4Type contains data of event of CommitBatchV2Type - CommitBatchV4Type + // CommitBatchWithBlobType contains data of event of CommitBatchWithBlobType (v1, v2, v3, v4) + CommitBatchWithBlobType // RevertBatchType contains data of event of RevertBatchType RevertBatchType // FinalizeBatchType contains data of event of FinalizeBatchType diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go index 6838342adcf5..e132456b2cad 100644 --- a/rollup/rollup_sync_service/rollup_sync_service.go +++ b/rollup/rollup_sync_service/rollup_sync_service.go @@ -4,18 +4,12 @@ import ( "context" "encoding/json" "fmt" - "math/big" "os" "reflect" "sync" "time" "github.com/scroll-tech/da-codec/encoding" - "github.com/scroll-tech/da-codec/encoding/codecv0" - "github.com/scroll-tech/da-codec/encoding/codecv1" - "github.com/scroll-tech/da-codec/encoding/codecv2" - "github.com/scroll-tech/da-codec/encoding/codecv3" - "github.com/scroll-tech/da-codec/encoding/codecv4" "github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/common" @@ -221,12 +215,11 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB batchIndex := event.BatchIndex.Uint64() log.Trace("found new CommitBatch event", "batch index", batchIndex) - committedBatchMeta, chunkBlockRanges, err := s.getCommittedBatchMeta(batchIndex, &vLog) + committedBatchMeta, err := s.getCommittedBatchMeta(batchIndex, &vLog) if err != nil { return fmt.Errorf("failed to get chunk ranges, batch index: %v, err: %w", batchIndex, err) } rawdb.WriteCommittedBatchMeta(s.db, batchIndex, committedBatchMeta) - rawdb.WriteBatchChunkRanges(s.db, batchIndex, chunkBlockRanges) case s.l1RevertBatchEventSignature: event := &L1RevertBatchEvent{} @@ -237,7 +230,6 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB log.Trace("found new RevertBatch event", "batch index", batchIndex) rawdb.DeleteCommittedBatchMeta(s.db, batchIndex) - rawdb.DeleteBatchChunkRanges(s.db, batchIndex) case s.l1FinalizeBatchEventSignature: event := &L1FinalizeBatchEvent{} @@ -272,12 +264,12 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB for index := startBatchIndex; index <= batchIndex; index++ { committedBatchMeta := rawdb.ReadCommittedBatchMeta(s.db, index) - chunks, err := s.getLocalChunksForBatch(index) + chunks, err := s.getLocalChunksForBatch(committedBatchMeta.ChunkBlockRanges) if err != nil { return fmt.Errorf("failed to get local node info, batch index: %v, err: %w", index, err) } - endBlock, finalizedBatchMeta, err := validateBatch(index, event, parentFinalizedBatchMeta, committedBatchMeta, chunks, 
s.bc.Config(), s.stack) + endBlock, finalizedBatchMeta, err := validateBatch(index, event, parentFinalizedBatchMeta, committedBatchMeta, chunks, s.stack) if err != nil { return fmt.Errorf("fatal: validateBatch failed: finalize event: %v, err: %w", event, err) } @@ -312,12 +304,10 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB return nil } -func (s *RollupSyncService) getLocalChunksForBatch(batchIndex uint64) ([]*encoding.Chunk, error) { - chunkBlockRanges := rawdb.ReadBatchChunkRanges(s.db, batchIndex) +func (s *RollupSyncService) getLocalChunksForBatch(chunkBlockRanges []*rawdb.ChunkBlockRange) ([]*encoding.Chunk, error) { if len(chunkBlockRanges) == 0 { - return nil, fmt.Errorf("failed to get batch chunk ranges, empty chunk block ranges") + return nil, fmt.Errorf("chunkBlockRanges is empty") } - endBlockNumber := chunkBlockRanges[len(chunkBlockRanges)-1].EndBlockNumber for i := 0; i < defaultMaxRetries; i++ { if s.ctx.Err() != nil { @@ -365,13 +355,13 @@ func (s *RollupSyncService) getLocalChunksForBatch(batchIndex uint64) ([]*encodi return chunks, nil } -func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types.Log) (*rawdb.CommittedBatchMeta, []*rawdb.ChunkBlockRange, error) { +func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types.Log) (*rawdb.CommittedBatchMeta, error) { if batchIndex == 0 { return &rawdb.CommittedBatchMeta{ Version: 0, BlobVersionedHashes: nil, ChunkBlockRanges: []*rawdb.ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 0}}, - }, []*rawdb.ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 0}}, nil + }, nil } tx, _, err := s.client.client.TransactionByHash(s.ctx, vLog.TxHash) @@ -380,11 +370,11 @@ func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types "tx hash", vLog.TxHash.Hex(), "block number", vLog.BlockNumber, "block hash", vLog.BlockHash.Hex(), "err", err) block, err := s.client.client.BlockByHash(s.ctx, vLog.BlockHash) if err != nil { - return nil, nil, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err) + return nil, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err) } if block == nil { - return nil, nil, fmt.Errorf("failed to get block by hash, block not found, block number: %v, block hash: %v", vLog.BlockNumber, vLog.BlockHash.Hex()) + return nil, fmt.Errorf("failed to get block by hash, block not found, block number: %v, block hash: %v", vLog.BlockNumber, vLog.BlockHash.Hex()) } found := false @@ -396,7 +386,7 @@ func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types } } if !found { - return nil, nil, fmt.Errorf("transaction not found in the block, tx hash: %v, block number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex()) + return nil, fmt.Errorf("transaction not found in the block, tx hash: %v, block number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex()) } } @@ -405,19 +395,19 @@ func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types if tx.Type() == types.BlobTxType { blobVersionedHashes := tx.BlobHashes() if blobVersionedHashes == nil { - return nil, nil, fmt.Errorf("invalid blob transaction, blob hashes is nil, tx hash: %v", tx.Hash().Hex()) + return nil, fmt.Errorf("invalid blob transaction, blob hashes is nil, tx hash: %v", tx.Hash().Hex()) } 
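An aside on the commit bullet "use IsL1MessageSkipped in da-codec repo": the skipped-message bitmap handling in getL1Messages (commitV0.go above) now composes two da-codec helpers. A hedged sketch of that composition; DecodeBitmap's signature is taken from its call site above, while IsL1MessageSkipped is assumed to take the decoded bitmap and a bitmap-relative index:

    package example

    import "github.com/scroll-tech/da-codec/encoding"

    // skippedQueueOffsets returns the bitmap-relative indices of skipped L1
    // messages. Sketch only: IsL1MessageSkipped's exact signature is an
    // assumption based on the commit message, not shown in this diff.
    func skippedQueueOffsets(bitmap []byte, totalL1MessagePopped int) ([]uint64, error) {
        decoded, err := encoding.DecodeBitmap(bitmap, totalL1MessagePopped)
        if err != nil {
            return nil, err
        }
        var skipped []uint64
        for i := uint64(0); i < uint64(totalL1MessagePopped); i++ {
            if encoding.IsL1MessageSkipped(decoded, i) {
                skipped = append(skipped, i)
            }
        }
        return skipped, nil
    }
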
commitBatchMeta.BlobVersionedHashes = blobVersionedHashes } version, ranges, err := s.decodeBatchVersionAndChunkBlockRanges(tx.Data()) if err != nil { - return nil, nil, fmt.Errorf("failed to decode chunk block ranges, batch index: %v, err: %w", batchIndex, err) + return nil, fmt.Errorf("failed to decode chunk block ranges, batch index: %v, err: %w", batchIndex, err) } commitBatchMeta.Version = version commitBatchMeta.ChunkBlockRanges = ranges - return &commitBatchMeta, ranges, nil + return &commitBatchMeta, nil } // decodeBatchVersionAndChunkBlockRanges decodes version and chunks' block ranges in a batch based on the commit batch transaction's calldata. @@ -492,10 +482,8 @@ func (s *RollupSyncService) decodeBatchVersionAndChunkBlockRanges(txData []byte) // - batchIndex: batch index of the validated batch // - event: L1 finalize batch event data // - parentFinalizedBatchMeta: metadata of the finalized parent batch -// - committedBatchMeta: committed batch metadata stored in the database. -// Can be nil for older client versions that don't store this information. +// - committedBatchMeta: committed batch metadata stored in the database // - chunks: slice of chunk data for the current batch -// - chainCfg: chain configuration to identify the codec version when committedBatchMeta is nil // - stack: node stack to terminate the node in case of inconsistency // // Returns: @@ -506,7 +494,7 @@ func (s *RollupSyncService) decodeBatchVersionAndChunkBlockRanges(txData []byte) // Note: This function is compatible with both "finalize by batch" and "finalize by bundle" methods. // In "finalize by bundle", only the last batch of each bundle is fully verified. // This check still ensures the correctness of all batch hashes in the bundle due to the parent-child relationship between batch hashes. 
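The rewritten body below reduces the local hash computation to one codec call. Condensed into a standalone sketch, with the encoding.Batch fields as used in the surrounding code and the error wrapping trimmed:

    package example

    import (
        "github.com/scroll-tech/da-codec/encoding"
        "github.com/scroll-tech/go-ethereum/common"
    )

    // localBatchHash mirrors the new validateBatch flow: resolve the codec
    // from the committed version byte, build one DA batch, and hash it.
    func localBatchHash(version uint8, index, totalL1MessagePoppedBefore uint64,
        parentBatchHash common.Hash, chunks []*encoding.Chunk) (common.Hash, error) {
        codec, err := encoding.CodecFromVersion(encoding.CodecVersion(version))
        if err != nil {
            return common.Hash{}, err
        }
        daBatch, err := codec.NewDABatch(&encoding.Batch{
            Index:                      index,
            TotalL1MessagePoppedBefore: totalL1MessagePoppedBefore,
            ParentBatchHash:            parentBatchHash,
            Chunks:                     chunks,
        })
        if err != nil {
            return common.Hash{}, err
        }
        return daBatch.Hash(), nil
    }

One NewDABatch call replaces the five per-version branches, including the codecv4 compressed/uncompressed fallback, which is presumably handled inside the codec now.
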
-func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinalizedBatchMeta *rawdb.FinalizedBatchMeta, committedBatchMeta *rawdb.CommittedBatchMeta, chunks []*encoding.Chunk, chainCfg *params.ChainConfig, stack *node.Node) (uint64, *rawdb.FinalizedBatchMeta, error) { +func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinalizedBatchMeta *rawdb.FinalizedBatchMeta, committedBatchMeta *rawdb.CommittedBatchMeta, chunks []*encoding.Chunk, stack *node.Node) (uint64, *rawdb.FinalizedBatchMeta, error) { if len(chunks) == 0 { return 0, nil, fmt.Errorf("invalid argument: length of chunks is 0, batch index: %v", batchIndex) } @@ -531,71 +519,17 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinaliz Chunks: chunks, } - var codecVersion encoding.CodecVersion - if committedBatchMeta != nil { - codecVersion = encoding.CodecVersion(committedBatchMeta.Version) - } else { - codecVersion = determineCodecVersion(startBlock.Header.Number, startBlock.Header.Time, chainCfg) + codecVersion := encoding.CodecVersion(committedBatchMeta.Version) + codec, err := encoding.CodecFromVersion(codecVersion) + if err != nil { + return 0, nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", codecVersion, batchIndex, err) } - var localBatchHash common.Hash - if codecVersion == encoding.CodecV0 { - daBatch, err := codecv0.NewDABatch(batch) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv0 DA batch, batch index: %v, err: %w", batchIndex, err) - } - localBatchHash = daBatch.Hash() - } else if codecVersion == encoding.CodecV1 { - daBatch, err := codecv1.NewDABatch(batch) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv1 DA batch, batch index: %v, err: %w", batchIndex, err) - } - localBatchHash = daBatch.Hash() - } else if codecVersion == encoding.CodecV2 { - daBatch, err := codecv2.NewDABatch(batch) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv2 DA batch, batch index: %v, err: %w", batchIndex, err) - } - localBatchHash = daBatch.Hash() - } else if codecVersion == encoding.CodecV3 { - daBatch, err := codecv3.NewDABatch(batch) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv3 DA batch, batch index: %v, err: %w", batchIndex, err) - } - localBatchHash = daBatch.Hash() - } else if codecVersion == encoding.CodecV4 { - // Check if committedBatchMeta exists, for backward compatibility with older client versions - if committedBatchMeta == nil { - return 0, nil, fmt.Errorf("missing committed batch metadata for codecV4, please use the latest client version, batch index: %v", batchIndex) - } - - // Validate BlobVersionedHashes - if committedBatchMeta.BlobVersionedHashes == nil || len(committedBatchMeta.BlobVersionedHashes) != 1 { - return 0, nil, fmt.Errorf("invalid blob hashes, batch index: %v, blob hashes: %v", batchIndex, committedBatchMeta.BlobVersionedHashes) - } - - // Attempt to create DA batch with compression - daBatch, err := codecv4.NewDABatch(batch, true) - if err != nil { - // If compression fails, try without compression - log.Warn("failed to create codecv4 DA batch with compress enabling", "batch index", batchIndex, "err", err) - daBatch, err = codecv4.NewDABatch(batch, false) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv4 DA batch, batch index: %v, err: %w", batchIndex, err) - } - } else if daBatch.BlobVersionedHash != committedBatchMeta.BlobVersionedHashes[0] { - // Inconsistent blob versioned hash, fallback to uncompressed 
DA batch - log.Warn("impossible case: inconsistent blob versioned hash", "batch index", batchIndex, "expected", committedBatchMeta.BlobVersionedHashes[0], "actual", daBatch.BlobVersionedHash) - daBatch, err = codecv4.NewDABatch(batch, false) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv4 DA batch, batch index: %v, err: %w", batchIndex, err) - } - } - - localBatchHash = daBatch.Hash() - } else { - return 0, nil, fmt.Errorf("unsupported codec version: %v", codecVersion) + daBatch, err := codec.NewDABatch(batch) + if err != nil { + return 0, nil, fmt.Errorf("failed to create DA batch, batch index: %v, codec version: %v, expected blob hashes: %v, err: %w", batchIndex, codecVersion, committedBatchMeta.BlobVersionedHashes, err) } + localBatchHash := daBatch.Hash() localStateRoot := endBlock.Header.Root localWithdrawRoot := endBlock.WithdrawRoot @@ -647,126 +581,29 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinaliz return endBlock.Header.Number.Uint64(), finalizedBatchMeta, nil } -// determineCodecVersion determines the codec version based on the block number and chain configuration. -func determineCodecVersion(startBlockNumber *big.Int, startBlockTimestamp uint64, chainCfg *params.ChainConfig) encoding.CodecVersion { - switch { - case startBlockNumber.Uint64() == 0 || !chainCfg.IsBernoulli(startBlockNumber): - return encoding.CodecV0 // codecv0: genesis batch or batches before Bernoulli - case !chainCfg.IsCurie(startBlockNumber): - return encoding.CodecV1 // codecv1: batches after Bernoulli and before Curie - case !chainCfg.IsDarwin(startBlockNumber, startBlockTimestamp): - return encoding.CodecV2 // codecv2: batches after Curie and before Darwin - case !chainCfg.IsDarwinV2(startBlockNumber, startBlockTimestamp): - return encoding.CodecV3 // codecv3: batches after Darwin - default: - return encoding.CodecV4 // codecv4: batches after DarwinV2 - } -} - // decodeBlockRangesFromEncodedChunks decodes the provided chunks into a list of block ranges. 
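The rewritten body follows next; first, a sketch of the accessor surface the refactor codes against. These methods replace direct field access on the per-version DABlock structs, and the set below is inferred from the call sites in commitV0.go above:

    package example

    import (
        "fmt"

        "github.com/scroll-tech/da-codec/encoding"
    )

    // printChunkSummary walks decoded chunks through the version-agnostic
    // DAChunkRawTx/DABlock interfaces used throughout this PR.
    func printChunkSummary(chunks []*encoding.DAChunkRawTx) {
        for i, chunk := range chunks {
            for _, b := range chunk.Blocks {
                fmt.Printf("chunk %d: block %d time %d gasLimit %d txs %d l1Msgs %d\n",
                    i, b.Number(), b.Timestamp(), b.GasLimit(),
                    b.NumTransactions(), b.NumL1Messages())
            }
        }
    }
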
func decodeBlockRangesFromEncodedChunks(codecVersion encoding.CodecVersion, chunks [][]byte) ([]*rawdb.ChunkBlockRange, error) { - var chunkBlockRanges []*rawdb.ChunkBlockRange - for _, chunk := range chunks { - if len(chunk) < 1 { - return nil, fmt.Errorf("invalid chunk, length is less than 1") - } - - numBlocks := int(chunk[0]) - - switch codecVersion { - case encoding.CodecV0: - if len(chunk) < 1+numBlocks*60 { - return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk)) - } - daBlocks := make([]*codecv0.DABlock, numBlocks) - for i := 0; i < numBlocks; i++ { - startIdx := 1 + i*60 // add 1 to skip numBlocks byte - endIdx := startIdx + 60 - daBlocks[i] = &codecv0.DABlock{} - if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil { - return nil, err - } - } - - chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ - StartBlockNumber: daBlocks[0].BlockNumber, - EndBlockNumber: daBlocks[len(daBlocks)-1].BlockNumber, - }) - case encoding.CodecV1: - if len(chunk) != 1+numBlocks*60 { - return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk)) - } - daBlocks := make([]*codecv1.DABlock, numBlocks) - for i := 0; i < numBlocks; i++ { - startIdx := 1 + i*60 // add 1 to skip numBlocks byte - endIdx := startIdx + 60 - daBlocks[i] = &codecv1.DABlock{} - if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil { - return nil, err - } - } - - chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ - StartBlockNumber: daBlocks[0].BlockNumber, - EndBlockNumber: daBlocks[len(daBlocks)-1].BlockNumber, - }) - case encoding.CodecV2: - if len(chunk) != 1+numBlocks*60 { - return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk)) - } - daBlocks := make([]*codecv2.DABlock, numBlocks) - for i := 0; i < numBlocks; i++ { - startIdx := 1 + i*60 // add 1 to skip numBlocks byte - endIdx := startIdx + 60 - daBlocks[i] = &codecv2.DABlock{} - if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil { - return nil, err - } - } - - chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ - StartBlockNumber: daBlocks[0].BlockNumber, - EndBlockNumber: daBlocks[len(daBlocks)-1].BlockNumber, - }) - case encoding.CodecV3: - if len(chunk) != 1+numBlocks*60 { - return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk)) - } - daBlocks := make([]*codecv3.DABlock, numBlocks) - for i := 0; i < numBlocks; i++ { - startIdx := 1 + i*60 // add 1 to skip numBlocks byte - endIdx := startIdx + 60 - daBlocks[i] = &codecv3.DABlock{} - if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil { - return nil, err - } - } + codec, err := encoding.CodecFromVersion(codecVersion) + if err != nil { + return nil, fmt.Errorf("failed to get codec from version: %v, err: %w", codecVersion, err) + } - chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ - StartBlockNumber: daBlocks[0].BlockNumber, - EndBlockNumber: daBlocks[len(daBlocks)-1].BlockNumber, - }) - case encoding.CodecV4: - if len(chunk) != 1+numBlocks*60 { - return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk)) - } - daBlocks := make([]*codecv4.DABlock, numBlocks) - for i := 0; i < numBlocks; i++ { - startIdx := 1 + i*60 // add 1 to skip numBlocks byte - endIdx := startIdx + 60 - daBlocks[i] = &codecv4.DABlock{} - if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil { - return 
nil, err - } - } + daChunksRawTx, err := codec.DecodeDAChunksRawTx(chunks) + if err != nil { + return nil, fmt.Errorf("failed to decode DA chunks, version: %v, err: %w", codecVersion, err) + } - chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ - StartBlockNumber: daBlocks[0].BlockNumber, - EndBlockNumber: daBlocks[len(daBlocks)-1].BlockNumber, - }) - default: - return nil, fmt.Errorf("unexpected batch version %v", codecVersion) + var chunkBlockRanges []*rawdb.ChunkBlockRange + for _, daChunkRawTx := range daChunksRawTx { + if len(daChunkRawTx.Blocks) == 0 { + return nil, fmt.Errorf("no blocks found in DA chunk, version: %v", codecVersion) } + + chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ + StartBlockNumber: daChunkRawTx.Blocks[0].Number(), + EndBlockNumber: daChunkRawTx.Blocks[len(daChunkRawTx.Blocks)-1].Number(), + }) } + return chunkBlockRanges, nil } diff --git a/rollup/rollup_sync_service/rollup_sync_service_test.go b/rollup/rollup_sync_service/rollup_sync_service_test.go index 83b8c72c3d15..f1b09a37a1f2 100644 --- a/rollup/rollup_sync_service/rollup_sync_service_test.go +++ b/rollup/rollup_sync_service/rollup_sync_service_test.go @@ -313,7 +313,7 @@ func TestGetCommittedBatchMetaCodecv0(t *testing.T) { vLog := &types.Log{ TxHash: common.HexToHash("0x0"), } - metadata, ranges, err := service.getCommittedBatchMeta(1, vLog) + metadata, err := service.getCommittedBatchMeta(1, vLog) require.NoError(t, err) assert.Equal(t, encoding.CodecV0, encoding.CodecVersion(metadata.Version)) @@ -324,13 +324,13 @@ func TestGetCommittedBatchMetaCodecv0(t *testing.T) { {StartBlockNumber: 911156, EndBlockNumber: 911159}, } - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) + if len(expectedRanges) != len(metadata.ChunkBlockRanges) { + t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges)) } - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) + for i := range metadata.ChunkBlockRanges { + if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] { + t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i]) } } } @@ -367,7 +367,7 @@ func TestGetCommittedBatchMetaCodecv1(t *testing.T) { vLog := &types.Log{ TxHash: common.HexToHash("0x1"), } - metadata, ranges, err := service.getCommittedBatchMeta(1, vLog) + metadata, err := service.getCommittedBatchMeta(1, vLog) require.NoError(t, err) assert.Equal(t, encoding.CodecV1, encoding.CodecVersion(metadata.Version)) @@ -376,13 +376,13 @@ func TestGetCommittedBatchMetaCodecv1(t *testing.T) { {StartBlockNumber: 1, EndBlockNumber: 11}, } - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) + if len(expectedRanges) != len(metadata.ChunkBlockRanges) { + t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges)) } - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) + for i := range metadata.ChunkBlockRanges { + if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] { + t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i]) } } } @@ -419,7 +419,7 @@ func TestGetCommittedBatchMetaCodecv2(t *testing.T) { 
vLog := &types.Log{ TxHash: common.HexToHash("0x2"), } - metadata, ranges, err := service.getCommittedBatchMeta(1, vLog) + metadata, err := service.getCommittedBatchMeta(1, vLog) require.NoError(t, err) assert.Equal(t, encoding.CodecV2, encoding.CodecVersion(metadata.Version)) @@ -456,13 +456,13 @@ func TestGetCommittedBatchMetaCodecv2(t *testing.T) { {StartBlockNumber: 174, EndBlockNumber: 174}, } - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) + if len(expectedRanges) != len(metadata.ChunkBlockRanges) { + t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges)) } - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) + for i := range metadata.ChunkBlockRanges { + if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] { + t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i]) } } } @@ -499,7 +499,7 @@ func TestGetCommittedBatchMetaCodecv3(t *testing.T) { vLog := &types.Log{ TxHash: common.HexToHash("0x3"), } - metadata, ranges, err := service.getCommittedBatchMeta(1, vLog) + metadata, err := service.getCommittedBatchMeta(1, vLog) require.NoError(t, err) assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(metadata.Version)) @@ -537,20 +537,18 @@ func TestGetCommittedBatchMetaCodecv3(t *testing.T) { {StartBlockNumber: 70, EndBlockNumber: 70}, } - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) + if len(expectedRanges) != len(metadata.ChunkBlockRanges) { + t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges)) } - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) + for i := range metadata.ChunkBlockRanges { + if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] { + t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i]) } } } func TestValidateBatchCodecv0(t *testing.T) { - chainConfig := ¶ms.ChainConfig{} - block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} @@ -560,50 +558,57 @@ func TestValidateBatchCodecv0(t *testing.T) { block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json") chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}} - parentBatchMeta1 := &rawdb.FinalizedBatchMeta{} + parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{} event1 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(0), BatchHash: common.HexToHash("0xfd3ecf106ce993adc6db68e42ce701bfe638434395abdeeb871f7bd395ae2368"), StateRoot: chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, } + committedBatchMeta1 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV0), + BlobVersionedHashes: nil, + } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) block4 := 
readBlockFromJSON(t, "./testdata/blockTrace_05.json") chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} - parentBatchMeta2 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{ BatchHash: event1.BatchHash, TotalL1MessagePopped: 11, StateRoot: event1.StateRoot, WithdrawRoot: event1.WithdrawRoot, } - assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1) + assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1) event2 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(1), BatchHash: common.HexToHash("0xadb8e526c3fdc2045614158300789cd66e7a945efe5a484db00b5ef9a26016d7"), StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) + committedBatchMeta2 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV0), + BlobVersionedHashes: nil, + } + + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) - parentBatchMeta3 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{ BatchHash: event2.BatchHash, TotalL1MessagePopped: 42, StateRoot: event2.StateRoot, WithdrawRoot: event2.WithdrawRoot, } - assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2) + assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) } func TestValidateBatchCodecv1(t *testing.T) { - chainConfig := ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0)} - block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} @@ -613,50 +618,56 @@ func TestValidateBatchCodecv1(t *testing.T) { block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json") chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}} - parentBatchMeta1 := &rawdb.FinalizedBatchMeta{} + parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{} event1 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(0), BatchHash: common.HexToHash("0x73cb3310646716cb782702a0ec4ad33cf55633c85daf96b641953c5defe58031"), StateRoot: chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, } + committedBatchMeta1 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV1), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x0129554070e4323800ca0e5ddd17bc447854601b306a70870002a058741214b3")}, + } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) block4 := readBlockFromJSON(t, "./testdata/blockTrace_05.json") chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} - parentBatchMeta2 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{ BatchHash: event1.BatchHash, TotalL1MessagePopped: 11, StateRoot: event1.StateRoot, WithdrawRoot: event1.WithdrawRoot, } - assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1) + assert.Equal(t, parentFinalizedBatchMeta2, 
finalizedBatchMeta1)
 
 	event2 := &L1FinalizeBatchEvent{
 		BatchIndex:   big.NewInt(1),
 		BatchHash:    common.HexToHash("0x7f230ce84b4bf86f8ee22ffb5c145e3ef3ddf2a76da4936a33f33cebdb63a48a"),
 		StateRoot:    chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root,
 		WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot,
 	}
-	endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk4}, chainConfig, nil)
+	committedBatchMeta2 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV1),
+		BlobVersionedHashes: []common.Hash{common.HexToHash("0x01a327088bb2b13151449d8313c281d0006d12e8453e863637b746898b6ad5a6")},
+	}
+	endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(17), endBlock2)
 
-	parentBatchMeta3 := &rawdb.FinalizedBatchMeta{
+	parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{
 		BatchHash:            event2.BatchHash,
 		TotalL1MessagePopped: 42,
 		StateRoot:            event2.StateRoot,
 		WithdrawRoot:         event2.WithdrawRoot,
 	}
-	assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2)
+	assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2)
 }
 
 func TestValidateBatchCodecv2(t *testing.T) {
-	chainConfig := &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}
-
 	block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json")
 	chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}}
 
@@ -666,50 +677,56 @@ func TestValidateBatchCodecv2(t *testing.T) {
 	block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json")
 	chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}}
 
-	parentBatchMeta1 := &rawdb.FinalizedBatchMeta{}
+	parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{}
 	event1 := &L1FinalizeBatchEvent{
 		BatchIndex:   big.NewInt(0),
 		BatchHash:    common.HexToHash("0xaccf37a0b974f2058692d366b2ea85502c99db4a0bcb9b77903b49bf866a463b"),
 		StateRoot:    chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root,
 		WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot,
 	}
+	committedBatchMeta1 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV2),
+		BlobVersionedHashes: []common.Hash{common.HexToHash("0x018d99636f4b20ccdc1dd11c289eb2a470e2c4dd631b1a7b48a6978805f49d18")},
+	}
 
-	endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil)
+	endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(13), endBlock1)
 
 	block4 := readBlockFromJSON(t, "./testdata/blockTrace_05.json")
 	chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}}
 
-	parentBatchMeta2 := &rawdb.FinalizedBatchMeta{
+	parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{
 		BatchHash:            event1.BatchHash,
 		TotalL1MessagePopped: 11,
 		StateRoot:            event1.StateRoot,
 		WithdrawRoot:         event1.WithdrawRoot,
 	}
-	assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1)
+	assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1)
 
 	event2 := &L1FinalizeBatchEvent{
 		BatchIndex:   big.NewInt(1),
 		BatchHash:    common.HexToHash("0x62ec61e1fdb334868ffd471df601f6858e692af01d42b5077c805a9fd4558c91"),
 		StateRoot:    chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root,
 		WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot,
 	}
-	endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk4}, chainConfig, nil)
+	committedBatchMeta2 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV2),
+		BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")},
+	}
+	endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(17), endBlock2)
 
-	parentBatchMeta3 := &rawdb.FinalizedBatchMeta{
+	parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{
 		BatchHash:            event2.BatchHash,
 		TotalL1MessagePopped: 42,
 		StateRoot:            event2.StateRoot,
 		WithdrawRoot:         event2.WithdrawRoot,
 	}
-	assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2)
+	assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2)
 }
 
 func TestValidateBatchCodecv3(t *testing.T) {
-	chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
-
 	block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json")
 	chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}}
 
@@ -719,7 +736,7 @@ func TestValidateBatchCodecv3(t *testing.T) {
 	block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json")
 	chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}}
 
-	parentBatchMeta1 := &rawdb.FinalizedBatchMeta{}
+	parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{}
 	event1 := &L1FinalizeBatchEvent{
 		BatchIndex:   big.NewInt(0),
 		BatchHash:    common.HexToHash("0x015eb56fb95bf9a06157cfb8389ba7c2b6b08373e22581ac2ba387003708265d"),
@@ -727,46 +744,53 @@ func TestValidateBatchCodecv3(t *testing.T) {
 		WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot,
 	}
 
-	endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil)
+	committedBatchMeta1 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV3),
+		BlobVersionedHashes: []common.Hash{common.HexToHash("0x018d99636f4b20ccdc1dd11c289eb2a470e2c4dd631b1a7b48a6978805f49d18")},
+	}
+
+	endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(13), endBlock1)
 
 	block4 := readBlockFromJSON(t, "./testdata/blockTrace_05.json")
 	chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}}
 
-	parentBatchMeta2 := &rawdb.FinalizedBatchMeta{
+	parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{
 		BatchHash:            event1.BatchHash,
 		TotalL1MessagePopped: 11,
 		StateRoot:            event1.StateRoot,
 		WithdrawRoot:         event1.WithdrawRoot,
 	}
-	assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1)
+	assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1)
 
 	event2 := &L1FinalizeBatchEvent{
 		BatchIndex:   big.NewInt(1),
 		BatchHash:    common.HexToHash("0x382cb0d507e3d7507f556c52e05f76b05e364ad26205e7f62c95967a19c2f35d"),
 		StateRoot:    chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root,
 		WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot,
 	}
-	endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk4}, chainConfig, nil)
+	committedBatchMeta2 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV3),
+		BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")},
+	}
+	endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(17), endBlock2)
 
-	parentBatchMeta3 := &rawdb.FinalizedBatchMeta{
+	parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{
 		BatchHash:            event2.BatchHash,
 		TotalL1MessagePopped: 42,
 		StateRoot:            event2.StateRoot,
 		WithdrawRoot:         event2.WithdrawRoot,
 	}
-	assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2)
+	assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2)
 }
 
 func TestValidateBatchUpgrades(t *testing.T) {
-	chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(3), CurieBlock: big.NewInt(14), DarwinTime: func() *uint64 { t := uint64(1684762320); return &t }()}
-
 	block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json")
 	chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}}
 
-	parentBatchMeta1 := &rawdb.FinalizedBatchMeta{}
+	parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{}
 	event1 := &L1FinalizeBatchEvent{
 		BatchIndex:   big.NewInt(0),
 		BatchHash:    common.HexToHash("0x4605465b7470c8565b123330d7186805caf9a7f2656d8e9e744b62e14ca22c3d"),
@@ -774,82 +798,97 @@ func TestValidateBatchUpgrades(t *testing.T) {
 		WithdrawRoot: chunk1.Blocks[len(chunk1.Blocks)-1].WithdrawRoot,
 	}
 
-	endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1}, chainConfig, nil)
+	committedBatchMeta1 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV0),
+		BlobVersionedHashes: nil,
+	}
+
+	endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1}, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(2), endBlock1)
 
 	block2 := readBlockFromJSON(t, "./testdata/blockTrace_03.json")
 	chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}}
 
-	parentBatchMeta2 := &rawdb.FinalizedBatchMeta{
+	parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{
 		BatchHash:            event1.BatchHash,
 		TotalL1MessagePopped: 0,
 		StateRoot:            event1.StateRoot,
 		WithdrawRoot:         event1.WithdrawRoot,
 	}
-	assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1)
+	assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1)
 
 	event2 := &L1FinalizeBatchEvent{
 		BatchIndex:   big.NewInt(1),
 		BatchHash:    common.HexToHash("0xc4af33bce87aa702edc3ad4b7d34730d25719427704e250787f99e0f55049252"),
 		StateRoot:    chunk2.Blocks[len(chunk2.Blocks)-1].Header.Root,
 		WithdrawRoot: chunk2.Blocks[len(chunk2.Blocks)-1].WithdrawRoot,
 	}
-	endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk2}, chainConfig, nil)
+	committedBatchMeta2 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV1),
+		BlobVersionedHashes: []common.Hash{common.HexToHash("0x01a688c6e137310df38a62f5ad1e5119b8cb0455c386a9a4079b14fe92a239aa")},
+	}
+	endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk2}, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(3), endBlock2)
 
 	block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json")
 	chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}}
 
-	parentBatchMeta3 := &rawdb.FinalizedBatchMeta{
+	parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{
 		BatchHash:            event2.BatchHash,
 		TotalL1MessagePopped: 0,
 		StateRoot:            event2.StateRoot,
 		WithdrawRoot:         event2.WithdrawRoot,
 	}
-	assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2)
+	assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2)
 
 	event3 := &L1FinalizeBatchEvent{
 		BatchIndex:   big.NewInt(2),
 		BatchHash:    common.HexToHash("0x9f87f2de2019ed635f867b1e61be6a607c3174ced096f370fd18556c38833c62"),
 		StateRoot:    chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root,
 		WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot,
 	}
-	endBlock3, finalizedBatchMeta3, err := validateBatch(event3.BatchIndex.Uint64(), event3, parentBatchMeta3, nil, []*encoding.Chunk{chunk3}, chainConfig, nil)
+	committedBatchMeta3 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV1),
+		BlobVersionedHashes: []common.Hash{common.HexToHash("0x01ea66c4de196d36e2c3a5d7c0045100b9e46ef65be8f7a921ef20e6f2e99ebd")},
+	}
+	endBlock3, finalizedBatchMeta3, err := validateBatch(event3.BatchIndex.Uint64(), event3, parentFinalizedBatchMeta3, committedBatchMeta3, []*encoding.Chunk{chunk3}, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(13), endBlock3)
 
 	block4 := readBlockFromJSON(t, "./testdata/blockTrace_05.json")
 	chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}}
 
-	parentBatchMeta4 := &rawdb.FinalizedBatchMeta{
+	parentFinalizedBatchMeta4 := &rawdb.FinalizedBatchMeta{
 		BatchHash:            event3.BatchHash,
 		TotalL1MessagePopped: 11,
 		StateRoot:            event3.StateRoot,
 		WithdrawRoot:         event3.WithdrawRoot,
 	}
-	assert.Equal(t, parentBatchMeta4, finalizedBatchMeta3)
+	assert.Equal(t, parentFinalizedBatchMeta4, finalizedBatchMeta3)
 
 	event4 := &L1FinalizeBatchEvent{
 		BatchIndex:   big.NewInt(3),
 		BatchHash:    common.HexToHash("0xd33332aef8efbc9a0be4c4694088ac0dd052d2d3ad3ffda5e4c2010825e476bc"),
 		StateRoot:    chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root,
 		WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot,
 	}
-	endBlock4, finalizedBatchMeta4, err := validateBatch(event4.BatchIndex.Uint64(), event4, parentBatchMeta4, nil, []*encoding.Chunk{chunk4}, chainConfig, nil)
+	committedBatchMeta4 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV3),
+		BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")},
+	}
+	endBlock4, finalizedBatchMeta4, err := validateBatch(event4.BatchIndex.Uint64(), event4, parentFinalizedBatchMeta4, committedBatchMeta4, []*encoding.Chunk{chunk4}, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(17), endBlock4)
 
-	parentBatchMeta5 := &rawdb.FinalizedBatchMeta{
+	parentFinalizedBatchMeta5 := &rawdb.FinalizedBatchMeta{
 		BatchHash:            event4.BatchHash,
 		TotalL1MessagePopped: 42,
 		StateRoot:            event4.StateRoot,
 		WithdrawRoot:         event4.WithdrawRoot,
 	}
-	assert.Equal(t, parentBatchMeta5, finalizedBatchMeta4)
+	assert.Equal(t, parentFinalizedBatchMeta5, finalizedBatchMeta4)
 }
 
 func TestValidateBatchInFinalizeByBundle(t *testing.T) {
-	chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: func() *uint64 { t := uint64(0); return &t }()}
-
 	block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json")
 	block2 := readBlockFromJSON(t, "./testdata/blockTrace_03.json")
 	block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json")
@@ -867,29 +906,49 @@ func TestValidateBatchInFinalizeByBundle(t *testing.T) {
 		WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot,
 	}
 
-	endBlock1, finalizedBatchMeta1, err := validateBatch(0, event, &rawdb.FinalizedBatchMeta{}, nil, []*encoding.Chunk{chunk1}, chainConfig, nil)
+	committedBatchMeta1 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV3),
+		BlobVersionedHashes: []common.Hash{common.HexToHash("0x01bbc6b98d7d3783730b6208afac839ad37dcf211b9d9e7c83a5f9d02125ddd7")},
+	}
+
+	committedBatchMeta2 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV3),
+		BlobVersionedHashes: []common.Hash{common.HexToHash("0x01c81e5696e00f1e6e7d76c197f74ed51650147c49c4e6e5b0b702cdcc54352a")},
+	}
+
+	committedBatchMeta3 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV3),
+		BlobVersionedHashes: []common.Hash{common.HexToHash("0x012e15203534ae3f4cbe1b0f58fe6db6e5c29432115a8ece6ef5550bf2ffce4c")},
+	}
+
+	committedBatchMeta4 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV3),
+		BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")},
+	}
+
+	endBlock1, finalizedBatchMeta1, err := validateBatch(0, event, &rawdb.FinalizedBatchMeta{}, committedBatchMeta1, []*encoding.Chunk{chunk1}, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(2), endBlock1)
 
-	endBlock2, finalizedBatchMeta2, err := validateBatch(1, event, finalizedBatchMeta1, nil, []*encoding.Chunk{chunk2}, chainConfig, nil)
+	endBlock2, finalizedBatchMeta2, err := validateBatch(1, event, finalizedBatchMeta1, committedBatchMeta2, []*encoding.Chunk{chunk2}, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(3), endBlock2)
 
-	endBlock3, finalizedBatchMeta3, err := validateBatch(2, event, finalizedBatchMeta2, nil, []*encoding.Chunk{chunk3}, chainConfig, nil)
+	endBlock3, finalizedBatchMeta3, err := validateBatch(2, event, finalizedBatchMeta2, committedBatchMeta3, []*encoding.Chunk{chunk3}, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(13), endBlock3)
 
-	endBlock4, finalizedBatchMeta4, err := validateBatch(3, event, finalizedBatchMeta3, nil, []*encoding.Chunk{chunk4}, chainConfig, nil)
+	endBlock4, finalizedBatchMeta4, err := validateBatch(3, event, finalizedBatchMeta3, committedBatchMeta4, []*encoding.Chunk{chunk4}, nil)
	assert.NoError(t, err)
 	assert.Equal(t, uint64(17), endBlock4)
 
-	parentBatchMeta5 := &rawdb.FinalizedBatchMeta{
+	parentFinalizedBatchMeta5 := &rawdb.FinalizedBatchMeta{
 		BatchHash:            event.BatchHash,
 		TotalL1MessagePopped: 42,
 		StateRoot:            event.StateRoot,
 		WithdrawRoot:         event.WithdrawRoot,
 	}
-	assert.Equal(t, parentBatchMeta5, finalizedBatchMeta4)
+	assert.Equal(t, parentFinalizedBatchMeta5, finalizedBatchMeta4)
 }
 
 func readBlockFromJSON(t *testing.T, filename string) *encoding.Block {