diff --git a/core/rawdb/accessors_rollup_event.go b/core/rawdb/accessors_rollup_event.go
index 6670b4b7b85f..1b60f6e4f0d8 100644
--- a/core/rawdb/accessors_rollup_event.go
+++ b/core/rawdb/accessors_rollup_event.go
@@ -58,47 +58,6 @@ func ReadRollupEventSyncedL1BlockNumber(db ethdb.Reader) *uint64 {
 	return &rollupEventSyncedL1BlockNumber
 }
 
-// WriteBatchChunkRanges writes the block ranges for each chunk within a batch to the database.
-// It serializes the chunk ranges using RLP and stores them under a key derived from the batch index.
-// for backward compatibility, new info is also stored in CommittedBatchMeta.
-func WriteBatchChunkRanges(db ethdb.KeyValueWriter, batchIndex uint64, chunkBlockRanges []*ChunkBlockRange) {
-	value, err := rlp.EncodeToBytes(chunkBlockRanges)
-	if err != nil {
-		log.Crit("failed to RLP encode batch chunk ranges", "batch index", batchIndex, "err", err)
-	}
-	if err := db.Put(batchChunkRangesKey(batchIndex), value); err != nil {
-		log.Crit("failed to store batch chunk ranges", "batch index", batchIndex, "value", value, "err", err)
-	}
-}
-
-// DeleteBatchChunkRanges removes the block ranges of all chunks associated with a specific batch from the database.
-// Note: Only non-finalized batches can be reverted.
-// for backward compatibility, new info is also stored in CommittedBatchMeta.
-func DeleteBatchChunkRanges(db ethdb.KeyValueWriter, batchIndex uint64) {
-	if err := db.Delete(batchChunkRangesKey(batchIndex)); err != nil {
-		log.Crit("failed to delete batch chunk ranges", "batch index", batchIndex, "err", err)
-	}
-}
-
-// ReadBatchChunkRanges retrieves the block ranges of all chunks associated with a specific batch from the database.
-// It returns a list of ChunkBlockRange pointers, or nil if no chunk ranges are found for the given batch index.
-// for backward compatibility, new info is also stored in CommittedBatchMeta.
-func ReadBatchChunkRanges(db ethdb.Reader, batchIndex uint64) []*ChunkBlockRange {
-	data, err := db.Get(batchChunkRangesKey(batchIndex))
-	if err != nil && isNotFoundErr(err) {
-		return nil
-	}
-	if err != nil {
-		log.Crit("failed to read batch chunk ranges from database", "err", err)
-	}
-
-	cr := new([]*ChunkBlockRange)
-	if err := rlp.Decode(bytes.NewReader(data), cr); err != nil {
-		log.Crit("Invalid ChunkBlockRange RLP", "batch index", batchIndex, "data", data, "err", err)
-	}
-	return *cr
-}
-
 // WriteFinalizedBatchMeta stores the metadata of a finalized batch in the database.
 func WriteFinalizedBatchMeta(db ethdb.KeyValueWriter, batchIndex uint64, finalizedBatchMeta *FinalizedBatchMeta) {
 	value, err := rlp.EncodeToBytes(finalizedBatchMeta)
diff --git a/core/rawdb/accessors_rollup_event_test.go b/core/rawdb/accessors_rollup_event_test.go
index c74e93524376..a22880ee05a4 100644
--- a/core/rawdb/accessors_rollup_event_test.go
+++ b/core/rawdb/accessors_rollup_event_test.go
@@ -147,70 +147,6 @@ func TestFinalizedBatchMeta(t *testing.T) {
 	}
 }
 
-func TestBatchChunkRanges(t *testing.T) {
-	chunks := [][]*ChunkBlockRange{
-		{
-			{StartBlockNumber: 1, EndBlockNumber: 100},
-			{StartBlockNumber: 101, EndBlockNumber: 200},
-		},
-		{
-			{StartBlockNumber: 201, EndBlockNumber: 300},
-			{StartBlockNumber: 301, EndBlockNumber: 400},
-		},
-		{
-			{StartBlockNumber: 401, EndBlockNumber: 500},
-		},
-	}
-
-	db := NewMemoryDatabase()
-
-	for i, chunkRange := range chunks {
-		batchIndex := uint64(i)
-		WriteBatchChunkRanges(db, batchIndex, chunkRange)
-	}
-
-	for i, chunkRange := range chunks {
-		batchIndex := uint64(i)
-		readChunkRange := ReadBatchChunkRanges(db, batchIndex)
-		if len(readChunkRange) != len(chunkRange) {
-			t.Fatal("Mismatch in number of chunk ranges", "expected", len(chunkRange), "got", len(readChunkRange))
-		}
-
-		for j, cr := range readChunkRange {
-			if cr.StartBlockNumber != chunkRange[j].StartBlockNumber || cr.EndBlockNumber != chunkRange[j].EndBlockNumber {
-				t.Fatal("Mismatch in chunk range", "batch index", batchIndex, "expected", chunkRange[j], "got", cr)
-			}
-		}
-	}
-
-	// over-write
-	newRange := []*ChunkBlockRange{{StartBlockNumber: 1001, EndBlockNumber: 1100}}
-	WriteBatchChunkRanges(db, 0, newRange)
-	readChunkRange := ReadBatchChunkRanges(db, 0)
-	if len(readChunkRange) != 1 || readChunkRange[0].StartBlockNumber != 1001 || readChunkRange[0].EndBlockNumber != 1100 {
-		t.Fatal("Over-write failed for chunk range", "expected", newRange, "got", readChunkRange)
-	}
-
-	// read non-existing value
-	if readChunkRange = ReadBatchChunkRanges(db, uint64(len(chunks)+1)); readChunkRange != nil {
-		t.Fatal("Expected nil for non-existing value", "got", readChunkRange)
-	}
-
-	// delete: revert batch
-	for i := range chunks {
-		batchIndex := uint64(i)
-		DeleteBatchChunkRanges(db, batchIndex)
-
-		readChunkRange := ReadBatchChunkRanges(db, batchIndex)
-		if readChunkRange != nil {
-			t.Fatal("Chunk range was not deleted", "batch index", batchIndex)
-		}
-	}
-
-	// delete non-existing value: ensure the delete operation handles non-existing values without errors.
-	DeleteBatchChunkRanges(db, uint64(len(chunks)+1))
-}
-
 func TestWriteReadDeleteCommittedBatchMeta(t *testing.T) {
 	db := NewMemoryDatabase()
 
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index 2f8281c83d1d..d7f95a2311ee 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -112,7 +112,6 @@ var (
 
 	// Scroll rollup event store
 	rollupEventSyncedL1BlockNumberKey = []byte("R-LastRollupEventSyncedL1BlockNumber")
-	batchChunkRangesPrefix            = []byte("R-bcr")
 	batchMetaPrefix                   = []byte("R-bm")
 	finalizedL2BlockNumberKey         = []byte("R-finalized")
 	lastFinalizedBatchIndexKey        = []byte("R-finalizedBatchIndex")
@@ -301,11 +300,6 @@ func SkippedTransactionHashKey(index uint64) []byte {
 	return append(skippedTransactionHashPrefix, encodeBigEndian(index)...)
 }
 
-// batchChunkRangesKey = batchChunkRangesPrefix + batch index (uint64 big endian)
-func batchChunkRangesKey(batchIndex uint64) []byte {
-	return append(batchChunkRangesPrefix, encodeBigEndian(batchIndex)...)
-}
-
 // batchMetaKey = batchMetaPrefix + batch index (uint64 big endian)
 func batchMetaKey(batchIndex uint64) []byte {
 	return append(batchMetaPrefix, encodeBigEndian(batchIndex)...)
diff --git a/go.mod b/go.mod
index 658a06109d0e..df82e665f66c 100644
--- a/go.mod
+++ b/go.mod
@@ -50,7 +50,7 @@ require (
 	github.com/prometheus/tsdb v0.7.1
 	github.com/rjeczalik/notify v0.9.1
 	github.com/rs/cors v1.7.0
-	github.com/scroll-tech/da-codec v0.1.1-0.20240822151711-9e32313056ac
+	github.com/scroll-tech/da-codec v0.1.1-0.20240905164927-c4a249537b1c
 	github.com/scroll-tech/zktrie v0.8.4
 	github.com/shirou/gopsutil v3.21.11+incompatible
 	github.com/sourcegraph/conc v0.3.0
diff --git a/go.sum b/go.sum
index 290418161087..33194b201e23 100644
--- a/go.sum
+++ b/go.sum
@@ -392,8 +392,8 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj
 github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/da-codec v0.1.1-0.20240822151711-9e32313056ac h1:DjLrqjoOLVFug9ZkAbJYwjtYW51YZE0Num3p4cZXaZs=
-github.com/scroll-tech/da-codec v0.1.1-0.20240822151711-9e32313056ac/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs=
+github.com/scroll-tech/da-codec v0.1.1-0.20240905164927-c4a249537b1c h1:XbDzqf3XVUaEzsmSCETmVFVactHgTEztfcWXkwZzrBU=
+github.com/scroll-tech/da-codec v0.1.1-0.20240905164927-c4a249537b1c/go.mod h1:IrW6YO4Xqk7JVuee7RBEuTr3mScMMS69B7Z/qIbTsxQ=
 github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
 github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go
index 3991debcb1c6..c19d7dc1cfa7 100644
--- a/rollup/rollup_sync_service/rollup_sync_service.go
+++ b/rollup/rollup_sync_service/rollup_sync_service.go
@@ -4,17 +4,11 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"math/big"
 	"os"
 	"reflect"
 	"time"
 
 	"github.com/scroll-tech/da-codec/encoding"
-	"github.com/scroll-tech/da-codec/encoding/codecv0"
-	"github.com/scroll-tech/da-codec/encoding/codecv1"
-	"github.com/scroll-tech/da-codec/encoding/codecv2"
-	"github.com/scroll-tech/da-codec/encoding/codecv3"
-	"github.com/scroll-tech/da-codec/encoding/codecv4"
 
 	"github.com/scroll-tech/go-ethereum/accounts/abi"
 	"github.com/scroll-tech/go-ethereum/common"
@@ -204,12 +198,11 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB
 			batchIndex := event.BatchIndex.Uint64()
 			log.Trace("found new CommitBatch event", "batch index", batchIndex)
 
-			committedBatchMeta, chunkBlockRanges, err := s.getCommittedBatchMeta(batchIndex, &vLog)
+			committedBatchMeta, err := s.getCommittedBatchMeta(batchIndex, &vLog)
 			if err != nil {
 				return fmt.Errorf("failed to get chunk ranges, batch index: %v, err: %w", batchIndex, err)
 			}
 			rawdb.WriteCommittedBatchMeta(s.db, batchIndex, committedBatchMeta)
-			rawdb.WriteBatchChunkRanges(s.db, batchIndex, chunkBlockRanges)
 
 		case s.l1RevertBatchEventSignature:
 			event := &L1RevertBatchEvent{}
@@ -220,7 +213,6 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB
 			log.Trace("found new RevertBatch event", "batch index", batchIndex)
 
 			rawdb.DeleteCommittedBatchMeta(s.db, batchIndex)
-			rawdb.DeleteBatchChunkRanges(s.db, batchIndex)
 
 		case s.l1FinalizeBatchEventSignature:
 			event := &L1FinalizeBatchEvent{}
@@ -255,12 +247,12 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB
 			for index := startBatchIndex; index <= batchIndex; index++ {
 				committedBatchMeta := rawdb.ReadCommittedBatchMeta(s.db, index)
-				chunks, err := s.getLocalChunksForBatch(index)
+				chunks, err := s.getLocalChunksForBatch(committedBatchMeta.ChunkBlockRanges)
 				if err != nil {
 					return fmt.Errorf("failed to get local node info, batch index: %v, err: %w", index, err)
 				}
 
-				endBlock, finalizedBatchMeta, err := validateBatch(index, event, parentFinalizedBatchMeta, committedBatchMeta, chunks, s.bc.Config(), s.stack)
+				endBlock, finalizedBatchMeta, err := validateBatch(index, event, parentFinalizedBatchMeta, committedBatchMeta, chunks, s.stack)
 				if err != nil {
 					return fmt.Errorf("fatal: validateBatch failed: finalize event: %v, err: %w", event, err)
 				}
 
@@ -295,12 +287,7 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB
 	return nil
 }
 
-func (s *RollupSyncService) getLocalChunksForBatch(batchIndex uint64) ([]*encoding.Chunk, error) {
-	chunkBlockRanges := rawdb.ReadBatchChunkRanges(s.db, batchIndex)
-	if len(chunkBlockRanges) == 0 {
-		return nil, fmt.Errorf("failed to get batch chunk ranges, empty chunk block ranges")
-	}
-
+func (s *RollupSyncService) getLocalChunksForBatch(chunkBlockRanges []*rawdb.ChunkBlockRange) ([]*encoding.Chunk, error) {
 	endBlockNumber := chunkBlockRanges[len(chunkBlockRanges)-1].EndBlockNumber
 	for i := 0; i < defaultMaxRetries; i++ {
 		if s.ctx.Err() != nil {
@@ -348,13 +335,13 @@ func (s *RollupSyncService) getLocalChunksForBatch(batchIndex uint64) ([]*encodi
 	return chunks, nil
 }
 
-func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types.Log) (*rawdb.CommittedBatchMeta, []*rawdb.ChunkBlockRange, error) {
+func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types.Log) (*rawdb.CommittedBatchMeta, error) {
 	if batchIndex == 0 {
 		return &rawdb.CommittedBatchMeta{
 			Version:             0,
 			BlobVersionedHashes: nil,
 			ChunkBlockRanges:    []*rawdb.ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 0}},
-		}, []*rawdb.ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 0}}, nil
+		}, nil
 	}
 
 	tx, _, err := s.client.client.TransactionByHash(s.ctx, vLog.TxHash)
@@ -363,11 +350,11 @@ func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types
 			"tx hash", vLog.TxHash.Hex(), "block number", vLog.BlockNumber, "block hash", vLog.BlockHash.Hex(), "err", err)
 		block, err := s.client.client.BlockByHash(s.ctx, vLog.BlockHash)
 		if err != nil {
-			return nil, nil, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err)
+			return nil, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err)
 		}
 		if block == nil {
-			return nil, nil, fmt.Errorf("failed to get block by hash, block not found, block number: %v, block hash: %v", vLog.BlockNumber, vLog.BlockHash.Hex())
+			return nil, fmt.Errorf("failed to get block by hash, block not found, block number: %v, block hash: %v", vLog.BlockNumber, vLog.BlockHash.Hex())
 		}
 
 		found := false
@@ -379,7 +366,7 @@ func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types
 			}
 		}
 		if !found {
-			return nil, nil, fmt.Errorf("transaction not found in the block, tx hash: %v, block number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex())
+			return nil, fmt.Errorf("transaction not found in the block, tx hash: %v, block number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex())
 		}
 	}
 
@@ -388,19 +375,19 @@ func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types
 	if tx.Type() == types.BlobTxType {
 		blobVersionedHashes := tx.BlobHashes()
 		if blobVersionedHashes == nil {
-			return nil, nil, fmt.Errorf("invalid blob transaction, blob hashes is nil, tx hash: %v", tx.Hash().Hex())
+			return nil, fmt.Errorf("invalid blob transaction, blob hashes is nil, tx hash: %v", tx.Hash().Hex())
 		}
 		commitBatchMeta.BlobVersionedHashes = blobVersionedHashes
 	}
 
 	version, ranges, err := s.decodeBatchVersionAndChunkBlockRanges(tx.Data())
 	if err != nil {
-		return nil, nil, fmt.Errorf("failed to decode chunk block ranges, batch index: %v, err: %w", batchIndex, err)
+		return nil, fmt.Errorf("failed to decode chunk block ranges, batch index: %v, err: %w", batchIndex, err)
 	}
 
 	commitBatchMeta.Version = version
 	commitBatchMeta.ChunkBlockRanges = ranges
-	return &commitBatchMeta, ranges, nil
+	return &commitBatchMeta, nil
 }
 
 // decodeBatchVersionAndChunkBlockRanges decodes version and chunks' block ranges in a batch based on the commit batch transaction's calldata.
@@ -476,9 +463,6 @@ func (s *RollupSyncService) decodeBatchVersionAndChunkBlockRanges(txData []byte)
 //   - event: L1 finalize batch event data
 //   - parentFinalizedBatchMeta: metadata of the finalized parent batch
 //   - committedBatchMeta: committed batch metadata stored in the database.
-//     Can be nil for older client versions that don't store this information.
-//   - chunks: slice of chunk data for the current batch
-//   - chainCfg: chain configuration to identify the codec version when committedBatchMeta is nil
 //   - stack: node stack to terminate the node in case of inconsistency
 //
 // Returns:
@@ -489,7 +473,7 @@ func (s *RollupSyncService) decodeBatchVersionAndChunkBlockRanges(txData []byte)
 // Note: This function is compatible with both "finalize by batch" and "finalize by bundle" methods.
 // In "finalize by bundle", only the last batch of each bundle is fully verified.
 // This check still ensures the correctness of all batch hashes in the bundle due to the parent-child relationship between batch hashes.
-func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinalizedBatchMeta *rawdb.FinalizedBatchMeta, committedBatchMeta *rawdb.CommittedBatchMeta, chunks []*encoding.Chunk, chainCfg *params.ChainConfig, stack *node.Node) (uint64, *rawdb.FinalizedBatchMeta, error) {
+func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinalizedBatchMeta *rawdb.FinalizedBatchMeta, committedBatchMeta *rawdb.CommittedBatchMeta, chunks []*encoding.Chunk, stack *node.Node) (uint64, *rawdb.FinalizedBatchMeta, error) {
 	if len(chunks) == 0 {
 		return 0, nil, fmt.Errorf("invalid argument: length of chunks is 0, batch index: %v", batchIndex)
 	}
@@ -514,71 +498,17 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinaliz
 		Chunks:                     chunks,
 	}
 
-	var codecVersion encoding.CodecVersion
-	if committedBatchMeta != nil {
-		codecVersion = encoding.CodecVersion(committedBatchMeta.Version)
-	} else {
-		codecVersion = determineCodecVersion(startBlock.Header.Number, startBlock.Header.Time, chainCfg)
+	codecVersion := encoding.CodecVersion(committedBatchMeta.Version)
+	codec, err := encoding.CodecFromVersion(codecVersion)
+	if err != nil {
+		return 0, nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", codecVersion, batchIndex, err)
 	}
 
-	var localBatchHash common.Hash
-	if codecVersion == encoding.CodecV0 {
-		daBatch, err := codecv0.NewDABatch(batch)
-		if err != nil {
-			return 0, nil, fmt.Errorf("failed to create codecv0 DA batch, batch index: %v, err: %w", batchIndex, err)
-		}
-		localBatchHash = daBatch.Hash()
-	} else if codecVersion == encoding.CodecV1 {
-		daBatch, err := codecv1.NewDABatch(batch)
-		if err != nil {
-			return 0, nil, fmt.Errorf("failed to create codecv1 DA batch, batch index: %v, err: %w", batchIndex, err)
-		}
-		localBatchHash = daBatch.Hash()
-	} else if codecVersion == encoding.CodecV2 {
-		daBatch, err := codecv2.NewDABatch(batch)
-		if err != nil {
-			return 0, nil, fmt.Errorf("failed to create codecv2 DA batch, batch index: %v, err: %w", batchIndex, err)
-		}
-		localBatchHash = daBatch.Hash()
-	} else if codecVersion == encoding.CodecV3 {
-		daBatch, err := codecv3.NewDABatch(batch)
-		if err != nil {
-			return 0, nil, fmt.Errorf("failed to create codecv3 DA batch, batch index: %v, err: %w", batchIndex, err)
-		}
-		localBatchHash = daBatch.Hash()
-	} else if codecVersion == encoding.CodecV4 {
-		// Check if committedBatchMeta exists, for backward compatibility with older client versions
-		if committedBatchMeta == nil {
-			return 0, nil, fmt.Errorf("missing committed batch metadata for codecV4, please use the latest client version, batch index: %v", batchIndex)
-		}
-
-		// Validate BlobVersionedHashes
-		if committedBatchMeta.BlobVersionedHashes == nil || len(committedBatchMeta.BlobVersionedHashes) != 1 {
-			return 0, nil, fmt.Errorf("invalid blob hashes, batch index: %v, blob hashes: %v", batchIndex, committedBatchMeta.BlobVersionedHashes)
-		}
-
-		// Attempt to create DA batch with compression
-		daBatch, err := codecv4.NewDABatch(batch, true)
-		if err != nil {
-			// If compression fails, try without compression
-			log.Warn("failed to create codecv4 DA batch with compress enabling", "batch index", batchIndex, "err", err)
-			daBatch, err = codecv4.NewDABatch(batch, false)
-			if err != nil {
-				return 0, nil, fmt.Errorf("failed to create codecv4 DA batch, batch index: %v, err: %w", batchIndex, err)
-			}
-		} else if daBatch.BlobVersionedHash != committedBatchMeta.BlobVersionedHashes[0] {
-			// Inconsistent blob versioned hash, fallback to uncompressed DA batch
-			log.Warn("impossible case: inconsistent blob versioned hash", "batch index", batchIndex, "expected", committedBatchMeta.BlobVersionedHashes[0], "actual", daBatch.BlobVersionedHash)
-			daBatch, err = codecv4.NewDABatch(batch, false)
-			if err != nil {
-				return 0, nil, fmt.Errorf("failed to create codecv4 DA batch, batch index: %v, err: %w", batchIndex, err)
-			}
-		}
-
-		localBatchHash = daBatch.Hash()
-	} else {
-		return 0, nil, fmt.Errorf("unsupported codec version: %v", codecVersion)
+	daBatch, err := codec.NewDABatchWithExpectedBlobVersionedHashes(batch, committedBatchMeta.BlobVersionedHashes)
+	if err != nil {
+		return 0, nil, fmt.Errorf("failed to create DA batch, batch index: %v, codec version: %v, expected blob hashes: %v, err: %w", batchIndex, codecVersion, committedBatchMeta.BlobVersionedHashes, err)
 	}
 
+	localBatchHash := daBatch.Hash()
 	localStateRoot := endBlock.Header.Root
 	localWithdrawRoot := endBlock.WithdrawRoot
@@ -630,126 +560,30 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinaliz
 	return endBlock.Header.Number.Uint64(), finalizedBatchMeta, nil
 }
 
-// determineCodecVersion determines the codec version based on the block number and chain configuration.
-func determineCodecVersion(startBlockNumber *big.Int, startBlockTimestamp uint64, chainCfg *params.ChainConfig) encoding.CodecVersion {
-	switch {
-	case startBlockNumber.Uint64() == 0 || !chainCfg.IsBernoulli(startBlockNumber):
-		return encoding.CodecV0 // codecv0: genesis batch or batches before Bernoulli
-	case !chainCfg.IsCurie(startBlockNumber):
-		return encoding.CodecV1 // codecv1: batches after Bernoulli and before Curie
-	case !chainCfg.IsDarwin(startBlockTimestamp):
-		return encoding.CodecV2 // codecv2: batches after Curie and before Darwin
-	case !chainCfg.IsDarwinV2(startBlockTimestamp):
-		return encoding.CodecV3 // codecv3: batches after Darwin
-	default:
-		return encoding.CodecV4 // codecv4: batches after DarwinV2
-	}
-}
-
 // decodeBlockRangesFromEncodedChunks decodes the provided chunks into a list of block ranges.
 func decodeBlockRangesFromEncodedChunks(codecVersion encoding.CodecVersion, chunks [][]byte) ([]*rawdb.ChunkBlockRange, error) {
-	var chunkBlockRanges []*rawdb.ChunkBlockRange
-	for _, chunk := range chunks {
-		if len(chunk) < 1 {
-			return nil, fmt.Errorf("invalid chunk, length is less than 1")
-		}
-
-		numBlocks := int(chunk[0])
-
-		switch codecVersion {
-		case encoding.CodecV0:
-			if len(chunk) < 1+numBlocks*60 {
-				return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk))
-			}
-			daBlocks := make([]*codecv0.DABlock, numBlocks)
-			for i := 0; i < numBlocks; i++ {
-				startIdx := 1 + i*60 // add 1 to skip numBlocks byte
-				endIdx := startIdx + 60
-				daBlocks[i] = &codecv0.DABlock{}
-				if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil {
-					return nil, err
-				}
-			}
-
-			chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{
-				StartBlockNumber: daBlocks[0].BlockNumber,
-				EndBlockNumber:   daBlocks[len(daBlocks)-1].BlockNumber,
-			})
-		case encoding.CodecV1:
-			if len(chunk) != 1+numBlocks*60 {
-				return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk))
-			}
-			daBlocks := make([]*codecv1.DABlock, numBlocks)
-			for i := 0; i < numBlocks; i++ {
-				startIdx := 1 + i*60 // add 1 to skip numBlocks byte
-				endIdx := startIdx + 60
-				daBlocks[i] = &codecv1.DABlock{}
-				if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil {
-					return nil, err
-				}
-			}
-
-			chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{
-				StartBlockNumber: daBlocks[0].BlockNumber,
-				EndBlockNumber:   daBlocks[len(daBlocks)-1].BlockNumber,
-			})
-		case encoding.CodecV2:
-			if len(chunk) != 1+numBlocks*60 {
-				return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk))
-			}
-			daBlocks := make([]*codecv2.DABlock, numBlocks)
-			for i := 0; i < numBlocks; i++ {
-				startIdx := 1 + i*60 // add 1 to skip numBlocks byte
-				endIdx := startIdx + 60
-				daBlocks[i] = &codecv2.DABlock{}
-				if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil {
-					return nil, err
-				}
-			}
-
-			chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{
-				StartBlockNumber: daBlocks[0].BlockNumber,
-				EndBlockNumber:   daBlocks[len(daBlocks)-1].BlockNumber,
-			})
-		case encoding.CodecV3:
-			if len(chunk) != 1+numBlocks*60 {
-				return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk))
-			}
-			daBlocks := make([]*codecv3.DABlock, numBlocks)
-			for i := 0; i < numBlocks; i++ {
-				startIdx := 1 + i*60 // add 1 to skip numBlocks byte
-				endIdx := startIdx + 60
-				daBlocks[i] = &codecv3.DABlock{}
-				if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil {
-					return nil, err
-				}
-			}
+	codec, err := encoding.CodecFromVersion(codecVersion)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get codec: %w", err)
+	}
 
-			chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{
-				StartBlockNumber: daBlocks[0].BlockNumber,
-				EndBlockNumber:   daBlocks[len(daBlocks)-1].BlockNumber,
-			})
-		case encoding.CodecV4:
-			if len(chunk) != 1+numBlocks*60 {
-				return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk))
-			}
-			daBlocks := make([]*codecv4.DABlock, numBlocks)
-			for i := 0; i < numBlocks; i++ {
-				startIdx := 1 + i*60 // add 1 to skip numBlocks byte
-				endIdx := startIdx + 60
-				daBlocks[i] = &codecv4.DABlock{}
-				if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil {
-					return nil, err
-				}
-			}
+	daChunks, err := codec.DecodeDAChunks(chunks)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode DA chunks: %w", err)
+	}
 
-			chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{
-				StartBlockNumber: daBlocks[0].BlockNumber,
-				EndBlockNumber:   daBlocks[len(daBlocks)-1].BlockNumber,
-			})
-		default:
-			return nil, fmt.Errorf("unexpected batch version %v", codecVersion)
+	var chunkBlockRanges []*rawdb.ChunkBlockRange
+	for _, daChunk := range daChunks {
+		startBlockNumber, endBlockNumber, err := daChunk.BlockRange()
+		if err != nil {
+			return nil, fmt.Errorf("failed to get block range: %w", err)
 		}
+
+		chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{
+			StartBlockNumber: startBlockNumber,
+			EndBlockNumber:   endBlockNumber,
+		})
 	}
+
 	return chunkBlockRanges, nil
 }
diff --git a/rollup/rollup_sync_service/rollup_sync_service_test.go b/rollup/rollup_sync_service/rollup_sync_service_test.go
index 61d63cdb7419..7a5f634b4a1a 100644
--- a/rollup/rollup_sync_service/rollup_sync_service_test.go
+++ b/rollup/rollup_sync_service/rollup_sync_service_test.go
@@ -313,7 +313,7 @@ func TestGetCommittedBatchMetaCodecv0(t *testing.T) {
 	vLog := &types.Log{
 		TxHash: common.HexToHash("0x0"),
 	}
-	metadata, ranges, err := service.getCommittedBatchMeta(1, vLog)
+	metadata, err := service.getCommittedBatchMeta(1, vLog)
 	require.NoError(t, err)
 
 	assert.Equal(t, encoding.CodecV0, encoding.CodecVersion(metadata.Version))
@@ -324,13 +324,13 @@ func TestGetCommittedBatchMetaCodecv0(t *testing.T) {
 		{StartBlockNumber: 911156, EndBlockNumber: 911159},
 	}
 
-	if len(expectedRanges) != len(ranges) {
-		t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges))
+	if len(expectedRanges) != len(metadata.ChunkBlockRanges) {
+		t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges))
 	}
 
-	for i := range ranges {
-		if *expectedRanges[i] != *ranges[i] {
-			t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i])
+	for i := range metadata.ChunkBlockRanges {
+		if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] {
+			t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i])
 		}
 	}
 }
@@ -367,7 +367,7 @@ func TestGetCommittedBatchMetaCodecv1(t *testing.T) {
 	vLog := &types.Log{
 		TxHash: common.HexToHash("0x1"),
 	}
-	metadata, ranges, err := service.getCommittedBatchMeta(1, vLog)
+	metadata, err := service.getCommittedBatchMeta(1, vLog)
 	require.NoError(t, err)
 
 	assert.Equal(t, encoding.CodecV1, encoding.CodecVersion(metadata.Version))
@@ -376,13 +376,13 @@ func TestGetCommittedBatchMetaCodecv1(t *testing.T) {
 		{StartBlockNumber: 1, EndBlockNumber: 11},
 	}
 
-	if len(expectedRanges) != len(ranges) {
-		t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges))
+	if len(expectedRanges) != len(metadata.ChunkBlockRanges) {
+		t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges))
 	}
 
-	for i := range ranges {
-		if *expectedRanges[i] != *ranges[i] {
-			t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i])
+	for i := range metadata.ChunkBlockRanges {
+		if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] {
+			t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i])
 		}
 	}
 }
@@ -419,7 +419,7 @@ func TestGetCommittedBatchMetaCodecv2(t *testing.T) {
 	vLog := &types.Log{
 		TxHash: common.HexToHash("0x2"),
 	}
-	metadata, ranges, err := service.getCommittedBatchMeta(1, vLog)
+	metadata, err := service.getCommittedBatchMeta(1, vLog)
 	require.NoError(t, err)
 
 	assert.Equal(t, encoding.CodecV2, encoding.CodecVersion(metadata.Version))
@@ -456,13 +456,13 @@ func TestGetCommittedBatchMetaCodecv2(t *testing.T) {
 		{StartBlockNumber: 174, EndBlockNumber: 174},
 	}
 
-	if len(expectedRanges) != len(ranges) {
-		t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges))
+	if len(expectedRanges) != len(metadata.ChunkBlockRanges) {
+		t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges))
 	}
 
-	for i := range ranges {
-		if *expectedRanges[i] != *ranges[i] {
-			t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i])
+	for i := range metadata.ChunkBlockRanges {
+		if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] {
+			t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i])
 		}
 	}
 }
@@ -499,7 +499,7 @@ func TestGetCommittedBatchMetaCodecv3(t *testing.T) {
 	vLog := &types.Log{
 		TxHash: common.HexToHash("0x3"),
 	}
-	metadata, ranges, err := service.getCommittedBatchMeta(1, vLog)
+	metadata, err := service.getCommittedBatchMeta(1, vLog)
 	require.NoError(t, err)
 
 	assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(metadata.Version))
@@ -537,20 +537,18 @@ func TestGetCommittedBatchMetaCodecv3(t *testing.T) {
 		{StartBlockNumber: 70, EndBlockNumber: 70},
 	}
 
-	if len(expectedRanges) != len(ranges) {
-		t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges))
+	if len(expectedRanges) != len(metadata.ChunkBlockRanges) {
+		t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges))
 	}
 
-	for i := range ranges {
-		if *expectedRanges[i] != *ranges[i] {
-			t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i])
+	for i := range metadata.ChunkBlockRanges {
+		if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] {
+			t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i])
 		}
 	}
 }
 
 func TestValidateBatchCodecv0(t *testing.T) {
-	chainConfig := &params.ChainConfig{}
-
 	block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json")
 	chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}}
@@ -560,50 +558,57 @@ func TestValidateBatchCodecv0(t *testing.T) {
 	block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json")
 	chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}}
 
-	parentBatchMeta1 := &rawdb.FinalizedBatchMeta{}
+	parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{}
 	event1 := &L1FinalizeBatchEvent{
 		BatchIndex:   big.NewInt(0),
 		BatchHash:    common.HexToHash("0xfd3ecf106ce993adc6db68e42ce701bfe638434395abdeeb871f7bd395ae2368"),
 		StateRoot:    chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root,
 		WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot,
 	}
+	committedBatchMeta1 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV0),
+		BlobVersionedHashes: nil,
+	}
 
-	endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil)
+	endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(13), endBlock1)
 
 	block4 := readBlockFromJSON(t, "./testdata/blockTrace_05.json")
 	chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}}
 
-	parentBatchMeta2 := &rawdb.FinalizedBatchMeta{
+	parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{
 		BatchHash:            event1.BatchHash,
 		TotalL1MessagePopped: 11,
 		StateRoot:            event1.StateRoot,
 		WithdrawRoot:         event1.WithdrawRoot,
 	}
-	assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1)
+	assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1)
 
 	event2 := &L1FinalizeBatchEvent{
 		BatchIndex:   big.NewInt(1),
 		BatchHash:    common.HexToHash("0xadb8e526c3fdc2045614158300789cd66e7a945efe5a484db00b5ef9a26016d7"),
 		StateRoot:    chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root,
 		WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot,
 	}
-	endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk4}, chainConfig, nil)
+	committedBatchMeta2 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV0),
+		BlobVersionedHashes: nil,
+	}
+
+	endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(17), endBlock2)
 
-	parentBatchMeta3 := &rawdb.FinalizedBatchMeta{
+	parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{
 		BatchHash:            event2.BatchHash,
 		TotalL1MessagePopped: 42,
 		StateRoot:            event2.StateRoot,
 		WithdrawRoot:         event2.WithdrawRoot,
 	}
-	assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2)
+	assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2)
 }
 
 func TestValidateBatchCodecv1(t *testing.T) {
-	chainConfig := &params.ChainConfig{BernoulliBlock: big.NewInt(0)}
-
 	block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json")
 	chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}}
@@ -613,50 +618,56 @@ func TestValidateBatchCodecv1(t *testing.T) {
 	block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json")
 	chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}}
 
-	parentBatchMeta1 := &rawdb.FinalizedBatchMeta{}
+	parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{}
 	event1 := &L1FinalizeBatchEvent{
 		BatchIndex:   big.NewInt(0),
 		BatchHash:    common.HexToHash("0x73cb3310646716cb782702a0ec4ad33cf55633c85daf96b641953c5defe58031"),
 		StateRoot:    chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root,
 		WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot,
 	}
+	committedBatchMeta1 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV1),
+		BlobVersionedHashes: []common.Hash{common.HexToHash("0x0129554070e4323800ca0e5ddd17bc447854601b306a70870002a058741214b3")},
+	}
 
-	endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil)
+	endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(13), endBlock1)
 
 	block4 := readBlockFromJSON(t, "./testdata/blockTrace_05.json")
 	chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}}
 
-	parentBatchMeta2 := &rawdb.FinalizedBatchMeta{
+	parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{
 		BatchHash:            event1.BatchHash,
 		TotalL1MessagePopped: 11,
 		StateRoot:            event1.StateRoot,
 		WithdrawRoot:         event1.WithdrawRoot,
 	}
-	assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1)
+	assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1)
 
 	event2 := &L1FinalizeBatchEvent{
 		BatchIndex:   big.NewInt(1),
common.HexToHash("0x7f230ce84b4bf86f8ee22ffb5c145e3ef3ddf2a76da4936a33f33cebdb63a48a"), StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) + committedBatchMeta2 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV1), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x01a327088bb2b13151449d8313c281d0006d12e8453e863637b746898b6ad5a6")}, + } + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) - parentBatchMeta3 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{ BatchHash: event2.BatchHash, TotalL1MessagePopped: 42, StateRoot: event2.StateRoot, WithdrawRoot: event2.WithdrawRoot, } - assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2) + assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) } func TestValidateBatchCodecv2(t *testing.T) { - chainConfig := ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)} - block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} @@ -666,50 +677,56 @@ func TestValidateBatchCodecv2(t *testing.T) { block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json") chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}} - parentBatchMeta1 := &rawdb.FinalizedBatchMeta{} + parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{} event1 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(0), BatchHash: common.HexToHash("0xaccf37a0b974f2058692d366b2ea85502c99db4a0bcb9b77903b49bf866a463b"), StateRoot: chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, } + committedBatchMeta1 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV2), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x018d99636f4b20ccdc1dd11c289eb2a470e2c4dd631b1a7b48a6978805f49d18")}, + } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) block4 := readBlockFromJSON(t, "./testdata/blockTrace_05.json") chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} - parentBatchMeta2 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{ BatchHash: event1.BatchHash, TotalL1MessagePopped: 11, StateRoot: event1.StateRoot, WithdrawRoot: event1.WithdrawRoot, } - assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1) + assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1) event2 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(1), BatchHash: common.HexToHash("0x62ec61e1fdb334868ffd471df601f6858e692af01d42b5077c805a9fd4558c91"), StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, 
+	committedBatchMeta2 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV2),
+		BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")},
+	}
+	endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(17), endBlock2)
 
-	parentBatchMeta3 := &rawdb.FinalizedBatchMeta{
+	parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{
 		BatchHash:            event2.BatchHash,
 		TotalL1MessagePopped: 42,
 		StateRoot:            event2.StateRoot,
 		WithdrawRoot:         event2.WithdrawRoot,
 	}
-	assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2)
+	assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2)
 }
 
 func TestValidateBatchCodecv3(t *testing.T) {
-	chainConfig := &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
-
 	block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json")
 	chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}}
@@ -719,7 +736,7 @@ func TestValidateBatchCodecv3(t *testing.T) {
 	block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json")
 	chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}}
 
-	parentBatchMeta1 := &rawdb.FinalizedBatchMeta{}
+	parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{}
 	event1 := &L1FinalizeBatchEvent{
 		BatchIndex:   big.NewInt(0),
 		BatchHash:    common.HexToHash("0x015eb56fb95bf9a06157cfb8389ba7c2b6b08373e22581ac2ba387003708265d"),
@@ -727,46 +744,53 @@ func TestValidateBatchCodecv3(t *testing.T) {
 		WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot,
 	}
 
-	endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil)
+	committedBatchMeta1 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV3),
+		BlobVersionedHashes: []common.Hash{common.HexToHash("0x018d99636f4b20ccdc1dd11c289eb2a470e2c4dd631b1a7b48a6978805f49d18")},
+	}
+
+	endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(13), endBlock1)
 
 	block4 := readBlockFromJSON(t, "./testdata/blockTrace_05.json")
 	chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}}
 
-	parentBatchMeta2 := &rawdb.FinalizedBatchMeta{
+	parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{
 		BatchHash:            event1.BatchHash,
 		TotalL1MessagePopped: 11,
 		StateRoot:            event1.StateRoot,
 		WithdrawRoot:         event1.WithdrawRoot,
 	}
-	assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1)
+	assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1)
 
 	event2 := &L1FinalizeBatchEvent{
 		BatchIndex:   big.NewInt(1),
 		BatchHash:    common.HexToHash("0x382cb0d507e3d7507f556c52e05f76b05e364ad26205e7f62c95967a19c2f35d"),
 		StateRoot:    chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root,
 		WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot,
 	}
-	endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk4}, chainConfig, nil)
+	committedBatchMeta2 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV3),
+		BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")},
+	}
+	endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(17), endBlock2)
 
-	parentBatchMeta3 := &rawdb.FinalizedBatchMeta{
+	parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{
 		BatchHash:            event2.BatchHash,
 		TotalL1MessagePopped: 42,
 		StateRoot:            event2.StateRoot,
 		WithdrawRoot:         event2.WithdrawRoot,
 	}
-	assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2)
+	assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2)
 }
 
 func TestValidateBatchUpgrades(t *testing.T) {
-	chainConfig := &params.ChainConfig{BernoulliBlock: big.NewInt(3), CurieBlock: big.NewInt(14), DarwinTime: func() *uint64 { t := uint64(1684762320); return &t }()}
-
 	block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json")
 	chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}}
 
-	parentBatchMeta1 := &rawdb.FinalizedBatchMeta{}
+	parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{}
 	event1 := &L1FinalizeBatchEvent{
 		BatchIndex:   big.NewInt(0),
 		BatchHash:    common.HexToHash("0x4605465b7470c8565b123330d7186805caf9a7f2656d8e9e744b62e14ca22c3d"),
@@ -774,82 +798,97 @@ func TestValidateBatchUpgrades(t *testing.T) {
 		WithdrawRoot: chunk1.Blocks[len(chunk1.Blocks)-1].WithdrawRoot,
 	}
 
-	endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1}, chainConfig, nil)
+	committedBatchMeta1 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV0),
+		BlobVersionedHashes: nil,
+	}
+
+	endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1}, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(2), endBlock1)
 
 	block2 := readBlockFromJSON(t, "./testdata/blockTrace_03.json")
 	chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}}
 
-	parentBatchMeta2 := &rawdb.FinalizedBatchMeta{
+	parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{
 		BatchHash:            event1.BatchHash,
 		TotalL1MessagePopped: 0,
 		StateRoot:            event1.StateRoot,
 		WithdrawRoot:         event1.WithdrawRoot,
 	}
-	assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1)
+	assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1)
 
 	event2 := &L1FinalizeBatchEvent{
 		BatchIndex:   big.NewInt(1),
 		BatchHash:    common.HexToHash("0xc4af33bce87aa702edc3ad4b7d34730d25719427704e250787f99e0f55049252"),
 		StateRoot:    chunk2.Blocks[len(chunk2.Blocks)-1].Header.Root,
 		WithdrawRoot: chunk2.Blocks[len(chunk2.Blocks)-1].WithdrawRoot,
 	}
-	endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk2}, chainConfig, nil)
+	committedBatchMeta2 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV1),
+		BlobVersionedHashes: []common.Hash{common.HexToHash("0x01a688c6e137310df38a62f5ad1e5119b8cb0455c386a9a4079b14fe92a239aa")},
+	}
+	endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk2}, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(3), endBlock2)
 
 	block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json")
 	chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}}
 
-	parentBatchMeta3 := &rawdb.FinalizedBatchMeta{
+	parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{
 		BatchHash:            event2.BatchHash,
 		TotalL1MessagePopped: 0,
 		StateRoot:            event2.StateRoot,
 		WithdrawRoot:         event2.WithdrawRoot,
 	}
-	assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2)
+	assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2)
 
 	event3 := &L1FinalizeBatchEvent{
 		BatchIndex:   big.NewInt(2),
 		BatchHash:    common.HexToHash("0x9f87f2de2019ed635f867b1e61be6a607c3174ced096f370fd18556c38833c62"),
 		StateRoot:    chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root,
 		WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot,
 	}
-	endBlock3, finalizedBatchMeta3, err := validateBatch(event3.BatchIndex.Uint64(), event3, parentBatchMeta3, nil, []*encoding.Chunk{chunk3}, chainConfig, nil)
+	committedBatchMeta3 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV1),
+		BlobVersionedHashes: []common.Hash{common.HexToHash("0x01ea66c4de196d36e2c3a5d7c0045100b9e46ef65be8f7a921ef20e6f2e99ebd")},
+	}
+	endBlock3, finalizedBatchMeta3, err := validateBatch(event3.BatchIndex.Uint64(), event3, parentFinalizedBatchMeta3, committedBatchMeta3, []*encoding.Chunk{chunk3}, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(13), endBlock3)
 
 	block4 := readBlockFromJSON(t, "./testdata/blockTrace_05.json")
 	chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}}
 
-	parentBatchMeta4 := &rawdb.FinalizedBatchMeta{
+	parentFinalizedBatchMeta4 := &rawdb.FinalizedBatchMeta{
 		BatchHash:            event3.BatchHash,
 		TotalL1MessagePopped: 11,
 		StateRoot:            event3.StateRoot,
 		WithdrawRoot:         event3.WithdrawRoot,
 	}
-	assert.Equal(t, parentBatchMeta4, finalizedBatchMeta3)
+	assert.Equal(t, parentFinalizedBatchMeta4, finalizedBatchMeta3)
 
 	event4 := &L1FinalizeBatchEvent{
 		BatchIndex:   big.NewInt(3),
 		BatchHash:    common.HexToHash("0xd33332aef8efbc9a0be4c4694088ac0dd052d2d3ad3ffda5e4c2010825e476bc"),
 		StateRoot:    chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root,
 		WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot,
 	}
-	endBlock4, finalizedBatchMeta4, err := validateBatch(event4.BatchIndex.Uint64(), event4, parentBatchMeta4, nil, []*encoding.Chunk{chunk4}, chainConfig, nil)
+	committedBatchMeta4 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV3),
+		BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")},
+	}
+	endBlock4, finalizedBatchMeta4, err := validateBatch(event4.BatchIndex.Uint64(), event4, parentFinalizedBatchMeta4, committedBatchMeta4, []*encoding.Chunk{chunk4}, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(17), endBlock4)
 
-	parentBatchMeta5 := &rawdb.FinalizedBatchMeta{
+	parentFinalizedBatchMeta5 := &rawdb.FinalizedBatchMeta{
 		BatchHash:            event4.BatchHash,
 		TotalL1MessagePopped: 42,
 		StateRoot:            event4.StateRoot,
 		WithdrawRoot:         event4.WithdrawRoot,
 	}
-	assert.Equal(t, parentBatchMeta5, finalizedBatchMeta4)
+	assert.Equal(t, parentFinalizedBatchMeta5, finalizedBatchMeta4)
 }
 
 func TestValidateBatchInFinalizeByBundle(t *testing.T) {
-	chainConfig := &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: func() *uint64 { t := uint64(0); return &t }()}
-
 	block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json")
 	block2 := readBlockFromJSON(t, "./testdata/blockTrace_03.json")
 	block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json")
@@ -867,29 +906,49 @@ func TestValidateBatchInFinalizeByBundle(t *testing.T) {
 		WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot,
 	}
 
-	endBlock1, finalizedBatchMeta1, err := validateBatch(0, event, &rawdb.FinalizedBatchMeta{}, nil, []*encoding.Chunk{chunk1}, chainConfig, nil)
+	committedBatchMeta1 := &rawdb.CommittedBatchMeta{
+		Version:             uint8(encoding.CodecV3),
[]common.Hash{common.HexToHash("0x01bbc6b98d7d3783730b6208afac839ad37dcf211b9d9e7c83a5f9d02125ddd7")}, + } + + committedBatchMeta2 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x01c81e5696e00f1e6e7d76c197f74ed51650147c49c4e6e5b0b702cdcc54352a")}, + } + + committedBatchMeta3 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x012e15203534ae3f4cbe1b0f58fe6db6e5c29432115a8ece6ef5550bf2ffce4c")}, + } + + committedBatchMeta4 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")}, + } + + endBlock1, finalizedBatchMeta1, err := validateBatch(0, event, &rawdb.FinalizedBatchMeta{}, committedBatchMeta1, []*encoding.Chunk{chunk1}, nil) assert.NoError(t, err) assert.Equal(t, uint64(2), endBlock1) - endBlock2, finalizedBatchMeta2, err := validateBatch(1, event, finalizedBatchMeta1, nil, []*encoding.Chunk{chunk2}, chainConfig, nil) + endBlock2, finalizedBatchMeta2, err := validateBatch(1, event, finalizedBatchMeta1, committedBatchMeta2, []*encoding.Chunk{chunk2}, nil) assert.NoError(t, err) assert.Equal(t, uint64(3), endBlock2) - endBlock3, finalizedBatchMeta3, err := validateBatch(2, event, finalizedBatchMeta2, nil, []*encoding.Chunk{chunk3}, chainConfig, nil) + endBlock3, finalizedBatchMeta3, err := validateBatch(2, event, finalizedBatchMeta2, committedBatchMeta3, []*encoding.Chunk{chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock3) - endBlock4, finalizedBatchMeta4, err := validateBatch(3, event, finalizedBatchMeta3, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) + endBlock4, finalizedBatchMeta4, err := validateBatch(3, event, finalizedBatchMeta3, committedBatchMeta4, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock4) - parentBatchMeta5 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta5 := &rawdb.FinalizedBatchMeta{ BatchHash: event.BatchHash, TotalL1MessagePopped: 42, StateRoot: event.StateRoot, WithdrawRoot: event.WithdrawRoot, } - assert.Equal(t, parentBatchMeta5, finalizedBatchMeta4) + assert.Equal(t, parentFinalizedBatchMeta5, finalizedBatchMeta4) } func readBlockFromJSON(t *testing.T, filename string) *encoding.Block {