@@ -2,13 +2,16 @@ package logic
 
 import (
 	"context"
+	"fmt"
 	"math/big"
 
+	"github.com/scroll-tech/da-codec/encoding"
 	"github.com/scroll-tech/go-ethereum/common"
 	"github.com/scroll-tech/go-ethereum/core/types"
 	"github.com/scroll-tech/go-ethereum/crypto"
 	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/scroll-tech/go-ethereum/log"
+	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
 
 	backendabi "scroll-tech/bridge-history-api/abi"
 	"scroll-tech/bridge-history-api/internal/config"
@@ -19,15 +22,17 @@ import (
 
 // L1EventParser the l1 event parser
 type L1EventParser struct {
-	cfg    *config.FetcherConfig
-	client *ethclient.Client
+	cfg        *config.FetcherConfig
+	client     *ethclient.Client
+	blobClient blob_client.BlobClient
 }
 
 // NewL1EventParser creates l1 event parser
-func NewL1EventParser(cfg *config.FetcherConfig, client *ethclient.Client) *L1EventParser {
+func NewL1EventParser(cfg *config.FetcherConfig, client *ethclient.Client, blobClient blob_client.BlobClient) *L1EventParser {
 	return &L1EventParser{
-		cfg:    cfg,
-		client: client,
+		cfg:        cfg,
+		client:     client,
+		blobClient: blobClient,
 	}
 }
 
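Every existing `NewL1EventParser` call site now needs a third argument. A minimal wiring sketch, not part of this diff: the `fetcher` package location, the `newParser` helper, and the use of the aggregate `blob_client.NewBlobClients`/`AddBlobClient` helpers are assumptions about how the blob source is assembled.

```go
package fetcher // hypothetical wiring location, for illustration only

import (
	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"

	"scroll-tech/bridge-history-api/internal/config"
	"scroll-tech/bridge-history-api/internal/logic"
)

// newParser shows the updated constructor call: it wraps one concrete blob
// source (beacon node, BlobScan, etc.) in the aggregate client and hands it
// to the parser. Only the extra constructor argument comes from this diff.
func newParser(cfg *config.FetcherConfig, l1Client *ethclient.Client, src blob_client.BlobClient) *logic.L1EventParser {
	blobClients := blob_client.NewBlobClients()
	blobClients.AddBlobClient(src)
	return logic.NewL1EventParser(cfg, l1Client, blobClients)
}
```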
@@ -233,6 +238,19 @@ func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, lo
 
 // ParseL1BatchEventLogs parses L1 watched batch events.
 func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.Log, client *ethclient.Client) ([]*orm.BatchEvent, error) {
+	// Since codecv7, a commit transaction can contain multiple CommitBatch events,
+	// and each CommitBatch event corresponds to an individual blob carrying block range data.
+	// To process these events correctly, we need to:
+	//  1. Parse the associated blob data to extract the block range for each event.
+	//  2. Maintain a per-transaction index counter to track the processing position in the blob sequence.
+	//
+	// The index map serves this purpose:
+	//  Key:   commit transaction hash (identifies the transaction containing multiple CommitBatch events)
+	//  Value: current blob index pointer (the next blob to process for this transaction)
+	//
+	// Each processed CommitBatch event increments the index by 1, ensuring sequential
+	// processing of blobs within the same transaction.
+	txBlobIndexMap := make(map[common.Hash]int)
 	var l1BatchEvents []*orm.BatchEvent
 	for _, vlog := range logs {
 		switch vlog.Topics[0] {
@@ -247,11 +265,37 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
 				log.Error("Failed to get commit batch tx or the tx is still pending", "err", err, "isPending", isPending)
 				return nil, err
 			}
-			startBlock, endBlock, err := utils.GetBatchRangeFromCalldata(commitTx.Data())
+			version, startBlock, endBlock, err := utils.GetBatchVersionAndBlockRangeFromCalldata(commitTx.Data())
 			if err != nil {
 				log.Error("Failed to get batch range from calldata", "hash", commitTx.Hash().String(), "height", vlog.BlockNumber)
 				return nil, err
 			}
+			if version >= 7 { // Batches with codec version >= 7 carry their block ranges in blobs, one blob per CommitBatch event.
+				currentIndex := txBlobIndexMap[vlog.TxHash]
+
+				if currentIndex >= len(commitTx.BlobHashes()) {
+					return nil, fmt.Errorf("commit transaction %s has %d blobs, but trying to access index %d (batch index %d)",
+						vlog.TxHash.String(), len(commitTx.BlobHashes()), currentIndex, event.BatchIndex.Uint64())
+				}
+				header, err := client.HeaderByHash(ctx, vlog.BlockHash)
+				if err != nil {
+					return nil, fmt.Errorf("failed to get L1 block header for blob context, blockHash: %s, err: %w", vlog.BlockHash.Hex(), err)
+				}
+				blobVersionedHash := commitTx.BlobHashes()[currentIndex]
+				blocks, err := e.getBatchBlockRangeFromBlob(ctx, version, blobVersionedHash, header.Time)
+				if err != nil {
+					return nil, fmt.Errorf("failed to process versioned blob, blobVersionedHash: %s, block number: %d, blob index: %d, err: %w",
+						blobVersionedHash.String(), vlog.BlockNumber, currentIndex, err)
+				}
+				if len(blocks) == 0 {
+					return nil, fmt.Errorf("no blocks found in the blob, blobVersionedHash: %s, block number: %d, blob index: %d",
+						blobVersionedHash.String(), vlog.BlockNumber, currentIndex)
+				}
+				startBlock = blocks[0].Number()
+				endBlock = blocks[len(blocks)-1].Number()
+
+				txBlobIndexMap[vlog.TxHash] = currentIndex + 1
+			}
 			l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
 				BatchStatus:      int(btypes.BatchStatusTypeCommitted),
 				BatchIndex:       event.BatchIndex.Uint64(),
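To make the `txBlobIndexMap` bookkeeping above concrete, here is a small standalone illustration, not part of the diff: a commit transaction that emits three CommitBatch events consumes its blob hashes strictly in order, one per event. The hashes and loop are stand-ins for `commitTx.BlobHashes()` and the event iteration.

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
)

func main() {
	// Stand-ins for commitTx.BlobHashes() of a single codecv7 commit transaction.
	blobs := []common.Hash{{0x01}, {0x02}, {0x03}}
	txHash := common.HexToHash("0xabc")

	txBlobIndexMap := make(map[common.Hash]int)
	for event := 0; event < 3; event++ { // three CommitBatch events from the same tx
		idx := txBlobIndexMap[txHash]
		fmt.Printf("event %d -> blob index %d (%s)\n", event, idx, blobs[idx].Hex())
		txBlobIndexMap[txHash] = idx + 1 // advance to the next blob for this tx
	}
	// Prints blob indices 0, 1, 2: each event reads the next blob in sequence.
}
```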
@@ -260,8 +304,8 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
 				EndBlockNumber:   endBlock,
 				L1BlockNumber:    vlog.BlockNumber,
 			})
-		case backendabi.L1RevertBatchEventSig:
-			event := backendabi.L1RevertBatchEvent{}
+		case backendabi.L1RevertBatchV0EventSig:
+			event := backendabi.L1RevertBatchV0Event{}
 			if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "RevertBatch", vlog); err != nil {
 				log.Error("Failed to unpack RevertBatch event", "err", err)
 				return nil, err
@@ -272,6 +316,19 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
 				BatchHash:     event.BatchHash.String(),
 				L1BlockNumber: vlog.BlockNumber,
 			})
+		case backendabi.L1RevertBatchV7EventSig:
+			event := backendabi.L1RevertBatchV7Event{}
+			if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "RevertBatch0", vlog); err != nil {
+				log.Error("Failed to unpack RevertBatch event", "err", err)
+				return nil, err
+			}
+			for i := event.StartBatchIndex.Uint64(); i <= event.FinishBatchIndex.Uint64(); i++ {
+				l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
+					BatchStatus:   int(btypes.BatchStatusTypeReverted),
+					BatchIndex:    i,
+					L1BlockNumber: vlog.BlockNumber,
+				})
+			}
 		case backendabi.L1FinalizeBatchEventSig:
 			event := backendabi.L1FinalizeBatchEvent{}
 			if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "FinalizeBatch", vlog); err != nil {
@@ -389,3 +446,32 @@ func getRealFromAddress(ctx context.Context, eventSender common.Address, eventMe
 	}
 	return sender.String(), nil
 }
+
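+// getBatchBlockRangeFromBlob fetches the blob for the given versioned hash via the configured
+// blob client, decodes it with the codec matching the batch version, and returns its DA blocks.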
+func (e *L1EventParser) getBatchBlockRangeFromBlob(ctx context.Context, version uint8, versionedHash common.Hash, l1BlockTime uint64) ([]encoding.DABlock, error) {
+	blob, err := e.blobClient.GetBlobByVersionedHashAndBlockTime(ctx, versionedHash, l1BlockTime)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get blob %s: %w", versionedHash.Hex(), err)
+	}
+	if blob == nil {
+		return nil, fmt.Errorf("blob %s not found", versionedHash.Hex())
+	}
+
+	codec, err := encoding.CodecFromVersion(encoding.CodecVersion(version))
+	if err != nil {
+		return nil, fmt.Errorf("unsupported codec version: %v, err: %w", version, err)
+	}
+
+	blobPayload, err := codec.DecodeBlob(blob)
+	if err != nil {
+		return nil, fmt.Errorf("blob %s decode error: %w", versionedHash.Hex(), err)
+	}
+
+	blocks := blobPayload.Blocks()
+	if len(blocks) == 0 {
+		return nil, fmt.Errorf("empty blocks in blob %s", versionedHash.Hex())
+	}
+
+	log.Debug("Successfully processed blob", "versionedHash", versionedHash.Hex(), "blocksCount", len(blocks))
+
+	return blocks, nil
+}