From 45b928c0e38f76634b3a59ba934e3c93741f8c0b Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Mon, 29 Jul 2024 08:16:11 +0000 Subject: [PATCH 01/33] using batch-by-batch + latest changes from zkevm --- core/vm/zk_batch_counters.go | 2 +- smt/pkg/db/mdbx.go | 2 + turbo/stages/zk_stages.go | 3 +- zk/hermez_db/db.go | 143 +++-- zk/legacy_executor_verifier/executor.go | 4 +- .../legacy_executor_verifier.go | 144 ++++- zk/legacy_executor_verifier/promise.go | 24 + zk/stages/stage_sequence_execute.go | 188 +++--- .../stage_sequence_execute_data_stream.go | 87 +++ zk/stages/stage_sequence_execute_limbo.go | 106 ++++ zk/stages/stage_sequence_execute_utils.go | 92 +++ zk/stages/stage_sequence_execute_verifier.go | 191 +++++++ zk/stages/stage_sequencer_executor_verify.go | 537 +++++++++--------- zk/stages/stages.go | 28 +- zk/txpool/pool_zk_limbo_processor.go | 105 ++-- zk/utils/zk_tables.go | 30 + zk/witness/witness.go | 35 +- 17 files changed, 1203 insertions(+), 518 deletions(-) create mode 100644 zk/stages/stage_sequence_execute_data_stream.go create mode 100644 zk/stages/stage_sequence_execute_limbo.go create mode 100644 zk/stages/stage_sequence_execute_verifier.go create mode 100644 zk/utils/zk_tables.go diff --git a/core/vm/zk_batch_counters.go b/core/vm/zk_batch_counters.go index 5640ef2c93a..b0db1bf2ece 100644 --- a/core/vm/zk_batch_counters.go +++ b/core/vm/zk_batch_counters.go @@ -224,7 +224,7 @@ func (bcc *BatchCounterCollector) CombineCollectors(verifyMerkleProof bool) (Cou // rlp level counters and execution level counters // this one returns the counters as they are so far, without adding processBatchLevelData, processChangeL2Block and decodeChangeL2BlockTx // used to save batch counter progress without adding the said counters twice -func (bcc *BatchCounterCollector) CombineCollectorsNoChanges(verifyMerkleProof bool) Counters { +func (bcc *BatchCounterCollector) CombineCollectorsNoChanges() Counters { // combine all the counters we have so far // if we have external coutners use them, otherwise create new diff --git a/smt/pkg/db/mdbx.go b/smt/pkg/db/mdbx.go index 6060a09274c..a034cdf5aa1 100644 --- a/smt/pkg/db/mdbx.go +++ b/smt/pkg/db/mdbx.go @@ -30,6 +30,8 @@ const TableAccountValues = "HermezSmtAccountValues" const TableMetadata = "HermezSmtMetadata" const TableHashKey = "HermezSmtHashKey" +var HermezSmtTables = []string{TableSmt, TableStats, TableAccountValues, TableMetadata, TableHashKey} + type EriDb struct { kvTx kv.RwTx tx SmtDbTx diff --git a/turbo/stages/zk_stages.go b/turbo/stages/zk_stages.go index b01393819b2..271aa9b2b89 100644 --- a/turbo/stages/zk_stages.go +++ b/turbo/stages/zk_stages.go @@ -139,10 +139,11 @@ func NewSequencerZkStages(ctx context.Context, cfg.Zk, txPool, txPoolDb, + verifier, ), stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3, agg), zkStages.StageZkInterHashesCfg(db, true, true, false, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg, cfg.Zk), - zkStages.StageSequencerExecutorVerifyCfg(db, verifier, txPool, controlServer.ChainConfig, cfg.Zk), + // zkStages.StageSequencerExecutorVerifyCfg(db, verifier, txPool, controlServer.ChainConfig, cfg.Zk), stagedsync.StageHistoryCfg(db, cfg.Prune, dirs.Tmp), stagedsync.StageLogIndexCfg(db, cfg.Prune, dirs.Tmp), stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, dirs.Tmp), diff --git a/zk/hermez_db/db.go b/zk/hermez_db/db.go index 65f768d85c8..ef40d577c3d 100644 --- a/zk/hermez_db/db.go +++ b/zk/hermez_db/db.go @@ -48,6 +48,44 @@ const BATCH_PARTIALLY_PROCESSED = "batch_partially_processed" // 
batch const LOCAL_EXIT_ROOTS = "local_exit_roots" // l2 block number -> local exit root const ROllUP_TYPES_FORKS = "rollup_types_forks" // rollup type id -> fork id const FORK_HISTORY = "fork_history" // index -> fork id + last verified batch +const JUST_UNWOUND = "just_unwound" // batch number -> true + +var HermezDbTables = []string{ + L1VERIFICATIONS, + L1SEQUENCES, + FORKIDS, + FORKID_BLOCK, + BLOCKBATCHES, + GLOBAL_EXIT_ROOTS, + BLOCK_GLOBAL_EXIT_ROOTS, + GLOBAL_EXIT_ROOTS_BATCHES, + TX_PRICE_PERCENTAGE, + STATE_ROOTS, + L1_INFO_TREE_UPDATES, + L1_INFO_TREE_UPDATES_BY_GER, + BLOCK_L1_INFO_TREE_INDEX, + L1_INJECTED_BATCHES, + BLOCK_INFO_ROOTS, + L1_BLOCK_HASHES, + BLOCK_L1_BLOCK_HASHES, + L1_BLOCK_HASH_GER, + INTERMEDIATE_TX_STATEROOTS, + BATCH_WITNESSES, + BATCH_COUNTERS, + L1_BATCH_DATA, + REUSED_L1_INFO_TREE_INDEX, + LATEST_USED_GER, + BATCH_BLOCKS, + SMT_DEPTHS, + L1_INFO_LEAVES, + L1_INFO_ROOTS, + INVALID_BATCHES, + BATCH_PARTIALLY_PROCESSED, + LOCAL_EXIT_ROOTS, + ROllUP_TYPES_FORKS, + FORK_HISTORY, + JUST_UNWOUND, +} type HermezDb struct { tx kv.RwTx @@ -72,42 +110,7 @@ func NewHermezDb(tx kv.RwTx) *HermezDb { } func CreateHermezBuckets(tx kv.RwTx) error { - tables := []string{ - L1VERIFICATIONS, - L1SEQUENCES, - FORKIDS, - FORKID_BLOCK, - BLOCKBATCHES, - GLOBAL_EXIT_ROOTS, - BLOCK_GLOBAL_EXIT_ROOTS, - GLOBAL_EXIT_ROOTS_BATCHES, - TX_PRICE_PERCENTAGE, - STATE_ROOTS, - L1_INFO_TREE_UPDATES, - L1_INFO_TREE_UPDATES_BY_GER, - BLOCK_L1_INFO_TREE_INDEX, - L1_INJECTED_BATCHES, - BLOCK_INFO_ROOTS, - L1_BLOCK_HASHES, - BLOCK_L1_BLOCK_HASHES, - L1_BLOCK_HASH_GER, - INTERMEDIATE_TX_STATEROOTS, - BATCH_WITNESSES, - BATCH_COUNTERS, - L1_BATCH_DATA, - REUSED_L1_INFO_TREE_INDEX, - LATEST_USED_GER, - BATCH_BLOCKS, - SMT_DEPTHS, - L1_INFO_LEAVES, - L1_INFO_ROOTS, - INVALID_BATCHES, - BATCH_PARTIALLY_PROCESSED, - LOCAL_EXIT_ROOTS, - ROllUP_TYPES_FORKS, - FORK_HISTORY, - } - for _, t := range tables { + for _, t := range HermezDbTables { if err := tx.CreateBucket(t); err != nil { return err } @@ -169,6 +172,14 @@ func (db *HermezDbReader) GetL2BlockNosByBatch(batchNo uint64) ([]uint64, error) return blocks, nil } +func concatenateBlockNumbers(blocks []uint64) []byte { + v := make([]byte, len(blocks)*8) + for i, block := range blocks { + copy(v[i*8:(i+1)*8], Uint64ToBytes(block)) + } + return v +} + func parseConcatenatedBlockNumbers(v []byte) []uint64 { count := len(v) / 8 blocks := make([]uint64, count) @@ -880,30 +891,46 @@ func (db *HermezDb) DeleteBlockL1InfoTreeIndexes(fromBlockNum, toBlockNum uint64 func (db *HermezDb) DeleteBlockBatches(fromBlockNum, toBlockNum uint64) error { // first, gather batch numbers related to the blocks we're about to delete batchNos := make([]uint64, 0) - c, err := db.tx.Cursor(BLOCKBATCHES) - if err != nil { - return err - } - defer c.Close() - var k, v []byte - for k, v, err = c.First(); k != nil; k, v, err = c.Next() { + // find all the batches involved + for i := fromBlockNum; i <= toBlockNum; i++ { + batch, err := db.GetBatchNoByL2Block(i) if err != nil { - break + return err + } + found := false + for _, b := range batchNos { + if b == batch { + found = true + break + } } - blockNum := BytesToUint64(k) - if blockNum >= fromBlockNum && blockNum <= toBlockNum { - batchNo := BytesToUint64(v) - batchNos = append(batchNos, batchNo) + if !found { + batchNos = append(batchNos, batch) } } - // now delete the batch -> block records + // now for each batch go and get the block numbers and remove them from the batch to block records for _, batchNo := range batchNos { - 
err := db.tx.Delete(BATCH_BLOCKS, Uint64ToBytes(batchNo)) + data, err := db.tx.GetOne(BATCH_BLOCKS, Uint64ToBytes(batchNo)) if err != nil { return err } + blockNos := parseConcatenatedBlockNumbers(data) + + // make a new list excluding the blocks in our range + newBlockNos := make([]uint64, 0) + for _, blockNo := range blockNos { + if blockNo < fromBlockNum || blockNo > toBlockNum { + newBlockNos = append(newBlockNos, blockNo) + } + } + + // concatenate the block numbers back again + newData := concatenateBlockNumbers(newBlockNos) + + // now store it back + err = db.tx.Put(BATCH_BLOCKS, Uint64ToBytes(batchNo), newData) } return db.deleteFromBucketWithUintKeysRange(BLOCKBATCHES, fromBlockNum, toBlockNum) @@ -1588,3 +1615,19 @@ func (db *HermezDbReader) GetAllForkHistory() ([]uint64, []uint64, error) { return forks, batches, nil } + +func (db *HermezDb) WriteJustUnwound(batch uint64) error { + return db.tx.Put(JUST_UNWOUND, Uint64ToBytes(batch), []byte{1}) +} + +func (db *HermezDb) DeleteJustUnwound(batch uint64) error { + return db.tx.Delete(JUST_UNWOUND, Uint64ToBytes(batch)) +} + +func (db *HermezDb) GetJustUnwound(batch uint64) (bool, error) { + v, err := db.tx.GetOne(JUST_UNWOUND, Uint64ToBytes(batch)) + if err != nil { + return false, err + } + return len(v) > 0, nil +} diff --git a/zk/legacy_executor_verifier/executor.go b/zk/legacy_executor_verifier/executor.go index 5bb839af274..f60b16a6471 100644 --- a/zk/legacy_executor_verifier/executor.go +++ b/zk/legacy_executor_verifier/executor.go @@ -230,12 +230,12 @@ func (e *Executor) Verify(p *Payload, request *VerifierRequest, oldStateRoot com "match", match, "grpcUrl", e.grpcUrl, "batch", request.BatchNumber, + "blocks-count", len(resp.BlockResponses), "counters", counters, "exec-root", common.BytesToHash(resp.NewStateRoot), "our-root", request.StateRoot, "exec-old-root", common.BytesToHash(resp.OldStateRoot), - "our-old-root", oldStateRoot, - "blocks-count", len(resp.BlockResponses)) + "our-old-root", oldStateRoot) for addr, all := range resp.ReadWriteAddresses { log.Debug("executor result", diff --git a/zk/legacy_executor_verifier/legacy_executor_verifier.go b/zk/legacy_executor_verifier/legacy_executor_verifier.go index a0631c2bc25..68a351b90fc 100644 --- a/zk/legacy_executor_verifier/legacy_executor_verifier.go +++ b/zk/legacy_executor_verifier/legacy_executor_verifier.go @@ -31,15 +31,17 @@ var ErrNoExecutorAvailable = fmt.Errorf("no executor available") type VerifierRequest struct { BatchNumber uint64 + BlockNumber uint64 ForkId uint64 StateRoot common.Hash Counters map[string]int creationTime time.Time } -func NewVerifierRequest(batchNumber, forkId uint64, stateRoot common.Hash, counters map[string]int) *VerifierRequest { +func NewVerifierRequest(batchNumber, blockNumber, forkId uint64, stateRoot common.Hash, counters map[string]int) *VerifierRequest { return &VerifierRequest{ BatchNumber: batchNumber, + BlockNumber: blockNumber, ForkId: forkId, StateRoot: stateRoot, Counters: counters, @@ -47,27 +49,29 @@ func NewVerifierRequest(batchNumber, forkId uint64, stateRoot common.Hash, count } } -func (vr *VerifierRequest) isOverdue() bool { +func (vr *VerifierRequest) IsOverdue() bool { return time.Since(vr.creationTime) > time.Duration(30*time.Minute) } type VerifierResponse struct { BatchNumber uint64 + BlockNumber uint64 Valid bool Witness []byte ExecutorResponse *executor.ProcessBatchResponseV2 + OriginalCounters map[string]int Error error } type VerifierBundle struct { - request *VerifierRequest - response *VerifierResponse + 
Request *VerifierRequest + Response *VerifierResponse } func NewVerifierBundle(request *VerifierRequest, response *VerifierResponse) *VerifierBundle { return &VerifierBundle{ - request: request, - response: response, + Request: request, + Response: response, } } @@ -150,7 +154,7 @@ func (v *LegacyExecutorVerifier) VerifySync(tx kv.Tx, request *VerifierRequest, L1InfoTreeMinTimestamps: l1InfoTreeMinTimestamps, } - e := v.getNextOnlineAvailableExecutor() + e := v.GetNextOnlineAvailableExecutor() if e == nil { return ErrNoExecutorAvailable } @@ -177,7 +181,7 @@ func (v *LegacyExecutorVerifier) AddRequestUnsafe(request *VerifierRequest, sequ promise := NewPromise[*VerifierBundle](func() (*VerifierBundle, error) { verifierBundle := NewVerifierBundle(request, nil) - e := v.getNextOnlineAvailableExecutor() + e := v.GetNextOnlineAvailableExecutor() if e == nil { return verifierBundle, ErrNoExecutorAvailable } @@ -289,7 +293,7 @@ func (v *LegacyExecutorVerifier) AddRequestUnsafe(request *VerifierRequest, sequ } } - verifierBundle.response = &VerifierResponse{ + verifierBundle.Response = &VerifierResponse{ BatchNumber: request.BatchNumber, Valid: ok, Witness: witness, @@ -307,6 +311,117 @@ func (v *LegacyExecutorVerifier) AddRequestUnsafe(request *VerifierRequest, sequ return promise } +type VerifierBundleWithBlocks struct { + Blocks []uint64 + Bundle *VerifierBundle +} + +func (v *LegacyExecutorVerifier) CreateAsyncPromise( + request *VerifierRequest, + blockNumbers []uint64, +) *Promise[*VerifierBundleWithBlocks] { + // eager promise will do the work as soon as called in a goroutine, then we can retrieve the result later + // ProcessResultsSequentiallyUnsafe relies on the fact that this function returns ALWAYS non-verifierBundle and error. The only exception is the case when verifications has been canceled. Only then the verifierBundle can be nil + promise := NewPromise[*VerifierBundleWithBlocks](func() (*VerifierBundleWithBlocks, error) { + verifierBundle := NewVerifierBundle(request, nil) + bundleWithBlocks := &VerifierBundleWithBlocks{ + Blocks: blockNumbers, + Bundle: verifierBundle, + } + + e := v.GetNextOnlineAvailableExecutor() + if e == nil { + return bundleWithBlocks, ErrNoExecutorAvailable + } + + e.AquireAccess() + defer e.ReleaseAccess() + if v.cancelAllVerifications.Load() { + return nil, ErrPromiseCancelled + } + + var err error + ctx := context.Background() + // mapmutation has some issue with us not having a quit channel on the context call to `Done` so + // here we're creating a cancelable context and just deferring the cancel + innerCtx, cancel := context.WithCancel(ctx) + defer cancel() + + tx, err := v.db.BeginRo(innerCtx) + if err != nil { + return bundleWithBlocks, err + } + defer tx.Rollback() + + hermezDb := hermez_db.NewHermezDbReader(tx) + + l1InfoTreeMinTimestamps := make(map[uint64]uint64) + streamBytes, err := v.GetWholeBatchStreamBytes(request.BatchNumber, tx, blockNumbers, hermezDb, l1InfoTreeMinTimestamps, nil) + if err != nil { + return bundleWithBlocks, err + } + + witness, err := v.witnessGenerator.GetWitnessByBlockRange(tx, ctx, blockNumbers[0], blockNumbers[len(blockNumbers)-1], false, v.cfg.WitnessFull) + if err != nil { + return nil, err + } + + log.Debug("witness generated", "data", hex.EncodeToString(witness)) + + // now we need to figure out the timestamp limit for this payload. 
It must be: + // timestampLimit >= currentTimestamp (from batch pre-state) + deltaTimestamp + // so to ensure we have a good value we can take the timestamp of the last block in the batch + // and just add 5 minutes + lastBlock, err := rawdb.ReadBlockByNumber(tx, blockNumbers[len(blockNumbers)-1]) + if err != nil { + return bundleWithBlocks, err + } + + // executor is perfectly happy with just an empty hash here + oldAccInputHash := common.HexToHash("0x0") + timestampLimit := lastBlock.Time() + payload := &Payload{ + Witness: witness, + DataStream: streamBytes, + Coinbase: v.cfg.AddressSequencer.String(), + OldAccInputHash: oldAccInputHash.Bytes(), + L1InfoRoot: nil, + TimestampLimit: timestampLimit, + ForcedBlockhashL1: []byte{0}, + ContextId: strconv.FormatUint(request.BatchNumber, 10), + L1InfoTreeMinTimestamps: l1InfoTreeMinTimestamps, + } + + previousBlock, err := rawdb.ReadBlockByNumber(tx, blockNumbers[0]-1) + if err != nil { + return bundleWithBlocks, err + } + + ok, executorResponse, executorErr := e.Verify(payload, request, previousBlock.Root()) + if executorErr != nil { + if errors.Is(executorErr, ErrExecutorStateRootMismatch) { + log.Error("[Verifier] State root mismatch detected", "err", executorErr) + } else if errors.Is(executorErr, ErrExecutorUnknownError) { + log.Error("[Verifier] Unexpected error found from executor", "err", executorErr) + } else { + log.Error("[Verifier] Error", "err", executorErr) + } + } + + verifierBundle.Response = &VerifierResponse{ + BatchNumber: request.BatchNumber, + BlockNumber: request.BlockNumber, + Valid: ok, + Witness: witness, + ExecutorResponse: executorResponse, + Error: executorErr, + } + return bundleWithBlocks, nil + }) + + return promise +} + func (v *LegacyExecutorVerifier) checkAndWriteToStream(tx kv.Tx, hdb *hermez_db.HermezDbReader, newBatch uint64) error { t := utils.StartTimer("legacy-executor-verifier", "check-and-write-to-stream") defer t.LogTimer() @@ -374,15 +489,15 @@ func (v *LegacyExecutorVerifier) ProcessResultsSequentiallyUnsafe(tx kv.RwTx) ([ log.Error("error on our end while preparing the verification request, re-queueing the task", "err", err) // this is an error on our end, so just re-create the promise at exact position where it was - if verifierBundle.request.isOverdue() { - return nil, fmt.Errorf("error: batch %d couldn't be processed in 30 minutes", verifierBundle.request.BatchNumber) + if verifierBundle.Request.IsOverdue() { + return nil, fmt.Errorf("error: batch %d couldn't be processed in 30 minutes", verifierBundle.Request.BatchNumber) } v.promises[i] = NewPromise[*VerifierBundle](v.promises[i].task) break } - verifierResponse := verifierBundle.response + verifierResponse := verifierBundle.Response results = append(results, verifierResponse) delete(v.addedBatches, verifierResponse.BatchNumber) @@ -392,9 +507,6 @@ func (v *LegacyExecutorVerifier) ProcessResultsSequentiallyUnsafe(tx kv.RwTx) ([ } } - // leave only non-processed promises - // v.promises = v.promises[len(results):] - return results, nil } @@ -448,7 +560,7 @@ func (v *LegacyExecutorVerifier) WriteBatchToStream(batchNumber uint64, hdb *her return nil } -func (v *LegacyExecutorVerifier) getNextOnlineAvailableExecutor() ILegacyExecutor { +func (v *LegacyExecutorVerifier) GetNextOnlineAvailableExecutor() ILegacyExecutor { var exec ILegacyExecutor // TODO: find executors with spare capacity diff --git a/zk/legacy_executor_verifier/promise.go b/zk/legacy_executor_verifier/promise.go index 33a6b2ab26c..b374b730945 100644 --- 
a/zk/legacy_executor_verifier/promise.go +++ b/zk/legacy_executor_verifier/promise.go @@ -40,6 +40,26 @@ func NewPromise[T any](task func() (T, error)) *Promise[T] { return p } +func NewPromiseSync[T any](task func() (T, error)) *Promise[T] { + p := &Promise[T]{} + + result, err := task() + p.mutex.Lock() + defer p.mutex.Unlock() // this will be the first defer that is executed when the function retunrs + + if p.cancelled { + err = ErrPromiseCancelled + } else { + p.result = result + p.err = err + } + + if err != nil { + p.task = task + } + return p +} + func (p *Promise[T]) Get() (T, error) { p.wg.Wait() // .Wait ensures that all memory operations before .Done are visible after .Wait => no need to lock/unlock the mutex return p.result, p.err @@ -56,3 +76,7 @@ func (p *Promise[T]) Cancel() { defer p.mutex.Unlock() p.cancelled = true } + +func (p *Promise[T]) Task() func() (T, error) { + return p.task +} diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index 288fdd43a57..af156e9bdbf 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -29,7 +29,7 @@ var SpecialZeroIndexHash = common.HexToHash("0x27AE5BA08D7291C96C8CBDDCC148BF48A func SpawnSequencingStage( s *stagedsync.StageState, u stagedsync.Unwinder, - tx kv.RwTx, + rootTx kv.RwTx, ctx context.Context, cfg SequenceBlockCfg, quiet bool, @@ -38,29 +38,29 @@ func SpawnSequencingStage( log.Info(fmt.Sprintf("[%s] Starting sequencing stage", logPrefix)) defer log.Info(fmt.Sprintf("[%s] Finished sequencing stage", logPrefix)) - freshTx := tx == nil + freshTx := rootTx == nil if freshTx { - tx, err = cfg.db.BeginRw(ctx) + rootTx, err = cfg.db.BeginRw(ctx) if err != nil { return err } - defer tx.Rollback() + defer rootTx.Rollback() } - sdb := newStageDb(tx) - l1Recovery := cfg.zk.L1SyncStartBlock > 0 - executionAt, err := s.ExecutionAt(tx) + executionAt, err := s.ExecutionAt(rootTx) if err != nil { return err } - lastBatch, err := stages.GetStageProgress(tx, stages.HighestSeenBatchNumber) + lastBatch, err := stages.GetStageProgress(rootTx, stages.HighestSeenBatchNumber) if err != nil { return err } + sdb := newStageDb(rootTx) + isLastBatchPariallyProcessed, err := sdb.hermezDb.GetIsBatchPartiallyProcessed(lastBatch) if err != nil { return err @@ -90,7 +90,7 @@ func SpawnSequencingStage( return err } - header, parentBlock, err := prepareHeader(tx, executionAt, math.MaxUint64, math.MaxUint64, forkId, cfg.zk.AddressSequencer) + header, parentBlock, err := prepareHeader(rootTx, executionAt, math.MaxUint64, math.MaxUint64, forkId, cfg.zk.AddressSequencer) if err != nil { return err } @@ -102,12 +102,12 @@ func SpawnSequencingStage( return err } - if err = cfg.datastreamServer.WriteWholeBatchToStream(logPrefix, tx, sdb.hermezDb.HermezDbReader, lastBatch, injectedBatchNumber); err != nil { + if err = cfg.datastreamServer.WriteWholeBatchToStream(logPrefix, rootTx, sdb.hermezDb.HermezDbReader, lastBatch, injectedBatchNumber); err != nil { return err } if freshTx { - if err = tx.Commit(); err != nil { + if err = rootTx.Commit(); err != nil { return err } } @@ -117,7 +117,7 @@ func SpawnSequencingStage( if !isLastBatchPariallyProcessed && !isLastEntryBatchEnd { log.Warn(fmt.Sprintf("[%s] Last batch %d was not closed properly, closing it now...", logPrefix, lastBatch)) - ler, err := utils.GetBatchLocalExitRootFromSCStorage(lastBatch, sdb.hermezDb.HermezDbReader, tx) + ler, err := utils.GetBatchLocalExitRootFromSCStorage(lastBatch, sdb.hermezDb.HermezDbReader, rootTx) if err != nil 
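
NewPromiseSync above differs from NewPromise only in when the task runs: the existing constructor fires the task in a goroutine as soon as the promise is created, the sync variant runs it inline before returning, and Get simply waits for the result in both cases. A minimal, self-contained sketch of that eager-promise pattern; the names here are illustrative, not the actual Promise type:

    package main

    import (
        "fmt"
        "sync"
    )

    type promise[T any] struct {
        wg     sync.WaitGroup
        result T
        err    error
    }

    // newPromise starts the task immediately in a goroutine (eager async).
    func newPromise[T any](task func() (T, error)) *promise[T] {
        p := &promise[T]{}
        p.wg.Add(1)
        go func() {
            defer p.wg.Done()
            p.result, p.err = task()
        }()
        return p
    }

    // newPromiseSync runs the task inline before returning.
    func newPromiseSync[T any](task func() (T, error)) *promise[T] {
        p := &promise[T]{}
        p.result, p.err = task()
        return p
    }

    // Get blocks until the result is available (a no-op for the sync variant).
    func (p *promise[T]) Get() (T, error) {
        p.wg.Wait()
        return p.result, p.err
    }

    func main() {
        eager := newPromise(func() (int, error) { return 42, nil })
        inline := newPromiseSync(func() (int, error) { return 7, nil })
        v1, _ := eager.Get()
        v2, _ := inline.Get()
        fmt.Println(v1, v2) // 42 7
    }

The BatchVerifier introduced later in this patch picks the sync variant when no executor is configured and the async variant when one is, which is why both constructors expose the same Get/TryGet surface.
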
{ return err } @@ -171,6 +171,45 @@ func SpawnSequencingStage( } batchCounters := vm.NewBatchCounterCollector(sdb.smt.GetDepth(), uint16(forkId), cfg.zk.VirtualCountersSmtReduction, cfg.zk.ShouldCountersBeUnlimited(l1Recovery), intermediateUsedCounters) + // check if we just unwound from a bad executor response and if we did just close the batch here + instantClose, err := sdb.hermezDb.GetJustUnwound(thisBatch) + if err != nil { + return err + } + if instantClose { + if err = sdb.hermezDb.DeleteJustUnwound(thisBatch); err != nil { + return err + } + + // lets first check if we actually wrote any blocks in this batch + blocks, err := sdb.hermezDb.GetL2BlockNosByBatch(thisBatch) + if err != nil { + return err + } + + // only close this batch down if we actually made any progress in it, otherwise + // just continue processing as normal and recreate the batch from scratch + if len(blocks) > 0 { + if err = runBatchLastSteps(logPrefix, cfg.datastreamServer, sdb, thisBatch, blocks[len(blocks)-1], batchCounters); err != nil { + return err + } + if err = stages.SaveStageProgress(rootTx, stages.HighestSeenBatchNumber, thisBatch); err != nil { + return err + } + if err = sdb.hermezDb.WriteForkId(thisBatch, forkId); err != nil { + return err + } + + if freshTx { + if err = rootTx.Commit(); err != nil { + return err + } + } + + return nil + } + } + runLoopBlocks := true lastStartedBn := executionAt - 1 yielded := mapset.NewSet[[32]byte]() @@ -247,14 +286,14 @@ func SpawnSequencingStage( if err = sdb.hermezDb.DeleteIsBatchPartiallyProcessed(thisBatch); err != nil { return err } - if err = stages.SaveStageProgress(tx, stages.HighestSeenBatchNumber, thisBatch); err != nil { + if err = stages.SaveStageProgress(rootTx, stages.HighestSeenBatchNumber, thisBatch); err != nil { return err } if err = sdb.hermezDb.WriteForkId(thisBatch, forkId); err != nil { return err } if freshTx { - if err = tx.Commit(); err != nil { + if err = rootTx.Commit(); err != nil { return err } } @@ -268,10 +307,23 @@ func SpawnSequencingStage( log.Info(fmt.Sprintf("[%s] Continuing unfinished batch %d from block %d", logPrefix, thisBatch, executionAt)) } + batchVerifier := NewBatchVerifier(cfg.zk, hasExecutorForThisBatch, cfg.legacyVerifier, forkId) + streamWriter := &SequencerBatchStreamWriter{ + ctx: ctx, + db: cfg.db, + logPrefix: logPrefix, + batchVerifier: batchVerifier, + sdb: sdb, + streamServer: cfg.datastreamServer, + hasExecutors: hasExecutorForThisBatch, + lastBatch: lastBatch, + } + blockDataSizeChecker := NewBlockDataChecker() - prevHeader := rawdb.ReadHeaderByNumber(tx, executionAt) + prevHeader := rawdb.ReadHeaderByNumber(rootTx, executionAt) batchDataOverflow := false + var builtBlocks []uint64 var block *types.Block for blockNumber := executionAt + 1; runLoopBlocks; blockNumber++ { @@ -302,7 +354,7 @@ func SpawnSequencingStage( effectiveGases = []uint8{} addedExecutionResults := []*core.ExecutionResult{} - header, parentBlock, err = prepareHeader(tx, blockNumber-1, deltaTimestamp, limboHeaderTimestamp, forkId, nextBatchData.Coinbase) + header, parentBlock, err = prepareHeader(rootTx, blockNumber-1, deltaTimestamp, limboHeaderTimestamp, forkId, nextBatchData.Coinbase) if err != nil { return err } @@ -520,78 +572,74 @@ func SpawnSequencingStage( log.Info(fmt.Sprintf("[%s] Finish block %d with %d transactions...", logPrefix, blockNumber, len(addedTransactions))) } - if !hasExecutorForThisBatch { - // save counters midbatch - // here they shouldn't add more to counters other than what they already have - // because it 
would be later added twice - counters := batchCounters.CombineCollectorsNoChanges(l1InfoIndex != 0) + err = sdb.hermezDb.WriteBatchCounters(thisBatch, batchCounters.CombineCollectorsNoChanges().UsedAsMap()) + if err != nil { + return err + } - if err = sdb.hermezDb.WriteBatchCounters(thisBatch, counters.UsedAsMap()); err != nil { - return err - } + err = sdb.hermezDb.WriteIsBatchPartiallyProcessed(thisBatch) + if err != nil { + return err + } - if err = sdb.hermezDb.WriteIsBatchPartiallyProcessed(thisBatch); err != nil { - return err - } + if err = rootTx.Commit(); err != nil { + return err + } + rootTx, err = cfg.db.BeginRw(ctx) + if err != nil { + return err + } + defer rootTx.Rollback() + sdb.SetTx(rootTx) + + lastBatch = thisBatch - if err = cfg.datastreamServer.WriteBlockWithBatchStartToStream(logPrefix, tx, sdb.hermezDb, forkId, thisBatch, lastBatch, *parentBlock, *block); err != nil { + // add a check to the verifier and also check for responses + builtBlocks = append(builtBlocks, blockNumber) + batchVerifier.AddNewCheck(thisBatch, blockNumber, block.Root(), batchCounters.CombineCollectorsNoChanges().UsedAsMap(), builtBlocks) + + // check for new responses from the verifier + needsUnwind, _, err := checkStreamWriterForUpdates(logPrefix, sdb.tx, streamWriter, forkId, u) + if err != nil { + return err + } + if needsUnwind { + if err = sdb.hermezDb.WriteJustUnwound(thisBatch); err != nil { return err } - - if err = tx.Commit(); err != nil { + // capture the fork otherwise when the loop starts again to close + // off the batch it will detect it as a fork upgrade + if err = sdb.hermezDb.WriteForkId(thisBatch, forkId); err != nil { return err } - if tx, err = cfg.db.BeginRw(ctx); err != nil { + // re-commit the tx so that we can capture this the next time the stage starts + if err = rootTx.Commit(); err != nil { return err } - // TODO: This creates stacked up deferrals - defer tx.Rollback() - sdb.SetTx(tx) - - lastBatch = thisBatch + return nil } } - l1InfoIndex, err := sdb.hermezDb.GetBlockL1InfoTreeIndex(lastStartedBn) - if err != nil { - return err - } - - counters, err := batchCounters.CombineCollectors(l1InfoIndex != 0) - if err != nil { - return err - } - - log.Info(fmt.Sprintf("[%s] counters consumed", logPrefix), "batch", thisBatch, "counts", counters.UsedAsString()) - if err = sdb.hermezDb.WriteBatchCounters(thisBatch, counters.UsedAsMap()); err != nil { - return err - } - - if err = sdb.hermezDb.DeleteIsBatchPartiallyProcessed(thisBatch); err != nil { - return err + for { + needsUnwind, remaining, err := checkStreamWriterForUpdates(logPrefix, sdb.tx, streamWriter, forkId, u) + if err != nil { + return err + } + if needsUnwind { + return nil + } + if remaining == 0 { + break + } + time.Sleep(50 * time.Millisecond) } - // Local Exit Root (ler): read s/c storage every batch to store the LER for the highest block in the batch - ler, err := utils.GetBatchLocalExitRootFromSCStorage(thisBatch, sdb.hermezDb.HermezDbReader, tx) - if err != nil { - return err - } - // write ler to hermezdb - if err = sdb.hermezDb.WriteLocalExitRootForBatchNo(thisBatch, ler); err != nil { + if err = runBatchLastSteps(logPrefix, cfg.datastreamServer, sdb, thisBatch, lastStartedBn, batchCounters); err != nil { return err } - log.Info(fmt.Sprintf("[%s] Finish batch %d...", logPrefix, thisBatch)) - - if !hasExecutorForThisBatch { - blockRoot := block.Root() - if err = cfg.datastreamServer.WriteBatchEnd(sdb.hermezDb, thisBatch, lastBatch, &blockRoot, &ler); err != nil { - return err - } - } - if freshTx { - if 
err = tx.Commit(); err != nil { + if err = rootTx.Commit(); err != nil { return err } } diff --git a/zk/stages/stage_sequence_execute_data_stream.go b/zk/stages/stage_sequence_execute_data_stream.go new file mode 100644 index 00000000000..321eda583a1 --- /dev/null +++ b/zk/stages/stage_sequence_execute_data_stream.go @@ -0,0 +1,87 @@ +package stages + +import ( + "context" + + "github.com/gateway-fm/cdk-erigon-lib/kv" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/zk/datastream/server" + verifier "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" +) + +type SequencerBatchStreamWriter struct { + ctx context.Context + db kv.RwDB + logPrefix string + batchVerifier *BatchVerifier + sdb *stageDb + streamServer *server.DataStreamServer + hasExecutors bool + lastBatch uint64 +} + +type BlockStatus struct { + BlockNumber uint64 + Valid bool + Error error +} + +func (sbc *SequencerBatchStreamWriter) CheckAndCommitUpdates(forkId uint64) ([]BlockStatus, int, error) { + var written []BlockStatus + responses, remaining, err := sbc.batchVerifier.CheckProgress() + if err != nil { + return written, remaining, err + } + + if len(responses) == 0 { + return written, remaining, nil + } + + written, err = sbc.writeBlockDetails(responses, forkId) + if err != nil { + return written, remaining, err + } + + return written, remaining, nil +} + +func (sbc *SequencerBatchStreamWriter) writeBlockDetails(verifiedBundles []*verifier.VerifierBundle, forkId uint64) ([]BlockStatus, error) { + var written []BlockStatus + for _, bundle := range verifiedBundles { + response := bundle.Response + + if response.Valid { + parentBlock, err := rawdb.ReadBlockByNumber(sbc.sdb.tx, response.BlockNumber-1) + if err != nil { + return written, err + } + block, err := rawdb.ReadBlockByNumber(sbc.sdb.tx, response.BlockNumber) + if err != nil { + return written, err + } + + if err := sbc.streamServer.WriteBlockWithBatchStartToStream(sbc.logPrefix, sbc.sdb.tx, sbc.sdb.hermezDb, forkId, response.BatchNumber, sbc.lastBatch, *parentBlock, *block); err != nil { + return written, err + } + + // once we have handled the very first block we can update the last batch to be the current batch safely so that + // we don't keep adding batch bookmarks in between blocks + sbc.lastBatch = response.BatchNumber + } + + status := BlockStatus{ + BlockNumber: response.BlockNumber, + Valid: response.Valid, + Error: response.Error, + } + + written = append(written, status) + + // just break early if there is an invalid response as we don't want to process the remainder anyway + if !response.Valid { + break + } + } + + return written, nil +} diff --git a/zk/stages/stage_sequence_execute_limbo.go b/zk/stages/stage_sequence_execute_limbo.go new file mode 100644 index 00000000000..84df2a5bdf3 --- /dev/null +++ b/zk/stages/stage_sequence_execute_limbo.go @@ -0,0 +1,106 @@ +package stages + +import ( + "bytes" + "fmt" + "math" + "sort" + + "github.com/ledgerwatch/erigon/chain" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types" + verifier "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" + "github.com/ledgerwatch/erigon/zk/txpool" + "github.com/ledgerwatch/log/v3" +) + +func handleLimbo( + logPrefix string, + sdb *stageDb, + batchNo uint64, + forkId uint64, + verifier *verifier.LegacyExecutorVerifier, + response *verifier.VerifierResponse, + pool *txpool.TxPool, + chainConfig *chain.Config, +) error { + + blockNumbers, err := sdb.hermezDb.GetL2BlockNosByBatch(batchNo) + if err != 
nil { + return err + } + if len(blockNumbers) == 0 { + panic("failing to verify a batch without blocks") + } + sort.Slice(blockNumbers, func(i, j int) bool { + return blockNumbers[i] < blockNumbers[j] + }) + + var lowestBlock, highestBlock *types.Block + + l1InfoTreeMinTimestamps := make(map[uint64]uint64) + _, err = verifier.GetWholeBatchStreamBytes(batchNo, sdb.tx, blockNumbers, sdb.hermezDb.HermezDbReader, l1InfoTreeMinTimestamps, nil) + if err != nil { + return err + } + + limboSendersToPreviousTxMap := make(map[string]uint32) + limboStreamBytesBuilderHelper := newLimboStreamBytesBuilderHelper() + + limboDetails := txpool.NewLimboBatchDetails() + limboDetails.Witness = response.Witness + limboDetails.L1InfoTreeMinTimestamps = l1InfoTreeMinTimestamps + limboDetails.BatchNumber = response.BatchNumber + limboDetails.ForkId = forkId + + for _, blockNumber := range blockNumbers { + block, err := rawdb.ReadBlockByNumber(sdb.tx, blockNumber) + if err != nil { + return err + } + highestBlock = block + if lowestBlock == nil { + // capture the first block, then we can set the bad block hash in the unwind to terminate the + // stage loop and broadcast the accumulator changes to the txpool before the next stage loop run + lowestBlock = block + } + + for i, transaction := range block.Transactions() { + var b []byte + buffer := bytes.NewBuffer(b) + err = transaction.EncodeRLP(buffer) + if err != nil { + return err + } + + signer := types.MakeSigner(chainConfig, blockNumber) + sender, err := transaction.Sender(*signer) + if err != nil { + return err + } + senderMapKey := sender.Hex() + + blocksForStreamBytes, transactionsToIncludeByIndex := limboStreamBytesBuilderHelper.append(senderMapKey, blockNumber, i) + streamBytes, err := verifier.GetWholeBatchStreamBytes(response.BatchNumber, sdb.tx, blocksForStreamBytes, sdb.hermezDb.HermezDbReader, l1InfoTreeMinTimestamps, transactionsToIncludeByIndex) + if err != nil { + return err + } + + previousTxIndex, ok := limboSendersToPreviousTxMap[senderMapKey] + if !ok { + previousTxIndex = math.MaxUint32 + } + + hash := transaction.Hash() + limboTxCount := limboDetails.AppendTransaction(buffer.Bytes(), streamBytes, hash, sender, previousTxIndex) + limboSendersToPreviousTxMap[senderMapKey] = limboTxCount - 1 + + log.Info(fmt.Sprintf("[%s] adding transaction to limbo", logPrefix, "hash", hash)) + } + } + + limboDetails.TimestampLimit = highestBlock.Time() + limboDetails.FirstBlockNumber = lowestBlock.NumberU64() + pool.ProcessLimboBatchDetails(limboDetails) + return nil +} diff --git a/zk/stages/stage_sequence_execute_utils.go b/zk/stages/stage_sequence_execute_utils.go index 4fb8077e856..d4f68491085 100644 --- a/zk/stages/stage_sequence_execute_utils.go +++ b/zk/stages/stage_sequence_execute_utils.go @@ -34,6 +34,7 @@ import ( "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/ledgerwatch/erigon/zk/datastream/server" "github.com/ledgerwatch/erigon/zk/hermez_db" + verifier "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" "github.com/ledgerwatch/erigon/zk/tx" zktx "github.com/ledgerwatch/erigon/zk/tx" "github.com/ledgerwatch/erigon/zk/txpool" @@ -86,6 +87,8 @@ type SequenceBlockCfg struct { txPool *txpool.TxPool txPoolDb kv.RwDB + + legacyVerifier *verifier.LegacyExecutorVerifier } func StageSequenceBlocksCfg( @@ -111,6 +114,7 @@ func StageSequenceBlocksCfg( txPool *txpool.TxPool, txPoolDb kv.RwDB, + legacyVerifier *verifier.LegacyExecutorVerifier, ) SequenceBlockCfg { return SequenceBlockCfg{ @@ -135,6 +139,7 @@ func 
StageSequenceBlocksCfg( zk: zk, txPool: txPool, txPoolDb: txPoolDb, + legacyVerifier: legacyVerifier, } } @@ -491,3 +496,90 @@ func (bdc *BlockDataChecker) AddTransactionData(txL2Data []byte) bool { return false } + +func checkStreamWriterForUpdates( + logPrefix string, + tx kv.Tx, + streamWriter *SequencerBatchStreamWriter, + forkId uint64, + u stagedsync.Unwinder, +) (bool, int, error) { + committed, remaining, err := streamWriter.CheckAndCommitUpdates(forkId) + if err != nil { + return false, remaining, err + } + for _, commit := range committed { + if !commit.Valid { + unwindTo := commit.BlockNumber - 1 + + // for unwind we supply the block number X-1 of the block we want to remove, but supply the hash of the block + // causing the unwind. + unwindHeader := rawdb.ReadHeaderByNumber(tx, commit.BlockNumber) + if unwindHeader == nil { + return false, 0, fmt.Errorf("could not find header for block %d", commit.BlockNumber) + } + + log.Warn(fmt.Sprintf("[%s] Block is invalid - rolling back to block", logPrefix), "badBlock", commit.BlockNumber, "unwindTo", unwindTo, "root", unwindHeader.Root) + + u.UnwindTo(unwindTo, unwindHeader.Hash()) + return true, 0, nil + } + } + + return false, remaining, nil +} + +func runBatchLastSteps( + logPrefix string, + datastreamServer *server.DataStreamServer, + sdb *stageDb, + thisBatch uint64, + lastStartedBn uint64, + batchCounters *vm.BatchCounterCollector, +) error { + l1InfoIndex, err := sdb.hermezDb.GetBlockL1InfoTreeIndex(lastStartedBn) + if err != nil { + return err + } + + counters, err := batchCounters.CombineCollectors(l1InfoIndex != 0) + if err != nil { + return err + } + + log.Info(fmt.Sprintf("[%s] counters consumed", logPrefix), "batch", thisBatch, "counts", counters.UsedAsString()) + + if err = sdb.hermezDb.WriteBatchCounters(thisBatch, counters.UsedAsMap()); err != nil { + return err + } + if err := sdb.hermezDb.DeleteIsBatchPartiallyProcessed(thisBatch); err != nil { + return err + } + + // Local Exit Root (ler): read s/c storage every batch to store the LER for the highest block in the batch + ler, err := utils.GetBatchLocalExitRootFromSCStorage(thisBatch, sdb.hermezDb.HermezDbReader, sdb.tx) + if err != nil { + return err + } + // write ler to hermezdb + if err = sdb.hermezDb.WriteLocalExitRootForBatchNo(thisBatch, ler); err != nil { + return err + } + + lastBlock, err := sdb.hermezDb.GetHighestBlockInBatch(thisBatch) + if err != nil { + return err + } + block, err := rawdb.ReadBlockByNumber(sdb.tx, lastBlock) + if err != nil { + return err + } + blockRoot := block.Root() + if err = datastreamServer.WriteBatchEnd(sdb.hermezDb, thisBatch, thisBatch, &blockRoot, &ler); err != nil { + return err + } + + log.Info(fmt.Sprintf("[%s] Finish batch %d...", logPrefix, thisBatch)) + + return nil +} diff --git a/zk/stages/stage_sequence_execute_verifier.go b/zk/stages/stage_sequence_execute_verifier.go new file mode 100644 index 00000000000..c07b5e8ec92 --- /dev/null +++ b/zk/stages/stage_sequence_execute_verifier.go @@ -0,0 +1,191 @@ +package stages + +import ( + "errors" + "fmt" + "math/rand" + "sync" + + "github.com/gateway-fm/cdk-erigon-lib/common" + "github.com/ledgerwatch/erigon/eth/ethconfig" + verifier "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" + "github.com/ledgerwatch/log/v3" +) + +type PromiseWithBlocks struct { + Promise *verifier.Promise[*verifier.VerifierBundleWithBlocks] + Blocks []uint64 +} + +type BatchVerifier struct { + cfg *ethconfig.Zk + legacyVerifier *verifier.LegacyExecutorVerifier + hasExecutor bool + forkId 
uint64 + mtxPromises *sync.Mutex + promises []*PromiseWithBlocks + stop bool + errors chan error + finishCond *sync.Cond +} + +func NewBatchVerifier( + cfg *ethconfig.Zk, + hasExecutors bool, + legacyVerifier *verifier.LegacyExecutorVerifier, + forkId uint64, +) *BatchVerifier { + return &BatchVerifier{ + cfg: cfg, + hasExecutor: hasExecutors, + legacyVerifier: legacyVerifier, + forkId: forkId, + mtxPromises: &sync.Mutex{}, + promises: make([]*PromiseWithBlocks, 0), + errors: make(chan error), + finishCond: sync.NewCond(&sync.Mutex{}), + } +} + +func (bv *BatchVerifier) AddNewCheck( + batchNumber uint64, + blockNumber uint64, + stateRoot common.Hash, + counters map[string]int, + blockNumbers []uint64, +) { + request := verifier.NewVerifierRequest(batchNumber, blockNumber, bv.forkId, stateRoot, counters) + + var promise *PromiseWithBlocks + if bv.hasExecutor { + promise = bv.asyncPromise(request, blockNumbers) + } else { + promise = bv.syncPromise(request, blockNumbers) + } + + bv.appendPromise(promise) +} + +func (bv *BatchVerifier) WaitForFinish() { + count := 0 + bv.mtxPromises.Lock() + count = len(bv.promises) + bv.mtxPromises.Unlock() + + if count > 0 { + bv.finishCond.L.Lock() + bv.finishCond.Wait() + bv.finishCond.L.Unlock() + } +} + +func (bv *BatchVerifier) appendPromise(promise *PromiseWithBlocks) { + bv.mtxPromises.Lock() + defer bv.mtxPromises.Unlock() + bv.promises = append(bv.promises, promise) +} + +func (bv *BatchVerifier) CheckProgress() ([]*verifier.VerifierBundle, int, error) { + bv.mtxPromises.Lock() + defer bv.mtxPromises.Unlock() + + var responses []*verifier.VerifierBundle + + // not a stop signal, so we can start to process our promises now + processed := 0 + for idx, promise := range bv.promises { + bundleWithBlocks, err := promise.Promise.TryGet() + if bundleWithBlocks == nil && err == nil { + // nothing to process in this promise so we skip it + break + } + + if err != nil { + // let leave it for debug purposes + // a cancelled promise is removed from v.promises => it should never appear here, that's why let's panic if it happens, because it will indicate for massive error + if errors.Is(err, verifier.ErrPromiseCancelled) { + panic("this should never happen") + } + + log.Error("error on our end while preparing the verification request, re-queueing the task", "err", err) + + if bundleWithBlocks == nil { + // we can't proceed here until this promise is attempted again + break + } + + if bundleWithBlocks.Bundle.Request.IsOverdue() { + // signal an error, the caller can check on this and stop the process if needs be + return nil, 0, fmt.Errorf("error: batch %d couldn't be processed in 30 minutes", bundleWithBlocks.Bundle.Request.BatchNumber) + } + + // re-queue the task - it should be safe to replace the index of the slice here as we only add to it + if bv.hasExecutor { + prom := bv.asyncPromise(bundleWithBlocks.Bundle.Request, bundleWithBlocks.Blocks) + bv.promises[idx] = prom + } else { + prom := bv.syncPromise(bundleWithBlocks.Bundle.Request, bundleWithBlocks.Blocks) + bv.promises[idx] = prom + } + + // break now as we know we can't proceed here until this promise is attempted again + break + } + + processed++ + responses = append(responses, bundleWithBlocks.Bundle) + } + + // remove processed promises from the list + remaining := bv.removeProcessedPromises(processed) + + return responses, remaining, nil +} + +func (bv *BatchVerifier) removeProcessedPromises(processed int) int { + count := len(bv.promises) + + if processed == 0 { + return count + } + + if processed 
== len(bv.promises) { + bv.promises = make([]*PromiseWithBlocks, 0) + return 0 + } + + bv.promises = bv.promises[processed:] + + return len(bv.promises) +} + +func (bv *BatchVerifier) syncPromise(request *verifier.VerifierRequest, blockNumbers []uint64) *PromiseWithBlocks { + // simulate a die roll to determine if this is a good batch or not + // 1 in 6 chance of being a bad batch + valid := true + if rand.Intn(6) == 0 { + valid = false + } + + promise := verifier.NewPromiseSync[*verifier.VerifierBundleWithBlocks](func() (*verifier.VerifierBundleWithBlocks, error) { + response := &verifier.VerifierResponse{ + BatchNumber: request.BatchNumber, + BlockNumber: request.BlockNumber, + Valid: valid, + OriginalCounters: request.Counters, + Witness: nil, + ExecutorResponse: nil, + Error: nil, + } + bundle := verifier.NewVerifierBundle(request, response) + return &verifier.VerifierBundleWithBlocks{Blocks: blockNumbers, Bundle: bundle}, nil + }) + + return &PromiseWithBlocks{Blocks: blockNumbers, Promise: promise} +} + +func (bv *BatchVerifier) asyncPromise(request *verifier.VerifierRequest, blockNumbers []uint64) *PromiseWithBlocks { + promise := bv.legacyVerifier.CreateAsyncPromise(request, blockNumbers) + + return &PromiseWithBlocks{Blocks: blockNumbers, Promise: promise} +} diff --git a/zk/stages/stage_sequencer_executor_verify.go b/zk/stages/stage_sequencer_executor_verify.go index 68299035c7d..769ba251fd5 100644 --- a/zk/stages/stage_sequencer_executor_verify.go +++ b/zk/stages/stage_sequencer_executor_verify.go @@ -2,26 +2,13 @@ package stages import ( "context" - "math" - "time" - - "bytes" - "errors" - "sort" - - "fmt" "github.com/gateway-fm/cdk-erigon-lib/kv" "github.com/ledgerwatch/erigon/chain" - "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync" - "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/zk/hermez_db" "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" "github.com/ledgerwatch/erigon/zk/txpool" - "github.com/ledgerwatch/log/v3" ) type SequencerExecutorVerifyCfg struct { @@ -56,278 +43,278 @@ func SpawnSequencerExecutorVerifyStage( cfg SequencerExecutorVerifyCfg, quiet bool, ) error { - logPrefix := s.LogPrefix() - log.Info(fmt.Sprintf("[%s] Starting sequencer verify stage", logPrefix)) - defer log.Info(fmt.Sprintf("[%s] Finished sequencer verify stage", logPrefix)) - - var err error - freshTx := tx == nil - if freshTx { - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - } + // logPrefix := s.LogPrefix() + // log.Info(fmt.Sprintf("[%s] Starting sequencer verify stage", logPrefix)) + // defer log.Info(fmt.Sprintf("[%s] Finished sequencer verify stage", logPrefix)) - hermezDb := hermez_db.NewHermezDb(tx) - hermezDbReader := hermez_db.NewHermezDbReader(tx) + // var err error + // freshTx := tx == nil + // if freshTx { + // tx, err = cfg.db.BeginRw(ctx) + // if err != nil { + // return err + // } + // defer tx.Rollback() + // } - // progress here is at the batch level - progress, err := stages.GetStageProgress(tx, stages.SequenceExecutorVerify) - if err != nil { - return err - } + // hermezDb := hermez_db.NewHermezDb(tx) + // hermezDbReader := hermez_db.NewHermezDbReader(tx) - // progress here is at the block level - executeProgress, err := stages.GetStageProgress(tx, stages.Execution) - if err != nil { - return err - } + // // progress here is at the batch level + // 
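
CheckProgress above drains the queued verifier promises strictly in the order the checks were added and stops at the first one that is not ready or has to be re-queued, so a later valid block is never written to the stream ahead of an earlier pending one. A small self-contained sketch of that ordering rule; the types are illustrative, not the patch's own:

    package main

    import "fmt"

    // result stands in for a finished or unfinished verifier promise; the real
    // code tracks *VerifierBundle promises, but the ordering rule is the same.
    type result struct {
        block uint64
        done  bool
    }

    // drain consumes results strictly in queue order and stops at the first
    // one that is not finished, returning what was consumed and what remains.
    func drain(queue []result) (processed, remaining []result) {
        for i, r := range queue {
            if !r.done {
                return processed, queue[i:]
            }
            processed = append(processed, r)
        }
        return processed, nil
    }

    func main() {
        queue := []result{
            {block: 10, done: true},
            {block: 11, done: false},
            {block: 12, done: true},
        }
        done, rest := drain(queue)
        fmt.Println(len(done), len(rest)) // 1 2 (block 12 waits behind block 11)
    }
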
progress, err := stages.GetStageProgress(tx, stages.SequenceExecutorVerify) + // if err != nil { + // return err + // } - // we need to get the batch number for the latest block, so we can search for new batches to send for - // verification - latestBatch, err := hermezDb.GetBatchNoByL2Block(executeProgress) - if err != nil { - return err - } + // // progress here is at the block level + // executeProgress, err := stages.GetStageProgress(tx, stages.Execution) + // if err != nil { + // return err + // } - isBatchPartial, err := hermezDb.GetIsBatchPartiallyProcessed(latestBatch) - if err != nil { - return err - } - // we could be running in a state with no executors so we need instant response that we are in an - // ok state to save lag in the data stream !!Dragons: there will be no witnesses stored running in - // this mode of operation - canVerify := cfg.verifier.HasExecutorsUnsafe() - - // if batch was stopped intermediate and is not finished - we need to finish it first - // this shouldn't occur since exec stage is before that and should finish the batch - // but just in case something unexpected happens - if isBatchPartial { - log.Error(fmt.Sprintf("[%s] batch %d is not fully processed in stage_execute", logPrefix, latestBatch)) - canVerify = false - } + // // we need to get the batch number for the latest block, so we can search for new batches to send for + // // verification + // latestBatch, err := hermezDb.GetBatchNoByL2Block(executeProgress) + // if err != nil { + // return err + // } - if !canVerify { - if latestBatch == injectedBatchNumber { - return nil - } - - if err = stages.SaveStageProgress(tx, stages.SequenceExecutorVerify, latestBatch); err != nil { - return err - } - if freshTx { - if err = tx.Commit(); err != nil { - return err - } - } - return nil - } + // isBatchPartial, err := hermezDb.GetIsBatchPartiallyProcessed(latestBatch) + // if err != nil { + // return err + // } + // // we could be running in a state with no executors so we need instant response that we are in an + // // ok state to save lag in the data stream !!Dragons: there will be no witnesses stored running in + // // this mode of operation + // canVerify := cfg.verifier.HasExecutorsUnsafe() + + // // if batch was stopped intermediate and is not finished - we need to finish it first + // // this shouldn't occur since exec stage is before that and should finish the batch + // // but just in case something unexpected happens + // if isBatchPartial { + // log.Error(fmt.Sprintf("[%s] batch %d is not fully processed in stage_execute", logPrefix, latestBatch)) + // canVerify = false + // } - // get ordered promises from the verifier - // NB: this call is where the stream write happens (so it will be delayed until this stage is run) - responses, err := cfg.verifier.ProcessResultsSequentiallyUnsafe(tx) - if err != nil { - //TODO: what happen with promises if this request returns here? 
- return err - } + // if !canVerify { + // if latestBatch == injectedBatchNumber { + // return nil + // } - for _, response := range responses { - // ensure that the first response is the next batch based on the current stage progress - // otherwise just return early until we get it - if response.BatchNumber != progress+1 { - if freshTx { - if err = tx.Commit(); err != nil { - return err - } - } - return nil - } - - // now check that we are indeed in a good state to continue - if !response.Valid { - if cfg.cfgZk.Limbo { - log.Info(fmt.Sprintf("[%s] identified an invalid batch, entering limbo", s.LogPrefix()), "batch", response.BatchNumber) - // we have an invalid batch, so we need to notify the txpool that these transactions are spurious - // and need to go into limbo and then trigger a rewind. The rewind will put all TX back into the - // pool, but as it knows about these limbo transactions it will place them into limbo instead - // of queueing them again - - // now we need to figure out the highest block number in the batch - // and grab all the transaction hashes along the way to inform the - // pool of hashes to avoid - blockNumbers, err := hermezDb.GetL2BlockNosByBatch(response.BatchNumber) - if err != nil { - return err - } - if len(blockNumbers) == 0 { - panic("failing to verify a batch without blocks") - } - sort.Slice(blockNumbers, func(i, j int) bool { - return blockNumbers[i] < blockNumbers[j] - }) - - var lowestBlock, highestBlock *types.Block - forkId, err := hermezDb.GetForkId(response.BatchNumber) - if err != nil { - return err - } - - l1InfoTreeMinTimestamps := make(map[uint64]uint64) - if _, err = cfg.verifier.GetWholeBatchStreamBytes(response.BatchNumber, tx, blockNumbers, hermezDbReader, l1InfoTreeMinTimestamps, nil); err != nil { - return err - } - - limboSendersToPreviousTxMap := make(map[string]uint32) - limboStreamBytesBuilderHelper := newLimboStreamBytesBuilderHelper() - - limboDetails := txpool.NewLimboBatchDetails() - limboDetails.Witness = response.Witness - limboDetails.L1InfoTreeMinTimestamps = l1InfoTreeMinTimestamps - limboDetails.BatchNumber = response.BatchNumber - limboDetails.ForkId = forkId - - for _, blockNumber := range blockNumbers { - block, err := rawdb.ReadBlockByNumber(tx, blockNumber) - if err != nil { - return err - } - highestBlock = block - if lowestBlock == nil { - // capture the first block, then we can set the bad block hash in the unwind to terminate the - // stage loop and broadcast the accumulator changes to the txpool before the next stage loop run - lowestBlock = block - } - - for i, transaction := range block.Transactions() { - var b []byte - buffer := bytes.NewBuffer(b) - err = transaction.EncodeRLP(buffer) - if err != nil { - return err - } - - signer := types.MakeSigner(cfg.chainConfig, blockNumber) - sender, err := transaction.Sender(*signer) - if err != nil { - return err - } - senderMapKey := sender.Hex() - - blocksForStreamBytes, transactionsToIncludeByIndex := limboStreamBytesBuilderHelper.append(senderMapKey, blockNumber, i) - streamBytes, err := cfg.verifier.GetWholeBatchStreamBytes(response.BatchNumber, tx, blocksForStreamBytes, hermezDbReader, l1InfoTreeMinTimestamps, transactionsToIncludeByIndex) - if err != nil { - return err - } - - previousTxIndex, ok := limboSendersToPreviousTxMap[senderMapKey] - if !ok { - previousTxIndex = math.MaxUint32 - } - - hash := transaction.Hash() - limboTxCount := limboDetails.AppendTransaction(buffer.Bytes(), streamBytes, hash, sender, previousTxIndex) - 
limboSendersToPreviousTxMap[senderMapKey] = limboTxCount - 1 - - log.Info(fmt.Sprintf("[%s] adding transaction to limbo", s.LogPrefix()), "hash", hash) - } - } - - limboDetails.TimestampLimit = highestBlock.Time() - limboDetails.FirstBlockNumber = lowestBlock.NumberU64() - cfg.txPool.ProcessLimboBatchDetails(limboDetails) - - u.UnwindTo(lowestBlock.NumberU64()-1, lowestBlock.Hash()) - cfg.verifier.CancelAllRequestsUnsafe() - return nil - } else { - // this infinite loop will make the node to print the error once every minute therefore preventing it for creating new blocks - for { - time.Sleep(time.Minute) - log.Error(fmt.Sprintf("[%s] identified an invalid batch with number %d", s.LogPrefix(), response.BatchNumber)) - } - } - } - - // all good so just update the stage progress for now - if err = stages.SaveStageProgress(tx, stages.SequenceExecutorVerify, response.BatchNumber); err != nil { - return err - } - - // we know that if the batch has been marked as OK we can update the datastream progress to match - // as the verifier will have handled writing to the stream - highestBlock, err := hermezDb.GetHighestBlockInBatch(response.BatchNumber) - if err != nil { - return err - } - - if err = stages.SaveStageProgress(tx, stages.DataStream, highestBlock); err != nil { - return err - } - - // store the witness - errWitness := hermezDb.WriteWitness(response.BatchNumber, response.Witness) - if errWitness != nil { - log.Warn("Failed to write witness", "batch", response.BatchNumber, "err", errWitness) - } - - cfg.verifier.MarkTopResponseAsProcessed(response.BatchNumber) - progress = response.BatchNumber - } + // if err = stages.SaveStageProgress(tx, stages.SequenceExecutorVerify, latestBatch); err != nil { + // return err + // } + // if freshTx { + // if err = tx.Commit(); err != nil { + // return err + // } + // } + // return nil + // } - // send off the new batches to the verifier to be processed - for batch := progress + 1; batch <= latestBatch; batch++ { - // we do not need to verify batch 1 as this is the injected batch so just updated progress and move on - if batch == injectedBatchNumber { - if err = stages.SaveStageProgress(tx, stages.SequenceExecutorVerify, injectedBatchNumber); err != nil { - return err - } - } else { - if cfg.verifier.IsRequestAddedUnsafe(batch) { - continue - } - - // we need the state root of the last block in the batch to send to the executor - highestBlock, err := hermezDb.GetHighestBlockInBatch(batch) - if err != nil { - return err - } - if highestBlock == 0 { - // maybe nothing in this batch and we know we don't handle batch 0 (genesis) - continue - } - block, err := rawdb.ReadBlockByNumber(tx, highestBlock) - if err != nil { - return err - } - - counters, found, err := hermezDb.GetBatchCounters(batch) - if err != nil { - return err - } - if !found { - return errors.New("batch counters not found") - } - - forkId, err := hermezDb.GetForkId(batch) - if err != nil { - return err - } - - if forkId == 0 { - return errors.New("the network cannot have a 0 fork id") - } - - cfg.verifier.AddRequestUnsafe(legacy_executor_verifier.NewVerifierRequest(batch, forkId, block.Root(), counters), cfg.cfgZk.SequencerBatchSealTime) - } - } + // // get ordered promises from the verifier + // // NB: this call is where the stream write happens (so it will be delayed until this stage is run) + // responses, err := cfg.verifier.ProcessResultsSequentiallyUnsafe(tx) + // if err != nil { + // //TODO: what happen with promises if this request returns here? 
+ // return err + // } - if freshTx { - if err = tx.Commit(); err != nil { - return err - } - } + // for _, response := range responses { + // // ensure that the first response is the next batch based on the current stage progress + // // otherwise just return early until we get it + // if response.BatchNumber != progress+1 { + // if freshTx { + // if err = tx.Commit(); err != nil { + // return err + // } + // } + // return nil + // } + + // // now check that we are indeed in a good state to continue + // if !response.Valid { + // if cfg.cfgZk.Limbo { + // log.Info(fmt.Sprintf("[%s] identified an invalid batch, entering limbo", s.LogPrefix()), "batch", response.BatchNumber) + // // we have an invalid batch, so we need to notify the txpool that these transactions are spurious + // // and need to go into limbo and then trigger a rewind. The rewind will put all TX back into the + // // pool, but as it knows about these limbo transactions it will place them into limbo instead + // // of queueing them again + + // // now we need to figure out the highest block number in the batch + // // and grab all the transaction hashes along the way to inform the + // // pool of hashes to avoid + // blockNumbers, err := hermezDb.GetL2BlockNosByBatch(response.BatchNumber) + // if err != nil { + // return err + // } + // if len(blockNumbers) == 0 { + // panic("failing to verify a batch without blocks") + // } + // sort.Slice(blockNumbers, func(i, j int) bool { + // return blockNumbers[i] < blockNumbers[j] + // }) + + // var lowestBlock, highestBlock *types.Block + // forkId, err := hermezDb.GetForkId(response.BatchNumber) + // if err != nil { + // return err + // } + + // l1InfoTreeMinTimestamps := make(map[uint64]uint64) + // if _, err = cfg.verifier.GetWholeBatchStreamBytes(response.BatchNumber, tx, blockNumbers, hermezDbReader, l1InfoTreeMinTimestamps, nil); err != nil { + // return err + // } + + // limboSendersToPreviousTxMap := make(map[string]uint32) + // limboStreamBytesBuilderHelper := newLimboStreamBytesBuilderHelper() + + // limboDetails := txpool.NewLimboBatchDetails() + // limboDetails.Witness = response.Witness + // limboDetails.L1InfoTreeMinTimestamps = l1InfoTreeMinTimestamps + // limboDetails.BatchNumber = response.BatchNumber + // limboDetails.ForkId = forkId + + // for _, blockNumber := range blockNumbers { + // block, err := rawdb.ReadBlockByNumber(tx, blockNumber) + // if err != nil { + // return err + // } + // highestBlock = block + // if lowestBlock == nil { + // // capture the first block, then we can set the bad block hash in the unwind to terminate the + // // stage loop and broadcast the accumulator changes to the txpool before the next stage loop run + // lowestBlock = block + // } + + // for i, transaction := range block.Transactions() { + // var b []byte + // buffer := bytes.NewBuffer(b) + // err = transaction.EncodeRLP(buffer) + // if err != nil { + // return err + // } + + // signer := types.MakeSigner(cfg.chainConfig, blockNumber) + // sender, err := transaction.Sender(*signer) + // if err != nil { + // return err + // } + // senderMapKey := sender.Hex() + + // blocksForStreamBytes, transactionsToIncludeByIndex := limboStreamBytesBuilderHelper.append(senderMapKey, blockNumber, i) + // streamBytes, err := cfg.verifier.GetWholeBatchStreamBytes(response.BatchNumber, tx, blocksForStreamBytes, hermezDbReader, l1InfoTreeMinTimestamps, transactionsToIncludeByIndex) + // if err != nil { + // return err + // } + + // previousTxIndex, ok := limboSendersToPreviousTxMap[senderMapKey] + 
// if !ok { + // previousTxIndex = math.MaxUint32 + // } + + // hash := transaction.Hash() + // limboTxCount := limboDetails.AppendTransaction(buffer.Bytes(), streamBytes, hash, sender, previousTxIndex) + // limboSendersToPreviousTxMap[senderMapKey] = limboTxCount - 1 + + // log.Info(fmt.Sprintf("[%s] adding transaction to limbo", s.LogPrefix()), "hash", hash) + // } + // } + + // limboDetails.TimestampLimit = highestBlock.Time() + // limboDetails.FirstBlockNumber = lowestBlock.NumberU64() + // cfg.txPool.ProcessLimboBatchDetails(limboDetails) + + // u.UnwindTo(lowestBlock.NumberU64()-1, lowestBlock.Hash()) + // cfg.verifier.CancelAllRequestsUnsafe() + // return nil + // } else { + // // this infinite loop will make the node to print the error once every minute therefore preventing it for creating new blocks + // for { + // time.Sleep(time.Minute) + // log.Error(fmt.Sprintf("[%s] identified an invalid batch with number %d", s.LogPrefix(), response.BatchNumber)) + // } + // } + // } + + // // all good so just update the stage progress for now + // if err = stages.SaveStageProgress(tx, stages.SequenceExecutorVerify, response.BatchNumber); err != nil { + // return err + // } + + // // we know that if the batch has been marked as OK we can update the datastream progress to match + // // as the verifier will have handled writing to the stream + // highestBlock, err := hermezDb.GetHighestBlockInBatch(response.BatchNumber) + // if err != nil { + // return err + // } + + // if err = stages.SaveStageProgress(tx, stages.DataStream, highestBlock); err != nil { + // return err + // } + + // // store the witness + // errWitness := hermezDb.WriteWitness(response.BatchNumber, response.Witness) + // if errWitness != nil { + // log.Warn("Failed to write witness", "batch", response.BatchNumber, "err", errWitness) + // } + + // cfg.verifier.MarkTopResponseAsProcessed(response.BatchNumber) + // progress = response.BatchNumber + // } + + // // send off the new batches to the verifier to be processed + // for batch := progress + 1; batch <= latestBatch; batch++ { + // // we do not need to verify batch 1 as this is the injected batch so just updated progress and move on + // if batch == injectedBatchNumber { + // if err = stages.SaveStageProgress(tx, stages.SequenceExecutorVerify, injectedBatchNumber); err != nil { + // return err + // } + // } else { + // if cfg.verifier.IsRequestAddedUnsafe(batch) { + // continue + // } + + // // we need the state root of the last block in the batch to send to the executor + // highestBlock, err := hermezDb.GetHighestBlockInBatch(batch) + // if err != nil { + // return err + // } + // if highestBlock == 0 { + // // maybe nothing in this batch and we know we don't handle batch 0 (genesis) + // continue + // } + // block, err := rawdb.ReadBlockByNumber(tx, highestBlock) + // if err != nil { + // return err + // } + + // counters, found, err := hermezDb.GetBatchCounters(batch) + // if err != nil { + // return err + // } + // if !found { + // return errors.New("batch counters not found") + // } + + // forkId, err := hermezDb.GetForkId(batch) + // if err != nil { + // return err + // } + + // if forkId == 0 { + // return errors.New("the network cannot have a 0 fork id") + // } + + // cfg.verifier.AddRequestUnsafe(legacy_executor_verifier.NewVerifierRequest(batch, forkId, block.Root(), counters), cfg.cfgZk.SequencerBatchSealTime) + // } + // } + + // if freshTx { + // if err = tx.Commit(); err != nil { + // return err + // } + // } return nil } diff --git a/zk/stages/stages.go 
b/zk/stages/stages.go index a422ea52888..d94be29059d 100644 --- a/zk/stages/stages.go +++ b/zk/stages/stages.go @@ -21,7 +21,7 @@ func SequencerZkStages( exec SequenceBlockCfg, hashState stages.HashStateCfg, zkInterHashesCfg ZkInterHashesCfg, - sequencerExecutorVerifyCfg SequencerExecutorVerifyCfg, + // sequencerExecutorVerifyCfg SequencerExecutorVerifyCfg, history stages.HistoryCfg, logIndex stages.LogIndexCfg, callTraces stages.CallTracesCfg, @@ -128,19 +128,19 @@ func SequencerZkStages( return PruneSequencerInterhashesStage(p, tx, sequencerInterhashesCfg, ctx) }, }, - { - ID: stages2.SequenceExecutorVerify, - Description: "Sequencer, check batch with legacy executor", - Forward: func(firstCycle bool, badBlockUnwind bool, s *stages.StageState, u stages.Unwinder, tx kv.RwTx, quiet bool) error { - return SpawnSequencerExecutorVerifyStage(s, u, tx, ctx, sequencerExecutorVerifyCfg, quiet) - }, - Unwind: func(firstCycle bool, u *stages.UnwindState, s *stages.StageState, tx kv.RwTx) error { - return UnwindSequencerExecutorVerifyStage(u, s, tx, ctx, sequencerExecutorVerifyCfg) - }, - Prune: func(firstCycle bool, p *stages.PruneState, tx kv.RwTx) error { - return PruneSequencerExecutorVerifyStage(p, tx, sequencerExecutorVerifyCfg, ctx) - }, - }, + // { + // ID: stages2.SequenceExecutorVerify, + // Description: "Sequencer, check batch with legacy executor", + // Forward: func(firstCycle bool, badBlockUnwind bool, s *stages.StageState, u stages.Unwinder, tx kv.RwTx, quiet bool) error { + // return SpawnSequencerExecutorVerifyStage(s, u, tx, ctx, sequencerExecutorVerifyCfg, quiet) + // }, + // Unwind: func(firstCycle bool, u *stages.UnwindState, s *stages.StageState, tx kv.RwTx) error { + // return UnwindSequencerExecutorVerifyStage(u, s, tx, ctx, sequencerExecutorVerifyCfg) + // }, + // Prune: func(firstCycle bool, p *stages.PruneState, tx kv.RwTx) error { + // return PruneSequencerExecutorVerifyStage(p, tx, sequencerExecutorVerifyCfg, ctx) + // }, + // }, { ID: stages2.HashState, Description: "Hash the key in the state", diff --git a/zk/txpool/pool_zk_limbo_processor.go b/zk/txpool/pool_zk_limbo_processor.go index d38b88ee2ee..667d63c51eb 100644 --- a/zk/txpool/pool_zk_limbo_processor.go +++ b/zk/txpool/pool_zk_limbo_processor.go @@ -2,16 +2,12 @@ package txpool import ( "context" - "math" "time" "github.com/gateway-fm/cdk-erigon-lib/kv" "github.com/ledgerwatch/erigon/chain" - "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" - "github.com/ledgerwatch/log/v3" - "github.com/status-im/keycard-go/hexutils" ) type LimboSubPoolProcessor struct { @@ -51,55 +47,54 @@ func (_this *LimboSubPoolProcessor) StartWork() { } func (_this *LimboSubPoolProcessor) run() { - log.Info("[Limbo pool processor] Starting") - defer log.Info("[Limbo pool processor] End") - - ctx := context.Background() - limboBatchDetails := _this.txPool.GetLimboDetailsCloned() - - size := len(limboBatchDetails) - if size == 0 { - return - } - - for _, limboBatch := range limboBatchDetails { - for _, limboTx := range limboBatch.Transactions { - if !limboTx.hasRoot() { - return - } - } - } - - tx, err := _this.db.BeginRo(ctx) - if err != nil { - return - } - defer tx.Rollback() - - // we just need some counter variable with large used values in order verify not to complain - batchCounters := vm.NewBatchCounterCollector(256, 1, _this.zkCfg.VirtualCountersSmtReduction, true, nil) - unlimitedCounters := batchCounters.NewCounters().UsedAsMap() - for 
k := range unlimitedCounters { - unlimitedCounters[k] = math.MaxInt32 - } - - invalidTxs := []*string{} - - for _, limboBatch := range limboBatchDetails { - for _, limboTx := range limboBatch.Transactions { - request := legacy_executor_verifier.NewVerifierRequest(limboBatch.BatchNumber, limboBatch.ForkId, limboTx.Root, unlimitedCounters) - err := _this.verifier.VerifySync(tx, request, limboBatch.Witness, limboTx.StreamBytes, limboBatch.TimestampLimit, limboBatch.FirstBlockNumber, limboBatch.L1InfoTreeMinTimestamps) - if err != nil { - idHash := hexutils.BytesToHex(limboTx.Hash[:]) - invalidTxs = append(invalidTxs, &idHash) - log.Info("[Limbo pool processor]", "invalid tx", limboTx.Hash, "err", err) - continue - } - - log.Info("[Limbo pool processor]", "valid tx", limboTx.Hash) - } - } - - _this.txPool.MarkProcessedLimboDetails(size, invalidTxs) - + // log.Info("[Limbo pool processor] Starting") + // defer log.Info("[Limbo pool processor] End") + + // ctx := context.Background() + // limboBatchDetails := _this.txPool.GetLimboDetailsCloned() + + // size := len(limboBatchDetails) + // if size == 0 { + // return + // } + + // for _, limboBatch := range limboBatchDetails { + // for _, limboTx := range limboBatch.Transactions { + // if !limboTx.hasRoot() { + // return + // } + // } + // } + + // tx, err := _this.db.BeginRo(ctx) + // if err != nil { + // return + // } + // defer tx.Rollback() + + // // we just need some counter variable with large used values in order verify not to complain + // batchCounters := vm.NewBatchCounterCollector(256, 1, _this.zkCfg.VirtualCountersSmtReduction, true, nil) + // unlimitedCounters := batchCounters.NewCounters().UsedAsMap() + // for k := range unlimitedCounters { + // unlimitedCounters[k] = math.MaxInt32 + // } + + // invalidTxs := []*string{} + + // for _, limboBatch := range limboBatchDetails { + // for _, limboTx := range limboBatch.Transactions { + // request := legacy_executor_verifier.NewVerifierRequest(limboBatch.BatchNumber, limboBatch.ForkId, limboTx.Root, unlimitedCounters) + // err := _this.verifier.VerifySync(tx, request, limboBatch.Witness, limboTx.StreamBytes, limboBatch.TimestampLimit, limboBatch.FirstBlockNumber, limboBatch.L1InfoTreeMinTimestamps) + // if err != nil { + // idHash := hexutils.BytesToHex(limboTx.Hash[:]) + // invalidTxs = append(invalidTxs, &idHash) + // log.Info("[Limbo pool processor]", "invalid tx", limboTx.Hash, "err", err) + // continue + // } + + // log.Info("[Limbo pool processor]", "valid tx", limboTx.Hash) + // } + // } + + // _this.txPool.MarkProcessedLimboDetails(size, invalidTxs) } diff --git a/zk/utils/zk_tables.go b/zk/utils/zk_tables.go new file mode 100644 index 00000000000..6dd1b4ffbf9 --- /dev/null +++ b/zk/utils/zk_tables.go @@ -0,0 +1,30 @@ +package utils + +import ( + "github.com/gateway-fm/cdk-erigon-lib/kv" + "github.com/gateway-fm/cdk-erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon/smt/pkg/db" + "github.com/ledgerwatch/erigon/zk/hermez_db" +) + +func PopulateMemoryMutationTables(batch *memdb.MemoryMutation) error { + for _, table := range hermez_db.HermezDbTables { + if err := batch.CreateBucket(table); err != nil { + return err + } + } + + for _, table := range db.HermezSmtTables { + if err := batch.CreateBucket(table); err != nil { + return err + } + } + + for _, table := range kv.ChaindataTables { + if err := batch.CreateBucket(table); err != nil { + return err + } + } + + return nil +} diff --git a/zk/witness/witness.go b/zk/witness/witness.go index 6afc14f9a2a..70135ce4887 100644 --- 
a/zk/witness/witness.go +++ b/zk/witness/witness.go @@ -189,7 +189,7 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, blocks []*eri batch := memdb.NewMemoryBatch(tx, g.dirs.Tmp) defer batch.Rollback() - if err = populateDbTables(batch); err != nil { + if err = zkUtils.PopulateMemoryMutationTables(batch); err != nil { return nil, err } @@ -348,36 +348,3 @@ func getWitnessBytes(witness *trie.Witness, debug bool) ([]byte, error) { } return buf.Bytes(), nil } - -func populateDbTables(batch *memdb.MemoryMutation) error { - tables := []string{ - db2.TableSmt, - db2.TableAccountValues, - db2.TableMetadata, - db2.TableHashKey, - db2.TableStats, - hermez_db.TX_PRICE_PERCENTAGE, - hermez_db.BLOCKBATCHES, - hermez_db.BATCH_BLOCKS, - hermez_db.BLOCK_GLOBAL_EXIT_ROOTS, - hermez_db.GLOBAL_EXIT_ROOTS_BATCHES, - hermez_db.STATE_ROOTS, - hermez_db.BATCH_WITNESSES, - hermez_db.L1_BLOCK_HASHES, - hermez_db.BLOCK_L1_BLOCK_HASHES, - hermez_db.INTERMEDIATE_TX_STATEROOTS, - hermez_db.REUSED_L1_INFO_TREE_INDEX, - hermez_db.LATEST_USED_GER, - hermez_db.L1_INFO_TREE_UPDATES_BY_GER, - hermez_db.SMT_DEPTHS, - hermez_db.INVALID_BATCHES, - } - - for _, t := range tables { - if err := batch.CreateBucket(t); err != nil { - return err - } - } - - return nil -} From 339d766355235509d3ac9b12d023e6f9e52dcdc3 Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Mon, 29 Jul 2024 08:49:04 +0000 Subject: [PATCH 02/33] manual "cherry-pick" a2b17f7ceff0ec41283d4f8b404af253855656dc --- zk/stages/stage_sequence_execute.go | 4 +-- .../stage_sequence_execute_data_stream.go | 2 +- zk/stages/stage_sequence_execute_utils.go | 25 +++++++++++++++---- 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index af156e9bdbf..649fbbb9e28 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -599,7 +599,7 @@ func SpawnSequencingStage( batchVerifier.AddNewCheck(thisBatch, blockNumber, block.Root(), batchCounters.CombineCollectorsNoChanges().UsedAsMap(), builtBlocks) // check for new responses from the verifier - needsUnwind, _, err := checkStreamWriterForUpdates(logPrefix, sdb.tx, streamWriter, forkId, u) + needsUnwind, _, err := updateStreamAndCheckRollback(logPrefix, sdb, streamWriter, forkId, thisBatch, u) if err != nil { return err } @@ -621,7 +621,7 @@ func SpawnSequencingStage( } for { - needsUnwind, remaining, err := checkStreamWriterForUpdates(logPrefix, sdb.tx, streamWriter, forkId, u) + needsUnwind, remaining, err := updateStreamAndCheckRollback(logPrefix, sdb, streamWriter, forkId, thisBatch, u) if err != nil { return err } diff --git a/zk/stages/stage_sequence_execute_data_stream.go b/zk/stages/stage_sequence_execute_data_stream.go index 321eda583a1..c15a1af7a39 100644 --- a/zk/stages/stage_sequence_execute_data_stream.go +++ b/zk/stages/stage_sequence_execute_data_stream.go @@ -26,7 +26,7 @@ type BlockStatus struct { Error error } -func (sbc *SequencerBatchStreamWriter) CheckAndCommitUpdates(forkId uint64) ([]BlockStatus, int, error) { +func (sbc *SequencerBatchStreamWriter) CommitNewUpdates(forkId uint64) ([]BlockStatus, int, error) { var written []BlockStatus responses, remaining, err := sbc.batchVerifier.CheckProgress() if err != nil { diff --git a/zk/stages/stage_sequence_execute_utils.go b/zk/stages/stage_sequence_execute_utils.go index d4f68491085..8fb68a7697b 100644 --- a/zk/stages/stage_sequence_execute_utils.go +++ b/zk/stages/stage_sequence_execute_utils.go @@ -497,29 +497,44 @@ func 
(bdc *BlockDataChecker) AddTransactionData(txL2Data []byte) bool { return false } -func checkStreamWriterForUpdates( +func updateStreamAndCheckRollback( logPrefix string, - tx kv.Tx, + sdb *stageDb, streamWriter *SequencerBatchStreamWriter, + batchNumber uint64, forkId uint64, u stagedsync.Unwinder, ) (bool, int, error) { - committed, remaining, err := streamWriter.CheckAndCommitUpdates(forkId) + committed, remaining, err := streamWriter.CommitNewUpdates(forkId) if err != nil { return false, remaining, err } for _, commit := range committed { if !commit.Valid { + // we are about to unwind so place the marker ready for this to happen + if err = sdb.hermezDb.WriteJustUnwound(batchNumber); err != nil { + return false, 0, err + } + // capture the fork otherwise when the loop starts again to close + // off the batch it will detect it as a fork upgrade + if err = sdb.hermezDb.WriteForkId(batchNumber, forkId); err != nil { + return false, 0, err + } + unwindTo := commit.BlockNumber - 1 // for unwind we supply the block number X-1 of the block we want to remove, but supply the hash of the block // causing the unwind. - unwindHeader := rawdb.ReadHeaderByNumber(tx, commit.BlockNumber) + unwindHeader := rawdb.ReadHeaderByNumber(sdb.tx, commit.BlockNumber) if unwindHeader == nil { return false, 0, fmt.Errorf("could not find header for block %d", commit.BlockNumber) } - log.Warn(fmt.Sprintf("[%s] Block is invalid - rolling back to block", logPrefix), "badBlock", commit.BlockNumber, "unwindTo", unwindTo, "root", unwindHeader.Root) + if err = sdb.tx.Commit(); err != nil { + return false, 0, err + } + + log.Warn(fmt.Sprintf("[%s] Block is invalid - rolling back", logPrefix), "badBlock", commit.BlockNumber, "unwindTo", unwindTo, "root", unwindHeader.Root) u.UnwindTo(unwindTo, unwindHeader.Hash()) return true, 0, nil From 93a420730a22ef9a1e73ba1ee892fb3fde8b40e1 Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Mon, 29 Jul 2024 09:22:08 +0000 Subject: [PATCH 03/33] manual "cherry-pick" a2b17f7ceff0ec41283d4f8b404af253855656dc --- zk/stages/stage_sequence_execute.go | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index 649fbbb9e28..40158283a17 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -599,29 +599,17 @@ func SpawnSequencingStage( batchVerifier.AddNewCheck(thisBatch, blockNumber, block.Root(), batchCounters.CombineCollectorsNoChanges().UsedAsMap(), builtBlocks) // check for new responses from the verifier - needsUnwind, _, err := updateStreamAndCheckRollback(logPrefix, sdb, streamWriter, forkId, thisBatch, u) + needsUnwind, _, err := updateStreamAndCheckRollback(logPrefix, sdb, streamWriter, thisBatch, forkId, u) if err != nil { return err } if needsUnwind { - if err = sdb.hermezDb.WriteJustUnwound(thisBatch); err != nil { - return err - } - // capture the fork otherwise when the loop starts again to close - // off the batch it will detect it as a fork upgrade - if err = sdb.hermezDb.WriteForkId(thisBatch, forkId); err != nil { - return err - } - // re-commit the tx so that we can capture this the next time the stage starts - if err = rootTx.Commit(); err != nil { - return err - } return nil } } for { - needsUnwind, remaining, err := updateStreamAndCheckRollback(logPrefix, sdb, streamWriter, forkId, thisBatch, u) + needsUnwind, remaining, err := updateStreamAndCheckRollback(logPrefix, sdb, streamWriter, thisBatch, forkId, u) if err != nil { return 
err } From 4f7e4b8f10df7e9014504d1e605df73825cf73f0 Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Mon, 29 Jul 2024 14:39:17 +0000 Subject: [PATCH 04/33] prepare sequencer for new limbo recovery --- zk/l1_data/l1_decoder.go | 9 +- .../{sequencer.go => sequencer_env.go} | 0 zk/stages/stage_sequence_execute.go | 274 +++--------------- zk/stages/stage_sequence_execute_batch.go | 223 ++++++++++++++ zk/stages/stage_sequence_execute_blocks.go | 65 ++++- .../stage_sequence_execute_data_stream.go | 30 ++ .../stage_sequence_execute_injected_batch.go | 35 ++- .../stage_sequence_execute_l1recovery.go | 148 ++++++++++ .../stage_sequence_execute_transactions.go | 54 ---- zk/stages/stage_sequence_execute_utils.go | 210 +------------- zk/stages/stage_sequence_execute_utils_db.go | 63 ++++ 11 files changed, 604 insertions(+), 507 deletions(-) rename zk/sequencer/{sequencer.go => sequencer_env.go} (100%) create mode 100644 zk/stages/stage_sequence_execute_batch.go create mode 100644 zk/stages/stage_sequence_execute_l1recovery.go create mode 100644 zk/stages/stage_sequence_execute_utils_db.go diff --git a/zk/l1_data/l1_decoder.go b/zk/l1_data/l1_decoder.go index a9fcaf378ba..661a5e11f5e 100644 --- a/zk/l1_data/l1_decoder.go +++ b/zk/l1_data/l1_decoder.go @@ -6,15 +6,16 @@ import ( "fmt" "strings" + "encoding/binary" + "github.com/gateway-fm/cdk-erigon-lib/common" + "github.com/gateway-fm/cdk-erigon-lib/common/length" "github.com/ledgerwatch/erigon/accounts/abi" "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/zk/contracts" "github.com/ledgerwatch/erigon/zk/da" "github.com/ledgerwatch/erigon/zk/hermez_db" zktx "github.com/ledgerwatch/erigon/zk/tx" - "github.com/gateway-fm/cdk-erigon-lib/common/length" - "encoding/binary" ) type RollupBaseEtrogBatchData struct { @@ -168,8 +169,8 @@ type DecodedL1Data struct { LimitTimestamp uint64 } -func BreakDownL1DataByBatch(batchNo uint64, forkId uint64, reader *hermez_db.HermezDbReader) (DecodedL1Data, error) { - decoded := DecodedL1Data{} +func BreakDownL1DataByBatch(batchNo uint64, forkId uint64, reader *hermez_db.HermezDbReader) (*DecodedL1Data, error) { + decoded := &DecodedL1Data{} // we expect that the batch we're going to load in next should be in the db already because of the l1 block sync // stage, if it is not there we need to panic as we're in a bad state batchData, err := reader.GetL1BatchData(batchNo) diff --git a/zk/sequencer/sequencer.go b/zk/sequencer/sequencer_env.go similarity index 100% rename from zk/sequencer/sequencer.go rename to zk/sequencer/sequencer_env.go diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index 40158283a17..5bc07e7bed0 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -15,17 +15,12 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/zk" - "github.com/ledgerwatch/erigon/zk/l1_data" - zktx "github.com/ledgerwatch/erigon/zk/tx" "github.com/ledgerwatch/erigon/zk/utils" ) -var SpecialZeroIndexHash = common.HexToHash("0x27AE5BA08D7291C96C8CBDDCC148BF48A6D68C7974B94356F53754EF6171D757") - func SpawnSequencingStage( s *stagedsync.StageState, u stagedsync.Unwinder, @@ -38,29 +33,22 @@ func SpawnSequencingStage( log.Info(fmt.Sprintf("[%s] Starting sequencing stage", 
logPrefix)) defer log.Info(fmt.Sprintf("[%s] Finished sequencing stage", logPrefix)) - freshTx := rootTx == nil - if freshTx { - rootTx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer rootTx.Rollback() + sdb, err := newStageDb(ctx, rootTx, cfg.db) + if err != nil { + return err } + defer sdb.tx.Rollback() - l1Recovery := cfg.zk.L1SyncStartBlock > 0 - - executionAt, err := s.ExecutionAt(rootTx) + executionAt, err := s.ExecutionAt(sdb.tx) if err != nil { return err } - lastBatch, err := stages.GetStageProgress(rootTx, stages.HighestSeenBatchNumber) + lastBatch, err := stages.GetStageProgress(sdb.tx, stages.HighestSeenBatchNumber) if err != nil { return err } - sdb := newStageDb(rootTx) - isLastBatchPariallyProcessed, err := sdb.hermezDb.GetIsBatchPartiallyProcessed(lastBatch) if err != nil { return err @@ -71,65 +59,31 @@ func SpawnSequencingStage( return err } - getHeader := func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(sdb.tx, hash, number) } - hasExecutorForThisBatch := !isLastBatchPariallyProcessed && cfg.zk.HasExecutors() - - // handle case where batch wasn't closed properly - // close it before starting a new one - // this occurs when sequencer was switched from syncer or sequencer datastream files were deleted - // and datastream was regenerated - isLastEntryBatchEnd, err := cfg.datastreamServer.IsLastEntryBatchEnd() - if err != nil { - return err - } + l1Recovery := cfg.zk.L1SyncStartBlock > 0 // injected batch if executionAt == 0 { - // set the block height for the fork we're running at to ensure contract interactions are correct - if err = utils.RecoverySetBlockConfigForks(1, forkId, cfg.chainConfig, logPrefix); err != nil { + if err = processInjectedInitialBatch(ctx, cfg, s, sdb, forkId, l1Recovery); err != nil { return err } - header, parentBlock, err := prepareHeader(rootTx, executionAt, math.MaxUint64, math.MaxUint64, forkId, cfg.zk.AddressSequencer) - if err != nil { + if err = cfg.datastreamServer.WriteWholeBatchToStream(logPrefix, sdb.tx, sdb.hermezDb.HermezDbReader, lastBatch, injectedBatchNumber); err != nil { return err } - getHashFn := core.GetHashFn(header, getHeader) - blockContext := core.NewEVMBlockContext(header, getHashFn, cfg.engine, &cfg.zk.AddressSequencer, parentBlock.ExcessDataGas()) - - if err = processInjectedInitialBatch(ctx, cfg, s, sdb, forkId, header, parentBlock, &blockContext, l1Recovery); err != nil { - return err - } - - if err = cfg.datastreamServer.WriteWholeBatchToStream(logPrefix, rootTx, sdb.hermezDb.HermezDbReader, lastBatch, injectedBatchNumber); err != nil { + if err = sdb.tx.Commit(); err != nil { return err } - if freshTx { - if err = rootTx.Commit(); err != nil { - return err - } - } - return nil } - if !isLastBatchPariallyProcessed && !isLastEntryBatchEnd { - log.Warn(fmt.Sprintf("[%s] Last batch %d was not closed properly, closing it now...", logPrefix, lastBatch)) - ler, err := utils.GetBatchLocalExitRootFromSCStorage(lastBatch, sdb.hermezDb.HermezDbReader, rootTx) - if err != nil { - return err - } - - lastBlock, err := rawdb.ReadBlockByNumber(sdb.tx, executionAt) - if err != nil { - return err - } - root := lastBlock.Root() - if err = cfg.datastreamServer.WriteBatchEnd(sdb.hermezDb, lastBatch, lastBatch-1, &root, &ler); err != nil { - return err - } + // handle case where batch wasn't closed properly + // close it before starting a new one + // this occurs when sequencer was switched from syncer or sequencer datastream files were deleted + // and datastream was regenerated + if err 
= finalizeLastBatchInDatastreamIfNotFinalized(logPrefix, sdb, cfg.datastreamServer, lastBatch, executionAt); err != nil { + return err } if err := utils.UpdateZkEVMBlockCfg(cfg.chainConfig, sdb.hermezDb, logPrefix); err != nil { @@ -139,87 +93,31 @@ func SpawnSequencingStage( var header *types.Header var parentBlock *types.Block - var decodedBlock zktx.DecodedBatchL2Data - var deltaTimestamp uint64 = math.MaxUint64 - var blockTransactions []types.Transaction - var l1EffectiveGases, effectiveGases []uint8 + var blockState = newBlockState(l1Recovery) + + thisBatch := prepareBatchNumber(lastBatch, isLastBatchPariallyProcessed) + hasAnyTransactionsInThisBatch := false batchTicker := time.NewTicker(cfg.zk.SequencerBatchSealTime) defer batchTicker.Stop() nonEmptyBatchTimer := time.NewTicker(cfg.zk.SequencerNonEmptyBatchSealTime) defer nonEmptyBatchTimer.Stop() - hasAnyTransactionsInThisBatch := false - - thisBatch := lastBatch - // if last batch finished - start a new one - if !isLastBatchPariallyProcessed { - thisBatch++ - } - - var intermediateUsedCounters *vm.Counters - if isLastBatchPariallyProcessed { - intermediateCountersMap, found, err := sdb.hermezDb.GetBatchCounters(lastBatch) - if err != nil { - return err - } - if !found { - return fmt.Errorf("intermediate counters not found for batch %d", lastBatch) - } - - intermediateUsedCounters = vm.NewCountersFromUsedMap(intermediateCountersMap) - } - - batchCounters := vm.NewBatchCounterCollector(sdb.smt.GetDepth(), uint16(forkId), cfg.zk.VirtualCountersSmtReduction, cfg.zk.ShouldCountersBeUnlimited(l1Recovery), intermediateUsedCounters) - // check if we just unwound from a bad executor response and if we did just close the batch here - instantClose, err := sdb.hermezDb.GetJustUnwound(thisBatch) + batchCounters, err := prepareBatchCounters(&cfg, sdb, thisBatch, forkId, isLastBatchPariallyProcessed, l1Recovery) if err != nil { return err } - if instantClose { - if err = sdb.hermezDb.DeleteJustUnwound(thisBatch); err != nil { - return err - } - - // lets first check if we actually wrote any blocks in this batch - blocks, err := sdb.hermezDb.GetL2BlockNosByBatch(thisBatch) - if err != nil { - return err - } - // only close this batch down if we actually made any progress in it, otherwise - // just continue processing as normal and recreate the batch from scratch - if len(blocks) > 0 { - if err = runBatchLastSteps(logPrefix, cfg.datastreamServer, sdb, thisBatch, blocks[len(blocks)-1], batchCounters); err != nil { - return err - } - if err = stages.SaveStageProgress(rootTx, stages.HighestSeenBatchNumber, thisBatch); err != nil { - return err - } - if err = sdb.hermezDb.WriteForkId(thisBatch, forkId); err != nil { - return err - } - - if freshTx { - if err = rootTx.Commit(); err != nil { - return err - } - } - - return nil - } + // check if we just unwound from a bad executor response and if we did just close the batch here + handled, err := doInstantCloseIfNeeded(logPrefix, &cfg, sdb, thisBatch, forkId, batchCounters) + if err != nil || handled { + return err // err here could be nil as well } runLoopBlocks := true lastStartedBn := executionAt - 1 yielded := mapset.NewSet[[32]byte]() - nextBatchData := l1_data.DecodedL1Data{ - Coinbase: cfg.zk.AddressSequencer, - IsWorkRemaining: true, - } - - decodedBlocksSize := uint64(0) limboHeaderTimestamp, limboTxHash := cfg.txPool.GetLimboTxHash(thisBatch) limboRecovery := limboTxHash != nil isAnyRecovery := l1Recovery || limboRecovery @@ -237,68 +135,18 @@ func SpawnSequencingStage( } // let's check if we 
have any L1 data to recover - nextBatchData, err = l1_data.BreakDownL1DataByBatch(thisBatch, forkId, sdb.hermezDb.HermezDbReader) - if err != nil { + if err = blockState.l1RecoveryData.loadNextBatchData(sdb, thisBatch, forkId); err != nil { return err } - - decodedBlocksSize = uint64(len(nextBatchData.DecodedData)) - if decodedBlocksSize == 0 { + if blockState.l1RecoveryData.hasAnyDecodedBlocks() { log.Info(fmt.Sprintf("[%s] L1 recovery has completed!", logPrefix), "batch", thisBatch) time.Sleep(1 * time.Second) return nil } - // now look up the index associated with this info root - var infoTreeIndex uint64 - if nextBatchData.L1InfoRoot == SpecialZeroIndexHash { - infoTreeIndex = 0 - } else { - found := false - infoTreeIndex, found, err = sdb.hermezDb.GetL1InfoTreeIndexByRoot(nextBatchData.L1InfoRoot) - if err != nil { - return err - } - if !found { - return fmt.Errorf("could not find L1 info tree index for root %s", nextBatchData.L1InfoRoot.String()) - } - } - - // now let's detect a bad batch and skip it if we have to - currentBlock, err := rawdb.ReadBlockByNumber(sdb.tx, executionAt) - if err != nil { + if handled, err := doCheckForBadBatch(logPrefix, sdb, blockState.l1RecoveryData, executionAt, thisBatch, forkId); err != nil || handled { return err } - badBatch, err := checkForBadBatch(thisBatch, sdb.hermezDb, currentBlock.Time(), infoTreeIndex, nextBatchData.LimitTimestamp, nextBatchData.DecodedData) - if err != nil { - return err - } - - if badBatch { - log.Info(fmt.Sprintf("[%s] Skipping bad batch %d...", logPrefix, thisBatch)) - // store the fact that this batch was invalid during recovery - will be used for the stream later - if err = sdb.hermezDb.WriteInvalidBatch(thisBatch); err != nil { - return err - } - if err = sdb.hermezDb.WriteBatchCounters(thisBatch, map[string]int{}); err != nil { - return err - } - if err = sdb.hermezDb.DeleteIsBatchPartiallyProcessed(thisBatch); err != nil { - return err - } - if err = stages.SaveStageProgress(rootTx, stages.HighestSeenBatchNumber, thisBatch); err != nil { - return err - } - if err = sdb.hermezDb.WriteForkId(thisBatch, forkId); err != nil { - return err - } - if freshTx { - if err = rootTx.Commit(); err != nil { - return err - } - } - return nil - } } if !isLastBatchPariallyProcessed { @@ -307,6 +155,7 @@ func SpawnSequencingStage( log.Info(fmt.Sprintf("[%s] Continuing unfinished batch %d from block %d", logPrefix, thisBatch, executionAt)) } + hasExecutorForThisBatch := !isLastBatchPariallyProcessed && cfg.zk.HasExecutors() batchVerifier := NewBatchVerifier(cfg.zk, hasExecutorForThisBatch, cfg.legacyVerifier, forkId) streamWriter := &SequencerBatchStreamWriter{ ctx: ctx, @@ -321,23 +170,17 @@ func SpawnSequencingStage( blockDataSizeChecker := NewBlockDataChecker() - prevHeader := rawdb.ReadHeaderByNumber(rootTx, executionAt) + prevHeader := rawdb.ReadHeaderByNumber(sdb.tx, executionAt) batchDataOverflow := false var builtBlocks []uint64 var block *types.Block for blockNumber := executionAt + 1; runLoopBlocks; blockNumber++ { if l1Recovery { - decodedBlocksIndex := blockNumber - (executionAt + 1) - if decodedBlocksIndex == decodedBlocksSize { + if !blockState.loadDataByDecodedBlockIndex(blockNumber - (executionAt + 1)) { runLoopBlocks = false break } - - decodedBlock = nextBatchData.DecodedData[decodedBlocksIndex] - deltaTimestamp = uint64(decodedBlock.DeltaTimestamp) - l1EffectiveGases = decodedBlock.EffectiveGasPricePercentages - blockTransactions = decodedBlock.Transactions } l1InfoIndex, err := 
sdb.hermezDb.GetBlockL1InfoTreeIndex(lastStartedBn) @@ -348,13 +191,9 @@ func SpawnSequencingStage( log.Info(fmt.Sprintf("[%s] Starting block %d (forkid %v)...", logPrefix, blockNumber, forkId)) lastStartedBn = blockNumber + blockState.usedBlockElements.resetBlockBuildingArrays() - addedTransactions := []types.Transaction{} - addedReceipts := []*types.Receipt{} - effectiveGases = []uint8{} - addedExecutionResults := []*core.ExecutionResult{} - - header, parentBlock, err = prepareHeader(rootTx, blockNumber-1, deltaTimestamp, limboHeaderTimestamp, forkId, nextBatchData.Coinbase) + header, parentBlock, err = prepareHeader(sdb.tx, blockNumber-1, blockState.getDeltaTimestamp(), limboHeaderTimestamp, forkId, blockState.getCoinbase(&cfg)) if err != nil { return err } @@ -376,13 +215,13 @@ func SpawnSequencingStage( break } - infoTreeIndexProgress, l1TreeUpdate, l1TreeUpdateIndex, l1BlockHash, ger, shouldWriteGerToContract, err := prepareL1AndInfoTreeRelatedStuff(sdb, &decodedBlock, l1Recovery, header.Time) + infoTreeIndexProgress, l1TreeUpdate, l1TreeUpdateIndex, l1BlockHash, ger, shouldWriteGerToContract, err := prepareL1AndInfoTreeRelatedStuff(sdb, blockState, header.Time) if err != nil { return err } ibs := state.New(sdb.stateReader) - getHashFn := core.GetHashFn(header, getHeader) + getHashFn := core.GetHashFn(header, func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(sdb.tx, hash, number) }) blockContext := core.NewEVMBlockContext(header, getHashFn, cfg.engine, &cfg.zk.AddressSequencer, parentBlock.ExcessDataGas()) parentRoot := parentBlock.Root() @@ -439,7 +278,7 @@ func SpawnSequencingStage( default: if limboRecovery { cfg.txPool.LockFlusher() - blockTransactions, err = getLimboTransaction(cfg, limboTxHash) + blockState.blockTransactions, err = getLimboTransaction(cfg, limboTxHash) if err != nil { cfg.txPool.UnlockFlusher() return err @@ -447,7 +286,7 @@ func SpawnSequencingStage( cfg.txPool.UnlockFlusher() } else if !l1Recovery { cfg.txPool.LockFlusher() - blockTransactions, err = getNextPoolTransactions(cfg, executionAt, forkId, yielded) + blockState.blockTransactions, err = getNextPoolTransactions(cfg, executionAt, forkId, yielded) if err != nil { cfg.txPool.UnlockFlusher() return err @@ -457,16 +296,9 @@ func SpawnSequencingStage( var receipt *types.Receipt var execResult *core.ExecutionResult - for i, transaction := range blockTransactions { + for i, transaction := range blockState.blockTransactions { txHash := transaction.Hash() - - var effectiveGas uint8 - - if l1Recovery { - effectiveGas = l1EffectiveGases[i] - } else { - effectiveGas = DeriveEffectiveGasPrice(cfg, transaction) - } + effectiveGas := blockState.getL1EffectiveGases(cfg, i) backupDataSizeChecker := *blockDataSizeChecker if receipt, execResult, anyOverflow, err = attemptAddTransaction(cfg, sdb, ibs, batchCounters, &blockContext, header, transaction, effectiveGas, l1Recovery, forkId, l1InfoIndex, &backupDataSizeChecker); err != nil { @@ -514,10 +346,7 @@ func SpawnSequencingStage( if err == nil { blockDataSizeChecker = &backupDataSizeChecker yielded.Remove(txHash) - addedTransactions = append(addedTransactions, transaction) - addedReceipts = append(addedReceipts, receipt) - addedExecutionResults = append(addedExecutionResults, execResult) - effectiveGases = append(effectiveGases, effectiveGas) + blockState.usedBlockElements.onFinishAddingTransaction(transaction, receipt, execResult, effectiveGas) hasAnyTransactionsInThisBatch = true 
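The `onFinishAddingTransaction` call above relies on the `UsedBlockElements` helper added later in this patch (`stage_sequence_execute_l1recovery.go`): per-transaction outputs live in four parallel slices that are reset once per block and appended to together, so index `i` refers to the same transaction when `finaliseBlock` consumes them. A simplified, self-contained sketch of that lockstep pattern (the field types are stand-ins for the real erigon types):

```go
// Simplified stand-in for UsedBlockElements: parallel per-transaction slices,
// reset once per block and always appended together so index i lines up
// across all of them.
type usedElements struct {
	txHashes     [][32]byte // stand-in for the transaction list
	receiptsOK   []bool     // stand-in for the receipts
	gasUsed      []uint64   // stand-in for the execution results
	effectiveGas []uint8
}

func (u *usedElements) reset() {
	u.txHashes, u.receiptsOK, u.gasUsed, u.effectiveGas = nil, nil, nil, nil
}

func (u *usedElements) add(hash [32]byte, ok bool, gas uint64, eff uint8) {
	u.txHashes = append(u.txHashes, hash)
	u.receiptsOK = append(u.receiptsOK, ok)
	u.gasUsed = append(u.gasUsed, gas)
	u.effectiveGas = append(u.effectiveGas, eff)
}
```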
nonEmptyBatchTimer.Reset(cfg.zk.SequencerNonEmptyBatchSealTime) @@ -528,7 +357,7 @@ func SpawnSequencingStage( if l1Recovery { // just go into the normal loop waiting for new transactions to signal that the recovery // has finished as far as it can go - if len(blockTransactions) == 0 && !nextBatchData.IsWorkRemaining { + if blockState.isThereAnyTransactionsToRecover() { log.Info(fmt.Sprintf("[%s] L1 recovery no more transactions to recover", logPrefix)) } @@ -546,7 +375,7 @@ func SpawnSequencingStage( return err } - block, err = doFinishBlockAndUpdateState(ctx, cfg, s, sdb, ibs, header, parentBlock, forkId, thisBatch, ger, l1BlockHash, addedTransactions, addedReceipts, addedExecutionResults, effectiveGases, infoTreeIndexProgress, l1Recovery) + block, err = doFinishBlockAndUpdateState(ctx, cfg, s, sdb, ibs, header, parentBlock, forkId, thisBatch, ger, l1BlockHash, &blockState.usedBlockElements, infoTreeIndexProgress, l1Recovery) if err != nil { return err } @@ -567,9 +396,9 @@ func SpawnSequencingStage( } if gasPerSecond != 0 { - log.Info(fmt.Sprintf("[%s] Finish block %d with %d transactions... (%d gas/s)", logPrefix, blockNumber, len(addedTransactions), int(gasPerSecond))) + log.Info(fmt.Sprintf("[%s] Finish block %d with %d transactions... (%d gas/s)", logPrefix, blockNumber, len(blockState.usedBlockElements.transactions), int(gasPerSecond))) } else { - log.Info(fmt.Sprintf("[%s] Finish block %d with %d transactions...", logPrefix, blockNumber, len(addedTransactions))) + log.Info(fmt.Sprintf("[%s] Finish block %d with %d transactions...", logPrefix, blockNumber, len(blockState.usedBlockElements.transactions))) } err = sdb.hermezDb.WriteBatchCounters(thisBatch, batchCounters.CombineCollectorsNoChanges().UsedAsMap()) @@ -582,15 +411,10 @@ func SpawnSequencingStage( return err } - if err = rootTx.Commit(); err != nil { + if err = sdb.CommitAndStart(); err != nil { return err } - rootTx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer rootTx.Rollback() - sdb.SetTx(rootTx) + defer sdb.tx.Rollback() lastBatch = thisBatch @@ -626,10 +450,8 @@ func SpawnSequencingStage( return err } - if freshTx { - if err = rootTx.Commit(); err != nil { - return err - } + if err = sdb.tx.Commit(); err != nil { + return err } return nil diff --git a/zk/stages/stage_sequence_execute_batch.go b/zk/stages/stage_sequence_execute_batch.go new file mode 100644 index 00000000000..7007d7fcd74 --- /dev/null +++ b/zk/stages/stage_sequence_execute_batch.go @@ -0,0 +1,223 @@ +package stages + +import ( + "fmt" + + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/eth/stagedsync" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/zk/datastream/server" + "github.com/ledgerwatch/erigon/zk/utils" + "github.com/ledgerwatch/log/v3" +) + +func prepareBatchNumber(lastBatch uint64, isLastBatchPariallyProcessed bool) uint64 { + if isLastBatchPariallyProcessed { + return lastBatch + } + + return lastBatch + 1 +} + +func prepareBatchCounters(cfg *SequenceBlockCfg, sdb *stageDb, thisBatch, forkId uint64, isLastBatchPariallyProcessed, l1Recovery bool) (*vm.BatchCounterCollector, error) { + var intermediateUsedCounters *vm.Counters + if isLastBatchPariallyProcessed { + intermediateCountersMap, found, err := sdb.hermezDb.GetBatchCounters(thisBatch) + if err != nil { + return nil, err + } + if !found { + return nil, fmt.Errorf("intermediate counters not found for batch %d", thisBatch) + } + + 
intermediateUsedCounters = vm.NewCountersFromUsedMap(intermediateCountersMap) + } + + return vm.NewBatchCounterCollector(sdb.smt.GetDepth(), uint16(forkId), cfg.zk.VirtualCountersSmtReduction, cfg.zk.ShouldCountersBeUnlimited(l1Recovery), intermediateUsedCounters), nil +} + +func doInstantCloseIfNeeded(logPrefix string, cfg *SequenceBlockCfg, sdb *stageDb, thisBatch, forkId uint64, batchCounters *vm.BatchCounterCollector) (bool, error) { + instantClose, err := sdb.hermezDb.GetJustUnwound(thisBatch) + if err != nil || !instantClose { + return false, err // err here could be nil as well + } + + if err = sdb.hermezDb.DeleteJustUnwound(thisBatch); err != nil { + return false, err + } + + // lets first check if we actually wrote any blocks in this batch + blocks, err := sdb.hermezDb.GetL2BlockNosByBatch(thisBatch) + if err != nil { + return false, err + } + + // only close this batch down if we actually made any progress in it, otherwise + // just continue processing as normal and recreate the batch from scratch + if len(blocks) > 0 { + if err = runBatchLastSteps(logPrefix, cfg.datastreamServer, sdb, thisBatch, blocks[len(blocks)-1], batchCounters); err != nil { + return false, err + } + if err = stages.SaveStageProgress(sdb.tx, stages.HighestSeenBatchNumber, thisBatch); err != nil { + return false, err + } + if err = sdb.hermezDb.WriteForkId(thisBatch, forkId); err != nil { + return false, err + } + + if err = sdb.tx.Commit(); err != nil { + return false, err + } + + return true, nil + } + + return false, nil +} + +func doCheckForBadBatch(logPrefix string, sdb *stageDb, l1rd *L1RecoveryData, thisBlock, thisBatch, forkId uint64) (bool, error) { + infoTreeIndex, err := l1rd.getInfoTreeIndex(sdb) + if err != nil { + return false, err + } + + // now let's detect a bad batch and skip it if we have to + currentBlock, err := rawdb.ReadBlockByNumber(sdb.tx, thisBlock) + if err != nil { + return false, err + } + + badBatch, err := checkForBadBatch(thisBatch, sdb.hermezDb, currentBlock.Time(), infoTreeIndex, l1rd.nextBatchData.LimitTimestamp, l1rd.nextBatchData.DecodedData) + if err != nil { + return false, err + } + + if !badBatch { + return false, nil + } + + log.Info(fmt.Sprintf("[%s] Skipping bad batch %d...", logPrefix, thisBatch)) + // store the fact that this batch was invalid during recovery - will be used for the stream later + if err = sdb.hermezDb.WriteInvalidBatch(thisBatch); err != nil { + return false, err + } + if err = sdb.hermezDb.WriteBatchCounters(thisBatch, map[string]int{}); err != nil { + return false, err + } + if err = sdb.hermezDb.DeleteIsBatchPartiallyProcessed(thisBatch); err != nil { + return false, err + } + if err = stages.SaveStageProgress(sdb.tx, stages.HighestSeenBatchNumber, thisBatch); err != nil { + return false, err + } + if err = sdb.hermezDb.WriteForkId(thisBatch, forkId); err != nil { + return false, err + } + if err = sdb.tx.Commit(); err != nil { + return false, err + } + return true, nil +} + +func updateStreamAndCheckRollback( + logPrefix string, + sdb *stageDb, + streamWriter *SequencerBatchStreamWriter, + batchNumber uint64, + forkId uint64, + u stagedsync.Unwinder, +) (bool, int, error) { + committed, remaining, err := streamWriter.CommitNewUpdates(forkId) + if err != nil { + return false, remaining, err + } + for _, commit := range committed { + if !commit.Valid { + // we are about to unwind so place the marker ready for this to happen + if err = sdb.hermezDb.WriteJustUnwound(batchNumber); err != nil { + return false, 0, err + } + // capture the fork 
otherwise when the loop starts again to close + // off the batch it will detect it as a fork upgrade + if err = sdb.hermezDb.WriteForkId(batchNumber, forkId); err != nil { + return false, 0, err + } + + unwindTo := commit.BlockNumber - 1 + + // for unwind we supply the block number X-1 of the block we want to remove, but supply the hash of the block + // causing the unwind. + unwindHeader := rawdb.ReadHeaderByNumber(sdb.tx, commit.BlockNumber) + if unwindHeader == nil { + return false, 0, fmt.Errorf("could not find header for block %d", commit.BlockNumber) + } + + if err = sdb.tx.Commit(); err != nil { + return false, 0, err + } + + log.Warn(fmt.Sprintf("[%s] Block is invalid - rolling back", logPrefix), "badBlock", commit.BlockNumber, "unwindTo", unwindTo, "root", unwindHeader.Root) + + u.UnwindTo(unwindTo, unwindHeader.Hash()) + return true, 0, nil + } + } + + return false, remaining, nil +} + +func runBatchLastSteps( + logPrefix string, + datastreamServer *server.DataStreamServer, + sdb *stageDb, + thisBatch uint64, + lastStartedBn uint64, + batchCounters *vm.BatchCounterCollector, +) error { + l1InfoIndex, err := sdb.hermezDb.GetBlockL1InfoTreeIndex(lastStartedBn) + if err != nil { + return err + } + + counters, err := batchCounters.CombineCollectors(l1InfoIndex != 0) + if err != nil { + return err + } + + log.Info(fmt.Sprintf("[%s] counters consumed", logPrefix), "batch", thisBatch, "counts", counters.UsedAsString()) + + if err = sdb.hermezDb.WriteBatchCounters(thisBatch, counters.UsedAsMap()); err != nil { + return err + } + if err := sdb.hermezDb.DeleteIsBatchPartiallyProcessed(thisBatch); err != nil { + return err + } + + // Local Exit Root (ler): read s/c storage every batch to store the LER for the highest block in the batch + ler, err := utils.GetBatchLocalExitRootFromSCStorage(thisBatch, sdb.hermezDb.HermezDbReader, sdb.tx) + if err != nil { + return err + } + // write ler to hermezdb + if err = sdb.hermezDb.WriteLocalExitRootForBatchNo(thisBatch, ler); err != nil { + return err + } + + lastBlock, err := sdb.hermezDb.GetHighestBlockInBatch(thisBatch) + if err != nil { + return err + } + block, err := rawdb.ReadBlockByNumber(sdb.tx, lastBlock) + if err != nil { + return err + } + blockRoot := block.Root() + if err = datastreamServer.WriteBatchEnd(sdb.hermezDb, thisBatch, thisBatch, &blockRoot, &ler); err != nil { + return err + } + + log.Info(fmt.Sprintf("[%s] Finish batch %d...", logPrefix, thisBatch)) + + return nil +} diff --git a/zk/stages/stage_sequence_execute_blocks.go b/zk/stages/stage_sequence_execute_blocks.go index 23637883d4f..e48eb92427f 100644 --- a/zk/stages/stage_sequence_execute_blocks.go +++ b/zk/stages/stage_sequence_execute_blocks.go @@ -66,6 +66,48 @@ func handleStateForNewBlockStarting( return nil } +func doFinishBlockAndUpdateState( + ctx context.Context, + cfg SequenceBlockCfg, + s *stagedsync.StageState, + sdb *stageDb, + ibs *state.IntraBlockState, + header *types.Header, + parentBlock *types.Block, + forkId uint64, + thisBatch uint64, + ger common.Hash, + l1BlockHash common.Hash, + usedBlockElements *UsedBlockElements, + l1InfoIndex uint64, + l1Recovery bool, +) (*types.Block, error) { + thisBlockNumber := header.Number.Uint64() + + if cfg.accumulator != nil { + cfg.accumulator.StartChange(thisBlockNumber, header.Hash(), nil, false) + } + + block, err := finaliseBlock(ctx, cfg, s, sdb, ibs, header, parentBlock, forkId, thisBatch, cfg.accumulator, ger, l1BlockHash, usedBlockElements, l1Recovery) + if err != nil { + return nil, err + } + + if err := 
updateSequencerProgress(sdb.tx, thisBlockNumber, thisBatch, l1InfoIndex); err != nil { + return nil, err + } + + if cfg.accumulator != nil { + txs, err := rawdb.RawTransactionsRange(sdb.tx, thisBlockNumber, thisBlockNumber) + if err != nil { + return nil, err + } + cfg.accumulator.ChangeTransactions(txs) + } + + return block, nil +} + func finaliseBlock( ctx context.Context, cfg SequenceBlockCfg, @@ -79,10 +121,7 @@ func finaliseBlock( accumulator *shards.Accumulator, ger common.Hash, l1BlockHash common.Hash, - transactions []types.Transaction, - receipts types.Receipts, - execResults []*core.ExecutionResult, - effectiveGases []uint8, + usedBlockElements *UsedBlockElements, l1Recovery bool, ) (*types.Block, error) { stateWriter := state.NewPlainStateWriter(sdb.tx, sdb.tx, newHeader.Number.Uint64()).SetAccumulator(accumulator) @@ -97,7 +136,7 @@ func finaliseBlock( } txInfos := []blockinfo.ExecutedTxInfo{} - for i, tx := range transactions { + for i, tx := range usedBlockElements.transactions { var from common.Address var err error sender, ok := tx.GetSender() @@ -110,10 +149,10 @@ func finaliseBlock( return nil, err } } - localReceipt := core.CreateReceiptForBlockInfoTree(receipts[i], cfg.chainConfig, newHeader.Number.Uint64(), execResults[i]) + localReceipt := core.CreateReceiptForBlockInfoTree(usedBlockElements.receipts[i], cfg.chainConfig, newHeader.Number.Uint64(), usedBlockElements.executionResults[i]) txInfos = append(txInfos, blockinfo.ExecutedTxInfo{ Tx: tx, - EffectiveGasPrice: effectiveGases[i], + EffectiveGasPrice: usedBlockElements.effectiveGases[i], Receipt: localReceipt, Signer: &from, }) @@ -124,8 +163,8 @@ func finaliseBlock( } if l1Recovery { - for i, receipt := range receipts { - core.ProcessReceiptForBlockExecution(receipt, sdb.hermezDb.HermezDbReader, cfg.chainConfig, newHeader.Number.Uint64(), newHeader, transactions[i]) + for i, receipt := range usedBlockElements.receipts { + core.ProcessReceiptForBlockExecution(receipt, sdb.hermezDb.HermezDbReader, cfg.chainConfig, newHeader.Number.Uint64(), newHeader, usedBlockElements.transactions[i]) } } @@ -133,12 +172,12 @@ func finaliseBlock( cfg.engine, sdb.stateReader, newHeader, - transactions, + usedBlockElements.transactions, []*types.Header{}, // no uncles stateWriter, cfg.chainConfig, ibs, - receipts, + usedBlockElements.receipts, nil, // no withdrawals chainReader, true, @@ -157,8 +196,8 @@ func finaliseBlock( finalHeader.Root = newRoot finalHeader.Coinbase = cfg.zk.AddressSequencer finalHeader.GasLimit = utils.GetBlockGasLimitForFork(forkId) - finalHeader.ReceiptHash = types.DeriveSha(receipts) - finalHeader.Bloom = types.CreateBloom(receipts) + finalHeader.ReceiptHash = types.DeriveSha(usedBlockElements.receipts) + finalHeader.Bloom = types.CreateBloom(usedBlockElements.receipts) newNum := finalBlock.Number() err = rawdb.WriteHeader_zkEvm(sdb.tx, finalHeader) diff --git a/zk/stages/stage_sequence_execute_data_stream.go b/zk/stages/stage_sequence_execute_data_stream.go index c15a1af7a39..ecc1c4adcda 100644 --- a/zk/stages/stage_sequence_execute_data_stream.go +++ b/zk/stages/stage_sequence_execute_data_stream.go @@ -2,11 +2,14 @@ package stages import ( "context" + "fmt" "github.com/gateway-fm/cdk-erigon-lib/kv" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/zk/datastream/server" verifier "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" + "github.com/ledgerwatch/erigon/zk/utils" + "github.com/ledgerwatch/log/v3" ) type SequencerBatchStreamWriter struct { @@ -85,3 +88,30 @@ func 
(sbc *SequencerBatchStreamWriter) writeBlockDetails(verifiedBundles []*veri return written, nil } + +func finalizeLastBatchInDatastreamIfNotFinalized(logPrefix string, sdb *stageDb, datastreamServer *server.DataStreamServer, thisBatch, thisBlock uint64) error { + isLastEntryBatchEnd, err := datastreamServer.IsLastEntryBatchEnd() + if err != nil { + return err + } + + if isLastEntryBatchEnd { + return nil + } + + log.Warn(fmt.Sprintf("[%s] Last batch %d was not closed properly, closing it now...", logPrefix, thisBatch)) + ler, err := utils.GetBatchLocalExitRootFromSCStorage(thisBatch, sdb.hermezDb.HermezDbReader, sdb.tx) + if err != nil { + return err + } + + lastBlock, err := rawdb.ReadBlockByNumber(sdb.tx, thisBlock) + if err != nil { + return err + } + root := lastBlock.Root() + if err = datastreamServer.WriteBatchEnd(sdb.hermezDb, thisBatch, thisBatch-1, &root, &ler); err != nil { + return err + } + return nil +} diff --git a/zk/stages/stage_sequence_execute_injected_batch.go b/zk/stages/stage_sequence_execute_injected_batch.go index 852ecfd5b9b..be957183178 100644 --- a/zk/stages/stage_sequence_execute_injected_batch.go +++ b/zk/stages/stage_sequence_execute_injected_batch.go @@ -2,10 +2,13 @@ package stages import ( "context" + "math" "errors" + "github.com/gateway-fm/cdk-erigon-lib/common" "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" @@ -13,6 +16,7 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync" zktx "github.com/ledgerwatch/erigon/zk/tx" zktypes "github.com/ledgerwatch/erigon/zk/types" + "github.com/ledgerwatch/erigon/zk/utils" ) const ( @@ -27,11 +31,22 @@ func processInjectedInitialBatch( s *stagedsync.StageState, sdb *stageDb, forkId uint64, - header *types.Header, - parentBlock *types.Block, - blockContext *evmtypes.BlockContext, l1Recovery bool, ) error { + // set the block height for the fork we're running at to ensure contract interactions are correct + if err := utils.RecoverySetBlockConfigForks(injectedBatchBlockNumber, forkId, cfg.chainConfig, s.LogPrefix()); err != nil { + return err + } + + header, parentBlock, err := prepareHeader(sdb.tx, 0, math.MaxUint64, math.MaxUint64, forkId, cfg.zk.AddressSequencer) + if err != nil { + return err + } + + getHeader := func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(sdb.tx, hash, number) } + getHashFn := core.GetHashFn(header, getHeader) + blockContext := core.NewEVMBlockContext(header, getHashFn, cfg.engine, &cfg.zk.AddressSequencer, parentBlock.ExcessDataGas()) + injected, err := sdb.hermezDb.GetL1InjectedBatch(0) if err != nil { return err @@ -63,17 +78,19 @@ func processInjectedInitialBatch( return err } - txn, receipt, execResult, effectiveGas, err := handleInjectedBatch(cfg, sdb, ibs, blockContext, injected, header, parentBlock, forkId) + txn, receipt, execResult, effectiveGas, err := handleInjectedBatch(cfg, sdb, ibs, &blockContext, injected, header, parentBlock, forkId) if err != nil { return err } - txns := types.Transactions{*txn} - receipts := types.Receipts{receipt} - execResults := []*core.ExecutionResult{execResult} - effectiveGases := []uint8{effectiveGas} + usedBlockElements := &UsedBlockElements{ + transactions: types.Transactions{*txn}, + receipts: types.Receipts{receipt}, + executionResults: []*core.ExecutionResult{execResult}, + effectiveGases: []uint8{effectiveGas}, + } - _, err = doFinishBlockAndUpdateState(ctx, 
cfg, s, sdb, ibs, header, parentBlock, forkId, injectedBatchNumber, injected.LastGlobalExitRoot, injected.L1ParentHash, txns, receipts, execResults, effectiveGases, 0, l1Recovery) + _, err = doFinishBlockAndUpdateState(ctx, cfg, s, sdb, ibs, header, parentBlock, forkId, injectedBatchNumber, injected.LastGlobalExitRoot, injected.L1ParentHash, usedBlockElements, 0, l1Recovery) return err } diff --git a/zk/stages/stage_sequence_execute_l1recovery.go b/zk/stages/stage_sequence_execute_l1recovery.go new file mode 100644 index 00000000000..aeccc1a3299 --- /dev/null +++ b/zk/stages/stage_sequence_execute_l1recovery.go @@ -0,0 +1,148 @@ +package stages + +import ( + "fmt" + "math" + + "github.com/gateway-fm/cdk-erigon-lib/common" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/zk/l1_data" + zktx "github.com/ledgerwatch/erigon/zk/tx" +) + +// TYPE BLOCK STATE +type BlockState struct { + blockTransactions []types.Transaction + l1RecoveryData *L1RecoveryData + usedBlockElements UsedBlockElements +} + +func newBlockState(l1Recovery bool) *BlockState { + blockState := &BlockState{} + + if l1Recovery { + blockState.l1RecoveryData = newL1RecoveryData() + } + + return blockState +} + +func (bs *BlockState) isL1Recovery() bool { + return bs.l1RecoveryData != nil +} + +func (bs *BlockState) isThereAnyTransactionsToRecover() bool { + if !bs.isL1Recovery() { + return false + } + + return len(bs.blockTransactions) != 0 || bs.l1RecoveryData.nextBatchData.IsWorkRemaining +} + +func (bs *BlockState) loadDataByDecodedBlockIndex(decodedBlocksIndex uint64) bool { + if !bs.l1RecoveryData.loadDataByDecodedBlockIndex(decodedBlocksIndex) { + return false + } + + bs.blockTransactions = bs.l1RecoveryData.decodedBlock.Transactions + return true +} + +func (bs *BlockState) getDeltaTimestamp() uint64 { + if bs.l1RecoveryData != nil { + return uint64(bs.l1RecoveryData.decodedBlock.DeltaTimestamp) + } + + return math.MaxUint64 +} + +func (bs *BlockState) getCoinbase(cfg *SequenceBlockCfg) common.Address { + if bs.l1RecoveryData != nil { + return bs.l1RecoveryData.nextBatchData.Coinbase + } + + return cfg.zk.AddressSequencer +} + +func (bs *BlockState) getL1EffectiveGases(cfg SequenceBlockCfg, i int) uint8 { + if bs.isL1Recovery() { + return bs.l1RecoveryData.decodedBlock.EffectiveGasPricePercentages[i] + } + + return DeriveEffectiveGasPrice(cfg, bs.blockTransactions[i]) +} + +// TYPE BLOCK ELEMENTS +type UsedBlockElements struct { + transactions []types.Transaction + receipts types.Receipts + effectiveGases []uint8 + executionResults []*core.ExecutionResult +} + +func (ube *UsedBlockElements) resetBlockBuildingArrays() { + ube.transactions = []types.Transaction{} + ube.receipts = types.Receipts{} + ube.effectiveGases = []uint8{} + ube.executionResults = []*core.ExecutionResult{} +} + +func (ube *UsedBlockElements) onFinishAddingTransaction(transaction types.Transaction, receipt *types.Receipt, execResult *core.ExecutionResult, effectiveGas uint8) { + ube.transactions = append(ube.transactions, transaction) + ube.receipts = append(ube.receipts, receipt) + ube.executionResults = append(ube.executionResults, execResult) + ube.effectiveGases = append(ube.effectiveGases, effectiveGas) +} + +// TYPE L1 RECOVERY DATA +type L1RecoveryData struct { + decodedBlocksSize uint64 + decodedBlock *zktx.DecodedBatchL2Data + nextBatchData *l1_data.DecodedL1Data +} + +func newL1RecoveryData() *L1RecoveryData { + return &L1RecoveryData{} +} + +func (l1rd *L1RecoveryData) 
loadNextBatchData(sdb *stageDb, thisBatch, forkId uint64) (err error) { + l1rd.nextBatchData, err = l1_data.BreakDownL1DataByBatch(thisBatch, forkId, sdb.hermezDb.HermezDbReader) + if err != nil { + return err + } + + l1rd.decodedBlocksSize = uint64(len(l1rd.nextBatchData.DecodedData)) + return nil +} + +func (l1rd *L1RecoveryData) hasAnyDecodedBlocks() bool { + return l1rd.decodedBlocksSize == 0 +} + +func (l1rd *L1RecoveryData) getInfoTreeIndex(sdb *stageDb) (uint64, error) { + var infoTreeIndex uint64 + + if l1rd.nextBatchData.L1InfoRoot == SpecialZeroIndexHash { + return uint64(0), nil + } + + infoTreeIndex, found, err := sdb.hermezDb.GetL1InfoTreeIndexByRoot(l1rd.nextBatchData.L1InfoRoot) + if err != nil { + return uint64(0), err + } + if !found { + return uint64(0), fmt.Errorf("could not find L1 info tree index for root %s", l1rd.nextBatchData.L1InfoRoot.String()) + } + + return infoTreeIndex, nil +} + +func (l1rd *L1RecoveryData) loadDataByDecodedBlockIndex(decodedBlocksIndex uint64) bool { + if decodedBlocksIndex == l1rd.decodedBlocksSize { + return false + } + + l1rd.decodedBlock = &l1rd.nextBatchData.DecodedData[decodedBlocksIndex] + return true +} diff --git a/zk/stages/stage_sequence_execute_transactions.go b/zk/stages/stage_sequence_execute_transactions.go index aa9130e7fe9..fa6a5a86d5e 100644 --- a/zk/stages/stage_sequence_execute_transactions.go +++ b/zk/stages/stage_sequence_execute_transactions.go @@ -2,7 +2,6 @@ package stages import ( "context" - "encoding/binary" "time" "github.com/gateway-fm/cdk-erigon-lib/common" @@ -12,7 +11,6 @@ import ( "io" mapset "github.com/deckarep/golang-set/v2" - "github.com/gateway-fm/cdk-erigon-lib/common/length" types2 "github.com/gateway-fm/cdk-erigon-lib/types" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/state" @@ -20,8 +18,6 @@ import ( "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/core/vm/evmtypes" "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/erigon/zk/hermez_db" - zktx "github.com/ledgerwatch/erigon/zk/tx" "github.com/ledgerwatch/erigon/zk/utils" "github.com/ledgerwatch/log/v3" ) @@ -100,56 +96,6 @@ func getLimboTransaction(cfg SequenceBlockCfg, txHash *common.Hash) ([]types.Tra return transactions, nil } -func getNextL1BatchData(batchNumber uint64, forkId uint64, hermezDb *hermez_db.HermezDb) (*nextBatchL1Data, error) { - nextData := &nextBatchL1Data{} - // we expect that the batch we're going to load in next should be in the db already because of the l1 block sync - // stage, if it is not there we need to panic as we're in a bad state - batchL2Data, err := hermezDb.GetL1BatchData(batchNumber) - if err != nil { - return nextData, err - } - - if len(batchL2Data) == 0 { - // end of the line for batch recovery so return empty - return nextData, nil - } - - nextData.Coinbase = common.BytesToAddress(batchL2Data[:length.Addr]) - nextData.L1InfoRoot = common.BytesToHash(batchL2Data[length.Addr : length.Addr+length.Hash]) - tsBytes := batchL2Data[length.Addr+length.Hash : length.Addr+length.Hash+8] - nextData.LimitTimestamp = binary.BigEndian.Uint64(tsBytes) - batchL2Data = batchL2Data[length.Addr+length.Hash+8:] - - nextData.DecodedData, err = zktx.DecodeBatchL2Blocks(batchL2Data, forkId) - if err != nil { - return nextData, err - } - - // no data means no more work to do - end of the line - if len(nextData.DecodedData) == 0 { - return nextData, nil - } - - nextData.IsWorkRemaining = true - transactionsInBatch := 0 - for _, batch := range nextData.DecodedData { - 
transactionsInBatch += len(batch.Transactions) - } - if transactionsInBatch == 0 { - // we need to check if this batch should simply be empty or not so we need to check against the - // highest known batch number to see if we have work to do still - highestKnown, err := hermezDb.GetLastL1BatchData() - if err != nil { - return nextData, err - } - if batchNumber >= highestKnown { - nextData.IsWorkRemaining = false - } - } - - return nextData, err -} - func extractTransactionsFromSlot(slot *types2.TxsRlp) ([]types.Transaction, error) { transactions := make([]types.Transaction, 0, len(slot.Txs)) reader := bytes.NewReader([]byte{}) diff --git a/zk/stages/stage_sequence_execute_utils.go b/zk/stages/stage_sequence_execute_utils.go index 8fb68a7697b..41e581789ea 100644 --- a/zk/stages/stage_sequence_execute_utils.go +++ b/zk/stages/stage_sequence_execute_utils.go @@ -1,7 +1,6 @@ package stages import ( - "context" "time" "github.com/c2h5oh/datasize" @@ -18,7 +17,6 @@ import ( "github.com/ledgerwatch/erigon/chain" "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" @@ -27,8 +25,6 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/ethdb/prune" - db2 "github.com/ledgerwatch/erigon/smt/pkg/db" - smtNs "github.com/ledgerwatch/erigon/smt/pkg/smt" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/shards" "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" @@ -44,19 +40,16 @@ import ( ) const ( - logInterval = 20 * time.Second - - // stateStreamLimit - don't accumulate state changes if jump is bigger than this amount of blocks - stateStreamLimit uint64 = 1_000 - + logInterval = 20 * time.Second transactionGasLimit = 30000000 + yieldSize = 100 // arbitrary number defining how many transactions to yield from the pool at once - yieldSize = 100 // arbitrary number defining how many transactions to yield from the pool at once ) var ( - noop = state.NewNoopWriter() - blockDifficulty = new(big.Int).SetUint64(0) + noop = state.NewNoopWriter() + blockDifficulty = new(big.Int).SetUint64(0) + SpecialZeroIndexHash = common.HexToHash("0x27AE5BA08D7291C96C8CBDDCC148BF48A6D68C7974B94356F53754EF6171D757") ) type HasChangeSetWriter interface { @@ -166,45 +159,7 @@ func (sCfg *SequenceBlockCfg) toErigonExecuteBlockCfg() stagedsync.ExecuteBlockC ) } -type stageDb struct { - tx kv.RwTx - hermezDb *hermez_db.HermezDb - eridb *db2.EriDb - stateReader *state.PlainStateReader - smt *smtNs.SMT -} - -func newStageDb(tx kv.RwTx) *stageDb { - sdb := &stageDb{} - sdb.SetTx(tx) - return sdb -} - -func (sdb *stageDb) SetTx(tx kv.RwTx) { - sdb.tx = tx - sdb.hermezDb = hermez_db.NewHermezDb(tx) - sdb.eridb = db2.NewEriDb(tx) - sdb.stateReader = state.NewPlainStateReader(tx) - sdb.smt = smtNs.NewSMT(sdb.eridb, false) -} - -type nextBatchL1Data struct { - DecodedData []zktx.DecodedBatchL2Data - Coinbase common.Address - L1InfoRoot common.Hash - IsWorkRemaining bool - LimitTimestamp uint64 -} - -type forkDb interface { - GetAllForkHistory() ([]uint64, []uint64, error) - GetLatestForkHistory() (uint64, uint64, error) - GetForkId(batch uint64) (uint64, error) - WriteForkIdBlockOnce(forkId, block uint64) error - WriteForkId(batch, forkId uint64) error -} - -func prepareForkId(lastBatch, executionAt uint64, 
hermezDb forkDb) (uint64, error) { +func prepareForkId(lastBatch, executionAt uint64, hermezDb *hermez_db.HermezDb) (uint64, error) { var err error var latest uint64 @@ -275,7 +230,7 @@ func prepareHeader(tx kv.RwTx, previousBlockNumber, deltaTimestamp, forcedTimest }, parentBlock, nil } -func prepareL1AndInfoTreeRelatedStuff(sdb *stageDb, decodedBlock *zktx.DecodedBatchL2Data, l1Recovery bool, proposedTimestamp uint64) ( +func prepareL1AndInfoTreeRelatedStuff(sdb *stageDb, bs *BlockState, proposedTimestamp uint64) ( infoTreeIndexProgress uint64, l1TreeUpdate *zktypes.L1InfoTreeUpdate, l1TreeUpdateIndex uint64, @@ -293,8 +248,8 @@ func prepareL1AndInfoTreeRelatedStuff(sdb *stageDb, decodedBlock *zktx.DecodedBa return } - if l1Recovery { - l1TreeUpdateIndex = uint64(decodedBlock.L1InfoTreeIndex) + if bs.isL1Recovery() { + l1TreeUpdateIndex = uint64(bs.l1RecoveryData.decodedBlock.L1InfoTreeIndex) if l1TreeUpdate, err = sdb.hermezDb.GetL1InfoTreeUpdate(l1TreeUpdateIndex); err != nil { return } @@ -359,51 +314,6 @@ func updateSequencerProgress(tx kv.RwTx, newHeight uint64, newBatch uint64, l1In return nil } -func doFinishBlockAndUpdateState( - ctx context.Context, - cfg SequenceBlockCfg, - s *stagedsync.StageState, - sdb *stageDb, - ibs *state.IntraBlockState, - header *types.Header, - parentBlock *types.Block, - forkId uint64, - thisBatch uint64, - ger common.Hash, - l1BlockHash common.Hash, - transactions []types.Transaction, - receipts types.Receipts, - execResults []*core.ExecutionResult, - effectiveGases []uint8, - l1InfoIndex uint64, - l1Recovery bool, -) (*types.Block, error) { - thisBlockNumber := header.Number.Uint64() - - if cfg.accumulator != nil { - cfg.accumulator.StartChange(thisBlockNumber, header.Hash(), nil, false) - } - - block, err := finaliseBlock(ctx, cfg, s, sdb, ibs, header, parentBlock, forkId, thisBatch, cfg.accumulator, ger, l1BlockHash, transactions, receipts, execResults, effectiveGases, l1Recovery) - if err != nil { - return nil, err - } - - if err := updateSequencerProgress(sdb.tx, thisBlockNumber, thisBatch, l1InfoIndex); err != nil { - return nil, err - } - - if cfg.accumulator != nil { - txs, err := rawdb.RawTransactionsRange(sdb.tx, thisBlockNumber, thisBlockNumber) - if err != nil { - return nil, err - } - cfg.accumulator.ChangeTransactions(txs) - } - - return block, nil -} - type batchChecker interface { GetL1InfoTreeUpdate(idx uint64) (*zktypes.L1InfoTreeUpdate, error) } @@ -496,105 +406,3 @@ func (bdc *BlockDataChecker) AddTransactionData(txL2Data []byte) bool { return false } - -func updateStreamAndCheckRollback( - logPrefix string, - sdb *stageDb, - streamWriter *SequencerBatchStreamWriter, - batchNumber uint64, - forkId uint64, - u stagedsync.Unwinder, -) (bool, int, error) { - committed, remaining, err := streamWriter.CommitNewUpdates(forkId) - if err != nil { - return false, remaining, err - } - for _, commit := range committed { - if !commit.Valid { - // we are about to unwind so place the marker ready for this to happen - if err = sdb.hermezDb.WriteJustUnwound(batchNumber); err != nil { - return false, 0, err - } - // capture the fork otherwise when the loop starts again to close - // off the batch it will detect it as a fork upgrade - if err = sdb.hermezDb.WriteForkId(batchNumber, forkId); err != nil { - return false, 0, err - } - - unwindTo := commit.BlockNumber - 1 - - // for unwind we supply the block number X-1 of the block we want to remove, but supply the hash of the block - // causing the unwind. 
- unwindHeader := rawdb.ReadHeaderByNumber(sdb.tx, commit.BlockNumber) - if unwindHeader == nil { - return false, 0, fmt.Errorf("could not find header for block %d", commit.BlockNumber) - } - - if err = sdb.tx.Commit(); err != nil { - return false, 0, err - } - - log.Warn(fmt.Sprintf("[%s] Block is invalid - rolling back", logPrefix), "badBlock", commit.BlockNumber, "unwindTo", unwindTo, "root", unwindHeader.Root) - - u.UnwindTo(unwindTo, unwindHeader.Hash()) - return true, 0, nil - } - } - - return false, remaining, nil -} - -func runBatchLastSteps( - logPrefix string, - datastreamServer *server.DataStreamServer, - sdb *stageDb, - thisBatch uint64, - lastStartedBn uint64, - batchCounters *vm.BatchCounterCollector, -) error { - l1InfoIndex, err := sdb.hermezDb.GetBlockL1InfoTreeIndex(lastStartedBn) - if err != nil { - return err - } - - counters, err := batchCounters.CombineCollectors(l1InfoIndex != 0) - if err != nil { - return err - } - - log.Info(fmt.Sprintf("[%s] counters consumed", logPrefix), "batch", thisBatch, "counts", counters.UsedAsString()) - - if err = sdb.hermezDb.WriteBatchCounters(thisBatch, counters.UsedAsMap()); err != nil { - return err - } - if err := sdb.hermezDb.DeleteIsBatchPartiallyProcessed(thisBatch); err != nil { - return err - } - - // Local Exit Root (ler): read s/c storage every batch to store the LER for the highest block in the batch - ler, err := utils.GetBatchLocalExitRootFromSCStorage(thisBatch, sdb.hermezDb.HermezDbReader, sdb.tx) - if err != nil { - return err - } - // write ler to hermezdb - if err = sdb.hermezDb.WriteLocalExitRootForBatchNo(thisBatch, ler); err != nil { - return err - } - - lastBlock, err := sdb.hermezDb.GetHighestBlockInBatch(thisBatch) - if err != nil { - return err - } - block, err := rawdb.ReadBlockByNumber(sdb.tx, lastBlock) - if err != nil { - return err - } - blockRoot := block.Root() - if err = datastreamServer.WriteBatchEnd(sdb.hermezDb, thisBatch, thisBatch, &blockRoot, &ler); err != nil { - return err - } - - log.Info(fmt.Sprintf("[%s] Finish batch %d...", logPrefix, thisBatch)) - - return nil -} diff --git a/zk/stages/stage_sequence_execute_utils_db.go b/zk/stages/stage_sequence_execute_utils_db.go new file mode 100644 index 00000000000..cbc98015383 --- /dev/null +++ b/zk/stages/stage_sequence_execute_utils_db.go @@ -0,0 +1,63 @@ +package stages + +import ( + "context" + "fmt" + + "github.com/gateway-fm/cdk-erigon-lib/kv" + + "github.com/ledgerwatch/erigon/core/state" + db2 "github.com/ledgerwatch/erigon/smt/pkg/db" + smtNs "github.com/ledgerwatch/erigon/smt/pkg/smt" + "github.com/ledgerwatch/erigon/zk/hermez_db" +) + +type stageDb struct { + ctx context.Context + db kv.RwDB + + tx kv.RwTx + hermezDb *hermez_db.HermezDb + eridb *db2.EriDb + stateReader *state.PlainStateReader + smt *smtNs.SMT +} + +func newStageDb(ctx context.Context, tx kv.RwTx, db kv.RwDB) (sdb *stageDb, err error) { + if tx != nil { + return nil, fmt.Errorf("sequencer cannot use global db's tx object, because it commits the tx object itself") + } + + if tx, err = db.BeginRw(ctx); err != nil { + return nil, err + } + + sdb = &stageDb{ + ctx: ctx, + db: db, + } + sdb.SetTx(tx) + return sdb, nil +} + +func (sdb *stageDb) SetTx(tx kv.RwTx) { + sdb.tx = tx + sdb.hermezDb = hermez_db.NewHermezDb(tx) + sdb.eridb = db2.NewEriDb(tx) + sdb.stateReader = state.NewPlainStateReader(tx) + sdb.smt = smtNs.NewSMT(sdb.eridb, false) +} + +func (sdb *stageDb) CommitAndStart() (err error) { + if err = sdb.tx.Commit(); err != nil { + return err + } + + tx, err := 
sdb.db.BeginRw(sdb.ctx)
+	if err != nil {
+		return err
+	}
+
+	sdb.SetTx(tx)
+	return nil
+}

From e86790362b4151b084c996fdf1b4e12e77b14395 Mon Sep 17 00:00:00 2001
From: Kamen Stoykov
Date: Mon, 29 Jul 2024 15:16:01 +0000
Subject: [PATCH 05/33] organize var declarations

---
 zk/stages/stage_sequence_execute.go           | 69 +++++++++----------
 ... => stage_sequence_execute_batch_state.go} |  2 +-
 2 files changed, 34 insertions(+), 37 deletions(-)
 rename zk/stages/{stage_sequence_execute_l1recovery.go => stage_sequence_execute_batch_state.go} (99%)

diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go
index 5bc07e7bed0..afba69aee51 100644
--- a/zk/stages/stage_sequence_execute.go
+++ b/zk/stages/stage_sequence_execute.go
@@ -60,6 +60,7 @@ func SpawnSequencingStage(
 	}
 
 	l1Recovery := cfg.zk.L1SyncStartBlock > 0
+	thisBatch := prepareBatchNumber(lastBatch, isLastBatchPariallyProcessed)
 
 	// injected batch
 	if executionAt == 0 {
@@ -90,19 +91,6 @@ func SpawnSequencingStage(
 		return err
 	}
 
-	var header *types.Header
-	var parentBlock *types.Block
-
-	var blockState = newBlockState(l1Recovery)
-
-	thisBatch := prepareBatchNumber(lastBatch, isLastBatchPariallyProcessed)
-	hasAnyTransactionsInThisBatch := false
-
-	batchTicker := time.NewTicker(cfg.zk.SequencerBatchSealTime)
-	defer batchTicker.Stop()
-	nonEmptyBatchTimer := time.NewTicker(cfg.zk.SequencerNonEmptyBatchSealTime)
-	defer nonEmptyBatchTimer.Stop()
-
 	batchCounters, err := prepareBatchCounters(&cfg, sdb, thisBatch, forkId, isLastBatchPariallyProcessed, l1Recovery)
 	if err != nil {
 		return err
@@ -114,9 +102,36 @@ func SpawnSequencingStage(
 		return err // err here could be nil as well
 	}
 
+	batchTicker := time.NewTicker(cfg.zk.SequencerBatchSealTime)
+	defer batchTicker.Stop()
+	nonEmptyBatchTimer := time.NewTicker(cfg.zk.SequencerNonEmptyBatchSealTime)
+	defer nonEmptyBatchTimer.Stop()
+
+	var builtBlocks []uint64
+
+	hasExecutorForThisBatch := !isLastBatchPariallyProcessed && cfg.zk.HasExecutors()
+	hasAnyTransactionsInThisBatch := false
 	runLoopBlocks := true
 	lastStartedBn := executionAt - 1
 	yielded := mapset.NewSet[[32]byte]()
+	blockState := newBlockState(l1Recovery)
+
+	batchVerifier := NewBatchVerifier(cfg.zk, hasExecutorForThisBatch, cfg.legacyVerifier, forkId)
+	streamWriter := &SequencerBatchStreamWriter{
+		ctx: ctx,
+		db: cfg.db,
+		logPrefix: logPrefix,
+		batchVerifier: batchVerifier,
+		sdb: sdb,
+		streamServer: cfg.datastreamServer,
+		hasExecutors: hasExecutorForThisBatch,
+		lastBatch: lastBatch,
+	}
+
+	blockDataSizeChecker := NewBlockDataChecker()
+
+	prevHeader := rawdb.ReadHeaderByNumber(sdb.tx, executionAt)
+	batchDataOverflow := false
 
 	limboHeaderTimestamp, limboTxHash := cfg.txPool.GetLimboTxHash(thisBatch)
 	limboRecovery := limboTxHash != nil
@@ -138,7 +153,8 @@ func SpawnSequencingStage(
 		if err = blockState.l1RecoveryData.loadNextBatchData(sdb, thisBatch, forkId); err != nil {
 			return err
 		}
-		if blockState.l1RecoveryData.hasAnyDecodedBlocks() {
+
+		if !blockState.l1RecoveryData.hasAnyDecodedBlocks() {
 			log.Info(fmt.Sprintf("[%s] L1 recovery has completed!", logPrefix), "batch", thisBatch)
 			time.Sleep(1 * time.Second)
 			return nil
@@ -155,29 +171,11 @@ func SpawnSequencingStage(
 		log.Info(fmt.Sprintf("[%s] Continuing unfinished batch %d from block %d", logPrefix, thisBatch, executionAt))
 	}
 
-	hasExecutorForThisBatch := !isLastBatchPariallyProcessed && cfg.zk.HasExecutors()
-	batchVerifier := NewBatchVerifier(cfg.zk, hasExecutorForThisBatch, cfg.legacyVerifier, forkId)
-	streamWriter := &SequencerBatchStreamWriter{
-
ctx: ctx, - db: cfg.db, - logPrefix: logPrefix, - batchVerifier: batchVerifier, - sdb: sdb, - streamServer: cfg.datastreamServer, - hasExecutors: hasExecutorForThisBatch, - lastBatch: lastBatch, - } - - blockDataSizeChecker := NewBlockDataChecker() - - prevHeader := rawdb.ReadHeaderByNumber(sdb.tx, executionAt) - batchDataOverflow := false - var builtBlocks []uint64 - var block *types.Block for blockNumber := executionAt + 1; runLoopBlocks; blockNumber++ { if l1Recovery { - if !blockState.loadDataByDecodedBlockIndex(blockNumber - (executionAt + 1)) { + didLoadedAnyData := blockState.loadDataByDecodedBlockIndex(blockNumber - (executionAt + 1)) + if !didLoadedAnyData { runLoopBlocks = false break } @@ -193,12 +191,11 @@ func SpawnSequencingStage( lastStartedBn = blockNumber blockState.usedBlockElements.resetBlockBuildingArrays() - header, parentBlock, err = prepareHeader(sdb.tx, blockNumber-1, blockState.getDeltaTimestamp(), limboHeaderTimestamp, forkId, blockState.getCoinbase(&cfg)) + header, parentBlock, err := prepareHeader(sdb.tx, blockNumber-1, blockState.getDeltaTimestamp(), limboHeaderTimestamp, forkId, blockState.getCoinbase(&cfg)) if err != nil { return err } - // run this only once the first time, do not add it on rerun if batchDataOverflow = blockDataSizeChecker.AddBlockStartData(uint32(prevHeader.Time-header.Time), uint32(l1InfoIndex)); batchDataOverflow { log.Info(fmt.Sprintf("[%s] BatchL2Data limit reached. Stopping.", logPrefix), "blockNumber", blockNumber) break diff --git a/zk/stages/stage_sequence_execute_l1recovery.go b/zk/stages/stage_sequence_execute_batch_state.go similarity index 99% rename from zk/stages/stage_sequence_execute_l1recovery.go rename to zk/stages/stage_sequence_execute_batch_state.go index aeccc1a3299..8f5cef54313 100644 --- a/zk/stages/stage_sequence_execute_l1recovery.go +++ b/zk/stages/stage_sequence_execute_batch_state.go @@ -117,7 +117,7 @@ func (l1rd *L1RecoveryData) loadNextBatchData(sdb *stageDb, thisBatch, forkId ui } func (l1rd *L1RecoveryData) hasAnyDecodedBlocks() bool { - return l1rd.decodedBlocksSize == 0 + return l1rd.decodedBlocksSize > 0 } func (l1rd *L1RecoveryData) getInfoTreeIndex(sdb *stageDb) (uint64, error) { From 96558b270232085cac0738cfe765184dc13ceaa4 Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Mon, 29 Jul 2024 15:50:32 +0000 Subject: [PATCH 06/33] restore interface --- zk/stages/stage_sequence_execute_utils.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/zk/stages/stage_sequence_execute_utils.go b/zk/stages/stage_sequence_execute_utils.go index bde97dce028..630129e49d2 100644 --- a/zk/stages/stage_sequence_execute_utils.go +++ b/zk/stages/stage_sequence_execute_utils.go @@ -163,7 +163,15 @@ func (sCfg *SequenceBlockCfg) toErigonExecuteBlockCfg() stagedsync.ExecuteBlockC ) } -func prepareForkId(lastBatch, executionAt uint64, hermezDb *hermez_db.HermezDb) (uint64, error) { +type forkDb interface { + GetAllForkHistory() ([]uint64, []uint64, error) + GetLatestForkHistory() (uint64, uint64, error) + GetForkId(batch uint64) (uint64, error) + WriteForkIdBlockOnce(forkId, block uint64) error + WriteForkId(batch, forkId uint64) error +} + +func prepareForkId(lastBatch, executionAt uint64, hermezDb forkDb) (uint64, error) { var err error var latest uint64 From 944a4adb8e18b4b88a5d945a13846b106e5fa930 Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Tue, 30 Jul 2024 09:22:17 +0000 Subject: [PATCH 07/33] organize stage in struct --- zk/stages/stage_sequence_execute.go | 146 +++++------- 
zk/stages/stage_sequence_execute_batch.go | 94 ++++---- .../stage_sequence_execute_batch_state.go | 148 ------------ zk/stages/stage_sequence_execute_blocks.go | 98 ++++---- .../stage_sequence_execute_data_stream.go | 14 +- .../stage_sequence_execute_injected_batch.go | 44 ++-- zk/stages/stage_sequence_execute_state.go | 210 ++++++++++++++++++ zk/stages/stage_sequence_execute_utils.go | 8 +- zk/stages/stage_sequencer_executor_verify.go | 4 +- 9 files changed, 387 insertions(+), 379 deletions(-) delete mode 100644 zk/stages/stage_sequence_execute_batch_state.go create mode 100644 zk/stages/stage_sequence_execute_state.go diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index 68d171e10ce..032a6cc880f 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -9,7 +9,6 @@ import ( "github.com/gateway-fm/cdk-erigon-lib/kv" "github.com/ledgerwatch/log/v3" - mapset "github.com/deckarep/golang-set/v2" "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" @@ -59,31 +58,27 @@ func SpawnSequencingStage( return err } - l1Recovery := cfg.zk.L1SyncStartBlock > 0 - thisBatch := prepareBatchNumber(lastBatch, isLastBatchPariallyProcessed) + batchContext := newBatchContext(ctx, &cfg, s, sdb) + batchState := newBatchState(forkId, prepareBatchNumber(lastBatch, isLastBatchPariallyProcessed), !isLastBatchPariallyProcessed && cfg.zk.HasExecutors(), cfg.zk.L1SyncStartBlock > 0) // injected batch if executionAt == 0 { - if err = processInjectedInitialBatch(ctx, cfg, s, sdb, forkId, l1Recovery); err != nil { + if err = processInjectedInitialBatch(batchContext, batchState); err != nil { return err } - if err = cfg.datastreamServer.WriteWholeBatchToStream(logPrefix, sdb.tx, sdb.hermezDb.HermezDbReader, lastBatch, injectedBatchNumber); err != nil { + if err = cfg.datastreamServer.WriteWholeBatchToStream(logPrefix, sdb.tx, sdb.hermezDb.HermezDbReader, lastBatch, injectedBatchBatchNumber); err != nil { return err } - if err = sdb.tx.Commit(); err != nil { - return err - } - - return nil + return sdb.tx.Commit() } // handle case where batch wasn't closed properly // close it before starting a new one // this occurs when sequencer was switched from syncer or sequencer datastream files were deleted // and datastream was regenerated - if err = finalizeLastBatchInDatastreamIfNotFinalized(logPrefix, sdb, cfg.datastreamServer, lastBatch, executionAt); err != nil { + if err = finalizeLastBatchInDatastreamIfNotFinalized(batchContext, batchState, executionAt); err != nil { return err } @@ -91,15 +86,15 @@ func SpawnSequencingStage( return err } - batchCounters, err := prepareBatchCounters(&cfg, sdb, thisBatch, forkId, isLastBatchPariallyProcessed, l1Recovery) + batchCounters, err := prepareBatchCounters(batchContext, batchState, isLastBatchPariallyProcessed) if err != nil { return err } // check if we just unwound from a bad executor response and if we did just close the batch here - handled, err := doInstantCloseIfNeeded(logPrefix, &cfg, sdb, thisBatch, forkId, batchCounters) + handled, err := doInstantCloseIfNeeded(batchContext, batchState, batchCounters) if err != nil || handled { - return err // err here could be nil as well + return err } batchTicker := time.NewTicker(cfg.zk.SequencerBatchSealTime) @@ -107,75 +102,67 @@ func SpawnSequencingStage( nonEmptyBatchTimer := time.NewTicker(cfg.zk.SequencerNonEmptyBatchSealTime) defer nonEmptyBatchTimer.Stop() - var builtBlocks []uint64 - - 
hasExecutorForThisBatch := !isLastBatchPariallyProcessed && cfg.zk.HasExecutors() - hasAnyTransactionsInThisBatch := false runLoopBlocks := true - yielded := mapset.NewSet[[32]byte]() - blockState := newBlockState(l1Recovery) - batchVerifier := NewBatchVerifier(cfg.zk, hasExecutorForThisBatch, cfg.legacyVerifier, forkId) + batchVerifier := NewBatchVerifier(cfg.zk, batchState.hasExecutorForThisBatch, cfg.legacyVerifier, batchState.forkId) streamWriter := &SequencerBatchStreamWriter{ ctx: ctx, - db: cfg.db, logPrefix: logPrefix, batchVerifier: batchVerifier, sdb: sdb, streamServer: cfg.datastreamServer, - hasExecutors: hasExecutorForThisBatch, + hasExecutors: batchState.hasExecutorForThisBatch, lastBatch: lastBatch, } blockDataSizeChecker := NewBlockDataChecker() - prevHeader := rawdb.ReadHeaderByNumber(sdb.tx, executionAt) batchDataOverflow := false - limboHeaderTimestamp, limboTxHash := cfg.txPool.GetLimboTxHash(thisBatch) + limboHeaderTimestamp, limboTxHash := cfg.txPool.GetLimboTxHash(batchState.batchNumber) limboRecovery := limboTxHash != nil - isAnyRecovery := l1Recovery || limboRecovery + isAnyRecovery := batchState.isL1Recovery() || limboRecovery // if not limbo set the limboHeaderTimestamp to the "default" value for "prepareHeader" function if !limboRecovery { limboHeaderTimestamp = math.MaxUint64 } - if l1Recovery { - if cfg.zk.L1SyncStopBatch > 0 && thisBatch > cfg.zk.L1SyncStopBatch { - log.Info(fmt.Sprintf("[%s] L1 recovery has completed!", logPrefix), "batch", thisBatch) + if batchState.isL1Recovery() { + if cfg.zk.L1SyncStopBatch > 0 && batchState.batchNumber > cfg.zk.L1SyncStopBatch { + log.Info(fmt.Sprintf("[%s] L1 recovery has completed!", logPrefix), "batch", batchState.batchNumber) time.Sleep(1 * time.Second) return nil } // let's check if we have any L1 data to recover - if err = blockState.l1RecoveryData.loadNextBatchData(sdb, thisBatch, forkId); err != nil { + if err = batchState.batchL1RecoveryData.loadBatchData(sdb, batchState.batchNumber, batchState.forkId); err != nil { return err } - if !blockState.l1RecoveryData.hasAnyDecodedBlocks() { - log.Info(fmt.Sprintf("[%s] L1 recovery has completed!", logPrefix), "batch", thisBatch) + if !batchState.batchL1RecoveryData.hasAnyDecodedBlocks() { + log.Info(fmt.Sprintf("[%s] L1 recovery has completed!", logPrefix), "batch", batchState.batchNumber) time.Sleep(1 * time.Second) return nil } - if handled, err := doCheckForBadBatch(logPrefix, sdb, blockState.l1RecoveryData, executionAt, thisBatch, forkId); err != nil || handled { + if handled, err := doCheckForBadBatch(batchContext, batchState, executionAt); err != nil || handled { return err } } if !isLastBatchPariallyProcessed { - log.Info(fmt.Sprintf("[%s] Starting batch %d...", logPrefix, thisBatch)) + log.Info(fmt.Sprintf("[%s] Starting batch %d...", logPrefix, batchState.batchNumber)) } else { - log.Info(fmt.Sprintf("[%s] Continuing unfinished batch %d from block %d", logPrefix, thisBatch, executionAt)) + log.Info(fmt.Sprintf("[%s] Continuing unfinished batch %d from block %d", logPrefix, batchState.batchNumber, executionAt)) } var block *types.Block for blockNumber := executionAt + 1; runLoopBlocks; blockNumber++ { - log.Info(fmt.Sprintf("[%s] Starting block %d (forkid %v)...", logPrefix, blockNumber, forkId)) + log.Info(fmt.Sprintf("[%s] Starting block %d (forkid %v)...", logPrefix, blockNumber, batchState.forkId)) - if l1Recovery { - didLoadedAnyDataForRecovery := blockState.loadDataByDecodedBlockIndex(blockNumber - (executionAt + 1)) + if batchState.isL1Recovery() { + 
didLoadedAnyDataForRecovery := batchState.loadBlockL1RecoveryData(blockNumber - (executionAt + 1)) if !didLoadedAnyDataForRecovery { runLoopBlocks = false break @@ -187,12 +174,12 @@ func SpawnSequencingStage( return err } - header, parentBlock, err := prepareHeader(sdb.tx, blockNumber-1, blockState.getDeltaTimestamp(), limboHeaderTimestamp, forkId, blockState.getCoinbase(&cfg)) + header, parentBlock, err := prepareHeader(sdb.tx, blockNumber-1, batchState.blockState.getDeltaTimestamp(), limboHeaderTimestamp, batchState.forkId, batchState.getCoinbase(&cfg)) if err != nil { return err } - if batchDataOverflow = blockDataSizeChecker.AddBlockStartData(uint32(prevHeader.Time-header.Time), uint32(l1InfoIndex)); batchDataOverflow { + if batchDataOverflow = blockDataSizeChecker.AddBlockStartData(); batchDataOverflow { log.Info(fmt.Sprintf("[%s] BatchL2Data limit reached. Stopping.", logPrefix), "blockNumber", blockNumber) break } @@ -208,7 +195,7 @@ func SpawnSequencingStage( break } - infoTreeIndexProgress, l1TreeUpdate, l1TreeUpdateIndex, l1BlockHash, ger, shouldWriteGerToContract, err := prepareL1AndInfoTreeRelatedStuff(sdb, blockState, header.Time) + infoTreeIndexProgress, l1TreeUpdate, l1TreeUpdateIndex, l1BlockHash, ger, shouldWriteGerToContract, err := prepareL1AndInfoTreeRelatedStuff(sdb, batchState, header.Time) if err != nil { return err } @@ -217,7 +204,7 @@ func SpawnSequencingStage( ibs := state.New(sdb.stateReader) getHashFn := core.GetHashFn(header, func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(sdb.tx, hash, number) }) blockContext := core.NewEVMBlockContext(header, getHashFn, cfg.engine, &cfg.zk.AddressSequencer, parentBlock.ExcessDataGas()) - blockState.usedBlockElements.resetBlockBuildingArrays() + batchState.blockState.builtBlockElements.resetBlockBuildingArrays() parentRoot := parentBlock.Root() if err = handleStateForNewBlockStarting( @@ -225,7 +212,7 @@ func SpawnSequencingStage( sdb.hermezDb, ibs, blockNumber, - thisBatch, + batchState.batchNumber, header.Time, &parentRoot, l1TreeUpdate, @@ -263,45 +250,45 @@ func SpawnSequencingStage( break LOOP_TRANSACTIONS } case <-nonEmptyBatchTimer.C: - if !isAnyRecovery && hasAnyTransactionsInThisBatch { + if !isAnyRecovery && batchState.hasAnyTransactionsInThisBatch { runLoopBlocks = false break LOOP_TRANSACTIONS } default: if limboRecovery { - blockState.blockTransactions, err = getLimboTransaction(ctx, cfg, limboTxHash) + batchState.blockState.transactionsForInclusion, err = getLimboTransaction(ctx, cfg, limboTxHash) if err != nil { return err } - } else if !l1Recovery { - blockState.blockTransactions, err = getNextPoolTransactions(ctx, cfg, executionAt, forkId, yielded) + } else if !batchState.isL1Recovery() { + batchState.blockState.transactionsForInclusion, err = getNextPoolTransactions(ctx, cfg, executionAt, batchState.forkId, batchState.yieldedTransactions) if err != nil { return err } } - if len(blockState.blockTransactions) == 0 { + if len(batchState.blockState.transactionsForInclusion) == 0 { time.Sleep(250 * time.Millisecond) } else { - log.Trace(fmt.Sprintf("[%s] Yielded transactions from the pool", logPrefix), "txCount", len(blockState.blockTransactions)) + log.Trace(fmt.Sprintf("[%s] Yielded transactions from the pool", logPrefix), "txCount", len(batchState.blockState.transactionsForInclusion)) } var receipt *types.Receipt var execResult *core.ExecutionResult - for i, transaction := range blockState.blockTransactions { + for i, transaction := range 
batchState.blockState.transactionsForInclusion { txHash := transaction.Hash() - effectiveGas := blockState.getL1EffectiveGases(cfg, i) + effectiveGas := batchState.blockState.getL1EffectiveGases(cfg, i) // The copying of this structure is intentional backupDataSizeChecker := *blockDataSizeChecker - if receipt, execResult, anyOverflow, err = attemptAddTransaction(cfg, sdb, ibs, batchCounters, &blockContext, header, transaction, effectiveGas, l1Recovery, forkId, l1InfoIndex, &backupDataSizeChecker); err != nil { + if receipt, execResult, anyOverflow, err = attemptAddTransaction(cfg, sdb, ibs, batchCounters, &blockContext, header, transaction, effectiveGas, batchState.isL1Recovery(), batchState.forkId, l1InfoIndex, &backupDataSizeChecker); err != nil { if limboRecovery { panic("limbo transaction has already been executed once so they must not fail while re-executing") } // if we are in recovery just log the error as a warning. If the data is on the L1 then we should consider it as confirmed. // The executor/prover would simply skip a TX with an invalid nonce for example so we don't need to worry about that here. - if l1Recovery { + if batchState.isL1Recovery() { log.Warn(fmt.Sprintf("[%s] error adding transaction to batch during recovery: %v", logPrefix, err), "hash", txHash, "to", transaction.GetTo(), @@ -319,8 +306,8 @@ func SpawnSequencingStage( panic("limbo transaction has already been executed once so they must not overflow counters while re-executing") } - if !l1Recovery { - log.Info(fmt.Sprintf("[%s] overflowed adding transaction to batch", logPrefix), "batch", thisBatch, "tx-hash", txHash, "has any transactions in this batch", hasAnyTransactionsInThisBatch) + if !batchState.isL1Recovery() { + log.Info(fmt.Sprintf("[%s] overflowed adding transaction to batch", logPrefix), "batch", batchState.batchNumber, "tx-hash", txHash, "has any transactions in this batch", batchState.hasAnyTransactionsInThisBatch) /* There are two cases when overflow could occur. 1. The block DOES not contains any transactions. @@ -331,7 +318,7 @@ func SpawnSequencingStage( In this case, we just have to remove the transaction that overflowed the zk-counters and all transactions after it, from the yielded set. 
This removal will ensure that these transaction could be added in the next block(s) */ - if !hasAnyTransactionsInThisBatch { + if !batchState.hasAnyTransactionsInThisBatch { cfg.txPool.MarkForDiscardFromPendingBest(txHash) log.Trace(fmt.Sprintf("single transaction %s overflow counters", txHash)) } @@ -343,19 +330,18 @@ func SpawnSequencingStage( if err == nil { blockDataSizeChecker = &backupDataSizeChecker - yielded.Remove(txHash) - blockState.usedBlockElements.onFinishAddingTransaction(transaction, receipt, execResult, effectiveGas) + //TODO: Does no make any sense to remove last added tx + batchState.yieldedTransactions.Remove(txHash) + batchState.onAddedTransaction(transaction, receipt, execResult, effectiveGas) - hasAnyTransactionsInThisBatch = true nonEmptyBatchTimer.Reset(cfg.zk.SequencerNonEmptyBatchSealTime) - log.Debug(fmt.Sprintf("[%s] Finish block %d with %s transaction", logPrefix, blockNumber, txHash.Hex())) } } - if l1Recovery { + if batchState.isL1Recovery() { // just go into the normal loop waiting for new transactions to signal that the recovery // has finished as far as it can go - if blockState.isThereAnyTransactionsToRecover() { + if batchState.isThereAnyTransactionsToRecover() { log.Info(fmt.Sprintf("[%s] L1 recovery no more transactions to recover", logPrefix)) } @@ -373,7 +359,7 @@ func SpawnSequencingStage( return err } - block, err = doFinishBlockAndUpdateState(ctx, cfg, s, sdb, ibs, header, parentBlock, forkId, thisBatch, ger, l1BlockHash, &blockState.usedBlockElements, infoTreeIndexProgress, l1Recovery) + block, err = doFinishBlockAndUpdateState(batchContext, ibs, header, parentBlock, batchState, ger, l1BlockHash, infoTreeIndexProgress) if err != nil { return err } @@ -392,17 +378,17 @@ func SpawnSequencingStage( } if gasPerSecond != 0 { - log.Info(fmt.Sprintf("[%s] Finish block %d with %d transactions... (%d gas/s)", logPrefix, blockNumber, len(blockState.usedBlockElements.transactions), int(gasPerSecond))) + log.Info(fmt.Sprintf("[%s] Finish block %d with %d transactions... 
(%d gas/s)", logPrefix, blockNumber, len(batchState.blockState.builtBlockElements.transactions), int(gasPerSecond))) } else { - log.Info(fmt.Sprintf("[%s] Finish block %d with %d transactions...", logPrefix, blockNumber, len(blockState.usedBlockElements.transactions))) + log.Info(fmt.Sprintf("[%s] Finish block %d with %d transactions...", logPrefix, blockNumber, len(batchState.blockState.builtBlockElements.transactions))) } - err = sdb.hermezDb.WriteBatchCounters(thisBatch, batchCounters.CombineCollectorsNoChanges().UsedAsMap()) + err = sdb.hermezDb.WriteBatchCounters(batchState.batchNumber, batchCounters.CombineCollectorsNoChanges().UsedAsMap()) if err != nil { return err } - err = sdb.hermezDb.WriteIsBatchPartiallyProcessed(thisBatch) + err = sdb.hermezDb.WriteIsBatchPartiallyProcessed(batchState.batchNumber) if err != nil { return err } @@ -413,40 +399,30 @@ func SpawnSequencingStage( defer sdb.tx.Rollback() // add a check to the verifier and also check for responses - builtBlocks = append(builtBlocks, blockNumber) - batchVerifier.AddNewCheck(thisBatch, blockNumber, block.Root(), batchCounters.CombineCollectorsNoChanges().UsedAsMap(), builtBlocks) + batchState.onBuiltBlock(blockNumber) + batchVerifier.AddNewCheck(batchState.batchNumber, blockNumber, block.Root(), batchCounters.CombineCollectorsNoChanges().UsedAsMap(), batchState.builtBlocks) // check for new responses from the verifier - needsUnwind, _, err := updateStreamAndCheckRollback(logPrefix, sdb, streamWriter, thisBatch, forkId, u) - if err != nil { + needsUnwind, _, err := updateStreamAndCheckRollback(batchContext, batchState, streamWriter, u) + if err != nil || needsUnwind { return err } - if needsUnwind { - return nil - } } for { - needsUnwind, remaining, err := updateStreamAndCheckRollback(logPrefix, sdb, streamWriter, thisBatch, forkId, u) - if err != nil { + needsUnwind, remaining, err := updateStreamAndCheckRollback(batchContext, batchState, streamWriter, u) + if err != nil || needsUnwind { return err } - if needsUnwind { - return nil - } if remaining == 0 { break } time.Sleep(50 * time.Millisecond) } - if err = runBatchLastSteps(logPrefix, cfg.datastreamServer, sdb, thisBatch, block.NumberU64(), batchCounters); err != nil { - return err - } - - if err = sdb.tx.Commit(); err != nil { + if err = runBatchLastSteps(batchContext, batchState.batchNumber, block.NumberU64(), batchCounters); err != nil { return err } - return nil + return sdb.tx.Commit() } diff --git a/zk/stages/stage_sequence_execute_batch.go b/zk/stages/stage_sequence_execute_batch.go index 7007d7fcd74..1ec8838c50f 100644 --- a/zk/stages/stage_sequence_execute_batch.go +++ b/zk/stages/stage_sequence_execute_batch.go @@ -7,7 +7,6 @@ import ( "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/zk/datastream/server" "github.com/ledgerwatch/erigon/zk/utils" "github.com/ledgerwatch/log/v3" ) @@ -20,35 +19,35 @@ func prepareBatchNumber(lastBatch uint64, isLastBatchPariallyProcessed bool) uin return lastBatch + 1 } -func prepareBatchCounters(cfg *SequenceBlockCfg, sdb *stageDb, thisBatch, forkId uint64, isLastBatchPariallyProcessed, l1Recovery bool) (*vm.BatchCounterCollector, error) { +func prepareBatchCounters(batchContext *BatchContext, batchState *BatchState, isLastBatchPariallyProcessed bool) (*vm.BatchCounterCollector, error) { var intermediateUsedCounters *vm.Counters if isLastBatchPariallyProcessed { - intermediateCountersMap, found, err := 
sdb.hermezDb.GetBatchCounters(thisBatch) + intermediateCountersMap, found, err := batchContext.sdb.hermezDb.GetBatchCounters(batchState.batchNumber) if err != nil { return nil, err } if !found { - return nil, fmt.Errorf("intermediate counters not found for batch %d", thisBatch) + return nil, fmt.Errorf("intermediate counters not found for batch %d", batchState.batchNumber) } intermediateUsedCounters = vm.NewCountersFromUsedMap(intermediateCountersMap) } - return vm.NewBatchCounterCollector(sdb.smt.GetDepth(), uint16(forkId), cfg.zk.VirtualCountersSmtReduction, cfg.zk.ShouldCountersBeUnlimited(l1Recovery), intermediateUsedCounters), nil + return vm.NewBatchCounterCollector(batchContext.sdb.smt.GetDepth(), uint16(batchState.forkId), batchContext.cfg.zk.VirtualCountersSmtReduction, batchContext.cfg.zk.ShouldCountersBeUnlimited(batchState.isL1Recovery()), intermediateUsedCounters), nil } -func doInstantCloseIfNeeded(logPrefix string, cfg *SequenceBlockCfg, sdb *stageDb, thisBatch, forkId uint64, batchCounters *vm.BatchCounterCollector) (bool, error) { - instantClose, err := sdb.hermezDb.GetJustUnwound(thisBatch) +func doInstantCloseIfNeeded(batchContext *BatchContext, batchState *BatchState, batchCounters *vm.BatchCounterCollector) (bool, error) { + instantClose, err := batchContext.sdb.hermezDb.GetJustUnwound(batchState.batchNumber) if err != nil || !instantClose { return false, err // err here could be nil as well } - if err = sdb.hermezDb.DeleteJustUnwound(thisBatch); err != nil { + if err = batchContext.sdb.hermezDb.DeleteJustUnwound(batchState.batchNumber); err != nil { return false, err } // lets first check if we actually wrote any blocks in this batch - blocks, err := sdb.hermezDb.GetL2BlockNosByBatch(thisBatch) + blocks, err := batchContext.sdb.hermezDb.GetL2BlockNosByBatch(batchState.batchNumber) if err != nil { return false, err } @@ -56,39 +55,36 @@ func doInstantCloseIfNeeded(logPrefix string, cfg *SequenceBlockCfg, sdb *stageD // only close this batch down if we actually made any progress in it, otherwise // just continue processing as normal and recreate the batch from scratch if len(blocks) > 0 { - if err = runBatchLastSteps(logPrefix, cfg.datastreamServer, sdb, thisBatch, blocks[len(blocks)-1], batchCounters); err != nil { + if err = runBatchLastSteps(batchContext, batchState.batchNumber, blocks[len(blocks)-1], batchCounters); err != nil { return false, err } - if err = stages.SaveStageProgress(sdb.tx, stages.HighestSeenBatchNumber, thisBatch); err != nil { + if err = stages.SaveStageProgress(batchContext.sdb.tx, stages.HighestSeenBatchNumber, batchState.batchNumber); err != nil { return false, err } - if err = sdb.hermezDb.WriteForkId(thisBatch, forkId); err != nil { + if err = batchContext.sdb.hermezDb.WriteForkId(batchState.batchNumber, batchState.forkId); err != nil { return false, err } - if err = sdb.tx.Commit(); err != nil { - return false, err - } - - return true, nil + err = batchContext.sdb.tx.Commit() + return err == nil, err } return false, nil } -func doCheckForBadBatch(logPrefix string, sdb *stageDb, l1rd *L1RecoveryData, thisBlock, thisBatch, forkId uint64) (bool, error) { - infoTreeIndex, err := l1rd.getInfoTreeIndex(sdb) +func doCheckForBadBatch(batchContext *BatchContext, batchState *BatchState, thisBlock uint64) (bool, error) { + infoTreeIndex, err := batchState.batchL1RecoveryData.getInfoTreeIndex(batchContext.sdb) if err != nil { return false, err } // now let's detect a bad batch and skip it if we have to - currentBlock, err := 
rawdb.ReadBlockByNumber(sdb.tx, thisBlock) + currentBlock, err := rawdb.ReadBlockByNumber(batchContext.sdb.tx, thisBlock) if err != nil { return false, err } - badBatch, err := checkForBadBatch(thisBatch, sdb.hermezDb, currentBlock.Time(), infoTreeIndex, l1rd.nextBatchData.LimitTimestamp, l1rd.nextBatchData.DecodedData) + badBatch, err := checkForBadBatch(batchState.batchNumber, batchContext.sdb.hermezDb, currentBlock.Time(), infoTreeIndex, batchState.batchL1RecoveryData.recoveredBatchData.LimitTimestamp, batchState.batchL1RecoveryData.recoveredBatchData.DecodedData) if err != nil { return false, err } @@ -97,50 +93,48 @@ func doCheckForBadBatch(logPrefix string, sdb *stageDb, l1rd *L1RecoveryData, th return false, nil } - log.Info(fmt.Sprintf("[%s] Skipping bad batch %d...", logPrefix, thisBatch)) + log.Info(fmt.Sprintf("[%s] Skipping bad batch %d...", batchContext.s.LogPrefix(), batchState.batchNumber)) // store the fact that this batch was invalid during recovery - will be used for the stream later - if err = sdb.hermezDb.WriteInvalidBatch(thisBatch); err != nil { + if err = batchContext.sdb.hermezDb.WriteInvalidBatch(batchState.batchNumber); err != nil { return false, err } - if err = sdb.hermezDb.WriteBatchCounters(thisBatch, map[string]int{}); err != nil { + if err = batchContext.sdb.hermezDb.WriteBatchCounters(batchState.batchNumber, map[string]int{}); err != nil { return false, err } - if err = sdb.hermezDb.DeleteIsBatchPartiallyProcessed(thisBatch); err != nil { + if err = batchContext.sdb.hermezDb.DeleteIsBatchPartiallyProcessed(batchState.batchNumber); err != nil { return false, err } - if err = stages.SaveStageProgress(sdb.tx, stages.HighestSeenBatchNumber, thisBatch); err != nil { + if err = stages.SaveStageProgress(batchContext.sdb.tx, stages.HighestSeenBatchNumber, batchState.batchNumber); err != nil { return false, err } - if err = sdb.hermezDb.WriteForkId(thisBatch, forkId); err != nil { + if err = batchContext.sdb.hermezDb.WriteForkId(batchState.batchNumber, batchState.forkId); err != nil { return false, err } - if err = sdb.tx.Commit(); err != nil { + if err = batchContext.sdb.tx.Commit(); err != nil { return false, err } return true, nil } func updateStreamAndCheckRollback( - logPrefix string, - sdb *stageDb, + batchContext *BatchContext, + batchState *BatchState, streamWriter *SequencerBatchStreamWriter, - batchNumber uint64, - forkId uint64, u stagedsync.Unwinder, ) (bool, int, error) { - committed, remaining, err := streamWriter.CommitNewUpdates(forkId) + committed, remaining, err := streamWriter.CommitNewUpdates(batchState.forkId) if err != nil { return false, remaining, err } for _, commit := range committed { if !commit.Valid { // we are about to unwind so place the marker ready for this to happen - if err = sdb.hermezDb.WriteJustUnwound(batchNumber); err != nil { + if err = batchContext.sdb.hermezDb.WriteJustUnwound(batchState.batchNumber); err != nil { return false, 0, err } // capture the fork otherwise when the loop starts again to close // off the batch it will detect it as a fork upgrade - if err = sdb.hermezDb.WriteForkId(batchNumber, forkId); err != nil { + if err = batchContext.sdb.hermezDb.WriteForkId(batchState.batchNumber, batchState.forkId); err != nil { return false, 0, err } @@ -148,16 +142,16 @@ func updateStreamAndCheckRollback( // for unwind we supply the block number X-1 of the block we want to remove, but supply the hash of the block // causing the unwind. 
- unwindHeader := rawdb.ReadHeaderByNumber(sdb.tx, commit.BlockNumber) + unwindHeader := rawdb.ReadHeaderByNumber(batchContext.sdb.tx, commit.BlockNumber) if unwindHeader == nil { return false, 0, fmt.Errorf("could not find header for block %d", commit.BlockNumber) } - if err = sdb.tx.Commit(); err != nil { + if err = batchContext.sdb.tx.Commit(); err != nil { return false, 0, err } - log.Warn(fmt.Sprintf("[%s] Block is invalid - rolling back", logPrefix), "badBlock", commit.BlockNumber, "unwindTo", unwindTo, "root", unwindHeader.Root) + log.Warn(fmt.Sprintf("[%s] Block is invalid - rolling back", batchContext.s.LogPrefix()), "badBlock", commit.BlockNumber, "unwindTo", unwindTo, "root", unwindHeader.Root) u.UnwindTo(unwindTo, unwindHeader.Hash()) return true, 0, nil @@ -168,14 +162,12 @@ func updateStreamAndCheckRollback( } func runBatchLastSteps( - logPrefix string, - datastreamServer *server.DataStreamServer, - sdb *stageDb, + batchContext *BatchContext, thisBatch uint64, lastStartedBn uint64, batchCounters *vm.BatchCounterCollector, ) error { - l1InfoIndex, err := sdb.hermezDb.GetBlockL1InfoTreeIndex(lastStartedBn) + l1InfoIndex, err := batchContext.sdb.hermezDb.GetBlockL1InfoTreeIndex(lastStartedBn) if err != nil { return err } @@ -185,39 +177,39 @@ func runBatchLastSteps( return err } - log.Info(fmt.Sprintf("[%s] counters consumed", logPrefix), "batch", thisBatch, "counts", counters.UsedAsString()) + log.Info(fmt.Sprintf("[%s] counters consumed", batchContext.s.LogPrefix()), "batch", thisBatch, "counts", counters.UsedAsString()) - if err = sdb.hermezDb.WriteBatchCounters(thisBatch, counters.UsedAsMap()); err != nil { + if err = batchContext.sdb.hermezDb.WriteBatchCounters(thisBatch, counters.UsedAsMap()); err != nil { return err } - if err := sdb.hermezDb.DeleteIsBatchPartiallyProcessed(thisBatch); err != nil { + if err := batchContext.sdb.hermezDb.DeleteIsBatchPartiallyProcessed(thisBatch); err != nil { return err } // Local Exit Root (ler): read s/c storage every batch to store the LER for the highest block in the batch - ler, err := utils.GetBatchLocalExitRootFromSCStorage(thisBatch, sdb.hermezDb.HermezDbReader, sdb.tx) + ler, err := utils.GetBatchLocalExitRootFromSCStorage(thisBatch, batchContext.sdb.hermezDb.HermezDbReader, batchContext.sdb.tx) if err != nil { return err } // write ler to hermezdb - if err = sdb.hermezDb.WriteLocalExitRootForBatchNo(thisBatch, ler); err != nil { + if err = batchContext.sdb.hermezDb.WriteLocalExitRootForBatchNo(thisBatch, ler); err != nil { return err } - lastBlock, err := sdb.hermezDb.GetHighestBlockInBatch(thisBatch) + lastBlock, err := batchContext.sdb.hermezDb.GetHighestBlockInBatch(thisBatch) if err != nil { return err } - block, err := rawdb.ReadBlockByNumber(sdb.tx, lastBlock) + block, err := rawdb.ReadBlockByNumber(batchContext.sdb.tx, lastBlock) if err != nil { return err } blockRoot := block.Root() - if err = datastreamServer.WriteBatchEnd(sdb.hermezDb, thisBatch, thisBatch, &blockRoot, &ler); err != nil { + if err = batchContext.cfg.datastreamServer.WriteBatchEnd(batchContext.sdb.hermezDb, thisBatch, thisBatch, &blockRoot, &ler); err != nil { return err } - log.Info(fmt.Sprintf("[%s] Finish batch %d...", logPrefix, thisBatch)) + log.Info(fmt.Sprintf("[%s] Finish batch %d...", batchContext.s.LogPrefix(), thisBatch)) return nil } diff --git a/zk/stages/stage_sequence_execute_batch_state.go b/zk/stages/stage_sequence_execute_batch_state.go deleted file mode 100644 index 8f5cef54313..00000000000 --- 
a/zk/stages/stage_sequence_execute_batch_state.go +++ /dev/null @@ -1,148 +0,0 @@ -package stages - -import ( - "fmt" - "math" - - "github.com/gateway-fm/cdk-erigon-lib/common" - "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/zk/l1_data" - zktx "github.com/ledgerwatch/erigon/zk/tx" -) - -// TYPE BLOCK STATE -type BlockState struct { - blockTransactions []types.Transaction - l1RecoveryData *L1RecoveryData - usedBlockElements UsedBlockElements -} - -func newBlockState(l1Recovery bool) *BlockState { - blockState := &BlockState{} - - if l1Recovery { - blockState.l1RecoveryData = newL1RecoveryData() - } - - return blockState -} - -func (bs *BlockState) isL1Recovery() bool { - return bs.l1RecoveryData != nil -} - -func (bs *BlockState) isThereAnyTransactionsToRecover() bool { - if !bs.isL1Recovery() { - return false - } - - return len(bs.blockTransactions) != 0 || bs.l1RecoveryData.nextBatchData.IsWorkRemaining -} - -func (bs *BlockState) loadDataByDecodedBlockIndex(decodedBlocksIndex uint64) bool { - if !bs.l1RecoveryData.loadDataByDecodedBlockIndex(decodedBlocksIndex) { - return false - } - - bs.blockTransactions = bs.l1RecoveryData.decodedBlock.Transactions - return true -} - -func (bs *BlockState) getDeltaTimestamp() uint64 { - if bs.l1RecoveryData != nil { - return uint64(bs.l1RecoveryData.decodedBlock.DeltaTimestamp) - } - - return math.MaxUint64 -} - -func (bs *BlockState) getCoinbase(cfg *SequenceBlockCfg) common.Address { - if bs.l1RecoveryData != nil { - return bs.l1RecoveryData.nextBatchData.Coinbase - } - - return cfg.zk.AddressSequencer -} - -func (bs *BlockState) getL1EffectiveGases(cfg SequenceBlockCfg, i int) uint8 { - if bs.isL1Recovery() { - return bs.l1RecoveryData.decodedBlock.EffectiveGasPricePercentages[i] - } - - return DeriveEffectiveGasPrice(cfg, bs.blockTransactions[i]) -} - -// TYPE BLOCK ELEMENTS -type UsedBlockElements struct { - transactions []types.Transaction - receipts types.Receipts - effectiveGases []uint8 - executionResults []*core.ExecutionResult -} - -func (ube *UsedBlockElements) resetBlockBuildingArrays() { - ube.transactions = []types.Transaction{} - ube.receipts = types.Receipts{} - ube.effectiveGases = []uint8{} - ube.executionResults = []*core.ExecutionResult{} -} - -func (ube *UsedBlockElements) onFinishAddingTransaction(transaction types.Transaction, receipt *types.Receipt, execResult *core.ExecutionResult, effectiveGas uint8) { - ube.transactions = append(ube.transactions, transaction) - ube.receipts = append(ube.receipts, receipt) - ube.executionResults = append(ube.executionResults, execResult) - ube.effectiveGases = append(ube.effectiveGases, effectiveGas) -} - -// TYPE L1 RECOVERY DATA -type L1RecoveryData struct { - decodedBlocksSize uint64 - decodedBlock *zktx.DecodedBatchL2Data - nextBatchData *l1_data.DecodedL1Data -} - -func newL1RecoveryData() *L1RecoveryData { - return &L1RecoveryData{} -} - -func (l1rd *L1RecoveryData) loadNextBatchData(sdb *stageDb, thisBatch, forkId uint64) (err error) { - l1rd.nextBatchData, err = l1_data.BreakDownL1DataByBatch(thisBatch, forkId, sdb.hermezDb.HermezDbReader) - if err != nil { - return err - } - - l1rd.decodedBlocksSize = uint64(len(l1rd.nextBatchData.DecodedData)) - return nil -} - -func (l1rd *L1RecoveryData) hasAnyDecodedBlocks() bool { - return l1rd.decodedBlocksSize > 0 -} - -func (l1rd *L1RecoveryData) getInfoTreeIndex(sdb *stageDb) (uint64, error) { - var infoTreeIndex uint64 - - if l1rd.nextBatchData.L1InfoRoot == 
SpecialZeroIndexHash { - return uint64(0), nil - } - - infoTreeIndex, found, err := sdb.hermezDb.GetL1InfoTreeIndexByRoot(l1rd.nextBatchData.L1InfoRoot) - if err != nil { - return uint64(0), err - } - if !found { - return uint64(0), fmt.Errorf("could not find L1 info tree index for root %s", l1rd.nextBatchData.L1InfoRoot.String()) - } - - return infoTreeIndex, nil -} - -func (l1rd *L1RecoveryData) loadDataByDecodedBlockIndex(decodedBlocksIndex uint64) bool { - if decodedBlocksIndex == l1rd.decodedBlocksSize { - return false - } - - l1rd.decodedBlock = &l1rd.nextBatchData.DecodedData[decodedBlocksIndex] - return true -} diff --git a/zk/stages/stage_sequence_execute_blocks.go b/zk/stages/stage_sequence_execute_blocks.go index e48eb92427f..1ef4ccef675 100644 --- a/zk/stages/stage_sequence_execute_blocks.go +++ b/zk/stages/stage_sequence_execute_blocks.go @@ -1,7 +1,6 @@ package stages import ( - "context" "fmt" "github.com/gateway-fm/cdk-erigon-lib/common" @@ -16,7 +15,6 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/smt/pkg/blockinfo" - "github.com/ledgerwatch/erigon/turbo/shards" "github.com/ledgerwatch/erigon/zk/erigon_db" "github.com/ledgerwatch/erigon/zk/hermez_db" zktypes "github.com/ledgerwatch/erigon/zk/types" @@ -67,67 +65,54 @@ func handleStateForNewBlockStarting( } func doFinishBlockAndUpdateState( - ctx context.Context, - cfg SequenceBlockCfg, - s *stagedsync.StageState, - sdb *stageDb, + batchContext *BatchContext, ibs *state.IntraBlockState, header *types.Header, parentBlock *types.Block, - forkId uint64, - thisBatch uint64, + batchState *BatchState, ger common.Hash, l1BlockHash common.Hash, - usedBlockElements *UsedBlockElements, l1InfoIndex uint64, - l1Recovery bool, ) (*types.Block, error) { thisBlockNumber := header.Number.Uint64() - if cfg.accumulator != nil { - cfg.accumulator.StartChange(thisBlockNumber, header.Hash(), nil, false) + if batchContext.cfg.accumulator != nil { + batchContext.cfg.accumulator.StartChange(thisBlockNumber, header.Hash(), nil, false) } - block, err := finaliseBlock(ctx, cfg, s, sdb, ibs, header, parentBlock, forkId, thisBatch, cfg.accumulator, ger, l1BlockHash, usedBlockElements, l1Recovery) + block, err := finaliseBlock(batchContext, ibs, header, parentBlock, batchState, ger, l1BlockHash) if err != nil { return nil, err } - if err := updateSequencerProgress(sdb.tx, thisBlockNumber, thisBatch, l1InfoIndex); err != nil { + if err := updateSequencerProgress(batchContext.sdb.tx, thisBlockNumber, batchState.batchNumber, l1InfoIndex); err != nil { return nil, err } - if cfg.accumulator != nil { - txs, err := rawdb.RawTransactionsRange(sdb.tx, thisBlockNumber, thisBlockNumber) + if batchContext.cfg.accumulator != nil { + txs, err := rawdb.RawTransactionsRange(batchContext.sdb.tx, thisBlockNumber, thisBlockNumber) if err != nil { return nil, err } - cfg.accumulator.ChangeTransactions(txs) + batchContext.cfg.accumulator.ChangeTransactions(txs) } return block, nil } func finaliseBlock( - ctx context.Context, - cfg SequenceBlockCfg, - s *stagedsync.StageState, - sdb *stageDb, + batchContext *BatchContext, ibs *state.IntraBlockState, newHeader *types.Header, parentBlock *types.Block, - forkId uint64, - batch uint64, - accumulator *shards.Accumulator, + batchState *BatchState, ger common.Hash, l1BlockHash common.Hash, - usedBlockElements *UsedBlockElements, - l1Recovery bool, ) (*types.Block, error) { - stateWriter := state.NewPlainStateWriter(sdb.tx, sdb.tx, 
newHeader.Number.Uint64()).SetAccumulator(accumulator) + stateWriter := state.NewPlainStateWriter(batchContext.sdb.tx, batchContext.sdb.tx, newHeader.Number.Uint64()).SetAccumulator(batchContext.cfg.accumulator) chainReader := stagedsync.ChainReader{ - Cfg: *cfg.chainConfig, - Db: sdb.tx, + Cfg: *batchContext.cfg.chainConfig, + Db: batchContext.sdb.tx, } var excessDataGas *big.Int @@ -136,48 +121,49 @@ func finaliseBlock( } txInfos := []blockinfo.ExecutedTxInfo{} - for i, tx := range usedBlockElements.transactions { + builtBlockElements := batchState.blockState.builtBlockElements + for i, tx := range builtBlockElements.transactions { var from common.Address var err error sender, ok := tx.GetSender() if ok { from = sender } else { - signer := types.MakeSigner(cfg.chainConfig, newHeader.Number.Uint64()) + signer := types.MakeSigner(batchContext.cfg.chainConfig, newHeader.Number.Uint64()) from, err = tx.Sender(*signer) if err != nil { return nil, err } } - localReceipt := core.CreateReceiptForBlockInfoTree(usedBlockElements.receipts[i], cfg.chainConfig, newHeader.Number.Uint64(), usedBlockElements.executionResults[i]) + localReceipt := core.CreateReceiptForBlockInfoTree(builtBlockElements.receipts[i], batchContext.cfg.chainConfig, newHeader.Number.Uint64(), builtBlockElements.executionResults[i]) txInfos = append(txInfos, blockinfo.ExecutedTxInfo{ Tx: tx, - EffectiveGasPrice: usedBlockElements.effectiveGases[i], + EffectiveGasPrice: builtBlockElements.effectiveGases[i], Receipt: localReceipt, Signer: &from, }) } - if err := postBlockStateHandling(cfg, ibs, sdb.hermezDb, newHeader, ger, l1BlockHash, parentBlock.Root(), txInfos); err != nil { + if err := postBlockStateHandling(*batchContext.cfg, ibs, batchContext.sdb.hermezDb, newHeader, ger, l1BlockHash, parentBlock.Root(), txInfos); err != nil { return nil, err } - if l1Recovery { - for i, receipt := range usedBlockElements.receipts { - core.ProcessReceiptForBlockExecution(receipt, sdb.hermezDb.HermezDbReader, cfg.chainConfig, newHeader.Number.Uint64(), newHeader, usedBlockElements.transactions[i]) + if batchState.isL1Recovery() { + for i, receipt := range builtBlockElements.receipts { + core.ProcessReceiptForBlockExecution(receipt, batchContext.sdb.hermezDb.HermezDbReader, batchContext.cfg.chainConfig, newHeader.Number.Uint64(), newHeader, builtBlockElements.transactions[i]) } } finalBlock, finalTransactions, finalReceipts, err := core.FinalizeBlockExecutionWithHistoryWrite( - cfg.engine, - sdb.stateReader, + batchContext.cfg.engine, + batchContext.sdb.stateReader, newHeader, - usedBlockElements.transactions, + builtBlockElements.transactions, []*types.Header{}, // no uncles stateWriter, - cfg.chainConfig, + batchContext.cfg.chainConfig, ibs, - usedBlockElements.receipts, + builtBlockElements.receipts, nil, // no withdrawals chainReader, true, @@ -187,55 +173,55 @@ func finaliseBlock( return nil, err } - newRoot, err := zkIncrementIntermediateHashes(ctx, s.LogPrefix(), s, sdb.tx, sdb.eridb, sdb.smt, newHeader.Number.Uint64()-1, newHeader.Number.Uint64()) + newRoot, err := zkIncrementIntermediateHashes(batchContext.ctx, batchContext.s.LogPrefix(), batchContext.s, batchContext.sdb.tx, batchContext.sdb.eridb, batchContext.sdb.smt, newHeader.Number.Uint64()-1, newHeader.Number.Uint64()) if err != nil { return nil, err } finalHeader := finalBlock.HeaderNoCopy() finalHeader.Root = newRoot - finalHeader.Coinbase = cfg.zk.AddressSequencer - finalHeader.GasLimit = utils.GetBlockGasLimitForFork(forkId) - finalHeader.ReceiptHash = 
types.DeriveSha(usedBlockElements.receipts) - finalHeader.Bloom = types.CreateBloom(usedBlockElements.receipts) + finalHeader.Coinbase = batchContext.cfg.zk.AddressSequencer + finalHeader.GasLimit = utils.GetBlockGasLimitForFork(batchState.forkId) + finalHeader.ReceiptHash = types.DeriveSha(builtBlockElements.receipts) + finalHeader.Bloom = types.CreateBloom(builtBlockElements.receipts) newNum := finalBlock.Number() - err = rawdb.WriteHeader_zkEvm(sdb.tx, finalHeader) + err = rawdb.WriteHeader_zkEvm(batchContext.sdb.tx, finalHeader) if err != nil { return nil, fmt.Errorf("failed to write header: %v", err) } - if err := rawdb.WriteHeadHeaderHash(sdb.tx, finalHeader.Hash()); err != nil { + if err := rawdb.WriteHeadHeaderHash(batchContext.sdb.tx, finalHeader.Hash()); err != nil { return nil, err } - err = rawdb.WriteCanonicalHash(sdb.tx, finalHeader.Hash(), newNum.Uint64()) + err = rawdb.WriteCanonicalHash(batchContext.sdb.tx, finalHeader.Hash(), newNum.Uint64()) if err != nil { return nil, fmt.Errorf("failed to write header: %v", err) } - erigonDB := erigon_db.NewErigonDb(sdb.tx) + erigonDB := erigon_db.NewErigonDb(batchContext.sdb.tx) err = erigonDB.WriteBody(newNum, finalHeader.Hash(), finalTransactions) if err != nil { return nil, fmt.Errorf("failed to write body: %v", err) } // write the new block lookup entries - rawdb.WriteTxLookupEntries(sdb.tx, finalBlock) + rawdb.WriteTxLookupEntries(batchContext.sdb.tx, finalBlock) - if err = rawdb.WriteReceipts(sdb.tx, newNum.Uint64(), finalReceipts); err != nil { + if err = rawdb.WriteReceipts(batchContext.sdb.tx, newNum.Uint64(), finalReceipts); err != nil { return nil, err } - if err = sdb.hermezDb.WriteForkId(batch, forkId); err != nil { + if err = batchContext.sdb.hermezDb.WriteForkId(batchState.batchNumber, batchState.forkId); err != nil { return nil, err } // now process the senders to avoid a stage by itself - if err := addSenders(cfg, newNum, finalTransactions, sdb.tx, finalHeader); err != nil { + if err := addSenders(*batchContext.cfg, newNum, finalTransactions, batchContext.sdb.tx, finalHeader); err != nil { return nil, err } // now add in the zk batch to block references - if err := sdb.hermezDb.WriteBlockBatch(newNum.Uint64(), batch); err != nil { + if err := batchContext.sdb.hermezDb.WriteBlockBatch(newNum.Uint64(), batchState.batchNumber); err != nil { return nil, fmt.Errorf("write block batch error: %v", err) } diff --git a/zk/stages/stage_sequence_execute_data_stream.go b/zk/stages/stage_sequence_execute_data_stream.go index ecc1c4adcda..7ca0da97426 100644 --- a/zk/stages/stage_sequence_execute_data_stream.go +++ b/zk/stages/stage_sequence_execute_data_stream.go @@ -4,7 +4,6 @@ import ( "context" "fmt" - "github.com/gateway-fm/cdk-erigon-lib/kv" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/zk/datastream/server" verifier "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" @@ -14,7 +13,6 @@ import ( type SequencerBatchStreamWriter struct { ctx context.Context - db kv.RwDB logPrefix string batchVerifier *BatchVerifier sdb *stageDb @@ -89,8 +87,8 @@ func (sbc *SequencerBatchStreamWriter) writeBlockDetails(verifiedBundles []*veri return written, nil } -func finalizeLastBatchInDatastreamIfNotFinalized(logPrefix string, sdb *stageDb, datastreamServer *server.DataStreamServer, thisBatch, thisBlock uint64) error { - isLastEntryBatchEnd, err := datastreamServer.IsLastEntryBatchEnd() +func finalizeLastBatchInDatastreamIfNotFinalized(batchContext *BatchContext, batchState *BatchState, thisBlock uint64) 
error { + isLastEntryBatchEnd, err := batchContext.cfg.datastreamServer.IsLastEntryBatchEnd() if err != nil { return err } @@ -99,18 +97,18 @@ func finalizeLastBatchInDatastreamIfNotFinalized(logPrefix string, sdb *stageDb, return nil } - log.Warn(fmt.Sprintf("[%s] Last batch %d was not closed properly, closing it now...", logPrefix, thisBatch)) - ler, err := utils.GetBatchLocalExitRootFromSCStorage(thisBatch, sdb.hermezDb.HermezDbReader, sdb.tx) + log.Warn(fmt.Sprintf("[%s] Last batch %d was not closed properly, closing it now...", batchContext.s.LogPrefix(), batchState.batchNumber)) + ler, err := utils.GetBatchLocalExitRootFromSCStorage(batchState.batchNumber, batchContext.sdb.hermezDb.HermezDbReader, batchContext.sdb.tx) if err != nil { return err } - lastBlock, err := rawdb.ReadBlockByNumber(sdb.tx, thisBlock) + lastBlock, err := rawdb.ReadBlockByNumber(batchContext.sdb.tx, thisBlock) if err != nil { return err } root := lastBlock.Root() - if err = datastreamServer.WriteBatchEnd(sdb.hermezDb, thisBatch, thisBatch-1, &root, &ler); err != nil { + if err = batchContext.cfg.datastreamServer.WriteBatchEnd(batchContext.sdb.hermezDb, batchState.batchNumber, batchState.batchNumber-1, &root, &ler); err != nil { return err } return nil diff --git a/zk/stages/stage_sequence_execute_injected_batch.go b/zk/stages/stage_sequence_execute_injected_batch.go index be957183178..492fa98afea 100644 --- a/zk/stages/stage_sequence_execute_injected_batch.go +++ b/zk/stages/stage_sequence_execute_injected_batch.go @@ -1,7 +1,6 @@ package stages import ( - "context" "math" "errors" @@ -13,41 +12,37 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/core/vm/evmtypes" - "github.com/ledgerwatch/erigon/eth/stagedsync" zktx "github.com/ledgerwatch/erigon/zk/tx" zktypes "github.com/ledgerwatch/erigon/zk/types" "github.com/ledgerwatch/erigon/zk/utils" ) const ( - injectedBatchNumber = 1 injectedBatchBlockNumber = 1 injectedBatchBatchNumber = 1 ) func processInjectedInitialBatch( - ctx context.Context, - cfg SequenceBlockCfg, - s *stagedsync.StageState, - sdb *stageDb, - forkId uint64, - l1Recovery bool, + batchContext *BatchContext, + batchState *BatchState, ) error { // set the block height for the fork we're running at to ensure contract interactions are correct - if err := utils.RecoverySetBlockConfigForks(injectedBatchBlockNumber, forkId, cfg.chainConfig, s.LogPrefix()); err != nil { + if err := utils.RecoverySetBlockConfigForks(injectedBatchBlockNumber, batchState.forkId, batchContext.cfg.chainConfig, batchContext.s.LogPrefix()); err != nil { return err } - header, parentBlock, err := prepareHeader(sdb.tx, 0, math.MaxUint64, math.MaxUint64, forkId, cfg.zk.AddressSequencer) + header, parentBlock, err := prepareHeader(batchContext.sdb.tx, 0, math.MaxUint64, math.MaxUint64, batchState.forkId, batchContext.cfg.zk.AddressSequencer) if err != nil { return err } - getHeader := func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(sdb.tx, hash, number) } + getHeader := func(hash common.Hash, number uint64) *types.Header { + return rawdb.ReadHeader(batchContext.sdb.tx, hash, number) + } getHashFn := core.GetHashFn(header, getHeader) - blockContext := core.NewEVMBlockContext(header, getHashFn, cfg.engine, &cfg.zk.AddressSequencer, parentBlock.ExcessDataGas()) + blockContext := core.NewEVMBlockContext(header, getHashFn, batchContext.cfg.engine, &batchContext.cfg.zk.AddressSequencer, parentBlock.ExcessDataGas()) - injected, err 
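The reworked finalizeLastBatchInDatastreamIfNotFinalized above now pulls everything it needs (log prefix, batch number, db handles, datastream server) out of BatchContext and BatchState instead of taking them as separate arguments; the logic itself stays a simple guard: do nothing if the stream already ends with a batch-end entry, otherwise close the batch using its local exit root and the last block's state root. A minimal, self-contained sketch of that guard shape, using stand-in types rather than the real cdk-erigon datastream API:

package main

import "fmt"

// batchEndWriter is a stand-in for the small subset of the data stream server
// the guard needs: a way to ask whether the last entry closed a batch, and a
// way to write the missing batch-end entry.
type batchEndWriter interface {
	IsLastEntryBatchEnd() (bool, error)
	WriteBatchEnd(batch uint64, stateRoot, localExitRoot [32]byte) error
}

// closeBatchIfLeftOpen mirrors the shape of
// finalizeLastBatchInDatastreamIfNotFinalized: a no-op when the stream already
// finishes with a batch end, otherwise it closes the given batch.
func closeBatchIfLeftOpen(w batchEndWriter, batch uint64, stateRoot, localExitRoot [32]byte) error {
	closed, err := w.IsLastEntryBatchEnd()
	if err != nil || closed {
		return err
	}
	fmt.Printf("batch %d was not closed properly, closing it now\n", batch)
	return w.WriteBatchEnd(batch, stateRoot, localExitRoot)
}

// fakeStream is an in-memory implementation used only to exercise the guard.
type fakeStream struct{ closed bool }

func (f *fakeStream) IsLastEntryBatchEnd() (bool, error) { return f.closed, nil }
func (f *fakeStream) WriteBatchEnd(batch uint64, stateRoot, localExitRoot [32]byte) error {
	f.closed = true
	return nil
}

func main() {
	s := &fakeStream{}
	var root, ler [32]byte
	if err := closeBatchIfLeftOpen(s, 7, root, ler); err != nil {
		panic(err)
	}
	fmt.Println("stream now ends with a batch end:", s.closed) // true
}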
:= sdb.hermezDb.GetL1InjectedBatch(0) + injected, err := batchContext.sdb.hermezDb.GetL1InjectedBatch(0) if err != nil { return err } @@ -58,15 +53,15 @@ func processInjectedInitialBatch( Timestamp: injected.Timestamp, } - ibs := state.New(sdb.stateReader) + ibs := state.New(batchContext.sdb.stateReader) // the injected batch block timestamp should also match that of the injected batch header.Time = injected.Timestamp parentRoot := parentBlock.Root() if err = handleStateForNewBlockStarting( - cfg.chainConfig, - sdb.hermezDb, + batchContext.cfg.chainConfig, + batchContext.sdb.hermezDb, ibs, injectedBatchBlockNumber, injectedBatchBatchNumber, @@ -78,25 +73,24 @@ func processInjectedInitialBatch( return err } - txn, receipt, execResult, effectiveGas, err := handleInjectedBatch(cfg, sdb, ibs, &blockContext, injected, header, parentBlock, forkId) + txn, receipt, execResult, effectiveGas, err := handleInjectedBatch(batchContext, ibs, &blockContext, injected, header, parentBlock, batchState.forkId) if err != nil { return err } - usedBlockElements := &UsedBlockElements{ + batchState.blockState.builtBlockElements = BuiltBlockElements{ transactions: types.Transactions{*txn}, receipts: types.Receipts{receipt}, executionResults: []*core.ExecutionResult{execResult}, effectiveGases: []uint8{effectiveGas}, } - _, err = doFinishBlockAndUpdateState(ctx, cfg, s, sdb, ibs, header, parentBlock, forkId, injectedBatchNumber, injected.LastGlobalExitRoot, injected.L1ParentHash, usedBlockElements, 0, l1Recovery) + _, err = doFinishBlockAndUpdateState(batchContext, ibs, header, parentBlock, batchState, injected.LastGlobalExitRoot, injected.L1ParentHash, 0) return err } func handleInjectedBatch( - cfg SequenceBlockCfg, - sdb *stageDb, + batchContext *BatchContext, ibs *state.IntraBlockState, blockContext *evmtypes.BlockContext, injected *zktypes.L1InjectedBatch, @@ -115,11 +109,11 @@ func handleInjectedBatch( return nil, nil, nil, 0, errors.New("expected 1 transaction in the injected batch") } - batchCounters := vm.NewBatchCounterCollector(sdb.smt.GetDepth(), uint16(forkId), cfg.zk.VirtualCountersSmtReduction, cfg.zk.ShouldCountersBeUnlimited(false), nil) + batchCounters := vm.NewBatchCounterCollector(batchContext.sdb.smt.GetDepth(), uint16(forkId), batchContext.cfg.zk.VirtualCountersSmtReduction, batchContext.cfg.zk.ShouldCountersBeUnlimited(false), nil) // process the tx and we can ignore the counters as an overflow at this stage means no network anyway - effectiveGas := DeriveEffectiveGasPrice(cfg, decodedBlocks[0].Transactions[0]) - receipt, execResult, _, err := attemptAddTransaction(cfg, sdb, ibs, batchCounters, blockContext, header, decodedBlocks[0].Transactions[0], effectiveGas, false, forkId, 0 /* use 0 for l1InfoIndex in injected batch */, nil) + effectiveGas := DeriveEffectiveGasPrice(*batchContext.cfg, decodedBlocks[0].Transactions[0]) + receipt, execResult, _, err := attemptAddTransaction(*batchContext.cfg, batchContext.sdb, ibs, batchCounters, blockContext, header, decodedBlocks[0].Transactions[0], effectiveGas, false, forkId, 0 /* use 0 for l1InfoIndex in injected batch */, nil) if err != nil { return nil, nil, nil, 0, err } diff --git a/zk/stages/stage_sequence_execute_state.go b/zk/stages/stage_sequence_execute_state.go new file mode 100644 index 00000000000..623acdf1950 --- /dev/null +++ b/zk/stages/stage_sequence_execute_state.go @@ -0,0 +1,210 @@ +package stages + +import ( + "context" + "fmt" + "math" + + mapset "github.com/deckarep/golang-set/v2" + "github.com/gateway-fm/cdk-erigon-lib/common" 
+ "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/stagedsync" + "github.com/ledgerwatch/erigon/zk/l1_data" + zktx "github.com/ledgerwatch/erigon/zk/tx" +) + +type BatchContext struct { + ctx context.Context + cfg *SequenceBlockCfg + s *stagedsync.StageState + sdb *stageDb +} + +func newBatchContext(ctx context.Context, cfg *SequenceBlockCfg, s *stagedsync.StageState, sdb *stageDb) *BatchContext { + return &BatchContext{ + ctx: ctx, + cfg: cfg, + s: s, + sdb: sdb, + } +} + +// TYPE BATCH STATE +type BatchState struct { + forkId uint64 + batchNumber uint64 + hasExecutorForThisBatch bool + hasAnyTransactionsInThisBatch bool + builtBlocks []uint64 + yieldedTransactions mapset.Set[[32]byte] + blockState *BlockState + batchL1RecoveryData *BatchL1RecoveryData +} + +func newBatchState(forkId, batchNumber uint64, hasExecutorForThisBatch, l1Recovery bool) *BatchState { + blockState := &BatchState{ + forkId: forkId, + batchNumber: batchNumber, + hasExecutorForThisBatch: hasExecutorForThisBatch, + hasAnyTransactionsInThisBatch: false, + builtBlocks: make([]uint64, 0, 128), + yieldedTransactions: mapset.NewSet[[32]byte](), + blockState: newBlockState(), + batchL1RecoveryData: nil, + } + + if l1Recovery { + blockState.batchL1RecoveryData = newBatchL1RecoveryData() + } + + return blockState +} + +func (bs *BatchState) isL1Recovery() bool { + return bs.batchL1RecoveryData != nil +} + +func (bs *BatchState) isThereAnyTransactionsToRecover() bool { + if !bs.isL1Recovery() { + return false + } + + return bs.blockState.hasAnyTransactionForInclusion() || bs.batchL1RecoveryData.recoveredBatchData.IsWorkRemaining +} + +func (bs *BatchState) loadBlockL1RecoveryData(decodedBlocksIndex uint64) bool { + decodedBatchL2Data, found := bs.batchL1RecoveryData.getDecodedL1RecoveredBatchDataByIndex(decodedBlocksIndex) + bs.blockState.setBlockL1RecoveryData(decodedBatchL2Data) + return found +} + +func (bs *BatchState) getCoinbase(cfg *SequenceBlockCfg) common.Address { + if bs.batchL1RecoveryData != nil { + return bs.batchL1RecoveryData.recoveredBatchData.Coinbase + } + + return cfg.zk.AddressSequencer +} + +func (bs *BatchState) onAddedTransaction(transaction types.Transaction, receipt *types.Receipt, execResult *core.ExecutionResult, effectiveGas uint8) { + bs.blockState.builtBlockElements.onFinishAddingTransaction(transaction, receipt, execResult, effectiveGas) + bs.hasAnyTransactionsInThisBatch = true +} + +func (bs *BatchState) onBuiltBlock(blockNumber uint64) { + bs.builtBlocks = append(bs.builtBlocks, blockNumber) +} + +// TYPE BATCH L1 RECOVERY DATA +type BatchL1RecoveryData struct { + recoveredBatchDataSize int + recoveredBatchData *l1_data.DecodedL1Data +} + +func newBatchL1RecoveryData() *BatchL1RecoveryData { + return &BatchL1RecoveryData{} +} + +func (batchL1RecoveryData *BatchL1RecoveryData) loadBatchData(sdb *stageDb, thisBatch, forkId uint64) (err error) { + batchL1RecoveryData.recoveredBatchData, err = l1_data.BreakDownL1DataByBatch(thisBatch, forkId, sdb.hermezDb.HermezDbReader) + if err != nil { + return err + } + + batchL1RecoveryData.recoveredBatchDataSize = len(batchL1RecoveryData.recoveredBatchData.DecodedData) + return nil +} + +func (batchL1RecoveryData *BatchL1RecoveryData) hasAnyDecodedBlocks() bool { + return batchL1RecoveryData.recoveredBatchDataSize > 0 +} + +func (batchL1RecoveryData *BatchL1RecoveryData) getInfoTreeIndex(sdb *stageDb) (uint64, error) { + var infoTreeIndex uint64 + + if 
batchL1RecoveryData.recoveredBatchData.L1InfoRoot == SpecialZeroIndexHash { + return uint64(0), nil + } + + infoTreeIndex, found, err := sdb.hermezDb.GetL1InfoTreeIndexByRoot(batchL1RecoveryData.recoveredBatchData.L1InfoRoot) + if err != nil { + return uint64(0), err + } + if !found { + return uint64(0), fmt.Errorf("could not find L1 info tree index for root %s", batchL1RecoveryData.recoveredBatchData.L1InfoRoot.String()) + } + + return infoTreeIndex, nil +} + +func (batchL1RecoveryData *BatchL1RecoveryData) getDecodedL1RecoveredBatchDataByIndex(decodedBlocksIndex uint64) (*zktx.DecodedBatchL2Data, bool) { + if decodedBlocksIndex == uint64(batchL1RecoveryData.recoveredBatchDataSize) { + return nil, false + } + + return &batchL1RecoveryData.recoveredBatchData.DecodedData[decodedBlocksIndex], true +} + +// TYPE BLOCK STATE +type BlockState struct { + transactionsForInclusion []types.Transaction + builtBlockElements BuiltBlockElements + blockL1RecoveryData *zktx.DecodedBatchL2Data +} + +func newBlockState() *BlockState { + return &BlockState{} +} + +func (bs *BlockState) hasAnyTransactionForInclusion() bool { + return len(bs.transactionsForInclusion) > 0 +} + +func (bs *BlockState) setBlockL1RecoveryData(blockL1RecoveryData *zktx.DecodedBatchL2Data) { + bs.blockL1RecoveryData = blockL1RecoveryData + + if bs.blockL1RecoveryData != nil { + bs.transactionsForInclusion = bs.blockL1RecoveryData.Transactions + } else { + bs.transactionsForInclusion = []types.Transaction{} + } +} + +func (bs *BlockState) getDeltaTimestamp() uint64 { + if bs.blockL1RecoveryData != nil { + return uint64(bs.blockL1RecoveryData.DeltaTimestamp) + } + + return math.MaxUint64 +} + +func (bs *BlockState) getL1EffectiveGases(cfg SequenceBlockCfg, i int) uint8 { + if bs.blockL1RecoveryData != nil { + return bs.blockL1RecoveryData.EffectiveGasPricePercentages[i] + } + + return DeriveEffectiveGasPrice(cfg, bs.transactionsForInclusion[i]) +} + +// TYPE BLOCK ELEMENTS +type BuiltBlockElements struct { + transactions []types.Transaction + receipts types.Receipts + effectiveGases []uint8 + executionResults []*core.ExecutionResult +} + +func (bbe *BuiltBlockElements) resetBlockBuildingArrays() { + bbe.transactions = []types.Transaction{} + bbe.receipts = types.Receipts{} + bbe.effectiveGases = []uint8{} + bbe.executionResults = []*core.ExecutionResult{} +} + +func (bbe *BuiltBlockElements) onFinishAddingTransaction(transaction types.Transaction, receipt *types.Receipt, execResult *core.ExecutionResult, effectiveGas uint8) { + bbe.transactions = append(bbe.transactions, transaction) + bbe.receipts = append(bbe.receipts, receipt) + bbe.executionResults = append(bbe.executionResults, execResult) + bbe.effectiveGases = append(bbe.effectiveGases, effectiveGas) +} diff --git a/zk/stages/stage_sequence_execute_utils.go b/zk/stages/stage_sequence_execute_utils.go index 630129e49d2..c19b1716210 100644 --- a/zk/stages/stage_sequence_execute_utils.go +++ b/zk/stages/stage_sequence_execute_utils.go @@ -242,7 +242,7 @@ func prepareHeader(tx kv.RwTx, previousBlockNumber, deltaTimestamp, forcedTimest }, parentBlock, nil } -func prepareL1AndInfoTreeRelatedStuff(sdb *stageDb, bs *BlockState, proposedTimestamp uint64) ( +func prepareL1AndInfoTreeRelatedStuff(sdb *stageDb, batchState *BatchState, proposedTimestamp uint64) ( infoTreeIndexProgress uint64, l1TreeUpdate *zktypes.L1InfoTreeUpdate, l1TreeUpdateIndex uint64, @@ -260,8 +260,8 @@ func prepareL1AndInfoTreeRelatedStuff(sdb *stageDb, bs *BlockState, proposedTime return } - if bs.isL1Recovery() { 
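The new stage_sequence_execute_state.go above gathers what used to be loose parameters (fork id, batch number, L1 recovery data, the per-block build results) into BatchState, BlockState and BuiltBlockElements. A minimal, self-contained sketch of how those pieces are intended to interact while a block is being built, with simplified stand-in types in place of the real transaction, receipt and execution-result types:

package main

import "fmt"

// stand-ins for the real transaction/receipt types
type tx struct{ hash string }
type receipt struct{ status uint64 }

// builtBlockElements mirrors BuiltBlockElements: parallel slices filled as
// transactions are accepted into the block under construction.
type builtBlockElements struct {
	transactions []tx
	receipts     []receipt
}

func (b *builtBlockElements) reset() {
	b.transactions = []tx{}
	b.receipts = []receipt{}
}

func (b *builtBlockElements) onFinishAddingTransaction(t tx, r receipt) {
	b.transactions = append(b.transactions, t)
	b.receipts = append(b.receipts, r)
}

// blockState mirrors BlockState: the state of the single block being built.
type blockState struct {
	built builtBlockElements
}

// batchState mirrors BatchState: batch-wide bookkeeping plus the current block.
type batchState struct {
	batchNumber uint64
	hasAnyTx    bool
	builtBlocks []uint64
	block       *blockState
}

func (bs *batchState) onAddedTransaction(t tx, r receipt) {
	bs.block.built.onFinishAddingTransaction(t, r)
	bs.hasAnyTx = true
}

func (bs *batchState) onBuiltBlock(blockNumber uint64) {
	bs.builtBlocks = append(bs.builtBlocks, blockNumber)
}

func main() {
	bs := &batchState{batchNumber: 2, block: &blockState{}}

	// block 10: one transaction is accepted, then the block is sealed
	bs.onAddedTransaction(tx{hash: "0x01"}, receipt{status: 1})
	bs.onBuiltBlock(10)

	// the build arrays are cleared explicitly before the next block starts
	bs.block.built.reset()

	fmt.Println(bs.batchNumber, bs.builtBlocks, bs.hasAnyTx, len(bs.block.built.transactions))
	// prints: 2 [10] true 0
}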
- l1TreeUpdateIndex = uint64(bs.l1RecoveryData.decodedBlock.L1InfoTreeIndex) + if batchState.isL1Recovery() { + l1TreeUpdateIndex = uint64(batchState.blockState.blockL1RecoveryData.L1InfoTreeIndex) if l1TreeUpdate, err = sdb.hermezDb.GetL1InfoTreeUpdate(l1TreeUpdateIndex); err != nil { return } @@ -396,7 +396,7 @@ func NewBlockDataChecker() *BlockDataChecker { // adds bytes amounting to the block data and checks if the limit is reached // if the limit is reached, the data is not added, so this can be reused again for next check -func (bdc *BlockDataChecker) AddBlockStartData(deltaTimestamp, l1InfoTreeIndex uint32) bool { +func (bdc *BlockDataChecker) AddBlockStartData() bool { blockStartBytesAmount := tx.START_BLOCK_BATCH_L2_DATA_SIZE // tx.GenerateStartBlockBatchL2Data(deltaTimestamp, l1InfoTreeIndex) returns 65 long byte array // add in the changeL2Block transaction if bdc.counter+blockStartBytesAmount > bdc.limit { diff --git a/zk/stages/stage_sequencer_executor_verify.go b/zk/stages/stage_sequencer_executor_verify.go index 769ba251fd5..98020c3a388 100644 --- a/zk/stages/stage_sequencer_executor_verify.go +++ b/zk/stages/stage_sequencer_executor_verify.go @@ -266,8 +266,8 @@ func SpawnSequencerExecutorVerifyStage( // // send off the new batches to the verifier to be processed // for batch := progress + 1; batch <= latestBatch; batch++ { // // we do not need to verify batch 1 as this is the injected batch so just updated progress and move on - // if batch == injectedBatchNumber { - // if err = stages.SaveStageProgress(tx, stages.SequenceExecutorVerify, injectedBatchNumber); err != nil { + // if batch == injectedBatchBatchNumber { + // if err = stages.SaveStageProgress(tx, stages.SequenceExecutorVerify, injectedBatchBatchNumber); err != nil { // return err // } // } else { From a677ba75c2c2f0748ebe7a0765cd234ba351fdbe Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Tue, 30 Jul 2024 10:17:43 +0000 Subject: [PATCH 08/33] verifier --- eth/backend.go | 3 +-- .../legacy_executor_verifier.go | 22 ++++--------------- zk/stages/stage_sequence_execute.go | 6 ++--- zk/stages/stage_sequence_execute_limbo.go | 2 +- zk/stages/stage_sequence_execute_verifier.go | 9 ++++---- 5 files changed, 12 insertions(+), 30 deletions(-) diff --git a/eth/backend.go b/eth/backend.go index 33f55b8fa71..68ecb211a8a 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -836,7 +836,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { backend.engine, ) - var legacyExecutors []legacy_executor_verifier.ILegacyExecutor + var legacyExecutors []*legacy_executor_verifier.Executor = make([]*legacy_executor_verifier.Executor, 0, len(cfg.ExecutorUrls)) if len(cfg.ExecutorUrls) > 0 && cfg.ExecutorUrls[0] != "" { levCfg := legacy_executor_verifier.Config{ GrpcUrls: cfg.ExecutorUrls, @@ -856,7 +856,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { backend.chainConfig, backend.chainDB, witnessGenerator, - backend.l1Syncer, backend.dataStream, ) diff --git a/zk/legacy_executor_verifier/legacy_executor_verifier.go b/zk/legacy_executor_verifier/legacy_executor_verifier.go index 68a351b90fc..f022e2163dc 100644 --- a/zk/legacy_executor_verifier/legacy_executor_verifier.go +++ b/zk/legacy_executor_verifier/legacy_executor_verifier.go @@ -22,7 +22,6 @@ import ( "github.com/ledgerwatch/erigon/zk/datastream/server" "github.com/ledgerwatch/erigon/zk/hermez_db" "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier/proto/github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" - 
"github.com/ledgerwatch/erigon/zk/syncer" "github.com/ledgerwatch/erigon/zk/utils" "github.com/ledgerwatch/log/v3" ) @@ -75,14 +74,6 @@ func NewVerifierBundle(request *VerifierRequest, response *VerifierResponse) *Ve } } -type ILegacyExecutor interface { - Verify(*Payload, *VerifierRequest, common.Hash) (bool, *executor.ProcessBatchResponseV2, error) - CheckOnline() bool - QueueLength() int - AquireAccess() - ReleaseAccess() -} - type WitnessGenerator interface { GetWitnessByBlockRange(tx kv.Tx, ctx context.Context, startBlock, endBlock uint64, debug, witnessFull bool) ([]byte, error) } @@ -90,16 +81,14 @@ type WitnessGenerator interface { type LegacyExecutorVerifier struct { db kv.RwDB cfg ethconfig.Zk - executors []ILegacyExecutor + executors []*Executor executorNumber int cancelAllVerifications atomic.Bool quit chan struct{} streamServer *server.DataStreamServer - stream *datastreamer.StreamServer witnessGenerator WitnessGenerator - l1Syncer *syncer.L1Syncer promises []*Promise[*VerifierBundle] addedBatches map[uint64]struct{} @@ -114,11 +103,10 @@ type LegacyExecutorVerifier struct { func NewLegacyExecutorVerifier( cfg ethconfig.Zk, - executors []ILegacyExecutor, + executors []*Executor, chainCfg *chain.Config, db kv.RwDB, witnessGenerator WitnessGenerator, - l1Syncer *syncer.L1Syncer, stream *datastreamer.StreamServer, ) *LegacyExecutorVerifier { streamServer := server.NewDataStreamServer(stream, chainCfg.ChainID.Uint64()) @@ -130,9 +118,7 @@ func NewLegacyExecutorVerifier( cancelAllVerifications: atomic.Bool{}, quit: make(chan struct{}), streamServer: streamServer, - stream: stream, witnessGenerator: witnessGenerator, - l1Syncer: l1Syncer, promises: make([]*Promise[*VerifierBundle], 0), addedBatches: make(map[uint64]struct{}), responsesToWrite: map[uint64]struct{}{}, @@ -560,8 +546,8 @@ func (v *LegacyExecutorVerifier) WriteBatchToStream(batchNumber uint64, hdb *her return nil } -func (v *LegacyExecutorVerifier) GetNextOnlineAvailableExecutor() ILegacyExecutor { - var exec ILegacyExecutor +func (v *LegacyExecutorVerifier) GetNextOnlineAvailableExecutor() *Executor { + var exec *Executor // TODO: find executors with spare capacity diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index 032a6cc880f..e4bd6548088 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -103,6 +103,8 @@ func SpawnSequencingStage( defer nonEmptyBatchTimer.Stop() runLoopBlocks := true + blockDataSizeChecker := NewBlockDataChecker() + batchDataOverflow := false batchVerifier := NewBatchVerifier(cfg.zk, batchState.hasExecutorForThisBatch, cfg.legacyVerifier, batchState.forkId) streamWriter := &SequencerBatchStreamWriter{ @@ -115,10 +117,6 @@ func SpawnSequencingStage( lastBatch: lastBatch, } - blockDataSizeChecker := NewBlockDataChecker() - - batchDataOverflow := false - limboHeaderTimestamp, limboTxHash := cfg.txPool.GetLimboTxHash(batchState.batchNumber) limboRecovery := limboTxHash != nil isAnyRecovery := batchState.isL1Recovery() || limboRecovery diff --git a/zk/stages/stage_sequence_execute_limbo.go b/zk/stages/stage_sequence_execute_limbo.go index 84df2a5bdf3..a022d456828 100644 --- a/zk/stages/stage_sequence_execute_limbo.go +++ b/zk/stages/stage_sequence_execute_limbo.go @@ -95,7 +95,7 @@ func handleLimbo( limboTxCount := limboDetails.AppendTransaction(buffer.Bytes(), streamBytes, hash, sender, previousTxIndex) limboSendersToPreviousTxMap[senderMapKey] = limboTxCount - 1 - log.Info(fmt.Sprintf("[%s] adding transaction to limbo", 
logPrefix, "hash", hash)) + log.Info(fmt.Sprintf("[%s] adding transaction to limbo", logPrefix), "hash", hash) } } diff --git a/zk/stages/stage_sequence_execute_verifier.go b/zk/stages/stage_sequence_execute_verifier.go index c07b5e8ec92..a0b12eab3a1 100644 --- a/zk/stages/stage_sequence_execute_verifier.go +++ b/zk/stages/stage_sequence_execute_verifier.go @@ -3,7 +3,6 @@ package stages import ( "errors" "fmt" - "math/rand" "sync" "github.com/gateway-fm/cdk-erigon-lib/common" @@ -160,12 +159,12 @@ func (bv *BatchVerifier) removeProcessedPromises(processed int) int { } func (bv *BatchVerifier) syncPromise(request *verifier.VerifierRequest, blockNumbers []uint64) *PromiseWithBlocks { + valid := true // simulate a die roll to determine if this is a good batch or not // 1 in 6 chance of being a bad batch - valid := true - if rand.Intn(6) == 0 { - valid = false - } + // if rand.Intn(6) == 0 { + // valid = false + // } promise := verifier.NewPromiseSync[*verifier.VerifierBundleWithBlocks](func() (*verifier.VerifierBundleWithBlocks, error) { response := &verifier.VerifierResponse{ From f2710ade135a66c89e521f507f983a9c5b819a8e Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Tue, 30 Jul 2024 10:30:39 +0000 Subject: [PATCH 09/33] update delete blocks func --- zk/hermez_db/db.go | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/zk/hermez_db/db.go b/zk/hermez_db/db.go index ef40d577c3d..4f291665429 100644 --- a/zk/hermez_db/db.go +++ b/zk/hermez_db/db.go @@ -890,7 +890,7 @@ func (db *HermezDb) DeleteBlockL1InfoTreeIndexes(fromBlockNum, toBlockNum uint64 // from and to are inclusive func (db *HermezDb) DeleteBlockBatches(fromBlockNum, toBlockNum uint64) error { // first, gather batch numbers related to the blocks we're about to delete - batchNos := make([]uint64, 0) + batchNumbersMap := map[uint64]struct{}{} // find all the batches involved for i := fromBlockNum; i <= toBlockNum; i++ { @@ -898,28 +898,19 @@ func (db *HermezDb) DeleteBlockBatches(fromBlockNum, toBlockNum uint64) error { if err != nil { return err } - found := false - for _, b := range batchNos { - if b == batch { - found = true - break - } - } - if !found { - batchNos = append(batchNos, batch) - } + batchNumbersMap[batch] = struct{}{} } // now for each batch go and get the block numbers and remove them from the batch to block records - for _, batchNo := range batchNos { - data, err := db.tx.GetOne(BATCH_BLOCKS, Uint64ToBytes(batchNo)) + for batchNumber := range batchNumbersMap { + data, err := db.tx.GetOne(BATCH_BLOCKS, Uint64ToBytes(batchNumber)) if err != nil { return err } blockNos := parseConcatenatedBlockNumbers(data) // make a new list excluding the blocks in our range - newBlockNos := make([]uint64, 0) + newBlockNos := make([]uint64, 0, len(blockNos)) for _, blockNo := range blockNos { if blockNo < fromBlockNum || blockNo > toBlockNum { newBlockNos = append(newBlockNos, blockNo) @@ -929,8 +920,15 @@ func (db *HermezDb) DeleteBlockBatches(fromBlockNum, toBlockNum uint64) error { // concatenate the block numbers back again newData := concatenateBlockNumbers(newBlockNos) - // now store it back - err = db.tx.Put(BATCH_BLOCKS, Uint64ToBytes(batchNo), newData) + // now delete/store it back + if len(newData) == 0 { + err = db.tx.Delete(BATCH_BLOCKS, Uint64ToBytes(batchNumber)) + } else { + err = db.tx.Put(BATCH_BLOCKS, Uint64ToBytes(batchNumber), newData) + } + if err != nil { + return err + } } return db.deleteFromBucketWithUintKeysRange(BLOCKBATCHES, fromBlockNum, toBlockNum) From 
d1be243d516cac372af8b7aa93ff68fac0217df0 Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Tue, 30 Jul 2024 15:41:04 +0000 Subject: [PATCH 10/33] verifier --- turbo/stages/zk_stages.go | 2 +- zk/datastream/server/data_stream_server.go | 5 +- .../legacy_executor_verifier.go | 641 ++++++++++-------- zk/legacy_executor_verifier/promise.go | 26 +- zk/stages/stage_interhashes.go | 2 +- zk/stages/stage_sequence_execute.go | 40 +- zk/stages/stage_sequence_execute_batch.go | 65 +- zk/stages/stage_sequence_execute_blocks.go | 2 +- .../stage_sequence_execute_data_stream.go | 25 +- zk/stages/stage_sequence_execute_unwind.go | 6 +- zk/stages/stage_sequence_execute_utils.go | 10 +- zk/stages/stage_sequence_execute_verifier.go | 366 +++++----- zk/stages/stage_sequencer_interhashes.go | 78 +-- zk/stages/stages.go | 8 +- 14 files changed, 686 insertions(+), 590 deletions(-) diff --git a/turbo/stages/zk_stages.go b/turbo/stages/zk_stages.go index 271aa9b2b89..ccb39c75304 100644 --- a/turbo/stages/zk_stages.go +++ b/turbo/stages/zk_stages.go @@ -117,7 +117,7 @@ func NewSequencerZkStages(ctx context.Context, zkStages.StageL1InfoTreeCfg(db, cfg.Zk, l1InfoTreeSyncer), zkStages.StageSequencerL1BlockSyncCfg(db, cfg.Zk, l1BlockSyncer), zkStages.StageDataStreamCatchupCfg(datastreamServer, db, cfg.Genesis.Config.ChainID.Uint64(), cfg.DatastreamVersion, cfg.HasExecutors()), - zkStages.StageSequencerInterhashesCfg(db, notifications.Accumulator), + // zkStages.StageSequencerInterhashesCfg(db, notifications.Accumulator), zkStages.StageSequenceBlocksCfg( db, cfg.Prune, diff --git a/zk/datastream/server/data_stream_server.go b/zk/datastream/server/data_stream_server.go index 47a357c6fe8..14535999850 100644 --- a/zk/datastream/server/data_stream_server.go +++ b/zk/datastream/server/data_stream_server.go @@ -227,7 +227,8 @@ func createFullBlockStreamEntriesProto( batchNumber uint64, l1InfoTreeMinTimestamps map[uint64]uint64, ) (*DataStreamEntries, error) { - entries := NewDataStreamEntries(len(filteredTransactions) + 3) // block bookmark + block + block end + // entries := NewDataStreamEntries(len(filteredTransactions) + 3) // block bookmark + block + block end + entries := NewDataStreamEntries(len(filteredTransactions) + 2) // block bookmark + block + block end blockNum := block.NumberU64() // L2 BLOCK BOOKMARK entries.Add(newL2BlockBookmarkEntryProto(blockNum)) @@ -274,7 +275,7 @@ func createFullBlockStreamEntriesProto( entries.Add(transaction) } - entries.Add(newL2BlockEndProto(blockNum)) + // entries.Add(newL2BlockEndProto(blockNum)) return entries, nil } diff --git a/zk/legacy_executor_verifier/legacy_executor_verifier.go b/zk/legacy_executor_verifier/legacy_executor_verifier.go index f022e2163dc..da2a8c80ca2 100644 --- a/zk/legacy_executor_verifier/legacy_executor_verifier.go +++ b/zk/legacy_executor_verifier/legacy_executor_verifier.go @@ -2,6 +2,7 @@ package legacy_executor_verifier import ( "context" + "sync" "sync/atomic" "time" @@ -10,8 +11,6 @@ import ( "fmt" "strconv" - "sync" - "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" "github.com/gateway-fm/cdk-erigon-lib/common" "github.com/gateway-fm/cdk-erigon-lib/kv" @@ -85,20 +84,21 @@ type LegacyExecutorVerifier struct { executorNumber int cancelAllVerifications atomic.Bool - quit chan struct{} + // quit chan struct{} streamServer *server.DataStreamServer witnessGenerator WitnessGenerator - promises []*Promise[*VerifierBundle] - addedBatches map[uint64]struct{} + promises []*Promise[*VerifierBundle] + mtxPromises *sync.Mutex + // addedBatches 
map[uint64]struct{} // these three items are used to keep track of where the datastream is at // compared with the executor checks. It allows for data to arrive in strange // orders and will backfill the stream as needed. - lowestWrittenBatch uint64 - responsesToWrite map[uint64]struct{} - responsesMtx *sync.Mutex + // lowestWrittenBatch uint64 + // responsesToWrite map[uint64]struct{} + // responsesMtx *sync.Mutex } func NewLegacyExecutorVerifier( @@ -116,16 +116,43 @@ func NewLegacyExecutorVerifier( executors: executors, executorNumber: 0, cancelAllVerifications: atomic.Bool{}, - quit: make(chan struct{}), - streamServer: streamServer, - witnessGenerator: witnessGenerator, - promises: make([]*Promise[*VerifierBundle], 0), - addedBatches: make(map[uint64]struct{}), - responsesToWrite: map[uint64]struct{}{}, - responsesMtx: &sync.Mutex{}, + // quit: make(chan struct{}), + streamServer: streamServer, + witnessGenerator: witnessGenerator, + promises: make([]*Promise[*VerifierBundle], 0), + mtxPromises: &sync.Mutex{}, + // addedBatches: make(map[uint64]struct{}), + // responsesToWrite: map[uint64]struct{}{}, + // responsesMtx: &sync.Mutex{}, } } +func (v *LegacyExecutorVerifier) StartAsyncVerification( + forkId uint64, + batchNumber uint64, + stateRoot common.Hash, + counters map[string]int, + blockNumbers []uint64, + useRemoteExecutor bool, +) { + var promise *Promise[*VerifierBundle] + + request := NewVerifierRequest(batchNumber, blockNumbers[len(blockNumbers)-1], forkId, stateRoot, counters) + if useRemoteExecutor { + promise = v.VerifyAsync(request, blockNumbers) + } else { + promise = v.VerifyWithoutExecutor(request, blockNumbers) + } + + v.appendPromise(promise) +} + +func (v *LegacyExecutorVerifier) appendPromise(promise *Promise[*VerifierBundle]) { + v.mtxPromises.Lock() + defer v.mtxPromises.Unlock() + v.promises = append(v.promises, promise) +} + func (v *LegacyExecutorVerifier) VerifySync(tx kv.Tx, request *VerifierRequest, witness, streamBytes []byte, timestampLimit, firstBlockNumber uint64, l1InfoTreeMinTimestamps map[uint64]uint64) error { oldAccInputHash := common.HexToHash("0x0") payload := &Payload{ @@ -161,19 +188,159 @@ func (v *LegacyExecutorVerifier) VerifySync(tx kv.Tx, request *VerifierRequest, } // Unsafe is not thread-safe so it MUST be invoked only from a single thread -func (v *LegacyExecutorVerifier) AddRequestUnsafe(request *VerifierRequest, sequencerBatchSealTime time.Duration) *Promise[*VerifierBundle] { +// func (v *LegacyExecutorVerifier) AddRequestUnsafe(request *VerifierRequest, sequencerBatchSealTime time.Duration) *Promise[*VerifierBundle] { +// // eager promise will do the work as soon as called in a goroutine, then we can retrieve the result later +// // ProcessResultsSequentiallyUnsafe relies on the fact that this function returns ALWAYS non-verifierBundle and error. The only exception is the case when verifications has been canceled. 
Only then the verifierBundle can be nil +// promise := NewPromise[*VerifierBundle](func() (*VerifierBundle, error) { +// verifierBundle := NewVerifierBundle(request, nil) + +// e := v.GetNextOnlineAvailableExecutor() +// if e == nil { +// return verifierBundle, ErrNoExecutorAvailable +// } + +// t := utils.StartTimer("legacy-executor-verifier", "add-request-unsafe") + +// e.AquireAccess() +// defer e.ReleaseAccess() +// if v.cancelAllVerifications.Load() { +// return nil, ErrPromiseCancelled +// } + +// var err error +// var blocks []uint64 +// startTime := time.Now() +// ctx := context.Background() +// // mapmutation has some issue with us not having a quit channel on the context call to `Done` so +// // here we're creating a cancelable context and just deferring the cancel +// innerCtx, cancel := context.WithCancel(ctx) +// defer cancel() + +// // get the data stream bytes +// for time.Since(startTime) < 3*sequencerBatchSealTime { +// // we might not have blocks yet as the underlying stage loop might still be running and the tx hasn't been +// // committed yet so just requeue the request +// blocks, err = v.availableBlocksToProcess(innerCtx, request.BatchNumber) +// if err != nil { +// return verifierBundle, err +// } + +// if len(blocks) > 0 { +// break +// } + +// time.Sleep(time.Second) +// } + +// if len(blocks) == 0 { +// return verifierBundle, fmt.Errorf("still not blocks in this batch") +// } + +// tx, err := v.db.BeginRo(innerCtx) +// if err != nil { +// return verifierBundle, err +// } +// defer tx.Rollback() + +// hermezDb := hermez_db.NewHermezDbReader(tx) + +// l1InfoTreeMinTimestamps := make(map[uint64]uint64) +// streamBytes, err := v.GetWholeBatchStreamBytes(request.BatchNumber, tx, blocks, hermezDb, l1InfoTreeMinTimestamps, nil) +// if err != nil { +// return verifierBundle, err +// } + +// witness, err := v.witnessGenerator.GetWitnessByBlockRange(tx, ctx, blocks[0], blocks[len(blocks)-1], false, v.cfg.WitnessFull) +// if err != nil { +// return nil, err +// } + +// log.Debug("witness generated", "data", hex.EncodeToString(witness)) + +// // now we need to figure out the timestamp limit for this payload. 
It must be: +// // timestampLimit >= currentTimestamp (from batch pre-state) + deltaTimestamp +// // so to ensure we have a good value we can take the timestamp of the last block in the batch +// // and just add 5 minutes +// lastBlock, err := rawdb.ReadBlockByNumber(tx, blocks[len(blocks)-1]) +// if err != nil { +// return verifierBundle, err +// } + +// // executor is perfectly happy with just an empty hash here +// oldAccInputHash := common.HexToHash("0x0") +// timestampLimit := lastBlock.Time() +// payload := &Payload{ +// Witness: witness, +// DataStream: streamBytes, +// Coinbase: v.cfg.AddressSequencer.String(), +// OldAccInputHash: oldAccInputHash.Bytes(), +// L1InfoRoot: nil, +// TimestampLimit: timestampLimit, +// ForcedBlockhashL1: []byte{0}, +// ContextId: strconv.FormatUint(request.BatchNumber, 10), +// L1InfoTreeMinTimestamps: l1InfoTreeMinTimestamps, +// } + +// previousBlock, err := rawdb.ReadBlockByNumber(tx, blocks[0]-1) +// if err != nil { +// return verifierBundle, err +// } + +// ok, executorResponse, executorErr := e.Verify(payload, request, previousBlock.Root()) +// if executorErr != nil { +// if errors.Is(executorErr, ErrExecutorStateRootMismatch) { +// log.Error("[Verifier] State root mismatch detected", "err", executorErr) +// } else if errors.Is(executorErr, ErrExecutorUnknownError) { +// log.Error("[Verifier] Unexpected error found from executor", "err", executorErr) +// } else { +// log.Error("[Verifier] Error", "err", executorErr) +// } +// } + +// // log timing w/o stream write +// t.LogTimer() + +// if ok { +// if err = v.checkAndWriteToStream(tx, hermezDb, request.BatchNumber); err != nil { +// log.Error("error writing data to stream", "err", err) +// } +// } + +// verifierBundle.Response = &VerifierResponse{ +// BatchNumber: request.BatchNumber, +// Valid: ok, +// Witness: witness, +// ExecutorResponse: executorResponse, +// Error: executorErr, +// } +// return verifierBundle, nil +// }) + +// // add batch to the list of batches we've added +// v.addedBatches[request.BatchNumber] = struct{}{} + +// // add the promise to the list of promises +// v.promises = append(v.promises, promise) +// return promise +// } + +var counter = 0 + +func (v *LegacyExecutorVerifier) VerifyAsync(request *VerifierRequest, blockNumbers []uint64) *Promise[*VerifierBundle] { // eager promise will do the work as soon as called in a goroutine, then we can retrieve the result later // ProcessResultsSequentiallyUnsafe relies on the fact that this function returns ALWAYS non-verifierBundle and error. The only exception is the case when verifications has been canceled. 
Only then the verifierBundle can be nil - promise := NewPromise[*VerifierBundle](func() (*VerifierBundle, error) { + return NewPromise[*VerifierBundle](func() (*VerifierBundle, error) { verifierBundle := NewVerifierBundle(request, nil) + // bundleWithBlocks := &VerifierBundle{ + // Blocks: blockNumbers, + // Bundle: verifierBundle, + // } e := v.GetNextOnlineAvailableExecutor() if e == nil { return verifierBundle, ErrNoExecutorAvailable } - t := utils.StartTimer("legacy-executor-verifier", "add-request-unsafe") - e.AquireAccess() defer e.ReleaseAccess() if v.cancelAllVerifications.Load() { @@ -181,34 +348,12 @@ func (v *LegacyExecutorVerifier) AddRequestUnsafe(request *VerifierRequest, sequ } var err error - var blocks []uint64 - startTime := time.Now() ctx := context.Background() // mapmutation has some issue with us not having a quit channel on the context call to `Done` so // here we're creating a cancelable context and just deferring the cancel innerCtx, cancel := context.WithCancel(ctx) defer cancel() - // get the data stream bytes - for time.Since(startTime) < 3*sequencerBatchSealTime { - // we might not have blocks yet as the underlying stage loop might still be running and the tx hasn't been - // committed yet so just requeue the request - blocks, err = v.availableBlocksToProcess(innerCtx, request.BatchNumber) - if err != nil { - return verifierBundle, err - } - - if len(blocks) > 0 { - break - } - - time.Sleep(time.Second) - } - - if len(blocks) == 0 { - return verifierBundle, fmt.Errorf("still not blocks in this batch") - } - tx, err := v.db.BeginRo(innerCtx) if err != nil { return verifierBundle, err @@ -218,14 +363,14 @@ func (v *LegacyExecutorVerifier) AddRequestUnsafe(request *VerifierRequest, sequ hermezDb := hermez_db.NewHermezDbReader(tx) l1InfoTreeMinTimestamps := make(map[uint64]uint64) - streamBytes, err := v.GetWholeBatchStreamBytes(request.BatchNumber, tx, blocks, hermezDb, l1InfoTreeMinTimestamps, nil) + streamBytes, err := v.GetWholeBatchStreamBytes(request.BatchNumber, tx, blockNumbers, hermezDb, l1InfoTreeMinTimestamps, nil) if err != nil { return verifierBundle, err } - witness, err := v.witnessGenerator.GetWitnessByBlockRange(tx, ctx, blocks[0], blocks[len(blocks)-1], false, v.cfg.WitnessFull) + witness, err := v.witnessGenerator.GetWitnessByBlockRange(tx, ctx, blockNumbers[0], blockNumbers[len(blockNumbers)-1], false, v.cfg.WitnessFull) if err != nil { - return nil, err + return verifierBundle, err } log.Debug("witness generated", "data", hex.EncodeToString(witness)) @@ -234,7 +379,7 @@ func (v *LegacyExecutorVerifier) AddRequestUnsafe(request *VerifierRequest, sequ // timestampLimit >= currentTimestamp (from batch pre-state) + deltaTimestamp // so to ensure we have a good value we can take the timestamp of the last block in the batch // and just add 5 minutes - lastBlock, err := rawdb.ReadBlockByNumber(tx, blocks[len(blocks)-1]) + lastBlock, err := rawdb.ReadBlockByNumber(tx, blockNumbers[len(blockNumbers)-1]) if err != nil { return verifierBundle, err } @@ -254,12 +399,18 @@ func (v *LegacyExecutorVerifier) AddRequestUnsafe(request *VerifierRequest, sequ L1InfoTreeMinTimestamps: l1InfoTreeMinTimestamps, } - previousBlock, err := rawdb.ReadBlockByNumber(tx, blocks[0]-1) + previousBlock, err := rawdb.ReadBlockByNumber(tx, blockNumbers[0]-1) if err != nil { return verifierBundle, err } ok, executorResponse, executorErr := e.Verify(payload, request, previousBlock.Root()) + + if request.BlockNumber == 4 && counter == 0 { + ok = false + counter = 1 + } + if 
executorErr != nil { if errors.Is(executorErr, ErrExecutorStateRootMismatch) { log.Error("[Verifier] State root mismatch detected", "err", executorErr) @@ -270,17 +421,9 @@ func (v *LegacyExecutorVerifier) AddRequestUnsafe(request *VerifierRequest, sequ } } - // log timing w/o stream write - t.LogTimer() - - if ok { - if err = v.checkAndWriteToStream(tx, hermezDb, request.BatchNumber); err != nil { - log.Error("error writing data to stream", "err", err) - } - } - verifierBundle.Response = &VerifierResponse{ BatchNumber: request.BatchNumber, + BlockNumber: request.BlockNumber, Valid: ok, Witness: witness, ExecutorResponse: executorResponse, @@ -288,181 +431,45 @@ func (v *LegacyExecutorVerifier) AddRequestUnsafe(request *VerifierRequest, sequ } return verifierBundle, nil }) - - // add batch to the list of batches we've added - v.addedBatches[request.BatchNumber] = struct{}{} - - // add the promise to the list of promises - v.promises = append(v.promises, promise) - return promise } -type VerifierBundleWithBlocks struct { - Blocks []uint64 - Bundle *VerifierBundle -} - -func (v *LegacyExecutorVerifier) CreateAsyncPromise( - request *VerifierRequest, - blockNumbers []uint64, -) *Promise[*VerifierBundleWithBlocks] { - // eager promise will do the work as soon as called in a goroutine, then we can retrieve the result later - // ProcessResultsSequentiallyUnsafe relies on the fact that this function returns ALWAYS non-verifierBundle and error. The only exception is the case when verifications has been canceled. Only then the verifierBundle can be nil - promise := NewPromise[*VerifierBundleWithBlocks](func() (*VerifierBundleWithBlocks, error) { - verifierBundle := NewVerifierBundle(request, nil) - bundleWithBlocks := &VerifierBundleWithBlocks{ - Blocks: blockNumbers, - Bundle: verifierBundle, - } - - e := v.GetNextOnlineAvailableExecutor() - if e == nil { - return bundleWithBlocks, ErrNoExecutorAvailable - } - - e.AquireAccess() - defer e.ReleaseAccess() - if v.cancelAllVerifications.Load() { - return nil, ErrPromiseCancelled - } - - var err error - ctx := context.Background() - // mapmutation has some issue with us not having a quit channel on the context call to `Done` so - // here we're creating a cancelable context and just deferring the cancel - innerCtx, cancel := context.WithCancel(ctx) - defer cancel() +func (v *LegacyExecutorVerifier) VerifyWithoutExecutor(request *VerifierRequest, blockNumbers []uint64) *Promise[*VerifierBundle] { + valid := true + // simulate a die roll to determine if this is a good batch or not + // 1 in 6 chance of being a bad batch + // if rand.Intn(6) == 0 { + // valid = false + // } - tx, err := v.db.BeginRo(innerCtx) - if err != nil { - return bundleWithBlocks, err - } - defer tx.Rollback() - - hermezDb := hermez_db.NewHermezDbReader(tx) - - l1InfoTreeMinTimestamps := make(map[uint64]uint64) - streamBytes, err := v.GetWholeBatchStreamBytes(request.BatchNumber, tx, blockNumbers, hermezDb, l1InfoTreeMinTimestamps, nil) - if err != nil { - return bundleWithBlocks, err - } - - witness, err := v.witnessGenerator.GetWitnessByBlockRange(tx, ctx, blockNumbers[0], blockNumbers[len(blockNumbers)-1], false, v.cfg.WitnessFull) - if err != nil { - return nil, err - } - - log.Debug("witness generated", "data", hex.EncodeToString(witness)) - - // now we need to figure out the timestamp limit for this payload. 
It must be: - // timestampLimit >= currentTimestamp (from batch pre-state) + deltaTimestamp - // so to ensure we have a good value we can take the timestamp of the last block in the batch - // and just add 5 minutes - lastBlock, err := rawdb.ReadBlockByNumber(tx, blockNumbers[len(blockNumbers)-1]) - if err != nil { - return bundleWithBlocks, err - } - - // executor is perfectly happy with just an empty hash here - oldAccInputHash := common.HexToHash("0x0") - timestampLimit := lastBlock.Time() - payload := &Payload{ - Witness: witness, - DataStream: streamBytes, - Coinbase: v.cfg.AddressSequencer.String(), - OldAccInputHash: oldAccInputHash.Bytes(), - L1InfoRoot: nil, - TimestampLimit: timestampLimit, - ForcedBlockhashL1: []byte{0}, - ContextId: strconv.FormatUint(request.BatchNumber, 10), - L1InfoTreeMinTimestamps: l1InfoTreeMinTimestamps, - } - - previousBlock, err := rawdb.ReadBlockByNumber(tx, blockNumbers[0]-1) - if err != nil { - return bundleWithBlocks, err - } - - ok, executorResponse, executorErr := e.Verify(payload, request, previousBlock.Root()) - if executorErr != nil { - if errors.Is(executorErr, ErrExecutorStateRootMismatch) { - log.Error("[Verifier] State root mismatch detected", "err", executorErr) - } else if errors.Is(executorErr, ErrExecutorUnknownError) { - log.Error("[Verifier] Unexpected error found from executor", "err", executorErr) - } else { - log.Error("[Verifier] Error", "err", executorErr) - } - } - - verifierBundle.Response = &VerifierResponse{ + promise := NewPromise[*VerifierBundle](func() (*VerifierBundle, error) { + response := &VerifierResponse{ BatchNumber: request.BatchNumber, BlockNumber: request.BlockNumber, - Valid: ok, - Witness: witness, - ExecutorResponse: executorResponse, - Error: executorErr, + Valid: valid, + OriginalCounters: request.Counters, + Witness: nil, + ExecutorResponse: nil, + Error: nil, } - return bundleWithBlocks, nil + return NewVerifierBundle(request, response), nil }) + promise.Wait() return promise } -func (v *LegacyExecutorVerifier) checkAndWriteToStream(tx kv.Tx, hdb *hermez_db.HermezDbReader, newBatch uint64) error { - t := utils.StartTimer("legacy-executor-verifier", "check-and-write-to-stream") - defer t.LogTimer() - - v.responsesMtx.Lock() - defer v.responsesMtx.Unlock() +func (v *LegacyExecutorVerifier) ProcessResultsSequentially() ([]*VerifierBundle, int, error) { + v.mtxPromises.Lock() + defer v.mtxPromises.Unlock() - v.responsesToWrite[newBatch] = struct{}{} + var verifierResponse []*VerifierBundle - // if we haven't written anything yet - cold start of the node - if v.lowestWrittenBatch == 0 { - // we haven't written anything yet so lets make sure there is no gap - // in the stream for this batch - latestBatch, err := v.streamServer.GetHighestBatchNumber() - if err != nil { - return err - } - log.Info("[Verifier] Initialising on cold start", "latestBatch", latestBatch, "newBatch", newBatch) - - v.lowestWrittenBatch = latestBatch - - // check if we have the next batch we're waiting for - if latestBatch == newBatch-1 { - if err := v.WriteBatchToStream(newBatch, hdb, tx); err != nil { - return err - } - v.lowestWrittenBatch = newBatch - delete(v.responsesToWrite, newBatch) - } - } - - // now check if the batch we want next is good - for { - // check if we have the next batch to write - nextBatch := v.lowestWrittenBatch + 1 - if _, ok := v.responsesToWrite[nextBatch]; !ok { - break - } - - if err := v.WriteBatchToStream(nextBatch, hdb, tx); err != nil { - return err - } - delete(v.responsesToWrite, nextBatch) - 
v.lowestWrittenBatch = nextBatch - } - - return nil -} - -// Unsafe is not thread-safe so it MUST be invoked only from a single thread -func (v *LegacyExecutorVerifier) ProcessResultsSequentiallyUnsafe(tx kv.RwTx) ([]*VerifierResponse, error) { - results := make([]*VerifierResponse, 0, len(v.promises)) - for i := 0; i < len(v.promises); i++ { - verifierBundle, err := v.promises[i].TryGet() + // not a stop signal, so we can start to process our promises now + for idx, promise := range v.promises { + verifierBundle, err := promise.TryGet() if verifierBundle == nil && err == nil { + // If code enters here this means that this promise is not yet completed + // We must processes responses sequentially so if this one is not ready we can just break break } @@ -474,35 +481,122 @@ func (v *LegacyExecutorVerifier) ProcessResultsSequentiallyUnsafe(tx kv.RwTx) ([ } log.Error("error on our end while preparing the verification request, re-queueing the task", "err", err) - // this is an error on our end, so just re-create the promise at exact position where it was + if verifierBundle.Request.IsOverdue() { - return nil, fmt.Errorf("error: batch %d couldn't be processed in 30 minutes", verifierBundle.Request.BatchNumber) + // signal an error, the caller can check on this and stop the process if needs be + return nil, 0, fmt.Errorf("error: batch %d couldn't be processed in 30 minutes", verifierBundle.Request.BatchNumber) } - v.promises[i] = NewPromise[*VerifierBundle](v.promises[i].task) - break - } - - verifierResponse := verifierBundle.Response - results = append(results, verifierResponse) - delete(v.addedBatches, verifierResponse.BatchNumber) + // re-queue the task - it should be safe to replace the index of the slice here as we only add to it + v.promises[idx] = promise.CloneAndRerun() - // no point to process any further responses if we've found an invalid one - if !verifierResponse.Valid { + // break now as we know we can't proceed here until this promise is attempted again break } + + verifierResponse = append(verifierResponse, verifierBundle) } - return results, nil -} + // remove processed promises from the list + v.promises = v.promises[len(verifierResponse):] -func (v *LegacyExecutorVerifier) MarkTopResponseAsProcessed(batchNumber uint64) { - v.promises = v.promises[1:] - delete(v.addedBatches, batchNumber) + return verifierResponse, len(v.promises), nil } +// func (v *LegacyExecutorVerifier) checkAndWriteToStream(tx kv.Tx, hdb *hermez_db.HermezDbReader, newBatch uint64) error { +// t := utils.StartTimer("legacy-executor-verifier", "check-and-write-to-stream") +// defer t.LogTimer() + +// v.responsesMtx.Lock() +// defer v.responsesMtx.Unlock() + +// v.responsesToWrite[newBatch] = struct{}{} + +// // if we haven't written anything yet - cold start of the node +// if v.lowestWrittenBatch == 0 { +// // we haven't written anything yet so lets make sure there is no gap +// // in the stream for this batch +// latestBatch, err := v.streamServer.GetHighestBatchNumber() +// if err != nil { +// return err +// } +// log.Info("[Verifier] Initialising on cold start", "latestBatch", latestBatch, "newBatch", newBatch) + +// v.lowestWrittenBatch = latestBatch + +// // check if we have the next batch we're waiting for +// if latestBatch == newBatch-1 { +// if err := v.WriteBatchToStream(newBatch, hdb, tx); err != nil { +// return err +// } +// v.lowestWrittenBatch = newBatch +// delete(v.responsesToWrite, newBatch) +// } +// } + +// // now check if the batch we want next is good +// for { +// // check if we have 
the next batch to write +// nextBatch := v.lowestWrittenBatch + 1 +// if _, ok := v.responsesToWrite[nextBatch]; !ok { +// break +// } + +// if err := v.WriteBatchToStream(nextBatch, hdb, tx); err != nil { +// return err +// } +// delete(v.responsesToWrite, nextBatch) +// v.lowestWrittenBatch = nextBatch +// } + +// return nil +// } + // Unsafe is not thread-safe so it MUST be invoked only from a single thread -func (v *LegacyExecutorVerifier) CancelAllRequestsUnsafe() { +// func (v *LegacyExecutorVerifier) ProcessResultsSequentiallyUnsafe(tx kv.RwTx) ([]*VerifierResponse, error) { +// results := make([]*VerifierResponse, 0, len(v.promises)) +// for i := 0; i < len(v.promises); i++ { +// verifierBundle, err := v.promises[i].TryGet() +// if verifierBundle == nil && err == nil { +// break +// } + +// if err != nil { +// // let leave it for debug purposes +// // a cancelled promise is removed from v.promises => it should never appear here, that's why let's panic if it happens, because it will indicate for massive error +// if errors.Is(err, ErrPromiseCancelled) { +// panic("this should never happen") +// } + +// log.Error("error on our end while preparing the verification request, re-queueing the task", "err", err) +// // this is an error on our end, so just re-create the promise at exact position where it was +// if verifierBundle.Request.IsOverdue() { +// return nil, fmt.Errorf("error: batch %d couldn't be processed in 30 minutes", verifierBundle.Request.BatchNumber) +// } + +// v.promises[i] = NewPromise[*VerifierBundle](v.promises[i].task) +// break +// } + +// verifierResponse := verifierBundle.Response +// results = append(results, verifierResponse) +// delete(v.addedBatches, verifierResponse.BatchNumber) + +// // no point to process any further responses if we've found an invalid one +// if !verifierResponse.Valid { +// break +// } +// } + +// return results, nil +// } + +// func (v *LegacyExecutorVerifier) MarkTopResponseAsProcessed(batchNumber uint64) { +// v.promises = v.promises[1:] +// delete(v.addedBatches, batchNumber) +// } + +func (v *LegacyExecutorVerifier) CancelAllRequests() { // cancel all promises // all queued promises will return ErrPromiseCancelled while getting its result for _, p := range v.promises { @@ -523,28 +617,27 @@ func (v *LegacyExecutorVerifier) CancelAllRequestsUnsafe() { v.cancelAllVerifications.Store(false) v.promises = make([]*Promise[*VerifierBundle], 0) - v.addedBatches = map[uint64]struct{}{} } -// Unsafe is not thread-safe so it MUST be invoked only from a single thread -func (v *LegacyExecutorVerifier) HasExecutorsUnsafe() bool { - return len(v.executors) > 0 -} +// // Unsafe is not thread-safe so it MUST be invoked only from a single thread +// func (v *LegacyExecutorVerifier) HasExecutorsUnsafe() bool { +// return len(v.executors) > 0 +// } // Unsafe is not thread-safe so it MUST be invoked only from a single thread -func (v *LegacyExecutorVerifier) IsRequestAddedUnsafe(batch uint64) bool { - _, ok := v.addedBatches[batch] - return ok -} +// func (v *LegacyExecutorVerifier) IsRequestAddedUnsafe(batch uint64) bool { +// _, ok := v.addedBatches[batch] +// return ok +// } -func (v *LegacyExecutorVerifier) WriteBatchToStream(batchNumber uint64, hdb *hermez_db.HermezDbReader, roTx kv.Tx) error { - log.Info("[Verifier] Writing batch to stream", "batch", batchNumber) +// func (v *LegacyExecutorVerifier) WriteBatchToStream(batchNumber uint64, hdb *hermez_db.HermezDbReader, roTx kv.Tx) error { +// log.Info("[Verifier] Writing batch to stream", "batch", 
batchNumber) - if err := v.streamServer.WriteWholeBatchToStream("verifier", roTx, hdb, v.lowestWrittenBatch, batchNumber); err != nil { - return err - } - return nil -} +// if err := v.streamServer.WriteWholeBatchToStream("verifier", roTx, hdb, v.lowestWrittenBatch, batchNumber); err != nil { +// return err +// } +// return nil +// } func (v *LegacyExecutorVerifier) GetNextOnlineAvailableExecutor() *Executor { var exec *Executor @@ -567,31 +660,31 @@ func (v *LegacyExecutorVerifier) GetNextOnlineAvailableExecutor() *Executor { return exec } -func (v *LegacyExecutorVerifier) availableBlocksToProcess(innerCtx context.Context, batchNumber uint64) ([]uint64, error) { - tx, err := v.db.BeginRo(innerCtx) - if err != nil { - return []uint64{}, err - } - defer tx.Rollback() - - hermezDb := hermez_db.NewHermezDbReader(tx) - blocks, err := hermezDb.GetL2BlockNosByBatch(batchNumber) - if err != nil { - return []uint64{}, err - } - - for _, blockNum := range blocks { - block, err := rawdb.ReadBlockByNumber(tx, blockNum) - if err != nil { - return []uint64{}, err - } - if block == nil { - return []uint64{}, nil - } - } - - return blocks, nil -} +// func (v *LegacyExecutorVerifier) availableBlocksToProcess(innerCtx context.Context, batchNumber uint64) ([]uint64, error) { +// tx, err := v.db.BeginRo(innerCtx) +// if err != nil { +// return []uint64{}, err +// } +// defer tx.Rollback() + +// hermezDb := hermez_db.NewHermezDbReader(tx) +// blocks, err := hermezDb.GetL2BlockNosByBatch(batchNumber) +// if err != nil { +// return []uint64{}, err +// } + +// for _, blockNum := range blocks { +// block, err := rawdb.ReadBlockByNumber(tx, blockNum) +// if err != nil { +// return []uint64{}, err +// } +// if block == nil { +// return []uint64{}, nil +// } +// } + +// return blocks, nil +// } func (v *LegacyExecutorVerifier) GetWholeBatchStreamBytes( batchNumber uint64, diff --git a/zk/legacy_executor_verifier/promise.go b/zk/legacy_executor_verifier/promise.go index b374b730945..49c22cb04f7 100644 --- a/zk/legacy_executor_verifier/promise.go +++ b/zk/legacy_executor_verifier/promise.go @@ -40,28 +40,12 @@ func NewPromise[T any](task func() (T, error)) *Promise[T] { return p } -func NewPromiseSync[T any](task func() (T, error)) *Promise[T] { - p := &Promise[T]{} - - result, err := task() - p.mutex.Lock() - defer p.mutex.Unlock() // this will be the first defer that is executed when the function retunrs - - if p.cancelled { - err = ErrPromiseCancelled - } else { - p.result = result - p.err = err - } - - if err != nil { - p.task = task - } - return p +func (p *Promise[T]) Wait() { + p.wg.Wait() // .Wait ensures that all memory operations before .Done are visible after .Wait => no need to lock/unlock the mutex } func (p *Promise[T]) Get() (T, error) { - p.wg.Wait() // .Wait ensures that all memory operations before .Done are visible after .Wait => no need to lock/unlock the mutex + p.Wait() return p.result, p.err } @@ -77,6 +61,6 @@ func (p *Promise[T]) Cancel() { p.cancelled = true } -func (p *Promise[T]) Task() func() (T, error) { - return p.task +func (p *Promise[T]) CloneAndRerun() *Promise[T] { + return NewPromise[T](p.task) } diff --git a/zk/stages/stage_interhashes.go b/zk/stages/stage_interhashes.go index c22eee32ee4..ea24a1ed917 100644 --- a/zk/stages/stage_interhashes.go +++ b/zk/stages/stage_interhashes.go @@ -218,7 +218,7 @@ func UnwindZkIntermediateHashesStage(u *stagedsync.UnwindState, s *stagedsync.St expectedRootHash = syncHeadHeader.Root } - root, err := unwindZkSMT(ctx, s.LogPrefix(), s.BlockNumber, 
u.UnwindPoint, tx, true, &expectedRootHash, quit) + root, err := unwindZkSMT(ctx, s.LogPrefix(), s.BlockNumber, u.UnwindPoint, tx, cfg.checkRoot, &expectedRootHash, quit) if err != nil { return err } diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index e4bd6548088..3f675e18a29 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -74,6 +74,7 @@ func SpawnSequencingStage( return sdb.tx.Commit() } + // if !isLastBatchPariallyProcessed { // handle case where batch wasn't closed properly // close it before starting a new one // this occurs when sequencer was switched from syncer or sequencer datastream files were deleted @@ -81,6 +82,7 @@ func SpawnSequencingStage( if err = finalizeLastBatchInDatastreamIfNotFinalized(batchContext, batchState, executionAt); err != nil { return err } + // } if err := utils.UpdateZkEVMBlockCfg(cfg.chainConfig, sdb.hermezDb, logPrefix); err != nil { return err @@ -106,15 +108,15 @@ func SpawnSequencingStage( blockDataSizeChecker := NewBlockDataChecker() batchDataOverflow := false - batchVerifier := NewBatchVerifier(cfg.zk, batchState.hasExecutorForThisBatch, cfg.legacyVerifier, batchState.forkId) + // batchVerifier := NewBatchVerifier(cfg.zk, batchState.hasExecutorForThisBatch, cfg.legacyVerifier, batchState.forkId) streamWriter := &SequencerBatchStreamWriter{ - ctx: ctx, - logPrefix: logPrefix, - batchVerifier: batchVerifier, - sdb: sdb, - streamServer: cfg.datastreamServer, - hasExecutors: batchState.hasExecutorForThisBatch, - lastBatch: lastBatch, + ctx: ctx, + logPrefix: logPrefix, + legacyVerifier: cfg.legacyVerifier, + sdb: sdb, + streamServer: cfg.datastreamServer, + hasExecutors: batchState.hasExecutorForThisBatch, + lastBatch: lastBatch, } limboHeaderTimestamp, limboTxHash := cfg.txPool.GetLimboTxHash(batchState.batchNumber) @@ -152,7 +154,7 @@ func SpawnSequencingStage( if !isLastBatchPariallyProcessed { log.Info(fmt.Sprintf("[%s] Starting batch %d...", logPrefix, batchState.batchNumber)) } else { - log.Info(fmt.Sprintf("[%s] Continuing unfinished batch %d from block %d", logPrefix, batchState.batchNumber, executionAt)) + log.Info(fmt.Sprintf("[%s] Continuing unfinished batch %d from block %d", logPrefix, batchState.batchNumber, executionAt+1)) } var block *types.Block @@ -391,17 +393,27 @@ func SpawnSequencingStage( return err } - if err = sdb.CommitAndStart(); err != nil { - return err + // add a check to the verifier and also check for responses + batchState.onBuiltBlock(blockNumber) + + // commit block data here so it is accessible in other threads + if errCommitAndStart := sdb.CommitAndStart(); errCommitAndStart != nil { + return errCommitAndStart } defer sdb.tx.Rollback() - // add a check to the verifier and also check for responses - batchState.onBuiltBlock(blockNumber) - batchVerifier.AddNewCheck(batchState.batchNumber, blockNumber, block.Root(), batchCounters.CombineCollectorsNoChanges().UsedAsMap(), batchState.builtBlocks) + cfg.legacyVerifier.StartAsyncVerification(batchState.forkId, batchState.batchNumber, block.Root(), batchCounters.CombineCollectorsNoChanges().UsedAsMap(), batchState.builtBlocks, batchState.hasExecutorForThisBatch) // check for new responses from the verifier needsUnwind, _, err := updateStreamAndCheckRollback(batchContext, batchState, streamWriter, u) + + // lets commit everything after updateStreamAndCheckRollback no matter of its result + if errCommitAndStart := sdb.CommitAndStart(); errCommitAndStart != nil { + return errCommitAndStart + } + 
defer sdb.tx.Rollback() + + // check the return values of updateStreamAndCheckRollback and CommitAndStart if err != nil || needsUnwind { return err } diff --git a/zk/stages/stage_sequence_execute_batch.go b/zk/stages/stage_sequence_execute_batch.go index 1ec8838c50f..91376bed9b2 100644 --- a/zk/stages/stage_sequence_execute_batch.go +++ b/zk/stages/stage_sequence_execute_batch.go @@ -55,13 +55,16 @@ func doInstantCloseIfNeeded(batchContext *BatchContext, batchState *BatchState, // only close this batch down if we actually made any progress in it, otherwise // just continue processing as normal and recreate the batch from scratch if len(blocks) > 0 { + // if err = stages.SaveStageProgress(batchContext.sdb.tx, stages.HighestSeenBatchNumber, batchState.batchNumber); err != nil { + // return false, err + // } if err = runBatchLastSteps(batchContext, batchState.batchNumber, blocks[len(blocks)-1], batchCounters); err != nil { return false, err } - if err = stages.SaveStageProgress(batchContext.sdb.tx, stages.HighestSeenBatchNumber, batchState.batchNumber); err != nil { + if err = batchContext.sdb.hermezDb.WriteForkId(batchState.batchNumber, batchState.forkId); err != nil { return false, err } - if err = batchContext.sdb.hermezDb.WriteForkId(batchState.batchNumber, batchState.forkId); err != nil { + if err = updateSequencerProgress(batchContext.sdb.tx, blocks[len(blocks)-1], batchState.batchNumber, 1, false); err != nil { return false, err } @@ -122,40 +125,40 @@ func updateStreamAndCheckRollback( streamWriter *SequencerBatchStreamWriter, u stagedsync.Unwinder, ) (bool, int, error) { - committed, remaining, err := streamWriter.CommitNewUpdates(batchState.forkId) + committed, remaining, err := streamWriter.CommitNewUpdates() if err != nil { return false, remaining, err } + for _, commit := range committed { - if !commit.Valid { - // we are about to unwind so place the marker ready for this to happen - if err = batchContext.sdb.hermezDb.WriteJustUnwound(batchState.batchNumber); err != nil { - return false, 0, err - } - // capture the fork otherwise when the loop starts again to close - // off the batch it will detect it as a fork upgrade - if err = batchContext.sdb.hermezDb.WriteForkId(batchState.batchNumber, batchState.forkId); err != nil { - return false, 0, err - } - - unwindTo := commit.BlockNumber - 1 - - // for unwind we supply the block number X-1 of the block we want to remove, but supply the hash of the block - // causing the unwind. 
- unwindHeader := rawdb.ReadHeaderByNumber(batchContext.sdb.tx, commit.BlockNumber) - if unwindHeader == nil { - return false, 0, fmt.Errorf("could not find header for block %d", commit.BlockNumber) - } - - if err = batchContext.sdb.tx.Commit(); err != nil { - return false, 0, err - } - - log.Warn(fmt.Sprintf("[%s] Block is invalid - rolling back", batchContext.s.LogPrefix()), "badBlock", commit.BlockNumber, "unwindTo", unwindTo, "root", unwindHeader.Root) - - u.UnwindTo(unwindTo, unwindHeader.Hash()) - return true, 0, nil + if commit.Valid { + continue + } + + // we are about to unwind so place the marker ready for this to happen + if err = batchContext.sdb.hermezDb.WriteJustUnwound(batchState.batchNumber); err != nil { + return false, 0, err + } + // capture the fork otherwise when the loop starts again to close + // off the batch it will detect it as a fork upgrade + if err = batchContext.sdb.hermezDb.WriteForkId(batchState.batchNumber, batchState.forkId); err != nil { + return false, 0, err + } + + unwindTo := commit.BlockNumber - 1 + + // for unwind we supply the block number X-1 of the block we want to remove, but supply the hash of the block + // causing the unwind. + unwindHeader := rawdb.ReadHeaderByNumber(batchContext.sdb.tx, commit.BlockNumber) + if unwindHeader == nil { + return false, 0, fmt.Errorf("could not find header for block %d", commit.BlockNumber) } + + log.Warn(fmt.Sprintf("[%s] Block is invalid - rolling back", batchContext.s.LogPrefix()), "badBlock", commit.BlockNumber, "unwindTo", unwindTo, "root", unwindHeader.Root) + + u.UnwindTo(unwindTo, unwindHeader.Hash()) + streamWriter.legacyVerifier.CancelAllRequests() + return true, 0, nil } return false, remaining, nil diff --git a/zk/stages/stage_sequence_execute_blocks.go b/zk/stages/stage_sequence_execute_blocks.go index 1ef4ccef675..1f274756732 100644 --- a/zk/stages/stage_sequence_execute_blocks.go +++ b/zk/stages/stage_sequence_execute_blocks.go @@ -85,7 +85,7 @@ func doFinishBlockAndUpdateState( return nil, err } - if err := updateSequencerProgress(batchContext.sdb.tx, thisBlockNumber, batchState.batchNumber, l1InfoIndex); err != nil { + if err := updateSequencerProgress(batchContext.sdb.tx, thisBlockNumber, batchState.batchNumber, l1InfoIndex, false); err != nil { return nil, err } diff --git a/zk/stages/stage_sequence_execute_data_stream.go b/zk/stages/stage_sequence_execute_data_stream.go index 7ca0da97426..fe72404cb03 100644 --- a/zk/stages/stage_sequence_execute_data_stream.go +++ b/zk/stages/stage_sequence_execute_data_stream.go @@ -12,13 +12,13 @@ import ( ) type SequencerBatchStreamWriter struct { - ctx context.Context - logPrefix string - batchVerifier *BatchVerifier - sdb *stageDb - streamServer *server.DataStreamServer - hasExecutors bool - lastBatch uint64 + ctx context.Context + logPrefix string + legacyVerifier *verifier.LegacyExecutorVerifier + sdb *stageDb + streamServer *server.DataStreamServer + hasExecutors bool + lastBatch uint64 } type BlockStatus struct { @@ -27,9 +27,9 @@ type BlockStatus struct { Error error } -func (sbc *SequencerBatchStreamWriter) CommitNewUpdates(forkId uint64) ([]BlockStatus, int, error) { +func (sbc *SequencerBatchStreamWriter) CommitNewUpdates() ([]BlockStatus, int, error) { var written []BlockStatus - responses, remaining, err := sbc.batchVerifier.CheckProgress() + responses, remaining, err := sbc.legacyVerifier.ProcessResultsSequentially() if err != nil { return written, remaining, err } @@ -38,7 +38,7 @@ func (sbc *SequencerBatchStreamWriter) 
CommitNewUpdates(forkId uint64) ([]BlockS return written, remaining, nil } - written, err = sbc.writeBlockDetails(responses, forkId) + written, err = sbc.writeBlockDetailsToDatastream(responses) if err != nil { return written, remaining, err } @@ -46,9 +46,10 @@ func (sbc *SequencerBatchStreamWriter) CommitNewUpdates(forkId uint64) ([]BlockS return written, remaining, nil } -func (sbc *SequencerBatchStreamWriter) writeBlockDetails(verifiedBundles []*verifier.VerifierBundle, forkId uint64) ([]BlockStatus, error) { +func (sbc *SequencerBatchStreamWriter) writeBlockDetailsToDatastream(verifiedBundles []*verifier.VerifierBundle) ([]BlockStatus, error) { var written []BlockStatus for _, bundle := range verifiedBundles { + request := bundle.Request response := bundle.Response if response.Valid { @@ -61,7 +62,7 @@ func (sbc *SequencerBatchStreamWriter) writeBlockDetails(verifiedBundles []*veri return written, err } - if err := sbc.streamServer.WriteBlockWithBatchStartToStream(sbc.logPrefix, sbc.sdb.tx, sbc.sdb.hermezDb, forkId, response.BatchNumber, sbc.lastBatch, *parentBlock, *block); err != nil { + if err := sbc.streamServer.WriteBlockWithBatchStartToStream(sbc.logPrefix, sbc.sdb.tx, sbc.sdb.hermezDb, request.ForkId, response.BatchNumber, sbc.lastBatch, *parentBlock, *block); err != nil { return written, err } diff --git a/zk/stages/stage_sequence_execute_unwind.go b/zk/stages/stage_sequence_execute_unwind.go index dca61c79a2b..e2f6d0a1436 100644 --- a/zk/stages/stage_sequence_execute_unwind.go +++ b/zk/stages/stage_sequence_execute_unwind.go @@ -47,6 +47,9 @@ func UnwindSequenceExecutionStage(u *stagedsync.UnwindState, s *stagedsync.Stage func unwindSequenceExecutionStage(u *stagedsync.UnwindState, s *stagedsync.StageState, tx kv.RwTx, ctx context.Context, cfg SequenceBlockCfg, initialCycle bool) error { hermezDb := hermez_db.NewHermezDb(tx) fromBatch, err := hermezDb.GetBatchNoByL2Block(u.UnwindPoint) + if err != nil { + return err + } if err := stagedsync.UnwindExecutionStageErigon(u, s, tx, ctx, cfg.toErigonExecuteBlockCfg(), initialCycle); err != nil { return err @@ -60,7 +63,8 @@ func unwindSequenceExecutionStage(u *stagedsync.UnwindState, s *stagedsync.Stage return err } - if err = updateSequencerProgress(tx, u.UnwindPoint, fromBatch, 1); err != nil { + //TODO: why l1infoindex is 1? + if err = updateSequencerProgress(tx, u.UnwindPoint, fromBatch, 1, true); err != nil { return err } diff --git a/zk/stages/stage_sequence_execute_utils.go b/zk/stages/stage_sequence_execute_utils.go index c19b1716210..051a4caadbe 100644 --- a/zk/stages/stage_sequence_execute_utils.go +++ b/zk/stages/stage_sequence_execute_utils.go @@ -190,6 +190,7 @@ func prepareForkId(lastBatch, executionAt uint64, hermezDb forkDb) (uint64, erro } } + latest = 11 if latest == 0 { return 0, fmt.Errorf("could not find a suitable fork for batch %v, cannot start sequencer, check contract configuration", lastBatch+1) } @@ -307,7 +308,7 @@ func calculateNextL1TreeUpdateToUse(lastInfoIndex uint64, hermezDb *hermez_db.He return nextL1Index, l1Info, nil } -func updateSequencerProgress(tx kv.RwTx, newHeight uint64, newBatch uint64, l1InfoIndex uint64) error { +func updateSequencerProgress(tx kv.RwTx, newHeight uint64, newBatch uint64, l1InfoIndex uint64, unwinding bool) error { // now update stages that will be used later on in stageloop.go and other stages. 
As we're the sequencer // we won't have headers stage for example as we're already writing them here if err := stages.SaveStageProgress(tx, stages.Execution, newHeight); err != nil { @@ -323,6 +324,13 @@ func updateSequencerProgress(tx kv.RwTx, newHeight uint64, newBatch uint64, l1In return err } + if !unwinding { + // Update interhashes stage progress + if err := stages.SaveStageProgress(tx, stages.IntermediateHashes, newHeight); err != nil { + return err + } + } + return nil } diff --git a/zk/stages/stage_sequence_execute_verifier.go b/zk/stages/stage_sequence_execute_verifier.go index a0b12eab3a1..c30498bed81 100644 --- a/zk/stages/stage_sequence_execute_verifier.go +++ b/zk/stages/stage_sequence_execute_verifier.go @@ -1,190 +1,180 @@ package stages -import ( - "errors" - "fmt" - "sync" - - "github.com/gateway-fm/cdk-erigon-lib/common" - "github.com/ledgerwatch/erigon/eth/ethconfig" - verifier "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" - "github.com/ledgerwatch/log/v3" -) - -type PromiseWithBlocks struct { - Promise *verifier.Promise[*verifier.VerifierBundleWithBlocks] - Blocks []uint64 -} - -type BatchVerifier struct { - cfg *ethconfig.Zk - legacyVerifier *verifier.LegacyExecutorVerifier - hasExecutor bool - forkId uint64 - mtxPromises *sync.Mutex - promises []*PromiseWithBlocks - stop bool - errors chan error - finishCond *sync.Cond -} - -func NewBatchVerifier( - cfg *ethconfig.Zk, - hasExecutors bool, - legacyVerifier *verifier.LegacyExecutorVerifier, - forkId uint64, -) *BatchVerifier { - return &BatchVerifier{ - cfg: cfg, - hasExecutor: hasExecutors, - legacyVerifier: legacyVerifier, - forkId: forkId, - mtxPromises: &sync.Mutex{}, - promises: make([]*PromiseWithBlocks, 0), - errors: make(chan error), - finishCond: sync.NewCond(&sync.Mutex{}), - } -} - -func (bv *BatchVerifier) AddNewCheck( - batchNumber uint64, - blockNumber uint64, - stateRoot common.Hash, - counters map[string]int, - blockNumbers []uint64, -) { - request := verifier.NewVerifierRequest(batchNumber, blockNumber, bv.forkId, stateRoot, counters) - - var promise *PromiseWithBlocks - if bv.hasExecutor { - promise = bv.asyncPromise(request, blockNumbers) - } else { - promise = bv.syncPromise(request, blockNumbers) - } - - bv.appendPromise(promise) -} - -func (bv *BatchVerifier) WaitForFinish() { - count := 0 - bv.mtxPromises.Lock() - count = len(bv.promises) - bv.mtxPromises.Unlock() - - if count > 0 { - bv.finishCond.L.Lock() - bv.finishCond.Wait() - bv.finishCond.L.Unlock() - } -} - -func (bv *BatchVerifier) appendPromise(promise *PromiseWithBlocks) { - bv.mtxPromises.Lock() - defer bv.mtxPromises.Unlock() - bv.promises = append(bv.promises, promise) -} - -func (bv *BatchVerifier) CheckProgress() ([]*verifier.VerifierBundle, int, error) { - bv.mtxPromises.Lock() - defer bv.mtxPromises.Unlock() - - var responses []*verifier.VerifierBundle - - // not a stop signal, so we can start to process our promises now - processed := 0 - for idx, promise := range bv.promises { - bundleWithBlocks, err := promise.Promise.TryGet() - if bundleWithBlocks == nil && err == nil { - // nothing to process in this promise so we skip it - break - } - - if err != nil { - // let leave it for debug purposes - // a cancelled promise is removed from v.promises => it should never appear here, that's why let's panic if it happens, because it will indicate for massive error - if errors.Is(err, verifier.ErrPromiseCancelled) { - panic("this should never happen") - } - - log.Error("error on our end while preparing the verification 
request, re-queueing the task", "err", err) - - if bundleWithBlocks == nil { - // we can't proceed here until this promise is attempted again - break - } - - if bundleWithBlocks.Bundle.Request.IsOverdue() { - // signal an error, the caller can check on this and stop the process if needs be - return nil, 0, fmt.Errorf("error: batch %d couldn't be processed in 30 minutes", bundleWithBlocks.Bundle.Request.BatchNumber) - } - - // re-queue the task - it should be safe to replace the index of the slice here as we only add to it - if bv.hasExecutor { - prom := bv.asyncPromise(bundleWithBlocks.Bundle.Request, bundleWithBlocks.Blocks) - bv.promises[idx] = prom - } else { - prom := bv.syncPromise(bundleWithBlocks.Bundle.Request, bundleWithBlocks.Blocks) - bv.promises[idx] = prom - } - - // break now as we know we can't proceed here until this promise is attempted again - break - } - - processed++ - responses = append(responses, bundleWithBlocks.Bundle) - } - - // remove processed promises from the list - remaining := bv.removeProcessedPromises(processed) - - return responses, remaining, nil -} - -func (bv *BatchVerifier) removeProcessedPromises(processed int) int { - count := len(bv.promises) - - if processed == 0 { - return count - } - - if processed == len(bv.promises) { - bv.promises = make([]*PromiseWithBlocks, 0) - return 0 - } - - bv.promises = bv.promises[processed:] - - return len(bv.promises) -} - -func (bv *BatchVerifier) syncPromise(request *verifier.VerifierRequest, blockNumbers []uint64) *PromiseWithBlocks { - valid := true - // simulate a die roll to determine if this is a good batch or not - // 1 in 6 chance of being a bad batch - // if rand.Intn(6) == 0 { - // valid = false - // } - - promise := verifier.NewPromiseSync[*verifier.VerifierBundleWithBlocks](func() (*verifier.VerifierBundleWithBlocks, error) { - response := &verifier.VerifierResponse{ - BatchNumber: request.BatchNumber, - BlockNumber: request.BlockNumber, - Valid: valid, - OriginalCounters: request.Counters, - Witness: nil, - ExecutorResponse: nil, - Error: nil, - } - bundle := verifier.NewVerifierBundle(request, response) - return &verifier.VerifierBundleWithBlocks{Blocks: blockNumbers, Bundle: bundle}, nil - }) - - return &PromiseWithBlocks{Blocks: blockNumbers, Promise: promise} -} - -func (bv *BatchVerifier) asyncPromise(request *verifier.VerifierRequest, blockNumbers []uint64) *PromiseWithBlocks { - promise := bv.legacyVerifier.CreateAsyncPromise(request, blockNumbers) - - return &PromiseWithBlocks{Blocks: blockNumbers, Promise: promise} -} +// import ( +// "errors" +// "fmt" +// "sync" + +// "github.com/gateway-fm/cdk-erigon-lib/common" +// "github.com/ledgerwatch/erigon/eth/ethconfig" +// verifier "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" +// "github.com/ledgerwatch/log/v3" +// ) + +// type BatchVerifier struct { +// cfg *ethconfig.Zk +// legacyVerifier *verifier.LegacyExecutorVerifier +// hasExecutor bool +// forkId uint64 +// promises []*verifier.Promise[*verifier.VerifierBundle] +// mtxPromises *sync.Mutex +// // stop bool +// // errors chan error +// // finishCond *sync.Cond +// } + +// func NewBatchVerifier( +// cfg *ethconfig.Zk, +// hasExecutors bool, +// legacyVerifier *verifier.LegacyExecutorVerifier, +// forkId uint64, +// ) *BatchVerifier { +// return &BatchVerifier{ +// cfg: cfg, +// hasExecutor: hasExecutors, +// legacyVerifier: legacyVerifier, +// forkId: forkId, +// mtxPromises: &sync.Mutex{}, +// promises: make([]*verifier.Promise[*verifier.VerifierBundle], 0), +// // errors: 
make(chan error), +// // finishCond: sync.NewCond(&sync.Mutex{}), +// } +// } + +// func (bv *BatchVerifier) StartAsyncVerification( +// batchNumber uint64, +// blockNumber uint64, +// stateRoot common.Hash, +// counters map[string]int, +// blockNumbers []uint64, +// ) { +// request := verifier.NewVerifierRequest(batchNumber, blockNumber, bv.forkId, stateRoot, counters) + +// var promise *verifier.Promise[*verifier.VerifierBundle] +// if bv.hasExecutor { +// promise = bv.verifyWithExecutor(request, blockNumbers) +// } else { +// promise = bv.verifyWithoutExecutor(request, blockNumbers) +// } + +// bv.appendPromise(promise) +// } + +// func (bv *BatchVerifier) CheckProgress() ([]*verifier.VerifierBundle, int, error) { +// bv.mtxPromises.Lock() +// defer bv.mtxPromises.Unlock() + +// var verifierResponse []*verifier.VerifierBundle + +// // not a stop signal, so we can start to process our promises now +// for idx, promise := range bv.promises { +// verifierBundle, err := promise.TryGet() +// if verifierBundle == nil && err == nil { +// // If code enters here this means that this promise is not yet completed +// // We must processes responses sequentially so if this one is not ready we can just break +// break +// } + +// if err != nil { +// // let leave it for debug purposes +// // a cancelled promise is removed from v.promises => it should never appear here, that's why let's panic if it happens, because it will indicate for massive error +// if errors.Is(err, verifier.ErrPromiseCancelled) { +// panic("this should never happen") +// } + +// log.Error("error on our end while preparing the verification request, re-queueing the task", "err", err) + +// if verifierBundle.Request.IsOverdue() { +// // signal an error, the caller can check on this and stop the process if needs be +// return nil, 0, fmt.Errorf("error: batch %d couldn't be processed in 30 minutes", verifierBundle.Request.BatchNumber) +// } + +// // re-queue the task - it should be safe to replace the index of the slice here as we only add to it +// bv.promises[idx] = promise.CloneAndRerun() + +// // break now as we know we can't proceed here until this promise is attempted again +// break +// } + +// verifierResponse = append(verifierResponse, verifierBundle) +// } + +// // remove processed promises from the list +// bv.promises = bv.promises[len(verifierResponse):] + +// return verifierResponse, len(bv.promises), nil +// } + +// // func (bv *BatchVerifier) CancelAllRequestsUnsafe() { +// // bv.mtxPromises.Lock() +// // defer bv.mtxPromises.Unlock() + +// // // cancel all promises +// // // all queued promises will return ErrPromiseCancelled while getting its result +// // for _, p := range bv.promises { +// // p.Cancel() +// // } + +// // // the goal of this car is to ensure that running promises are stopped as soon as possible +// // // we need it because the promise's function must finish and then the promise checks if it has been cancelled +// // bv.legacyVerifier.cancelAllVerifications.Store(true) + +// // for _, e := range bv.legacyVerifier.executors { +// // // let's wait for all threads that are waiting to add to v.openRequests to finish +// // for e.QueueLength() > 0 { +// // time.Sleep(1 * time.Millisecond) +// // } +// // } + +// // bv.legacyVerifier.cancelAllVerifications.Store(false) + +// // bv.promises = make([]*verifier.Promise[*verifier.VerifierBundle], 0) +// // } + +// // func (bv *BatchVerifier) WaitForFinish() { +// // count := 0 +// // bv.mtxPromises.Lock() +// // count = len(bv.promises) +// // 
bv.mtxPromises.Unlock() + +// // if count > 0 { +// // bv.finishCond.L.Lock() +// // bv.finishCond.Wait() +// // bv.finishCond.L.Unlock() +// // } +// // } + +// func (bv *BatchVerifier) appendPromise(promise *verifier.Promise[*verifier.VerifierBundle]) { +// bv.mtxPromises.Lock() +// defer bv.mtxPromises.Unlock() +// bv.promises = append(bv.promises, promise) +// } + +// func (bv *BatchVerifier) verifyWithoutExecutor(request *verifier.VerifierRequest, blockNumbers []uint64) *verifier.Promise[*verifier.VerifierBundle] { +// valid := true +// // simulate a die roll to determine if this is a good batch or not +// // 1 in 6 chance of being a bad batch +// // if rand.Intn(6) == 0 { +// // valid = false +// // } + +// promise := verifier.NewPromise[*verifier.VerifierBundle](func() (*verifier.VerifierBundle, error) { +// response := &verifier.VerifierResponse{ +// BatchNumber: request.BatchNumber, +// BlockNumber: request.BlockNumber, +// Valid: valid, +// OriginalCounters: request.Counters, +// Witness: nil, +// ExecutorResponse: nil, +// Error: nil, +// } +// return verifier.NewVerifierBundle(request, response), nil +// }) +// promise.Wait() + +// return promise +// } + +// func (bv *BatchVerifier) verifyWithExecutor(request *verifier.VerifierRequest, blockNumbers []uint64) *verifier.Promise[*verifier.VerifierBundle] { +// return bv.legacyVerifier.VerifyAsync(request, blockNumbers) +// } diff --git a/zk/stages/stage_sequencer_interhashes.go b/zk/stages/stage_sequencer_interhashes.go index 2a1d0883f6d..07eff2e521d 100644 --- a/zk/stages/stage_sequencer_interhashes.go +++ b/zk/stages/stage_sequencer_interhashes.go @@ -5,58 +5,58 @@ import ( "github.com/gateway-fm/cdk-erigon-lib/kv" "github.com/ledgerwatch/erigon/eth/stagedsync" - "github.com/ledgerwatch/erigon/turbo/shards" ) -type SequencerInterhashesCfg struct { - db kv.RwDB - accumulator *shards.Accumulator -} +// type SequencerInterhashesCfg struct { +// db kv.RwDB +// accumulator *shards.Accumulator +// } -func StageSequencerInterhashesCfg( - db kv.RwDB, - accumulator *shards.Accumulator, -) SequencerInterhashesCfg { - return SequencerInterhashesCfg{ - db: db, - accumulator: accumulator, - } -} +// func StageSequencerInterhashesCfg( +// db kv.RwDB, +// accumulator *shards.Accumulator, +// ) SequencerInterhashesCfg { +// return SequencerInterhashesCfg{ +// db: db, +// accumulator: accumulator, +// } +// } // This stages does NOTHING while going forward, because its done during execution +// Even this stage progress is updated in execution stage func SpawnSequencerInterhashesStage( s *stagedsync.StageState, u stagedsync.Unwinder, tx kv.RwTx, ctx context.Context, - cfg SequencerInterhashesCfg, + cfg ZkInterHashesCfg, quiet bool, ) error { - var err error + // var err error - freshTx := tx == nil - if freshTx { - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - } + // freshTx := tx == nil + // if freshTx { + // tx, err = cfg.db.BeginRw(ctx) + // if err != nil { + // return err + // } + // defer tx.Rollback() + // } - to, err := s.ExecutionAt(tx) - if err != nil { - return err - } + // to, err := s.ExecutionAt(tx) + // if err != nil { + // return err + // } - if err := s.Update(tx, to); err != nil { - return err - } + // if err := s.Update(tx, to); err != nil { + // return err + // } - if freshTx { - if err = tx.Commit(); err != nil { - return err - } - } + // if freshTx { + // if err = tx.Commit(); err != nil { + // return err + // } + // } return nil } @@ -68,15 +68,15 @@ func 
UnwindSequencerInterhashsStage( s *stagedsync.StageState, tx kv.RwTx, ctx context.Context, - cfg SequencerInterhashesCfg, + cfg ZkInterHashesCfg, ) error { - return UnwindZkIntermediateHashesStage(u, s, tx, ZkInterHashesCfg{}, ctx) + return UnwindZkIntermediateHashesStage(u, s, tx, cfg, ctx) } func PruneSequencerInterhashesStage( s *stagedsync.PruneState, tx kv.RwTx, - cfg SequencerInterhashesCfg, + cfg ZkInterHashesCfg, ctx context.Context, ) error { return nil diff --git a/zk/stages/stages.go b/zk/stages/stages.go index d94be29059d..ad584d53366 100644 --- a/zk/stages/stages.go +++ b/zk/stages/stages.go @@ -17,7 +17,7 @@ func SequencerZkStages( l1InfoTreeCfg L1InfoTreeCfg, sequencerL1BlockSyncCfg SequencerL1BlockSyncCfg, dataStreamCatchupCfg DataStreamCatchupCfg, - sequencerInterhashesCfg SequencerInterhashesCfg, + // sequencerInterhashesCfg SequencerInterhashesCfg, exec SequenceBlockCfg, hashState stages.HashStateCfg, zkInterHashesCfg ZkInterHashesCfg, @@ -119,13 +119,13 @@ func SequencerZkStages( ID: stages2.IntermediateHashes, Description: "Sequencer Intermediate Hashes", Forward: func(firstCycle bool, badBlockUnwind bool, s *stages.StageState, u stages.Unwinder, tx kv.RwTx, quiet bool) error { - return SpawnSequencerInterhashesStage(s, u, tx, ctx, sequencerInterhashesCfg, quiet) + return SpawnSequencerInterhashesStage(s, u, tx, ctx, zkInterHashesCfg, quiet) }, Unwind: func(firstCycle bool, u *stages.UnwindState, s *stages.StageState, tx kv.RwTx) error { - return UnwindSequencerInterhashsStage(u, s, tx, ctx, sequencerInterhashesCfg) + return UnwindSequencerInterhashsStage(u, s, tx, ctx, zkInterHashesCfg) }, Prune: func(firstCycle bool, p *stages.PruneState, tx kv.RwTx) error { - return PruneSequencerInterhashesStage(p, tx, sequencerInterhashesCfg, ctx) + return PruneSequencerInterhashesStage(p, tx, zkInterHashesCfg, ctx) }, }, // { From 75b6051039dcaa4638f7bfc18d24acf97341a453 Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Wed, 31 Jul 2024 08:39:06 +0000 Subject: [PATCH 11/33] add missing return err --- core/blockchain_zkevm.go | 3 +++ zk/legacy_executor_verifier/legacy_executor_verifier.go | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/core/blockchain_zkevm.go b/core/blockchain_zkevm.go index 1208cb6782a..71c08feb99c 100644 --- a/core/blockchain_zkevm.go +++ b/core/blockchain_zkevm.go @@ -319,6 +319,9 @@ func FinalizeBlockExecutionWithHistoryWrite( isMining, excessDataGas, ) + if err != nil { + return nil, nil, nil, err + } if err := stateWriter.WriteHistory(); err != nil { return nil, nil, nil, fmt.Errorf("writing history for block %d failed: %w", header.Number.Uint64(), err) diff --git a/zk/legacy_executor_verifier/legacy_executor_verifier.go b/zk/legacy_executor_verifier/legacy_executor_verifier.go index da2a8c80ca2..eb41e366dd8 100644 --- a/zk/legacy_executor_verifier/legacy_executor_verifier.go +++ b/zk/legacy_executor_verifier/legacy_executor_verifier.go @@ -407,7 +407,7 @@ func (v *LegacyExecutorVerifier) VerifyAsync(request *VerifierRequest, blockNumb ok, executorResponse, executorErr := e.Verify(payload, request, previousBlock.Root()) if request.BlockNumber == 4 && counter == 0 { - ok = false + // ok = false counter = 1 } From 8c42301ca2927855af2d3224bc6bc8982709e13c Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Wed, 31 Jul 2024 11:09:24 +0000 Subject: [PATCH 12/33] add account + storage index into execution stage --- core/blockchain_zkevm.go | 66 +++++++++++----------- eth/stagedsync/stage_indexes_zkevm.go | 9 +++ 
zk/stages/stage_sequence_execute.go | 3 +- zk/stages/stage_sequence_execute_blocks.go | 17 +++++- zk/stages/stage_sequence_execute_state.go | 20 ++++--- zk/stages/stage_sequence_execute_utils.go | 9 ++- zk/stages/stages.go | 8 ++- 7 files changed, 84 insertions(+), 48 deletions(-) create mode 100644 eth/stagedsync/stage_indexes_zkevm.go diff --git a/core/blockchain_zkevm.go b/core/blockchain_zkevm.go index 71c08feb99c..6c338eabab1 100644 --- a/core/blockchain_zkevm.go +++ b/core/blockchain_zkevm.go @@ -296,39 +296,39 @@ func PrepareBlockTxExecution( return &blockContextImpl, excessDataGas, &blockGer, &blockL1BlockHash, nil } -func FinalizeBlockExecutionWithHistoryWrite( - engine consensus.Engine, stateReader state.StateReader, - header *types.Header, txs types.Transactions, uncles []*types.Header, - stateWriter state.WriterWithChangeSets, cc *chain.Config, - ibs *state.IntraBlockState, receipts types.Receipts, - withdrawals []*types.Withdrawal, headerReader consensus.ChainHeaderReader, - isMining bool, excessDataGas *big.Int, -) (newBlock *types.Block, newTxs types.Transactions, newReceipt types.Receipts, err error) { - newBlock, newTxs, newReceipt, err = FinalizeBlockExecution( - engine, - stateReader, - header, - txs, - uncles, - stateWriter, - cc, - ibs, - receipts, - withdrawals, - headerReader, - isMining, - excessDataGas, - ) - if err != nil { - return nil, nil, nil, err - } - - if err := stateWriter.WriteHistory(); err != nil { - return nil, nil, nil, fmt.Errorf("writing history for block %d failed: %w", header.Number.Uint64(), err) - } - - return newBlock, newTxs, newReceipt, nil -} +// func FinalizeBlockExecutionWithHistoryWrite( +// engine consensus.Engine, stateReader state.StateReader, +// header *types.Header, txs types.Transactions, uncles []*types.Header, +// stateWriter state.WriterWithChangeSets, cc *chain.Config, +// ibs *state.IntraBlockState, receipts types.Receipts, +// withdrawals []*types.Withdrawal, headerReader consensus.ChainHeaderReader, +// isMining bool, excessDataGas *big.Int, +// ) (newBlock *types.Block, newTxs types.Transactions, newReceipt types.Receipts, err error) { +// newBlock, newTxs, newReceipt, err = FinalizeBlockExecution( +// engine, +// stateReader, +// header, +// txs, +// uncles, +// stateWriter, +// cc, +// ibs, +// receipts, +// withdrawals, +// headerReader, +// isMining, +// excessDataGas, +// ) +// if err != nil { +// return nil, nil, nil, err +// } + +// if err := stateWriter.WriteHistory(); err != nil { +// return nil, nil, nil, fmt.Errorf("writing history for block %d failed: %w", header.Number.Uint64(), err) +// } + +// return newBlock, newTxs, newReceipt, nil +// } func CreateReceiptForBlockInfoTree(receipt *types.Receipt, chainConfig *chain.Config, blockNum uint64, execResult *ExecutionResult) *types.Receipt { // [hack]TODO: remove this after bug is fixed diff --git a/eth/stagedsync/stage_indexes_zkevm.go b/eth/stagedsync/stage_indexes_zkevm.go new file mode 100644 index 00000000000..716153d949d --- /dev/null +++ b/eth/stagedsync/stage_indexes_zkevm.go @@ -0,0 +1,9 @@ +package stagedsync + +import ( + "github.com/gateway-fm/cdk-erigon-lib/kv" +) + +func PromoteHistory(logPrefix string, tx kv.RwTx, changesetBucket string, start, stop uint64, cfg HistoryCfg, quit <-chan struct{}) error { + return promoteHistory(logPrefix, tx, changesetBucket, start, stop, cfg, quit) +} diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index 3f675e18a29..860a818b047 100644 --- a/zk/stages/stage_sequence_execute.go 
+++ b/zk/stages/stage_sequence_execute.go @@ -26,6 +26,7 @@ func SpawnSequencingStage( rootTx kv.RwTx, ctx context.Context, cfg SequenceBlockCfg, + historyCfg stagedsync.HistoryCfg, quiet bool, ) (err error) { logPrefix := s.LogPrefix() @@ -58,7 +59,7 @@ func SpawnSequencingStage( return err } - batchContext := newBatchContext(ctx, &cfg, s, sdb) + batchContext := newBatchContext(ctx, &cfg, &historyCfg, s, sdb) batchState := newBatchState(forkId, prepareBatchNumber(lastBatch, isLastBatchPariallyProcessed), !isLastBatchPariallyProcessed && cfg.zk.HasExecutors(), cfg.zk.L1SyncStartBlock > 0) // injected batch diff --git a/zk/stages/stage_sequence_execute_blocks.go b/zk/stages/stage_sequence_execute_blocks.go index 1f274756732..657ac562904 100644 --- a/zk/stages/stage_sequence_execute_blocks.go +++ b/zk/stages/stage_sequence_execute_blocks.go @@ -154,7 +154,7 @@ func finaliseBlock( } } - finalBlock, finalTransactions, finalReceipts, err := core.FinalizeBlockExecutionWithHistoryWrite( + finalBlock, finalTransactions, finalReceipts, err := core.FinalizeBlockExecution( batchContext.cfg.engine, batchContext.sdb.stateReader, newHeader, @@ -173,6 +173,7 @@ func finaliseBlock( return nil, err } + // this is actually the interhashes stage newRoot, err := zkIncrementIntermediateHashes(batchContext.ctx, batchContext.s.LogPrefix(), batchContext.s, batchContext.sdb.tx, batchContext.sdb.eridb, batchContext.sdb.smt, newHeader.Number.Uint64()-1, newHeader.Number.Uint64()) if err != nil { return nil, err @@ -225,6 +226,20 @@ func finaliseBlock( return nil, fmt.Errorf("write block batch error: %v", err) } + // this is actually account + storage indices stages + quitCh := batchContext.ctx.Done() + from := newNum.Uint64() + if from == 1 { + from = 0 + } + to := newNum.Uint64() + 1 + if err = stagedsync.PromoteHistory(batchContext.s.LogPrefix(), batchContext.sdb.tx, kv.AccountChangeSet, from, to, *batchContext.historyCfg, quitCh); err != nil { + return nil, err + } + if err = stagedsync.PromoteHistory(batchContext.s.LogPrefix(), batchContext.sdb.tx, kv.StorageChangeSet, from, to, *batchContext.historyCfg, quitCh); err != nil { + return nil, err + } + return finalBlock, nil } diff --git a/zk/stages/stage_sequence_execute_state.go b/zk/stages/stage_sequence_execute_state.go index 623acdf1950..c883f2de20d 100644 --- a/zk/stages/stage_sequence_execute_state.go +++ b/zk/stages/stage_sequence_execute_state.go @@ -15,18 +15,20 @@ import ( ) type BatchContext struct { - ctx context.Context - cfg *SequenceBlockCfg - s *stagedsync.StageState - sdb *stageDb + ctx context.Context + cfg *SequenceBlockCfg + historyCfg *stagedsync.HistoryCfg + s *stagedsync.StageState + sdb *stageDb } -func newBatchContext(ctx context.Context, cfg *SequenceBlockCfg, s *stagedsync.StageState, sdb *stageDb) *BatchContext { +func newBatchContext(ctx context.Context, cfg *SequenceBlockCfg, historyCfg *stagedsync.HistoryCfg, s *stagedsync.StageState, sdb *stageDb) *BatchContext { return &BatchContext{ - ctx: ctx, - cfg: cfg, - s: s, - sdb: sdb, + ctx: ctx, + cfg: cfg, + historyCfg: historyCfg, + s: s, + sdb: sdb, } } diff --git a/zk/stages/stage_sequence_execute_utils.go b/zk/stages/stage_sequence_execute_utils.go index 051a4caadbe..3cb908e24f2 100644 --- a/zk/stages/stage_sequence_execute_utils.go +++ b/zk/stages/stage_sequence_execute_utils.go @@ -325,10 +325,17 @@ func updateSequencerProgress(tx kv.RwTx, newHeight uint64, newBatch uint64, l1In } if !unwinding { - // Update interhashes stage progress if err := stages.SaveStageProgress(tx, 
stages.IntermediateHashes, newHeight); err != nil { return err } + + if err := stages.SaveStageProgress(tx, stages.AccountHistoryIndex, newHeight); err != nil { + return err + } + + if err := stages.SaveStageProgress(tx, stages.StorageHistoryIndex, newHeight); err != nil { + return err + } } return nil diff --git a/zk/stages/stages.go b/zk/stages/stages.go index ad584d53366..08746571c28 100644 --- a/zk/stages/stages.go +++ b/zk/stages/stages.go @@ -106,7 +106,7 @@ func SequencerZkStages( ID: stages2.Execution, Description: "Sequence transactions", Forward: func(firstCycle bool, badBlockUnwind bool, s *stages.StageState, u stages.Unwinder, tx kv.RwTx, quiet bool) error { - return SpawnSequencingStage(s, u, tx, ctx, exec, quiet) + return SpawnSequencingStage(s, u, tx, ctx, exec, history, quiet) }, Unwind: func(firstCycle bool, u *stages.UnwindState, s *stages.StageState, tx kv.RwTx) error { return UnwindSequenceExecutionStage(u, s, tx, ctx, exec, firstCycle) @@ -175,7 +175,8 @@ func SequencerZkStages( Description: "Generate account history index", Disabled: false, Forward: func(firstCycle bool, badBlockUnwind bool, s *stages.StageState, u stages.Unwinder, tx kv.RwTx, quiet bool) error { - return stages.SpawnAccountHistoryIndex(s, tx, history, ctx) + return nil + // return stages.SpawnAccountHistoryIndex(s, tx, history, ctx) }, Unwind: func(firstCycle bool, u *stages.UnwindState, s *stages.StageState, tx kv.RwTx) error { return stages.UnwindAccountHistoryIndex(u, s, tx, history, ctx) @@ -189,7 +190,8 @@ func SequencerZkStages( Description: "Generate storage history index", Disabled: false, Forward: func(firstCycle bool, badBlockUnwind bool, s *stages.StageState, u stages.Unwinder, tx kv.RwTx, quiet bool) error { - return stages.SpawnStorageHistoryIndex(s, tx, history, ctx) + return nil + // return stages.SpawnStorageHistoryIndex(s, tx, history, ctx) }, Unwind: func(firstCycle bool, u *stages.UnwindState, s *stages.StageState, tx kv.RwTx) error { return stages.UnwindStorageHistoryIndex(u, s, tx, history, ctx) From 2ba343d2fe25118a71f2da39449126bebea3d219 Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Wed, 31 Jul 2024 15:44:10 +0000 Subject: [PATCH 13/33] update unwind and stages processing --- zk/datastream/server/data_stream_server.go | 57 ++++++++---- zk/datastream/server/datastream_populate.go | 8 +- .../legacy_executor_verifier.go | 29 +++--- zk/stages/stage_sequence_execute.go | 90 ++++++------------- zk/stages/stage_sequence_execute_batch.go | 27 ++---- zk/stages/stage_sequence_execute_blocks.go | 7 +- .../stage_sequence_execute_data_stream.go | 26 ++++-- .../stage_sequence_execute_injected_batch.go | 12 +-- .../stage_sequence_execute_transactions.go | 4 +- zk/stages/stage_sequence_execute_unwind.go | 11 ++- zk/stages/stage_sequence_execute_utils_db.go | 8 +- zk/stages/stages.go | 2 +- 12 files changed, 134 insertions(+), 147 deletions(-) diff --git a/zk/datastream/server/data_stream_server.go b/zk/datastream/server/data_stream_server.go index 14535999850..0d495e60bc1 100644 --- a/zk/datastream/server/data_stream_server.go +++ b/zk/datastream/server/data_stream_server.go @@ -455,24 +455,12 @@ func (srv *DataStreamServer) GetHighestBatchNumber() (uint64, error) { return *srv.highestBatchWritten, nil } - header := srv.stream.GetHeader() - - if header.TotalEntries == 0 { - return 0, nil + entry, found, err := srv.getLastEntryOfType(datastreamer.EntryType(types.EntryTypeBatchStart)) + if err != nil { + return 0, err } - - entryNum := header.TotalEntries - 1 - var err error - var entry 
datastreamer.FileEntry - for { - entry, err = srv.stream.GetEntry(entryNum) - if err != nil { - return 0, err - } - if entry.Type == datastreamer.EntryType(1) { - break - } - entryNum -= 1 + if !found { + return 0, nil } batch, err := types.UnmarshalBatchStart(entry.Data) @@ -485,6 +473,23 @@ func (srv *DataStreamServer) GetHighestBatchNumber() (uint64, error) { return batch.Number, nil } +func (srv *DataStreamServer) GetHighestClosedBatch() (uint64, error) { + entry, found, err := srv.getLastEntryOfType(datastreamer.EntryType(types.EntryTypeBatchEnd)) + if err != nil { + return 0, err + } + if !found { + return 0, nil + } + + batch, err := types.UnmarshalBatchEnd(entry.Data) + if err != nil { + return 0, err + } + + return batch.Number, nil +} + // must be done on offline server // finds the position of the block bookmark entry and deletes from it onward // blockNumber 10 would return the stream to before block 10 bookmark @@ -524,3 +529,21 @@ func (srv *DataStreamServer) UnwindToBatchStart(batchNumber uint64) error { return srv.stream.TruncateFile(entryNum) } + +func (srv *DataStreamServer) getLastEntryOfType(entryType datastreamer.EntryType) (datastreamer.FileEntry, bool, error) { + header := srv.stream.GetHeader() + emtryEntry := datastreamer.FileEntry{} + + // loop will become infinite if using unsigned type + for entryNum := int64(header.TotalEntries - 1); entryNum >= 0; entryNum-- { + entry, err := srv.stream.GetEntry(uint64(entryNum)) + if err != nil { + return emtryEntry, false, err + } + if entry.Type == entryType { + return entry, true, nil + } + } + + return emtryEntry, false, nil +} diff --git a/zk/datastream/server/datastream_populate.go b/zk/datastream/server/datastream_populate.go index a44cbb99e8d..839cb9029c2 100644 --- a/zk/datastream/server/datastream_populate.go +++ b/zk/datastream/server/datastream_populate.go @@ -317,11 +317,15 @@ func (srv *DataStreamServer) UnwindIfNecessary(logPrefix string, reader DbReader func (srv *DataStreamServer) WriteBatchEnd( reader DbReader, - batchNumber, - lastBatchNumber uint64, + batchNumber uint64, stateRoot *common.Hash, localExitRoot *common.Hash, ) (err error) { + lastBatchNumber, err := srv.GetHighestClosedBatch() + if err != nil { + return err + } + gers, err := reader.GetBatchGlobalExitRootsProto(lastBatchNumber, batchNumber) if err != nil { return err diff --git a/zk/legacy_executor_verifier/legacy_executor_verifier.go b/zk/legacy_executor_verifier/legacy_executor_verifier.go index eb41e366dd8..73b02449d79 100644 --- a/zk/legacy_executor_verifier/legacy_executor_verifier.go +++ b/zk/legacy_executor_verifier/legacy_executor_verifier.go @@ -331,10 +331,6 @@ func (v *LegacyExecutorVerifier) VerifyAsync(request *VerifierRequest, blockNumb // ProcessResultsSequentiallyUnsafe relies on the fact that this function returns ALWAYS non-verifierBundle and error. The only exception is the case when verifications has been canceled. 
Only then the verifierBundle can be nil return NewPromise[*VerifierBundle](func() (*VerifierBundle, error) { verifierBundle := NewVerifierBundle(request, nil) - // bundleWithBlocks := &VerifierBundle{ - // Blocks: blockNumbers, - // Bundle: verifierBundle, - // } e := v.GetNextOnlineAvailableExecutor() if e == nil { @@ -368,7 +364,7 @@ func (v *LegacyExecutorVerifier) VerifyAsync(request *VerifierRequest, blockNumb return verifierBundle, err } - witness, err := v.witnessGenerator.GetWitnessByBlockRange(tx, ctx, blockNumbers[0], blockNumbers[len(blockNumbers)-1], false, v.cfg.WitnessFull) + witness, err := v.witnessGenerator.GetWitnessByBlockRange(tx, innerCtx, blockNumbers[0], blockNumbers[len(blockNumbers)-1], false, v.cfg.WitnessFull) if err != nil { return verifierBundle, err } @@ -407,7 +403,7 @@ func (v *LegacyExecutorVerifier) VerifyAsync(request *VerifierRequest, blockNumb ok, executorResponse, executorErr := e.Verify(payload, request, previousBlock.Root()) if request.BlockNumber == 4 && counter == 0 { - // ok = false + ok = false counter = 1 } @@ -434,18 +430,11 @@ func (v *LegacyExecutorVerifier) VerifyAsync(request *VerifierRequest, blockNumb } func (v *LegacyExecutorVerifier) VerifyWithoutExecutor(request *VerifierRequest, blockNumbers []uint64) *Promise[*VerifierBundle] { - valid := true - // simulate a die roll to determine if this is a good batch or not - // 1 in 6 chance of being a bad batch - // if rand.Intn(6) == 0 { - // valid = false - // } - promise := NewPromise[*VerifierBundle](func() (*VerifierBundle, error) { response := &VerifierResponse{ BatchNumber: request.BatchNumber, BlockNumber: request.BlockNumber, - Valid: valid, + Valid: true, OriginalCounters: request.Counters, Witness: nil, ExecutorResponse: nil, @@ -458,7 +447,7 @@ func (v *LegacyExecutorVerifier) VerifyWithoutExecutor(request *VerifierRequest, return promise } -func (v *LegacyExecutorVerifier) ProcessResultsSequentially() ([]*VerifierBundle, int, error) { +func (v *LegacyExecutorVerifier) ProcessResultsSequentially() ([]*VerifierBundle, error) { v.mtxPromises.Lock() defer v.mtxPromises.Unlock() @@ -484,7 +473,7 @@ func (v *LegacyExecutorVerifier) ProcessResultsSequentially() ([]*VerifierBundle if verifierBundle.Request.IsOverdue() { // signal an error, the caller can check on this and stop the process if needs be - return nil, 0, fmt.Errorf("error: batch %d couldn't be processed in 30 minutes", verifierBundle.Request.BatchNumber) + return nil, fmt.Errorf("error: batch %d couldn't be processed in 30 minutes", verifierBundle.Request.BatchNumber) } // re-queue the task - it should be safe to replace the index of the slice here as we only add to it @@ -500,7 +489,7 @@ func (v *LegacyExecutorVerifier) ProcessResultsSequentially() ([]*VerifierBundle // remove processed promises from the list v.promises = v.promises[len(verifierResponse):] - return verifierResponse, len(v.promises), nil + return verifierResponse, nil } // func (v *LegacyExecutorVerifier) checkAndWriteToStream(tx kv.Tx, hdb *hermez_db.HermezDbReader, newBatch uint64) error { @@ -596,6 +585,12 @@ func (v *LegacyExecutorVerifier) ProcessResultsSequentially() ([]*VerifierBundle // delete(v.addedBatches, batchNumber) // } +func (v *LegacyExecutorVerifier) Wait() { + for _, p := range v.promises { + p.Wait() + } +} + func (v *LegacyExecutorVerifier) CancelAllRequests() { // cancel all promises // all queued promises will return ErrPromiseCancelled while getting its result diff --git a/zk/stages/stage_sequence_execute.go 
b/zk/stages/stage_sequence_execute.go index 860a818b047..7b7711cc9d1 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -6,7 +6,6 @@ import ( "time" "github.com/gateway-fm/cdk-erigon-lib/common" - "github.com/gateway-fm/cdk-erigon-lib/kv" "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon/common/math" @@ -23,7 +22,6 @@ import ( func SpawnSequencingStage( s *stagedsync.StageState, u stagedsync.Unwinder, - rootTx kv.RwTx, ctx context.Context, cfg SequenceBlockCfg, historyCfg stagedsync.HistoryCfg, @@ -33,7 +31,7 @@ func SpawnSequencingStage( log.Info(fmt.Sprintf("[%s] Starting sequencing stage", logPrefix)) defer log.Info(fmt.Sprintf("[%s] Finished sequencing stage", logPrefix)) - sdb, err := newStageDb(ctx, rootTx, cfg.db) + sdb, err := newStageDb(ctx, cfg.db) if err != nil { return err } @@ -75,16 +73,6 @@ func SpawnSequencingStage( return sdb.tx.Commit() } - // if !isLastBatchPariallyProcessed { - // handle case where batch wasn't closed properly - // close it before starting a new one - // this occurs when sequencer was switched from syncer or sequencer datastream files were deleted - // and datastream was regenerated - if err = finalizeLastBatchInDatastreamIfNotFinalized(batchContext, batchState, executionAt); err != nil { - return err - } - // } - if err := utils.UpdateZkEVMBlockCfg(cfg.chainConfig, sdb.hermezDb, logPrefix); err != nil { return err } @@ -94,10 +82,20 @@ func SpawnSequencingStage( return err } - // check if we just unwound from a bad executor response and if we did just close the batch here - handled, err := doInstantCloseIfNeeded(batchContext, batchState, batchCounters) - if err != nil || handled { - return err + if isLastBatchPariallyProcessed { + // check if we just unwound from a bad executor response and if we did just close the batch here + handled, err := doInstantCloseIfNeeded(batchContext, batchState, batchCounters) + if err != nil || handled { + return err + } + } else { + // handle case where batch wasn't closed properly + // close it before starting a new one + // this occurs when sequencer was switched from syncer or sequencer datastream files were deleted + // and datastream was regenerated + if err = finalizeLastBatchInDatastreamIfNotFinalized(batchContext, batchState, executionAt); err != nil { + return err + } } batchTicker := time.NewTicker(cfg.zk.SequencerBatchSealTime) @@ -106,19 +104,9 @@ func SpawnSequencingStage( defer nonEmptyBatchTimer.Stop() runLoopBlocks := true - blockDataSizeChecker := NewBlockDataChecker() batchDataOverflow := false - - // batchVerifier := NewBatchVerifier(cfg.zk, batchState.hasExecutorForThisBatch, cfg.legacyVerifier, batchState.forkId) - streamWriter := &SequencerBatchStreamWriter{ - ctx: ctx, - logPrefix: logPrefix, - legacyVerifier: cfg.legacyVerifier, - sdb: sdb, - streamServer: cfg.datastreamServer, - hasExecutors: batchState.hasExecutorForThisBatch, - lastBatch: lastBatch, - } + blockDataSizeChecker := NewBlockDataChecker() + streamWriter := newSequencerBatchStreamWriter(batchContext, batchState, lastBatch) limboHeaderTimestamp, limboTxHash := cfg.txPool.GetLimboTxHash(batchState.batchNumber) limboRecovery := limboTxHash != nil @@ -152,11 +140,7 @@ func SpawnSequencingStage( } } - if !isLastBatchPariallyProcessed { - log.Info(fmt.Sprintf("[%s] Starting batch %d...", logPrefix, batchState.batchNumber)) - } else { - log.Info(fmt.Sprintf("[%s] Continuing unfinished batch %d from block %d", logPrefix, batchState.batchNumber, executionAt+1)) - } + 
log.Info(fmt.Sprintf("[%s] Starting batch %d...", logPrefix, batchState.batchNumber)) var block *types.Block for blockNumber := executionAt + 1; runLoopBlocks; blockNumber++ { @@ -208,17 +192,7 @@ func SpawnSequencingStage( batchState.blockState.builtBlockElements.resetBlockBuildingArrays() parentRoot := parentBlock.Root() - if err = handleStateForNewBlockStarting( - cfg.chainConfig, - sdb.hermezDb, - ibs, - blockNumber, - batchState.batchNumber, - header.Time, - &parentRoot, - l1TreeUpdate, - shouldWriteGerToContract, - ); err != nil { + if err = handleStateForNewBlockStarting(batchContext, ibs, blockNumber, batchState.batchNumber, header.Time, &parentRoot, l1TreeUpdate, shouldWriteGerToContract); err != nil { return err } @@ -308,7 +282,7 @@ func SpawnSequencingStage( } if !batchState.isL1Recovery() { - log.Info(fmt.Sprintf("[%s] overflowed adding transaction to batch", logPrefix), "batch", batchState.batchNumber, "tx-hash", txHash, "has any transactions in this batch", batchState.hasAnyTransactionsInThisBatch) + log.Info(fmt.Sprintf("[%s] overflowed adding transaction to batch", logPrefix), "batch", batchState.batchNumber, "tx-hash", txHash, "has-any-transactions-in-this-batch", batchState.hasAnyTransactionsInThisBatch) /* There are two cases when overflow could occur. 1. The block DOES not contains any transactions. @@ -323,16 +297,15 @@ func SpawnSequencingStage( cfg.txPool.MarkForDiscardFromPendingBest(txHash) log.Trace(fmt.Sprintf("single transaction %s overflow counters", txHash)) } + + runLoopBlocks = false + break LOOP_TRANSACTIONS } - //TODO: Why do we break the loop in case of l1Recovery?! - break LOOP_TRANSACTIONS } if err == nil { blockDataSizeChecker = &backupDataSizeChecker - //TODO: Does no make any sense to remove last added tx - batchState.yieldedTransactions.Remove(txHash) batchState.onAddedTransaction(transaction, receipt, execResult, effectiveGas) nonEmptyBatchTimer.Reset(cfg.zk.SequencerNonEmptyBatchSealTime) @@ -406,7 +379,7 @@ func SpawnSequencingStage( cfg.legacyVerifier.StartAsyncVerification(batchState.forkId, batchState.batchNumber, block.Root(), batchCounters.CombineCollectorsNoChanges().UsedAsMap(), batchState.builtBlocks, batchState.hasExecutorForThisBatch) // check for new responses from the verifier - needsUnwind, _, err := updateStreamAndCheckRollback(batchContext, batchState, streamWriter, u) + needsUnwind, err := updateStreamAndCheckRollback(batchContext, batchState, streamWriter, u) // lets commit everything after updateStreamAndCheckRollback no matter of its result if errCommitAndStart := sdb.CommitAndStart(); errCommitAndStart != nil { @@ -414,21 +387,16 @@ func SpawnSequencingStage( } defer sdb.tx.Rollback() - // check the return values of updateStreamAndCheckRollback and CommitAndStart + // check the return values of updateStreamAndCheckRollback if err != nil || needsUnwind { return err } } - for { - needsUnwind, remaining, err := updateStreamAndCheckRollback(batchContext, batchState, streamWriter, u) - if err != nil || needsUnwind { - return err - } - if remaining == 0 { - break - } - time.Sleep(50 * time.Millisecond) + cfg.legacyVerifier.Wait() + needsUnwind, err := updateStreamAndCheckRollback(batchContext, batchState, streamWriter, u) + if err != nil || needsUnwind { + return err } if err = runBatchLastSteps(batchContext, batchState.batchNumber, block.NumberU64(), batchCounters); err != nil { diff --git a/zk/stages/stage_sequence_execute_batch.go b/zk/stages/stage_sequence_execute_batch.go index 91376bed9b2..8c0411e6a35 100644 --- 
a/zk/stages/stage_sequence_execute_batch.go +++ b/zk/stages/stage_sequence_execute_batch.go @@ -55,15 +55,9 @@ func doInstantCloseIfNeeded(batchContext *BatchContext, batchState *BatchState, // only close this batch down if we actually made any progress in it, otherwise // just continue processing as normal and recreate the batch from scratch if len(blocks) > 0 { - // if err = stages.SaveStageProgress(batchContext.sdb.tx, stages.HighestSeenBatchNumber, batchState.batchNumber); err != nil { - // return false, err - // } if err = runBatchLastSteps(batchContext, batchState.batchNumber, blocks[len(blocks)-1], batchCounters); err != nil { return false, err } - if err = batchContext.sdb.hermezDb.WriteForkId(batchState.batchNumber, batchState.forkId); err != nil { - return false, err - } if err = updateSequencerProgress(batchContext.sdb.tx, blocks[len(blocks)-1], batchState.batchNumber, 1, false); err != nil { return false, err } @@ -124,10 +118,10 @@ func updateStreamAndCheckRollback( batchState *BatchState, streamWriter *SequencerBatchStreamWriter, u stagedsync.Unwinder, -) (bool, int, error) { - committed, remaining, err := streamWriter.CommitNewUpdates() +) (bool, error) { + committed, err := streamWriter.CommitNewUpdates() if err != nil { - return false, remaining, err + return false, err } for _, commit := range committed { @@ -137,12 +131,7 @@ func updateStreamAndCheckRollback( // we are about to unwind so place the marker ready for this to happen if err = batchContext.sdb.hermezDb.WriteJustUnwound(batchState.batchNumber); err != nil { - return false, 0, err - } - // capture the fork otherwise when the loop starts again to close - // off the batch it will detect it as a fork upgrade - if err = batchContext.sdb.hermezDb.WriteForkId(batchState.batchNumber, batchState.forkId); err != nil { - return false, 0, err + return false, err } unwindTo := commit.BlockNumber - 1 @@ -151,17 +140,17 @@ func updateStreamAndCheckRollback( // causing the unwind. 
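		// Illustration: if block 100 fails verification, unwindTo works out to 99, while the header
		// loaded just below is for block 100 itself, so its hash is what gets passed to u.UnwindTo
		// as the offending block.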
unwindHeader := rawdb.ReadHeaderByNumber(batchContext.sdb.tx, commit.BlockNumber) if unwindHeader == nil { - return false, 0, fmt.Errorf("could not find header for block %d", commit.BlockNumber) + return false, fmt.Errorf("could not find header for block %d", commit.BlockNumber) } log.Warn(fmt.Sprintf("[%s] Block is invalid - rolling back", batchContext.s.LogPrefix()), "badBlock", commit.BlockNumber, "unwindTo", unwindTo, "root", unwindHeader.Root) u.UnwindTo(unwindTo, unwindHeader.Hash()) streamWriter.legacyVerifier.CancelAllRequests() - return true, 0, nil + return true, nil } - return false, remaining, nil + return false, nil } func runBatchLastSteps( @@ -208,7 +197,7 @@ func runBatchLastSteps( return err } blockRoot := block.Root() - if err = batchContext.cfg.datastreamServer.WriteBatchEnd(batchContext.sdb.hermezDb, thisBatch, thisBatch, &blockRoot, &ler); err != nil { + if err = batchContext.cfg.datastreamServer.WriteBatchEnd(batchContext.sdb.hermezDb, thisBatch, &blockRoot, &ler); err != nil { return err } diff --git a/zk/stages/stage_sequence_execute_blocks.go b/zk/stages/stage_sequence_execute_blocks.go index 657ac562904..bfae01f81a6 100644 --- a/zk/stages/stage_sequence_execute_blocks.go +++ b/zk/stages/stage_sequence_execute_blocks.go @@ -8,7 +8,6 @@ import ( "math/big" - "github.com/ledgerwatch/erigon/chain" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" @@ -23,8 +22,7 @@ import ( ) func handleStateForNewBlockStarting( - chainConfig *chain.Config, - hermezDb *hermez_db.HermezDb, + batchContext *BatchContext, ibs *state.IntraBlockState, blockNumber uint64, batchNumber uint64, @@ -33,6 +31,9 @@ func handleStateForNewBlockStarting( l1info *zktypes.L1InfoTreeUpdate, shouldWriteGerToContract bool, ) error { + chainConfig := batchContext.cfg.chainConfig + hermezDb := batchContext.sdb.hermezDb + ibs.PreExecuteStateSet(chainConfig, blockNumber, timestamp, stateRoot) // handle writing to the ger manager contract but only if the index is above 0 diff --git a/zk/stages/stage_sequence_execute_data_stream.go b/zk/stages/stage_sequence_execute_data_stream.go index fe72404cb03..0da9dd0e67a 100644 --- a/zk/stages/stage_sequence_execute_data_stream.go +++ b/zk/stages/stage_sequence_execute_data_stream.go @@ -21,29 +21,41 @@ type SequencerBatchStreamWriter struct { lastBatch uint64 } +func newSequencerBatchStreamWriter(batchContext *BatchContext, batchState *BatchState, lastBatch uint64) *SequencerBatchStreamWriter { + return &SequencerBatchStreamWriter{ + ctx: batchContext.ctx, + logPrefix: batchContext.s.LogPrefix(), + legacyVerifier: batchContext.cfg.legacyVerifier, + sdb: batchContext.sdb, + streamServer: batchContext.cfg.datastreamServer, + hasExecutors: batchState.hasExecutorForThisBatch, + lastBatch: lastBatch, + } +} + type BlockStatus struct { BlockNumber uint64 Valid bool Error error } -func (sbc *SequencerBatchStreamWriter) CommitNewUpdates() ([]BlockStatus, int, error) { +func (sbc *SequencerBatchStreamWriter) CommitNewUpdates() ([]BlockStatus, error) { var written []BlockStatus - responses, remaining, err := sbc.legacyVerifier.ProcessResultsSequentially() + responses, err := sbc.legacyVerifier.ProcessResultsSequentially() if err != nil { - return written, remaining, err + return written, err } if len(responses) == 0 { - return written, remaining, nil + return written, nil } written, err = sbc.writeBlockDetailsToDatastream(responses) if err != nil { - return written, remaining, err + return written, err } - 
return written, remaining, nil + return written, nil } func (sbc *SequencerBatchStreamWriter) writeBlockDetailsToDatastream(verifiedBundles []*verifier.VerifierBundle) ([]BlockStatus, error) { @@ -109,7 +121,7 @@ func finalizeLastBatchInDatastreamIfNotFinalized(batchContext *BatchContext, bat return err } root := lastBlock.Root() - if err = batchContext.cfg.datastreamServer.WriteBatchEnd(batchContext.sdb.hermezDb, batchState.batchNumber, batchState.batchNumber-1, &root, &ler); err != nil { + if err = batchContext.cfg.datastreamServer.WriteBatchEnd(batchContext.sdb.hermezDb, batchState.batchNumber-1, &root, &ler); err != nil { return err } return nil diff --git a/zk/stages/stage_sequence_execute_injected_batch.go b/zk/stages/stage_sequence_execute_injected_batch.go index 492fa98afea..5c3100788ab 100644 --- a/zk/stages/stage_sequence_execute_injected_batch.go +++ b/zk/stages/stage_sequence_execute_injected_batch.go @@ -59,17 +59,7 @@ func processInjectedInitialBatch( header.Time = injected.Timestamp parentRoot := parentBlock.Root() - if err = handleStateForNewBlockStarting( - batchContext.cfg.chainConfig, - batchContext.sdb.hermezDb, - ibs, - injectedBatchBlockNumber, - injectedBatchBatchNumber, - injected.Timestamp, - &parentRoot, - fakeL1TreeUpdate, - true, - ); err != nil { + if err = handleStateForNewBlockStarting(batchContext, ibs, injectedBatchBlockNumber, injectedBatchBatchNumber, injected.Timestamp, &parentRoot, fakeL1TreeUpdate, true); err != nil { return err } diff --git a/zk/stages/stage_sequence_execute_transactions.go b/zk/stages/stage_sequence_execute_transactions.go index 72533542b4b..d942ea4ad1b 100644 --- a/zk/stages/stage_sequence_execute_transactions.go +++ b/zk/stages/stage_sequence_execute_transactions.go @@ -176,7 +176,7 @@ func attemptAddTransaction( if overflow { ibs.RevertToSnapshot(snapshot) - return nil, nil, true, err + return nil, nil, true, nil } // add the gas only if not reverted. 
This should not be moved above the overflow check @@ -190,5 +190,5 @@ func attemptAddTransaction( ibs.FinalizeTx(evm.ChainRules(), noop) - return receipt, execResult, overflow, err + return receipt, execResult, false, nil } diff --git a/zk/stages/stage_sequence_execute_unwind.go b/zk/stages/stage_sequence_execute_unwind.go index e2f6d0a1436..bced0c57dc1 100644 --- a/zk/stages/stage_sequence_execute_unwind.go +++ b/zk/stages/stage_sequence_execute_unwind.go @@ -100,6 +100,15 @@ func UnwindSequenceExecutionStageDbWrites(ctx context.Context, u *stagedsync.Unw return fmt.Errorf("get toBatch no by l2 block error: %v", err) } + lastBatchToKeepBeforeFrom, err := hermezDb.GetBatchNoByL2Block(u.UnwindPoint) + if err != nil { + return fmt.Errorf("get fromBatch no by l2 block error: %v", err) + } + fromBatchForForkIdDeletion := fromBatch + if lastBatchToKeepBeforeFrom == fromBatch { + fromBatchForForkIdDeletion++ + } + // only seq if err = hermezDb.TruncateLatestUsedGers(fromBatch); err != nil { return fmt.Errorf("truncate latest used gers error: %v", err) @@ -121,7 +130,7 @@ func UnwindSequenceExecutionStageDbWrites(ctx context.Context, u *stagedsync.Unw return fmt.Errorf("truncate block batches error: %v", err) } // only seq - if err = hermezDb.TruncateForkId(fromBatch, toBatch); err != nil { + if err = hermezDb.TruncateForkId(fromBatchForForkIdDeletion, toBatch); err != nil { return fmt.Errorf("truncate fork id error: %v", err) } diff --git a/zk/stages/stage_sequence_execute_utils_db.go b/zk/stages/stage_sequence_execute_utils_db.go index cbc98015383..4484d9dbd72 100644 --- a/zk/stages/stage_sequence_execute_utils_db.go +++ b/zk/stages/stage_sequence_execute_utils_db.go @@ -2,7 +2,6 @@ package stages import ( "context" - "fmt" "github.com/gateway-fm/cdk-erigon-lib/kv" @@ -23,11 +22,8 @@ type stageDb struct { smt *smtNs.SMT } -func newStageDb(ctx context.Context, tx kv.RwTx, db kv.RwDB) (sdb *stageDb, err error) { - if tx != nil { - return nil, fmt.Errorf("sequencer cannot use global db's tx object, because it commits the tx object itself") - } - +func newStageDb(ctx context.Context, db kv.RwDB) (sdb *stageDb, err error) { + var tx kv.RwTx if tx, err = db.BeginRw(ctx); err != nil { return nil, err } diff --git a/zk/stages/stages.go b/zk/stages/stages.go index 08746571c28..92b236b1835 100644 --- a/zk/stages/stages.go +++ b/zk/stages/stages.go @@ -106,7 +106,7 @@ func SequencerZkStages( ID: stages2.Execution, Description: "Sequence transactions", Forward: func(firstCycle bool, badBlockUnwind bool, s *stages.StageState, u stages.Unwinder, tx kv.RwTx, quiet bool) error { - return SpawnSequencingStage(s, u, tx, ctx, exec, history, quiet) + return SpawnSequencingStage(s, u, ctx, exec, history, quiet) }, Unwind: func(firstCycle bool, u *stages.UnwindState, s *stages.StageState, tx kv.RwTx) error { return UnwindSequenceExecutionStage(u, s, tx, ctx, exec, firstCycle) From e8d8d9b471dd910d82a86965493fc82e4cc43682 Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Thu, 1 Aug 2024 13:54:31 +0000 Subject: [PATCH 14/33] limbo --- turbo/stages/zk_stages.go | 2 - .../legacy_executor_verifier.go | 320 +-------------- zk/stages/stage_sequence_execute.go | 85 ++-- zk/stages/stage_sequence_execute_batch.go | 34 +- .../stage_sequence_execute_data_stream.go | 52 +-- zk/stages/stage_sequence_execute_limbo.go | 110 ++++-- zk/stages/stage_sequence_execute_state.go | 48 ++- zk/stages/stage_sequence_execute_utils.go | 2 +- zk/stages/stage_sequence_execute_verifier.go | 180 --------- 
zk/stages/stage_sequencer_executor_verify.go | 368 ------------------ .../stage_sequencer_executor_verify_limbo.go | 56 --- zk/stages/stage_sequencer_interhashes.go | 41 -- zk/stages/stages.go | 21 +- zk/txpool/pool_zk_limbo_processor.go | 86 ++-- 14 files changed, 271 insertions(+), 1134 deletions(-) delete mode 100644 zk/stages/stage_sequence_execute_verifier.go delete mode 100644 zk/stages/stage_sequencer_executor_verify.go delete mode 100644 zk/stages/stage_sequencer_executor_verify_limbo.go diff --git a/turbo/stages/zk_stages.go b/turbo/stages/zk_stages.go index ccb39c75304..2911c26808e 100644 --- a/turbo/stages/zk_stages.go +++ b/turbo/stages/zk_stages.go @@ -117,7 +117,6 @@ func NewSequencerZkStages(ctx context.Context, zkStages.StageL1InfoTreeCfg(db, cfg.Zk, l1InfoTreeSyncer), zkStages.StageSequencerL1BlockSyncCfg(db, cfg.Zk, l1BlockSyncer), zkStages.StageDataStreamCatchupCfg(datastreamServer, db, cfg.Genesis.Config.ChainID.Uint64(), cfg.DatastreamVersion, cfg.HasExecutors()), - // zkStages.StageSequencerInterhashesCfg(db, notifications.Accumulator), zkStages.StageSequenceBlocksCfg( db, cfg.Prune, @@ -143,7 +142,6 @@ func NewSequencerZkStages(ctx context.Context, ), stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3, agg), zkStages.StageZkInterHashesCfg(db, true, true, false, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg, cfg.Zk), - // zkStages.StageSequencerExecutorVerifyCfg(db, verifier, txPool, controlServer.ChainConfig, cfg.Zk), stagedsync.StageHistoryCfg(db, cfg.Prune, dirs.Tmp), stagedsync.StageLogIndexCfg(db, cfg.Prune, dirs.Tmp), stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, dirs.Tmp), diff --git a/zk/legacy_executor_verifier/legacy_executor_verifier.go b/zk/legacy_executor_verifier/legacy_executor_verifier.go index 73b02449d79..3797e89d002 100644 --- a/zk/legacy_executor_verifier/legacy_executor_verifier.go +++ b/zk/legacy_executor_verifier/legacy_executor_verifier.go @@ -29,17 +29,17 @@ var ErrNoExecutorAvailable = fmt.Errorf("no executor available") type VerifierRequest struct { BatchNumber uint64 - BlockNumber uint64 + BlockNumbers []uint64 ForkId uint64 StateRoot common.Hash Counters map[string]int creationTime time.Time } -func NewVerifierRequest(batchNumber, blockNumber, forkId uint64, stateRoot common.Hash, counters map[string]int) *VerifierRequest { +func NewVerifierRequest(forkId, batchNumber uint64, blockNumbers []uint64, stateRoot common.Hash, counters map[string]int) *VerifierRequest { return &VerifierRequest{ BatchNumber: batchNumber, - BlockNumber: blockNumber, + BlockNumbers: blockNumbers, ForkId: forkId, StateRoot: stateRoot, Counters: counters, @@ -51,9 +51,11 @@ func (vr *VerifierRequest) IsOverdue() bool { return time.Since(vr.creationTime) > time.Duration(30*time.Minute) } +func (vr *VerifierRequest) GetLastBlockNumber() uint64 { + return vr.BlockNumbers[len(vr.BlockNumbers)-1] +} + type VerifierResponse struct { - BatchNumber uint64 - BlockNumber uint64 Valid bool Witness []byte ExecutorResponse *executor.ProcessBatchResponseV2 @@ -84,21 +86,11 @@ type LegacyExecutorVerifier struct { executorNumber int cancelAllVerifications atomic.Bool - // quit chan struct{} - streamServer *server.DataStreamServer witnessGenerator WitnessGenerator promises []*Promise[*VerifierBundle] mtxPromises *sync.Mutex - // addedBatches map[uint64]struct{} - - // these three items are used to keep track of where the datastream is at - // compared with the executor checks. 
It allows for data to arrive in strange - // orders and will backfill the stream as needed. - // lowestWrittenBatch uint64 - // responsesToWrite map[uint64]struct{} - // responsesMtx *sync.Mutex } func NewLegacyExecutorVerifier( @@ -116,14 +108,10 @@ func NewLegacyExecutorVerifier( executors: executors, executorNumber: 0, cancelAllVerifications: atomic.Bool{}, - // quit: make(chan struct{}), - streamServer: streamServer, - witnessGenerator: witnessGenerator, - promises: make([]*Promise[*VerifierBundle], 0), - mtxPromises: &sync.Mutex{}, - // addedBatches: make(map[uint64]struct{}), - // responsesToWrite: map[uint64]struct{}{}, - // responsesMtx: &sync.Mutex{}, + streamServer: streamServer, + witnessGenerator: witnessGenerator, + promises: make([]*Promise[*VerifierBundle], 0), + mtxPromises: &sync.Mutex{}, } } @@ -137,7 +125,7 @@ func (v *LegacyExecutorVerifier) StartAsyncVerification( ) { var promise *Promise[*VerifierBundle] - request := NewVerifierRequest(batchNumber, blockNumbers[len(blockNumbers)-1], forkId, stateRoot, counters) + request := NewVerifierRequest(forkId, batchNumber, blockNumbers, stateRoot, counters) if useRemoteExecutor { promise = v.VerifyAsync(request, blockNumbers) } else { @@ -187,143 +175,6 @@ func (v *LegacyExecutorVerifier) VerifySync(tx kv.Tx, request *VerifierRequest, return executorErr } -// Unsafe is not thread-safe so it MUST be invoked only from a single thread -// func (v *LegacyExecutorVerifier) AddRequestUnsafe(request *VerifierRequest, sequencerBatchSealTime time.Duration) *Promise[*VerifierBundle] { -// // eager promise will do the work as soon as called in a goroutine, then we can retrieve the result later -// // ProcessResultsSequentiallyUnsafe relies on the fact that this function returns ALWAYS non-verifierBundle and error. The only exception is the case when verifications has been canceled. 
Only then the verifierBundle can be nil -// promise := NewPromise[*VerifierBundle](func() (*VerifierBundle, error) { -// verifierBundle := NewVerifierBundle(request, nil) - -// e := v.GetNextOnlineAvailableExecutor() -// if e == nil { -// return verifierBundle, ErrNoExecutorAvailable -// } - -// t := utils.StartTimer("legacy-executor-verifier", "add-request-unsafe") - -// e.AquireAccess() -// defer e.ReleaseAccess() -// if v.cancelAllVerifications.Load() { -// return nil, ErrPromiseCancelled -// } - -// var err error -// var blocks []uint64 -// startTime := time.Now() -// ctx := context.Background() -// // mapmutation has some issue with us not having a quit channel on the context call to `Done` so -// // here we're creating a cancelable context and just deferring the cancel -// innerCtx, cancel := context.WithCancel(ctx) -// defer cancel() - -// // get the data stream bytes -// for time.Since(startTime) < 3*sequencerBatchSealTime { -// // we might not have blocks yet as the underlying stage loop might still be running and the tx hasn't been -// // committed yet so just requeue the request -// blocks, err = v.availableBlocksToProcess(innerCtx, request.BatchNumber) -// if err != nil { -// return verifierBundle, err -// } - -// if len(blocks) > 0 { -// break -// } - -// time.Sleep(time.Second) -// } - -// if len(blocks) == 0 { -// return verifierBundle, fmt.Errorf("still not blocks in this batch") -// } - -// tx, err := v.db.BeginRo(innerCtx) -// if err != nil { -// return verifierBundle, err -// } -// defer tx.Rollback() - -// hermezDb := hermez_db.NewHermezDbReader(tx) - -// l1InfoTreeMinTimestamps := make(map[uint64]uint64) -// streamBytes, err := v.GetWholeBatchStreamBytes(request.BatchNumber, tx, blocks, hermezDb, l1InfoTreeMinTimestamps, nil) -// if err != nil { -// return verifierBundle, err -// } - -// witness, err := v.witnessGenerator.GetWitnessByBlockRange(tx, ctx, blocks[0], blocks[len(blocks)-1], false, v.cfg.WitnessFull) -// if err != nil { -// return nil, err -// } - -// log.Debug("witness generated", "data", hex.EncodeToString(witness)) - -// // now we need to figure out the timestamp limit for this payload. 
It must be: -// // timestampLimit >= currentTimestamp (from batch pre-state) + deltaTimestamp -// // so to ensure we have a good value we can take the timestamp of the last block in the batch -// // and just add 5 minutes -// lastBlock, err := rawdb.ReadBlockByNumber(tx, blocks[len(blocks)-1]) -// if err != nil { -// return verifierBundle, err -// } - -// // executor is perfectly happy with just an empty hash here -// oldAccInputHash := common.HexToHash("0x0") -// timestampLimit := lastBlock.Time() -// payload := &Payload{ -// Witness: witness, -// DataStream: streamBytes, -// Coinbase: v.cfg.AddressSequencer.String(), -// OldAccInputHash: oldAccInputHash.Bytes(), -// L1InfoRoot: nil, -// TimestampLimit: timestampLimit, -// ForcedBlockhashL1: []byte{0}, -// ContextId: strconv.FormatUint(request.BatchNumber, 10), -// L1InfoTreeMinTimestamps: l1InfoTreeMinTimestamps, -// } - -// previousBlock, err := rawdb.ReadBlockByNumber(tx, blocks[0]-1) -// if err != nil { -// return verifierBundle, err -// } - -// ok, executorResponse, executorErr := e.Verify(payload, request, previousBlock.Root()) -// if executorErr != nil { -// if errors.Is(executorErr, ErrExecutorStateRootMismatch) { -// log.Error("[Verifier] State root mismatch detected", "err", executorErr) -// } else if errors.Is(executorErr, ErrExecutorUnknownError) { -// log.Error("[Verifier] Unexpected error found from executor", "err", executorErr) -// } else { -// log.Error("[Verifier] Error", "err", executorErr) -// } -// } - -// // log timing w/o stream write -// t.LogTimer() - -// if ok { -// if err = v.checkAndWriteToStream(tx, hermezDb, request.BatchNumber); err != nil { -// log.Error("error writing data to stream", "err", err) -// } -// } - -// verifierBundle.Response = &VerifierResponse{ -// BatchNumber: request.BatchNumber, -// Valid: ok, -// Witness: witness, -// ExecutorResponse: executorResponse, -// Error: executorErr, -// } -// return verifierBundle, nil -// }) - -// // add batch to the list of batches we've added -// v.addedBatches[request.BatchNumber] = struct{}{} - -// // add the promise to the list of promises -// v.promises = append(v.promises, promise) -// return promise -// } - var counter = 0 func (v *LegacyExecutorVerifier) VerifyAsync(request *VerifierRequest, blockNumbers []uint64) *Promise[*VerifierBundle] { @@ -402,7 +253,7 @@ func (v *LegacyExecutorVerifier) VerifyAsync(request *VerifierRequest, blockNumb ok, executorResponse, executorErr := e.Verify(payload, request, previousBlock.Root()) - if request.BlockNumber == 4 && counter == 0 { + if request.GetLastBlockNumber() == 9 && counter == 0 { ok = false counter = 1 } @@ -418,8 +269,6 @@ func (v *LegacyExecutorVerifier) VerifyAsync(request *VerifierRequest, blockNumb } verifierBundle.Response = &VerifierResponse{ - BatchNumber: request.BatchNumber, - BlockNumber: request.BlockNumber, Valid: ok, Witness: witness, ExecutorResponse: executorResponse, @@ -432,8 +281,8 @@ func (v *LegacyExecutorVerifier) VerifyAsync(request *VerifierRequest, blockNumb func (v *LegacyExecutorVerifier) VerifyWithoutExecutor(request *VerifierRequest, blockNumbers []uint64) *Promise[*VerifierBundle] { promise := NewPromise[*VerifierBundle](func() (*VerifierBundle, error) { response := &VerifierResponse{ - BatchNumber: request.BatchNumber, - BlockNumber: request.BlockNumber, + // BatchNumber: request.BatchNumber, + // BlockNumber: request.BlockNumber, Valid: true, OriginalCounters: request.Counters, Witness: nil, @@ -492,99 +341,6 @@ func (v *LegacyExecutorVerifier) 
ProcessResultsSequentially() ([]*VerifierBundle return verifierResponse, nil } -// func (v *LegacyExecutorVerifier) checkAndWriteToStream(tx kv.Tx, hdb *hermez_db.HermezDbReader, newBatch uint64) error { -// t := utils.StartTimer("legacy-executor-verifier", "check-and-write-to-stream") -// defer t.LogTimer() - -// v.responsesMtx.Lock() -// defer v.responsesMtx.Unlock() - -// v.responsesToWrite[newBatch] = struct{}{} - -// // if we haven't written anything yet - cold start of the node -// if v.lowestWrittenBatch == 0 { -// // we haven't written anything yet so lets make sure there is no gap -// // in the stream for this batch -// latestBatch, err := v.streamServer.GetHighestBatchNumber() -// if err != nil { -// return err -// } -// log.Info("[Verifier] Initialising on cold start", "latestBatch", latestBatch, "newBatch", newBatch) - -// v.lowestWrittenBatch = latestBatch - -// // check if we have the next batch we're waiting for -// if latestBatch == newBatch-1 { -// if err := v.WriteBatchToStream(newBatch, hdb, tx); err != nil { -// return err -// } -// v.lowestWrittenBatch = newBatch -// delete(v.responsesToWrite, newBatch) -// } -// } - -// // now check if the batch we want next is good -// for { -// // check if we have the next batch to write -// nextBatch := v.lowestWrittenBatch + 1 -// if _, ok := v.responsesToWrite[nextBatch]; !ok { -// break -// } - -// if err := v.WriteBatchToStream(nextBatch, hdb, tx); err != nil { -// return err -// } -// delete(v.responsesToWrite, nextBatch) -// v.lowestWrittenBatch = nextBatch -// } - -// return nil -// } - -// Unsafe is not thread-safe so it MUST be invoked only from a single thread -// func (v *LegacyExecutorVerifier) ProcessResultsSequentiallyUnsafe(tx kv.RwTx) ([]*VerifierResponse, error) { -// results := make([]*VerifierResponse, 0, len(v.promises)) -// for i := 0; i < len(v.promises); i++ { -// verifierBundle, err := v.promises[i].TryGet() -// if verifierBundle == nil && err == nil { -// break -// } - -// if err != nil { -// // let leave it for debug purposes -// // a cancelled promise is removed from v.promises => it should never appear here, that's why let's panic if it happens, because it will indicate for massive error -// if errors.Is(err, ErrPromiseCancelled) { -// panic("this should never happen") -// } - -// log.Error("error on our end while preparing the verification request, re-queueing the task", "err", err) -// // this is an error on our end, so just re-create the promise at exact position where it was -// if verifierBundle.Request.IsOverdue() { -// return nil, fmt.Errorf("error: batch %d couldn't be processed in 30 minutes", verifierBundle.Request.BatchNumber) -// } - -// v.promises[i] = NewPromise[*VerifierBundle](v.promises[i].task) -// break -// } - -// verifierResponse := verifierBundle.Response -// results = append(results, verifierResponse) -// delete(v.addedBatches, verifierResponse.BatchNumber) - -// // no point to process any further responses if we've found an invalid one -// if !verifierResponse.Valid { -// break -// } -// } - -// return results, nil -// } - -// func (v *LegacyExecutorVerifier) MarkTopResponseAsProcessed(batchNumber uint64) { -// v.promises = v.promises[1:] -// delete(v.addedBatches, batchNumber) -// } - func (v *LegacyExecutorVerifier) Wait() { for _, p := range v.promises { p.Wait() @@ -614,26 +370,6 @@ func (v *LegacyExecutorVerifier) CancelAllRequests() { v.promises = make([]*Promise[*VerifierBundle], 0) } -// // Unsafe is not thread-safe so it MUST be invoked only from a single thread -// func 
(v *LegacyExecutorVerifier) HasExecutorsUnsafe() bool { -// return len(v.executors) > 0 -// } - -// Unsafe is not thread-safe so it MUST be invoked only from a single thread -// func (v *LegacyExecutorVerifier) IsRequestAddedUnsafe(batch uint64) bool { -// _, ok := v.addedBatches[batch] -// return ok -// } - -// func (v *LegacyExecutorVerifier) WriteBatchToStream(batchNumber uint64, hdb *hermez_db.HermezDbReader, roTx kv.Tx) error { -// log.Info("[Verifier] Writing batch to stream", "batch", batchNumber) - -// if err := v.streamServer.WriteWholeBatchToStream("verifier", roTx, hdb, v.lowestWrittenBatch, batchNumber); err != nil { -// return err -// } -// return nil -// } - func (v *LegacyExecutorVerifier) GetNextOnlineAvailableExecutor() *Executor { var exec *Executor @@ -655,32 +391,6 @@ func (v *LegacyExecutorVerifier) GetNextOnlineAvailableExecutor() *Executor { return exec } -// func (v *LegacyExecutorVerifier) availableBlocksToProcess(innerCtx context.Context, batchNumber uint64) ([]uint64, error) { -// tx, err := v.db.BeginRo(innerCtx) -// if err != nil { -// return []uint64{}, err -// } -// defer tx.Rollback() - -// hermezDb := hermez_db.NewHermezDbReader(tx) -// blocks, err := hermezDb.GetL2BlockNosByBatch(batchNumber) -// if err != nil { -// return []uint64{}, err -// } - -// for _, blockNum := range blocks { -// block, err := rawdb.ReadBlockByNumber(tx, blockNum) -// if err != nil { -// return []uint64{}, err -// } -// if block == nil { -// return []uint64{}, nil -// } -// } - -// return blocks, nil -// } - func (v *LegacyExecutorVerifier) GetWholeBatchStreamBytes( batchNumber uint64, tx kv.Tx, diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index 7b7711cc9d1..3f8b97a7ed7 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -8,7 +8,6 @@ import ( "github.com/gateway-fm/cdk-erigon-lib/common" "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" @@ -57,8 +56,12 @@ func SpawnSequencingStage( return err } + var block *types.Block + runLoopBlocks := true batchContext := newBatchContext(ctx, &cfg, &historyCfg, s, sdb) - batchState := newBatchState(forkId, prepareBatchNumber(lastBatch, isLastBatchPariallyProcessed), !isLastBatchPariallyProcessed && cfg.zk.HasExecutors(), cfg.zk.L1SyncStartBlock > 0) + batchState := newBatchState(forkId, prepareBatchNumber(lastBatch, isLastBatchPariallyProcessed), !isLastBatchPariallyProcessed && cfg.zk.HasExecutors(), cfg.zk.L1SyncStartBlock > 0, cfg.txPool) + blockDataSizeChecker := newBlockDataChecker() + streamWriter := newSequencerBatchStreamWriter(batchContext, batchState, lastBatch) // using lastBatch (rather than batchState.batchNumber) is not mistake // injected batch if executionAt == 0 { @@ -82,13 +85,15 @@ func SpawnSequencingStage( return err } - if isLastBatchPariallyProcessed { + if !batchState.isL1Recovery() { // check if we just unwound from a bad executor response and if we did just close the batch here - handled, err := doInstantCloseIfNeeded(batchContext, batchState, batchCounters) + handled, err := doInstantCloseAfterUnwindOfBatchVerificationErrorIfNeeded(batchContext, batchState, batchCounters) if err != nil || handled { return err } - } else { + } + + if !isLastBatchPariallyProcessed { // handle case where batch wasn't closed properly // close it before starting a new one // this occurs when sequencer was 
switched from syncer or sequencer datastream files were deleted @@ -98,25 +103,6 @@ func SpawnSequencingStage( } } - batchTicker := time.NewTicker(cfg.zk.SequencerBatchSealTime) - defer batchTicker.Stop() - nonEmptyBatchTimer := time.NewTicker(cfg.zk.SequencerNonEmptyBatchSealTime) - defer nonEmptyBatchTimer.Stop() - - runLoopBlocks := true - batchDataOverflow := false - blockDataSizeChecker := NewBlockDataChecker() - streamWriter := newSequencerBatchStreamWriter(batchContext, batchState, lastBatch) - - limboHeaderTimestamp, limboTxHash := cfg.txPool.GetLimboTxHash(batchState.batchNumber) - limboRecovery := limboTxHash != nil - isAnyRecovery := batchState.isL1Recovery() || limboRecovery - - // if not limbo set the limboHeaderTimestamp to the "default" value for "prepareHeader" function - if !limboRecovery { - limboHeaderTimestamp = math.MaxUint64 - } - if batchState.isL1Recovery() { if cfg.zk.L1SyncStopBatch > 0 && batchState.batchNumber > cfg.zk.L1SyncStopBatch { log.Info(fmt.Sprintf("[%s] L1 recovery has completed!", logPrefix), "batch", batchState.batchNumber) @@ -140,11 +126,21 @@ func SpawnSequencingStage( } } + batchTicker := time.NewTicker(cfg.zk.SequencerBatchSealTime) + defer batchTicker.Stop() + nonEmptyBatchTimer := time.NewTicker(cfg.zk.SequencerNonEmptyBatchSealTime) + defer nonEmptyBatchTimer.Stop() + logTicker := time.NewTicker(10 * time.Second) + defer logTicker.Stop() + blockTicker := time.NewTicker(cfg.zk.SequencerBlockSealTime) + defer blockTicker.Stop() + log.Info(fmt.Sprintf("[%s] Starting batch %d...", logPrefix, batchState.batchNumber)) - var block *types.Block for blockNumber := executionAt + 1; runLoopBlocks; blockNumber++ { log.Info(fmt.Sprintf("[%s] Starting block %d (forkid %v)...", logPrefix, blockNumber, batchState.forkId)) + logTicker.Reset(10 * time.Second) + blockTicker.Reset(cfg.zk.SequencerBlockSealTime) if batchState.isL1Recovery() { didLoadedAnyDataForRecovery := batchState.loadBlockL1RecoveryData(blockNumber - (executionAt + 1)) @@ -159,12 +155,12 @@ func SpawnSequencingStage( return err } - header, parentBlock, err := prepareHeader(sdb.tx, blockNumber-1, batchState.blockState.getDeltaTimestamp(), limboHeaderTimestamp, batchState.forkId, batchState.getCoinbase(&cfg)) + header, parentBlock, err := prepareHeader(sdb.tx, blockNumber-1, batchState.blockState.getDeltaTimestamp(), batchState.getBlockHeaderForcedTimestamp(), batchState.forkId, batchState.getCoinbase(&cfg)) if err != nil { return err } - if batchDataOverflow = blockDataSizeChecker.AddBlockStartData(); batchDataOverflow { + if batchDataOverflow := blockDataSizeChecker.AddBlockStartData(); batchDataOverflow { log.Info(fmt.Sprintf("[%s] BatchL2Data limit reached. 
Stopping.", logPrefix), "blockNumber", blockNumber) break } @@ -176,7 +172,7 @@ func SpawnSequencingStage( if err != nil { return err } - if !isAnyRecovery && overflowOnNewBlock { + if !batchState.isAnyRecovery() && overflowOnNewBlock { break } @@ -197,41 +193,34 @@ func SpawnSequencingStage( } // start waiting for a new transaction to arrive - if !isAnyRecovery { + if !batchState.isAnyRecovery() { log.Info(fmt.Sprintf("[%s] Waiting for txs from the pool...", logPrefix)) } - // we don't care about defer order here we just need to make sure the tickers are stopped to - // avoid a leak - logTicker := time.NewTicker(10 * time.Second) - defer logTicker.Stop() - blockTicker := time.NewTicker(cfg.zk.SequencerBlockSealTime) - defer blockTicker.Stop() - LOOP_TRANSACTIONS: for { select { case <-logTicker.C: - if !isAnyRecovery { + if !batchState.isAnyRecovery() { log.Info(fmt.Sprintf("[%s] Waiting some more for txs from the pool...", logPrefix)) } case <-blockTicker.C: - if !isAnyRecovery { + if !batchState.isAnyRecovery() { break LOOP_TRANSACTIONS } case <-batchTicker.C: - if !isAnyRecovery { + if !batchState.isAnyRecovery() { runLoopBlocks = false break LOOP_TRANSACTIONS } case <-nonEmptyBatchTimer.C: - if !isAnyRecovery && batchState.hasAnyTransactionsInThisBatch { + if !batchState.isAnyRecovery() && batchState.hasAnyTransactionsInThisBatch { runLoopBlocks = false break LOOP_TRANSACTIONS } default: - if limboRecovery { - batchState.blockState.transactionsForInclusion, err = getLimboTransaction(ctx, cfg, limboTxHash) + if batchState.isLimboRecovery() { + batchState.blockState.transactionsForInclusion, err = getLimboTransaction(ctx, cfg, batchState.limboRecoveryData.limboTxHash) if err != nil { return err } @@ -257,7 +246,7 @@ func SpawnSequencingStage( // The copying of this structure is intentional backupDataSizeChecker := *blockDataSizeChecker if receipt, execResult, anyOverflow, err = attemptAddTransaction(cfg, sdb, ibs, batchCounters, &blockContext, header, transaction, effectiveGas, batchState.isL1Recovery(), batchState.forkId, l1InfoIndex, &backupDataSizeChecker); err != nil { - if limboRecovery { + if batchState.isLimboRecovery() { panic("limbo transaction has already been executed once so they must not fail while re-executing") } @@ -277,7 +266,7 @@ func SpawnSequencingStage( } if anyOverflow { - if limboRecovery { + if batchState.isLimboRecovery() { panic("limbo transaction has already been executed once so they must not overflow counters while re-executing") } @@ -322,7 +311,7 @@ func SpawnSequencingStage( break LOOP_TRANSACTIONS } - if limboRecovery { + if batchState.isLimboRecovery() { runLoopBlocks = false break LOOP_TRANSACTIONS } @@ -338,10 +327,10 @@ func SpawnSequencingStage( return err } - if limboRecovery { + if batchState.isLimboRecovery() { stateRoot := block.Root() - cfg.txPool.UpdateLimboRootByTxHash(limboTxHash, &stateRoot) - return fmt.Errorf("[%s] %w: %s = %s", s.LogPrefix(), zk.ErrLimboState, limboTxHash.Hex(), stateRoot.Hex()) + cfg.txPool.UpdateLimboRootByTxHash(batchState.limboRecoveryData.limboTxHash, &stateRoot) + return fmt.Errorf("[%s] %w: %s = %s", s.LogPrefix(), zk.ErrLimboState, batchState.limboRecoveryData.limboTxHash.Hex(), stateRoot.Hex()) } t.LogTimer() diff --git a/zk/stages/stage_sequence_execute_batch.go b/zk/stages/stage_sequence_execute_batch.go index 8c0411e6a35..aca2c63b3f4 100644 --- a/zk/stages/stage_sequence_execute_batch.go +++ b/zk/stages/stage_sequence_execute_batch.go @@ -2,6 +2,7 @@ package stages import ( "fmt" + "time" 
"github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/vm" @@ -36,7 +37,7 @@ func prepareBatchCounters(batchContext *BatchContext, batchState *BatchState, is return vm.NewBatchCounterCollector(batchContext.sdb.smt.GetDepth(), uint16(batchState.forkId), batchContext.cfg.zk.VirtualCountersSmtReduction, batchContext.cfg.zk.ShouldCountersBeUnlimited(batchState.isL1Recovery()), intermediateUsedCounters), nil } -func doInstantCloseIfNeeded(batchContext *BatchContext, batchState *BatchState, batchCounters *vm.BatchCounterCollector) (bool, error) { +func doInstantCloseAfterUnwindOfBatchVerificationErrorIfNeeded(batchContext *BatchContext, batchState *BatchState, batchCounters *vm.BatchCounterCollector) (bool, error) { instantClose, err := batchContext.sdb.hermezDb.GetJustUnwound(batchState.batchNumber) if err != nil || !instantClose { return false, err // err here could be nil as well @@ -119,31 +120,48 @@ func updateStreamAndCheckRollback( streamWriter *SequencerBatchStreamWriter, u stagedsync.Unwinder, ) (bool, error) { - committed, err := streamWriter.CommitNewUpdates() + checkedVerifierBundles, err := streamWriter.CommitNewUpdates() if err != nil { return false, err } - for _, commit := range committed { - if commit.Valid { + infiniteLoop := func(batchNumber uint64) { + // this infinite loop will make the node to print the error once every minute therefore preventing it for creating new blocks + for { + time.Sleep(time.Minute) + log.Error(fmt.Sprintf("[%s] identified an invalid batch with number %d", batchContext.s.LogPrefix(), batchNumber)) + } + } + + for _, verifierBundle := range checkedVerifierBundles { + if verifierBundle.Response.Valid { continue } + // updateStreamAndCheckRollback cannot be invoked during l1 recovery so no point to check it + if batchState.isL1Recovery() || !batchContext.cfg.zk.Limbo { + infiniteLoop(verifierBundle.Request.BatchNumber) + } + + if err = handleLimbo(batchContext, batchState, verifierBundle); err != nil { + return false, err + } + // we are about to unwind so place the marker ready for this to happen if err = batchContext.sdb.hermezDb.WriteJustUnwound(batchState.batchNumber); err != nil { return false, err } - unwindTo := commit.BlockNumber - 1 + unwindTo := verifierBundle.Request.GetLastBlockNumber() - 1 // for unwind we supply the block number X-1 of the block we want to remove, but supply the hash of the block // causing the unwind. 
- unwindHeader := rawdb.ReadHeaderByNumber(batchContext.sdb.tx, commit.BlockNumber) + unwindHeader := rawdb.ReadHeaderByNumber(batchContext.sdb.tx, verifierBundle.Request.GetLastBlockNumber()) if unwindHeader == nil { - return false, fmt.Errorf("could not find header for block %d", commit.BlockNumber) + return false, fmt.Errorf("could not find header for block %d", verifierBundle.Request.GetLastBlockNumber()) } - log.Warn(fmt.Sprintf("[%s] Block is invalid - rolling back", batchContext.s.LogPrefix()), "badBlock", commit.BlockNumber, "unwindTo", unwindTo, "root", unwindHeader.Root) + log.Warn(fmt.Sprintf("[%s] Block is invalid - rolling back", batchContext.s.LogPrefix()), "badBlock", verifierBundle.Request.GetLastBlockNumber(), "unwindTo", unwindTo, "root", unwindHeader.Root) u.UnwindTo(unwindTo, unwindHeader.Hash()) streamWriter.legacyVerifier.CancelAllRequests() diff --git a/zk/stages/stage_sequence_execute_data_stream.go b/zk/stages/stage_sequence_execute_data_stream.go index 0da9dd0e67a..373511a1212 100644 --- a/zk/stages/stage_sequence_execute_data_stream.go +++ b/zk/stages/stage_sequence_execute_data_stream.go @@ -33,63 +33,41 @@ func newSequencerBatchStreamWriter(batchContext *BatchContext, batchState *Batch } } -type BlockStatus struct { - BlockNumber uint64 - Valid bool - Error error -} - -func (sbc *SequencerBatchStreamWriter) CommitNewUpdates() ([]BlockStatus, error) { - var written []BlockStatus - responses, err := sbc.legacyVerifier.ProcessResultsSequentially() +func (sbc *SequencerBatchStreamWriter) CommitNewUpdates() ([]*verifier.VerifierBundle, error) { + verifierBundles, err := sbc.legacyVerifier.ProcessResultsSequentially() if err != nil { - return written, err + return nil, err } - if len(responses) == 0 { - return written, nil - } - - written, err = sbc.writeBlockDetailsToDatastream(responses) - if err != nil { - return written, err - } - - return written, nil + return sbc.writeBlockDetailsToDatastream(verifierBundles) } -func (sbc *SequencerBatchStreamWriter) writeBlockDetailsToDatastream(verifiedBundles []*verifier.VerifierBundle) ([]BlockStatus, error) { - var written []BlockStatus +func (sbc *SequencerBatchStreamWriter) writeBlockDetailsToDatastream(verifiedBundles []*verifier.VerifierBundle) ([]*verifier.VerifierBundle, error) { + var checkedVerifierBundles []*verifier.VerifierBundle = make([]*verifier.VerifierBundle, 0, len(verifiedBundles)) for _, bundle := range verifiedBundles { request := bundle.Request response := bundle.Response if response.Valid { - parentBlock, err := rawdb.ReadBlockByNumber(sbc.sdb.tx, response.BlockNumber-1) + parentBlock, err := rawdb.ReadBlockByNumber(sbc.sdb.tx, request.GetLastBlockNumber()-1) if err != nil { - return written, err + return checkedVerifierBundles, err } - block, err := rawdb.ReadBlockByNumber(sbc.sdb.tx, response.BlockNumber) + block, err := rawdb.ReadBlockByNumber(sbc.sdb.tx, request.GetLastBlockNumber()) if err != nil { - return written, err + return checkedVerifierBundles, err } - if err := sbc.streamServer.WriteBlockWithBatchStartToStream(sbc.logPrefix, sbc.sdb.tx, sbc.sdb.hermezDb, request.ForkId, response.BatchNumber, sbc.lastBatch, *parentBlock, *block); err != nil { - return written, err + if err := sbc.streamServer.WriteBlockWithBatchStartToStream(sbc.logPrefix, sbc.sdb.tx, sbc.sdb.hermezDb, request.ForkId, request.BatchNumber, sbc.lastBatch, *parentBlock, *block); err != nil { + return checkedVerifierBundles, err } // once we have handled the very first block we can update the last batch to be the current 
batch safely so that // we don't keep adding batch bookmarks in between blocks - sbc.lastBatch = response.BatchNumber - } - - status := BlockStatus{ - BlockNumber: response.BlockNumber, - Valid: response.Valid, - Error: response.Error, + sbc.lastBatch = request.BatchNumber } - written = append(written, status) + checkedVerifierBundles = append(checkedVerifierBundles, bundle) // just break early if there is an invalid response as we don't want to process the remainder anyway if !response.Valid { @@ -97,7 +75,7 @@ func (sbc *SequencerBatchStreamWriter) writeBlockDetailsToDatastream(verifiedBun } } - return written, nil + return checkedVerifierBundles, nil } func finalizeLastBatchInDatastreamIfNotFinalized(batchContext *BatchContext, batchState *BatchState, thisBlock uint64) error { diff --git a/zk/stages/stage_sequence_execute_limbo.go b/zk/stages/stage_sequence_execute_limbo.go index a022d456828..bfbd66f4f2e 100644 --- a/zk/stages/stage_sequence_execute_limbo.go +++ b/zk/stages/stage_sequence_execute_limbo.go @@ -6,41 +6,101 @@ import ( "math" "sort" - "github.com/ledgerwatch/erigon/chain" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" - verifier "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" + "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" "github.com/ledgerwatch/erigon/zk/txpool" "github.com/ledgerwatch/log/v3" ) -func handleLimbo( - logPrefix string, - sdb *stageDb, - batchNo uint64, - forkId uint64, - verifier *verifier.LegacyExecutorVerifier, - response *verifier.VerifierResponse, - pool *txpool.TxPool, - chainConfig *chain.Config, -) error { +type limboStreamBytesGroup struct { + blockNumber uint64 + transactionsIndicesInBlock []int +} - blockNumbers, err := sdb.hermezDb.GetL2BlockNosByBatch(batchNo) - if err != nil { - return err +func newLimboStreamBytesGroup(blockNumber uint64) *limboStreamBytesGroup { + return &limboStreamBytesGroup{ + blockNumber: blockNumber, + transactionsIndicesInBlock: make([]int, 0, 1), + } +} + +type limboStreamBytesBuilderHelper struct { + sendersToGroupMap map[string][]*limboStreamBytesGroup +} + +func newLimboStreamBytesBuilderHelper() *limboStreamBytesBuilderHelper { + return &limboStreamBytesBuilderHelper{ + sendersToGroupMap: make(map[string][]*limboStreamBytesGroup), + } +} + +func (_this *limboStreamBytesBuilderHelper) append(senderMapKey string, blockNumber uint64, transactionIndex int) ([]uint64, [][]int) { + limboStreamBytesGroups := _this.add(senderMapKey, blockNumber, transactionIndex) + + size := len(limboStreamBytesGroups) + resultBlocks := make([]uint64, size) + resultTransactionsSet := make([][]int, size) + + for i := 0; i < size; i++ { + group := limboStreamBytesGroups[i] + resultBlocks[i] = group.blockNumber + resultTransactionsSet[i] = group.transactionsIndicesInBlock } - if len(blockNumbers) == 0 { - panic("failing to verify a batch without blocks") + + return resultBlocks, resultTransactionsSet +} + +func (_this *limboStreamBytesBuilderHelper) add(senderMapKey string, blockNumber uint64, transactionIndex int) []*limboStreamBytesGroup { + limboStreamBytesGroups, ok := _this.sendersToGroupMap[senderMapKey] + if !ok { + limboStreamBytesGroups = []*limboStreamBytesGroup{newLimboStreamBytesGroup(blockNumber)} + _this.sendersToGroupMap[senderMapKey] = limboStreamBytesGroups } + group := limboStreamBytesGroups[len(limboStreamBytesGroups)-1] + if group.blockNumber != blockNumber { + group = newLimboStreamBytesGroup(blockNumber) + limboStreamBytesGroups = 
append(limboStreamBytesGroups, group) + _this.sendersToGroupMap[senderMapKey] = limboStreamBytesGroups + } + group.transactionsIndicesInBlock = append(group.transactionsIndicesInBlock, transactionIndex) + + return limboStreamBytesGroups +} + +func handleLimbo( + batchContext *BatchContext, + batchState *BatchState, + verifierBundle *legacy_executor_verifier.VerifierBundle, + // pool *txpool.TxPool, + // chainConfig *chain.Config, +) error { + request := verifierBundle.Request + response := verifierBundle.Response + legacyVerifier := batchContext.cfg.legacyVerifier + + log.Info(fmt.Sprintf("[%s] identified an invalid batch, entering limbo", batchContext.s.LogPrefix()), "batch", request.BatchNumber) + // we have an invalid batch, so we need to notify the txpool that these transactions are spurious + // and need to go into limbo and then trigger a rewind. The rewind will put all TX back into the + // pool, but as it knows about these limbo transactions it will place them into limbo instead + // of queueing them again + + // now we need to figure out the highest block number in the batch + // and grab all the transaction hashes along the way to inform the + // pool of hashes to avoid + blockNumbers := request.BlockNumbers sort.Slice(blockNumbers, func(i, j int) bool { return blockNumbers[i] < blockNumbers[j] }) var lowestBlock, highestBlock *types.Block + forkId, err := batchContext.sdb.hermezDb.GetForkId(request.BatchNumber) + if err != nil { + return err + } l1InfoTreeMinTimestamps := make(map[uint64]uint64) - _, err = verifier.GetWholeBatchStreamBytes(batchNo, sdb.tx, blockNumbers, sdb.hermezDb.HermezDbReader, l1InfoTreeMinTimestamps, nil) - if err != nil { + if _, err = legacyVerifier.GetWholeBatchStreamBytes(request.BatchNumber, batchContext.sdb.tx, blockNumbers, batchContext.sdb.hermezDb.HermezDbReader, l1InfoTreeMinTimestamps, nil); err != nil { return err } @@ -50,11 +110,11 @@ func handleLimbo( limboDetails := txpool.NewLimboBatchDetails() limboDetails.Witness = response.Witness limboDetails.L1InfoTreeMinTimestamps = l1InfoTreeMinTimestamps - limboDetails.BatchNumber = response.BatchNumber + limboDetails.BatchNumber = request.BatchNumber limboDetails.ForkId = forkId for _, blockNumber := range blockNumbers { - block, err := rawdb.ReadBlockByNumber(sdb.tx, blockNumber) + block, err := rawdb.ReadBlockByNumber(batchContext.sdb.tx, blockNumber) if err != nil { return err } @@ -73,7 +133,7 @@ func handleLimbo( return err } - signer := types.MakeSigner(chainConfig, blockNumber) + signer := types.MakeSigner(batchContext.cfg.chainConfig, blockNumber) sender, err := transaction.Sender(*signer) if err != nil { return err @@ -81,7 +141,7 @@ func handleLimbo( senderMapKey := sender.Hex() blocksForStreamBytes, transactionsToIncludeByIndex := limboStreamBytesBuilderHelper.append(senderMapKey, blockNumber, i) - streamBytes, err := verifier.GetWholeBatchStreamBytes(response.BatchNumber, sdb.tx, blocksForStreamBytes, sdb.hermezDb.HermezDbReader, l1InfoTreeMinTimestamps, transactionsToIncludeByIndex) + streamBytes, err := legacyVerifier.GetWholeBatchStreamBytes(request.BatchNumber, batchContext.sdb.tx, blocksForStreamBytes, batchContext.sdb.hermezDb.HermezDbReader, l1InfoTreeMinTimestamps, transactionsToIncludeByIndex) if err != nil { return err } @@ -95,12 +155,12 @@ func handleLimbo( limboTxCount := limboDetails.AppendTransaction(buffer.Bytes(), streamBytes, hash, sender, previousTxIndex) limboSendersToPreviousTxMap[senderMapKey] = limboTxCount - 1 - log.Info(fmt.Sprintf("[%s] adding transaction to 
limbo", logPrefix), "hash", hash) + log.Info(fmt.Sprintf("[%s] adding transaction to limbo", batchContext.s.LogPrefix()), "hash", hash) } } limboDetails.TimestampLimit = highestBlock.Time() limboDetails.FirstBlockNumber = lowestBlock.NumberU64() - pool.ProcessLimboBatchDetails(limboDetails) + batchContext.cfg.txPool.ProcessLimboBatchDetails(limboDetails) return nil } diff --git a/zk/stages/stage_sequence_execute_state.go b/zk/stages/stage_sequence_execute_state.go index c883f2de20d..79853ab0098 100644 --- a/zk/stages/stage_sequence_execute_state.go +++ b/zk/stages/stage_sequence_execute_state.go @@ -12,6 +12,7 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/zk/l1_data" zktx "github.com/ledgerwatch/erigon/zk/tx" + "github.com/ledgerwatch/erigon/zk/txpool" ) type BatchContext struct { @@ -42,10 +43,11 @@ type BatchState struct { yieldedTransactions mapset.Set[[32]byte] blockState *BlockState batchL1RecoveryData *BatchL1RecoveryData + limboRecoveryData *LimboRecoveryData } -func newBatchState(forkId, batchNumber uint64, hasExecutorForThisBatch, l1Recovery bool) *BatchState { - blockState := &BatchState{ +func newBatchState(forkId, batchNumber uint64, hasExecutorForThisBatch, l1Recovery bool, txPool *txpool.TxPool) *BatchState { + batchState := &BatchState{ forkId: forkId, batchNumber: batchNumber, hasExecutorForThisBatch: hasExecutorForThisBatch, @@ -54,19 +56,33 @@ func newBatchState(forkId, batchNumber uint64, hasExecutorForThisBatch, l1Recove yieldedTransactions: mapset.NewSet[[32]byte](), blockState: newBlockState(), batchL1RecoveryData: nil, + limboRecoveryData: nil, } if l1Recovery { - blockState.batchL1RecoveryData = newBatchL1RecoveryData() + batchState.batchL1RecoveryData = newBatchL1RecoveryData() } - return blockState + limboHeaderTimestamp, limboTxHash := txPool.GetLimboTxHash(batchState.batchNumber) + if limboTxHash != nil { + batchState.limboRecoveryData = newLimboRecoveryData(limboHeaderTimestamp, limboTxHash) + } + + return batchState } func (bs *BatchState) isL1Recovery() bool { return bs.batchL1RecoveryData != nil } +func (bs *BatchState) isLimboRecovery() bool { + return bs.limboRecoveryData != nil +} + +func (bs *BatchState) isAnyRecovery() bool { + return bs.isL1Recovery() || bs.isLimboRecovery() +} + func (bs *BatchState) isThereAnyTransactionsToRecover() bool { if !bs.isL1Recovery() { return false @@ -81,8 +97,17 @@ func (bs *BatchState) loadBlockL1RecoveryData(decodedBlocksIndex uint64) bool { return found } +// if not limbo set the limboHeaderTimestamp to the "default" value for "prepareHeader" function +func (bs *BatchState) getBlockHeaderForcedTimestamp() uint64 { + if bs.isLimboRecovery() { + return bs.limboRecoveryData.limboHeaderTimestamp + } + + return math.MaxUint64 +} + func (bs *BatchState) getCoinbase(cfg *SequenceBlockCfg) common.Address { - if bs.batchL1RecoveryData != nil { + if bs.isL1Recovery() { return bs.batchL1RecoveryData.recoveredBatchData.Coinbase } @@ -148,6 +173,19 @@ func (batchL1RecoveryData *BatchL1RecoveryData) getDecodedL1RecoveredBatchDataBy return &batchL1RecoveryData.recoveredBatchData.DecodedData[decodedBlocksIndex], true } +// TYPE LIMBO RECOVERY DATA +type LimboRecoveryData struct { + limboHeaderTimestamp uint64 + limboTxHash *common.Hash +} + +func newLimboRecoveryData(limboHeaderTimestamp uint64, limboTxHash *common.Hash) *LimboRecoveryData { + return &LimboRecoveryData{ + limboHeaderTimestamp: limboHeaderTimestamp, + limboTxHash: limboTxHash, + } +} + // TYPE BLOCK STATE type BlockState 
struct { transactionsForInclusion []types.Transaction diff --git a/zk/stages/stage_sequence_execute_utils.go b/zk/stages/stage_sequence_execute_utils.go index 3cb908e24f2..124d92d2f81 100644 --- a/zk/stages/stage_sequence_execute_utils.go +++ b/zk/stages/stage_sequence_execute_utils.go @@ -402,7 +402,7 @@ type BlockDataChecker struct { counter uint64 // counter amount of bytes } -func NewBlockDataChecker() *BlockDataChecker { +func newBlockDataChecker() *BlockDataChecker { return &BlockDataChecker{ limit: LIMIT_120_KB, counter: 0, diff --git a/zk/stages/stage_sequence_execute_verifier.go b/zk/stages/stage_sequence_execute_verifier.go deleted file mode 100644 index c30498bed81..00000000000 --- a/zk/stages/stage_sequence_execute_verifier.go +++ /dev/null @@ -1,180 +0,0 @@ -package stages - -// import ( -// "errors" -// "fmt" -// "sync" - -// "github.com/gateway-fm/cdk-erigon-lib/common" -// "github.com/ledgerwatch/erigon/eth/ethconfig" -// verifier "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" -// "github.com/ledgerwatch/log/v3" -// ) - -// type BatchVerifier struct { -// cfg *ethconfig.Zk -// legacyVerifier *verifier.LegacyExecutorVerifier -// hasExecutor bool -// forkId uint64 -// promises []*verifier.Promise[*verifier.VerifierBundle] -// mtxPromises *sync.Mutex -// // stop bool -// // errors chan error -// // finishCond *sync.Cond -// } - -// func NewBatchVerifier( -// cfg *ethconfig.Zk, -// hasExecutors bool, -// legacyVerifier *verifier.LegacyExecutorVerifier, -// forkId uint64, -// ) *BatchVerifier { -// return &BatchVerifier{ -// cfg: cfg, -// hasExecutor: hasExecutors, -// legacyVerifier: legacyVerifier, -// forkId: forkId, -// mtxPromises: &sync.Mutex{}, -// promises: make([]*verifier.Promise[*verifier.VerifierBundle], 0), -// // errors: make(chan error), -// // finishCond: sync.NewCond(&sync.Mutex{}), -// } -// } - -// func (bv *BatchVerifier) StartAsyncVerification( -// batchNumber uint64, -// blockNumber uint64, -// stateRoot common.Hash, -// counters map[string]int, -// blockNumbers []uint64, -// ) { -// request := verifier.NewVerifierRequest(batchNumber, blockNumber, bv.forkId, stateRoot, counters) - -// var promise *verifier.Promise[*verifier.VerifierBundle] -// if bv.hasExecutor { -// promise = bv.verifyWithExecutor(request, blockNumbers) -// } else { -// promise = bv.verifyWithoutExecutor(request, blockNumbers) -// } - -// bv.appendPromise(promise) -// } - -// func (bv *BatchVerifier) CheckProgress() ([]*verifier.VerifierBundle, int, error) { -// bv.mtxPromises.Lock() -// defer bv.mtxPromises.Unlock() - -// var verifierResponse []*verifier.VerifierBundle - -// // not a stop signal, so we can start to process our promises now -// for idx, promise := range bv.promises { -// verifierBundle, err := promise.TryGet() -// if verifierBundle == nil && err == nil { -// // If code enters here this means that this promise is not yet completed -// // We must processes responses sequentially so if this one is not ready we can just break -// break -// } - -// if err != nil { -// // let leave it for debug purposes -// // a cancelled promise is removed from v.promises => it should never appear here, that's why let's panic if it happens, because it will indicate for massive error -// if errors.Is(err, verifier.ErrPromiseCancelled) { -// panic("this should never happen") -// } - -// log.Error("error on our end while preparing the verification request, re-queueing the task", "err", err) - -// if verifierBundle.Request.IsOverdue() { -// // signal an error, the caller can check on 
this and stop the process if needs be -// return nil, 0, fmt.Errorf("error: batch %d couldn't be processed in 30 minutes", verifierBundle.Request.BatchNumber) -// } - -// // re-queue the task - it should be safe to replace the index of the slice here as we only add to it -// bv.promises[idx] = promise.CloneAndRerun() - -// // break now as we know we can't proceed here until this promise is attempted again -// break -// } - -// verifierResponse = append(verifierResponse, verifierBundle) -// } - -// // remove processed promises from the list -// bv.promises = bv.promises[len(verifierResponse):] - -// return verifierResponse, len(bv.promises), nil -// } - -// // func (bv *BatchVerifier) CancelAllRequestsUnsafe() { -// // bv.mtxPromises.Lock() -// // defer bv.mtxPromises.Unlock() - -// // // cancel all promises -// // // all queued promises will return ErrPromiseCancelled while getting its result -// // for _, p := range bv.promises { -// // p.Cancel() -// // } - -// // // the goal of this car is to ensure that running promises are stopped as soon as possible -// // // we need it because the promise's function must finish and then the promise checks if it has been cancelled -// // bv.legacyVerifier.cancelAllVerifications.Store(true) - -// // for _, e := range bv.legacyVerifier.executors { -// // // let's wait for all threads that are waiting to add to v.openRequests to finish -// // for e.QueueLength() > 0 { -// // time.Sleep(1 * time.Millisecond) -// // } -// // } - -// // bv.legacyVerifier.cancelAllVerifications.Store(false) - -// // bv.promises = make([]*verifier.Promise[*verifier.VerifierBundle], 0) -// // } - -// // func (bv *BatchVerifier) WaitForFinish() { -// // count := 0 -// // bv.mtxPromises.Lock() -// // count = len(bv.promises) -// // bv.mtxPromises.Unlock() - -// // if count > 0 { -// // bv.finishCond.L.Lock() -// // bv.finishCond.Wait() -// // bv.finishCond.L.Unlock() -// // } -// // } - -// func (bv *BatchVerifier) appendPromise(promise *verifier.Promise[*verifier.VerifierBundle]) { -// bv.mtxPromises.Lock() -// defer bv.mtxPromises.Unlock() -// bv.promises = append(bv.promises, promise) -// } - -// func (bv *BatchVerifier) verifyWithoutExecutor(request *verifier.VerifierRequest, blockNumbers []uint64) *verifier.Promise[*verifier.VerifierBundle] { -// valid := true -// // simulate a die roll to determine if this is a good batch or not -// // 1 in 6 chance of being a bad batch -// // if rand.Intn(6) == 0 { -// // valid = false -// // } - -// promise := verifier.NewPromise[*verifier.VerifierBundle](func() (*verifier.VerifierBundle, error) { -// response := &verifier.VerifierResponse{ -// BatchNumber: request.BatchNumber, -// BlockNumber: request.BlockNumber, -// Valid: valid, -// OriginalCounters: request.Counters, -// Witness: nil, -// ExecutorResponse: nil, -// Error: nil, -// } -// return verifier.NewVerifierBundle(request, response), nil -// }) -// promise.Wait() - -// return promise -// } - -// func (bv *BatchVerifier) verifyWithExecutor(request *verifier.VerifierRequest, blockNumbers []uint64) *verifier.Promise[*verifier.VerifierBundle] { -// return bv.legacyVerifier.VerifyAsync(request, blockNumbers) -// } diff --git a/zk/stages/stage_sequencer_executor_verify.go b/zk/stages/stage_sequencer_executor_verify.go deleted file mode 100644 index 98020c3a388..00000000000 --- a/zk/stages/stage_sequencer_executor_verify.go +++ /dev/null @@ -1,368 +0,0 @@ -package stages - -import ( - "context" - - "github.com/gateway-fm/cdk-erigon-lib/kv" - "github.com/ledgerwatch/erigon/chain" - 
"github.com/ledgerwatch/erigon/eth/ethconfig" - "github.com/ledgerwatch/erigon/eth/stagedsync" - "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" - "github.com/ledgerwatch/erigon/zk/txpool" -) - -type SequencerExecutorVerifyCfg struct { - db kv.RwDB - verifier *legacy_executor_verifier.LegacyExecutorVerifier - txPool *txpool.TxPool - chainConfig *chain.Config - cfgZk *ethconfig.Zk -} - -func StageSequencerExecutorVerifyCfg( - db kv.RwDB, - verifier *legacy_executor_verifier.LegacyExecutorVerifier, - pool *txpool.TxPool, - chainConfig *chain.Config, - cfgZk *ethconfig.Zk, -) SequencerExecutorVerifyCfg { - return SequencerExecutorVerifyCfg{ - db: db, - verifier: verifier, - txPool: pool, - chainConfig: chainConfig, - cfgZk: cfgZk, - } -} - -func SpawnSequencerExecutorVerifyStage( - s *stagedsync.StageState, - u stagedsync.Unwinder, - tx kv.RwTx, - ctx context.Context, - cfg SequencerExecutorVerifyCfg, - quiet bool, -) error { - // logPrefix := s.LogPrefix() - // log.Info(fmt.Sprintf("[%s] Starting sequencer verify stage", logPrefix)) - // defer log.Info(fmt.Sprintf("[%s] Finished sequencer verify stage", logPrefix)) - - // var err error - // freshTx := tx == nil - // if freshTx { - // tx, err = cfg.db.BeginRw(ctx) - // if err != nil { - // return err - // } - // defer tx.Rollback() - // } - - // hermezDb := hermez_db.NewHermezDb(tx) - // hermezDbReader := hermez_db.NewHermezDbReader(tx) - - // // progress here is at the batch level - // progress, err := stages.GetStageProgress(tx, stages.SequenceExecutorVerify) - // if err != nil { - // return err - // } - - // // progress here is at the block level - // executeProgress, err := stages.GetStageProgress(tx, stages.Execution) - // if err != nil { - // return err - // } - - // // we need to get the batch number for the latest block, so we can search for new batches to send for - // // verification - // latestBatch, err := hermezDb.GetBatchNoByL2Block(executeProgress) - // if err != nil { - // return err - // } - - // isBatchPartial, err := hermezDb.GetIsBatchPartiallyProcessed(latestBatch) - // if err != nil { - // return err - // } - // // we could be running in a state with no executors so we need instant response that we are in an - // // ok state to save lag in the data stream !!Dragons: there will be no witnesses stored running in - // // this mode of operation - // canVerify := cfg.verifier.HasExecutorsUnsafe() - - // // if batch was stopped intermediate and is not finished - we need to finish it first - // // this shouldn't occur since exec stage is before that and should finish the batch - // // but just in case something unexpected happens - // if isBatchPartial { - // log.Error(fmt.Sprintf("[%s] batch %d is not fully processed in stage_execute", logPrefix, latestBatch)) - // canVerify = false - // } - - // if !canVerify { - // if latestBatch == injectedBatchNumber { - // return nil - // } - - // if err = stages.SaveStageProgress(tx, stages.SequenceExecutorVerify, latestBatch); err != nil { - // return err - // } - // if freshTx { - // if err = tx.Commit(); err != nil { - // return err - // } - // } - // return nil - // } - - // // get ordered promises from the verifier - // // NB: this call is where the stream write happens (so it will be delayed until this stage is run) - // responses, err := cfg.verifier.ProcessResultsSequentiallyUnsafe(tx) - // if err != nil { - // //TODO: what happen with promises if this request returns here? 
- // return err - // } - - // for _, response := range responses { - // // ensure that the first response is the next batch based on the current stage progress - // // otherwise just return early until we get it - // if response.BatchNumber != progress+1 { - // if freshTx { - // if err = tx.Commit(); err != nil { - // return err - // } - // } - // return nil - // } - - // // now check that we are indeed in a good state to continue - // if !response.Valid { - // if cfg.cfgZk.Limbo { - // log.Info(fmt.Sprintf("[%s] identified an invalid batch, entering limbo", s.LogPrefix()), "batch", response.BatchNumber) - // // we have an invalid batch, so we need to notify the txpool that these transactions are spurious - // // and need to go into limbo and then trigger a rewind. The rewind will put all TX back into the - // // pool, but as it knows about these limbo transactions it will place them into limbo instead - // // of queueing them again - - // // now we need to figure out the highest block number in the batch - // // and grab all the transaction hashes along the way to inform the - // // pool of hashes to avoid - // blockNumbers, err := hermezDb.GetL2BlockNosByBatch(response.BatchNumber) - // if err != nil { - // return err - // } - // if len(blockNumbers) == 0 { - // panic("failing to verify a batch without blocks") - // } - // sort.Slice(blockNumbers, func(i, j int) bool { - // return blockNumbers[i] < blockNumbers[j] - // }) - - // var lowestBlock, highestBlock *types.Block - // forkId, err := hermezDb.GetForkId(response.BatchNumber) - // if err != nil { - // return err - // } - - // l1InfoTreeMinTimestamps := make(map[uint64]uint64) - // if _, err = cfg.verifier.GetWholeBatchStreamBytes(response.BatchNumber, tx, blockNumbers, hermezDbReader, l1InfoTreeMinTimestamps, nil); err != nil { - // return err - // } - - // limboSendersToPreviousTxMap := make(map[string]uint32) - // limboStreamBytesBuilderHelper := newLimboStreamBytesBuilderHelper() - - // limboDetails := txpool.NewLimboBatchDetails() - // limboDetails.Witness = response.Witness - // limboDetails.L1InfoTreeMinTimestamps = l1InfoTreeMinTimestamps - // limboDetails.BatchNumber = response.BatchNumber - // limboDetails.ForkId = forkId - - // for _, blockNumber := range blockNumbers { - // block, err := rawdb.ReadBlockByNumber(tx, blockNumber) - // if err != nil { - // return err - // } - // highestBlock = block - // if lowestBlock == nil { - // // capture the first block, then we can set the bad block hash in the unwind to terminate the - // // stage loop and broadcast the accumulator changes to the txpool before the next stage loop run - // lowestBlock = block - // } - - // for i, transaction := range block.Transactions() { - // var b []byte - // buffer := bytes.NewBuffer(b) - // err = transaction.EncodeRLP(buffer) - // if err != nil { - // return err - // } - - // signer := types.MakeSigner(cfg.chainConfig, blockNumber) - // sender, err := transaction.Sender(*signer) - // if err != nil { - // return err - // } - // senderMapKey := sender.Hex() - - // blocksForStreamBytes, transactionsToIncludeByIndex := limboStreamBytesBuilderHelper.append(senderMapKey, blockNumber, i) - // streamBytes, err := cfg.verifier.GetWholeBatchStreamBytes(response.BatchNumber, tx, blocksForStreamBytes, hermezDbReader, l1InfoTreeMinTimestamps, transactionsToIncludeByIndex) - // if err != nil { - // return err - // } - - // previousTxIndex, ok := limboSendersToPreviousTxMap[senderMapKey] - // if !ok { - // previousTxIndex = math.MaxUint32 - // } - - // hash := 
transaction.Hash() - // limboTxCount := limboDetails.AppendTransaction(buffer.Bytes(), streamBytes, hash, sender, previousTxIndex) - // limboSendersToPreviousTxMap[senderMapKey] = limboTxCount - 1 - - // log.Info(fmt.Sprintf("[%s] adding transaction to limbo", s.LogPrefix()), "hash", hash) - // } - // } - - // limboDetails.TimestampLimit = highestBlock.Time() - // limboDetails.FirstBlockNumber = lowestBlock.NumberU64() - // cfg.txPool.ProcessLimboBatchDetails(limboDetails) - - // u.UnwindTo(lowestBlock.NumberU64()-1, lowestBlock.Hash()) - // cfg.verifier.CancelAllRequestsUnsafe() - // return nil - // } else { - // // this infinite loop will make the node to print the error once every minute therefore preventing it for creating new blocks - // for { - // time.Sleep(time.Minute) - // log.Error(fmt.Sprintf("[%s] identified an invalid batch with number %d", s.LogPrefix(), response.BatchNumber)) - // } - // } - // } - - // // all good so just update the stage progress for now - // if err = stages.SaveStageProgress(tx, stages.SequenceExecutorVerify, response.BatchNumber); err != nil { - // return err - // } - - // // we know that if the batch has been marked as OK we can update the datastream progress to match - // // as the verifier will have handled writing to the stream - // highestBlock, err := hermezDb.GetHighestBlockInBatch(response.BatchNumber) - // if err != nil { - // return err - // } - - // if err = stages.SaveStageProgress(tx, stages.DataStream, highestBlock); err != nil { - // return err - // } - - // // store the witness - // errWitness := hermezDb.WriteWitness(response.BatchNumber, response.Witness) - // if errWitness != nil { - // log.Warn("Failed to write witness", "batch", response.BatchNumber, "err", errWitness) - // } - - // cfg.verifier.MarkTopResponseAsProcessed(response.BatchNumber) - // progress = response.BatchNumber - // } - - // // send off the new batches to the verifier to be processed - // for batch := progress + 1; batch <= latestBatch; batch++ { - // // we do not need to verify batch 1 as this is the injected batch so just updated progress and move on - // if batch == injectedBatchBatchNumber { - // if err = stages.SaveStageProgress(tx, stages.SequenceExecutorVerify, injectedBatchBatchNumber); err != nil { - // return err - // } - // } else { - // if cfg.verifier.IsRequestAddedUnsafe(batch) { - // continue - // } - - // // we need the state root of the last block in the batch to send to the executor - // highestBlock, err := hermezDb.GetHighestBlockInBatch(batch) - // if err != nil { - // return err - // } - // if highestBlock == 0 { - // // maybe nothing in this batch and we know we don't handle batch 0 (genesis) - // continue - // } - // block, err := rawdb.ReadBlockByNumber(tx, highestBlock) - // if err != nil { - // return err - // } - - // counters, found, err := hermezDb.GetBatchCounters(batch) - // if err != nil { - // return err - // } - // if !found { - // return errors.New("batch counters not found") - // } - - // forkId, err := hermezDb.GetForkId(batch) - // if err != nil { - // return err - // } - - // if forkId == 0 { - // return errors.New("the network cannot have a 0 fork id") - // } - - // cfg.verifier.AddRequestUnsafe(legacy_executor_verifier.NewVerifierRequest(batch, forkId, block.Root(), counters), cfg.cfgZk.SequencerBatchSealTime) - // } - // } - - // if freshTx { - // if err = tx.Commit(); err != nil { - // return err - // } - // } - - return nil -} - -func UnwindSequencerExecutorVerifyStage( - u *stagedsync.UnwindState, - s 
*stagedsync.StageState, - tx kv.RwTx, - ctx context.Context, - cfg SequencerExecutorVerifyCfg, -) (err error) { - /* - The "Unwinder" keeps stage's progress in blocks. - If a stage's current progress is <= unwindPoint then the unwind is not invoked for this stage (sync.go line 386) - For this particular case, the progress is in batches => its progress is always <= unwindPoint, because unwindPoint is in blocks - This is not a problem, because this stage's progress actually keeps the number of last verified batch and we never unwind the last verified batch - */ - - // freshTx := tx == nil - // if freshTx { - // tx, err = cfg.db.BeginRw(ctx) - // if err != nil { - // return err - // } - // defer tx.Rollback() - // } - - // logPrefix := u.LogPrefix() - // log.Info(fmt.Sprintf("[%s] Unwind Executor Verify", logPrefix), "from", s.BlockNumber, "to", u.UnwindPoint) - - // if err = u.Done(tx); err != nil { - // return err - // } - - // if freshTx { - // if err = tx.Commit(); err != nil { - // return err - // } - // } - - return nil -} - -func PruneSequencerExecutorVerifyStage( - s *stagedsync.PruneState, - tx kv.RwTx, - cfg SequencerExecutorVerifyCfg, - ctx context.Context, -) error { - return nil -} diff --git a/zk/stages/stage_sequencer_executor_verify_limbo.go b/zk/stages/stage_sequencer_executor_verify_limbo.go deleted file mode 100644 index a1328dd9dcc..00000000000 --- a/zk/stages/stage_sequencer_executor_verify_limbo.go +++ /dev/null @@ -1,56 +0,0 @@ -package stages - -type limboStreamBytesGroup struct { - blockNumber uint64 - transactionsIndicesInBlock []int -} - -func newLimboStreamBytesGroup(blockNumber uint64) *limboStreamBytesGroup { - return &limboStreamBytesGroup{ - blockNumber: blockNumber, - transactionsIndicesInBlock: make([]int, 0, 1), - } -} - -type limboStreamBytesBuilderHelper struct { - sendersToGroupMap map[string][]*limboStreamBytesGroup -} - -func newLimboStreamBytesBuilderHelper() *limboStreamBytesBuilderHelper { - return &limboStreamBytesBuilderHelper{ - sendersToGroupMap: make(map[string][]*limboStreamBytesGroup), - } -} - -func (_this *limboStreamBytesBuilderHelper) append(senderMapKey string, blockNumber uint64, transactionIndex int) ([]uint64, [][]int) { - limboStreamBytesGroups := _this.add(senderMapKey, blockNumber, transactionIndex) - - size := len(limboStreamBytesGroups) - resultBlocks := make([]uint64, size) - resultTransactionsSet := make([][]int, size) - - for i := 0; i < size; i++ { - group := limboStreamBytesGroups[i] - resultBlocks[i] = group.blockNumber - resultTransactionsSet[i] = group.transactionsIndicesInBlock - } - - return resultBlocks, resultTransactionsSet -} - -func (_this *limboStreamBytesBuilderHelper) add(senderMapKey string, blockNumber uint64, transactionIndex int) []*limboStreamBytesGroup { - limboStreamBytesGroups, ok := _this.sendersToGroupMap[senderMapKey] - if !ok { - limboStreamBytesGroups = []*limboStreamBytesGroup{newLimboStreamBytesGroup(blockNumber)} - _this.sendersToGroupMap[senderMapKey] = limboStreamBytesGroups - } - group := limboStreamBytesGroups[len(limboStreamBytesGroups)-1] - if group.blockNumber != blockNumber { - group = newLimboStreamBytesGroup(blockNumber) - limboStreamBytesGroups = append(limboStreamBytesGroups, group) - _this.sendersToGroupMap[senderMapKey] = limboStreamBytesGroups - } - group.transactionsIndicesInBlock = append(group.transactionsIndicesInBlock, transactionIndex) - - return limboStreamBytesGroups -} diff --git a/zk/stages/stage_sequencer_interhashes.go b/zk/stages/stage_sequencer_interhashes.go index 
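
The grouping helper deleted above reappears later under zk/stages/stage_sequence_execute_limbo.go; its contract is easiest to see from a usage sketch. The values below are invented for illustration and assume the code runs inside that package:

```go
// Hypothetical walk over a sender's transactions; only the shape of the
// returned slices matters here.
func exampleLimboGrouping() {
	helper := newLimboStreamBytesBuilderHelper()

	// sender "0xabc": tx index 0 in block 10, then indices 0 and 1 in block 11
	blocks, txIndices := helper.append("0xabc", 10, 0) // -> [10], [[0]]
	blocks, txIndices = helper.append("0xabc", 11, 0)  // -> [10 11], [[0] [0]]
	blocks, txIndices = helper.append("0xabc", 11, 1)  // -> [10 11], [[0] [0 1]]

	// blocks and txIndices line up with the blockNumbers and
	// transactionsToIncludeByIndex arguments of GetWholeBatchStreamBytes.
	_, _ = blocks, txIndices
}
```
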
07eff2e521d..ddef9f0d9ba 100644 --- a/zk/stages/stage_sequencer_interhashes.go +++ b/zk/stages/stage_sequencer_interhashes.go @@ -7,21 +7,6 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync" ) -// type SequencerInterhashesCfg struct { -// db kv.RwDB -// accumulator *shards.Accumulator -// } - -// func StageSequencerInterhashesCfg( -// db kv.RwDB, -// accumulator *shards.Accumulator, -// ) SequencerInterhashesCfg { -// return SequencerInterhashesCfg{ -// db: db, -// accumulator: accumulator, -// } -// } - // This stages does NOTHING while going forward, because its done during execution // Even this stage progress is updated in execution stage func SpawnSequencerInterhashesStage( @@ -32,32 +17,6 @@ func SpawnSequencerInterhashesStage( cfg ZkInterHashesCfg, quiet bool, ) error { - // var err error - - // freshTx := tx == nil - // if freshTx { - // tx, err = cfg.db.BeginRw(ctx) - // if err != nil { - // return err - // } - // defer tx.Rollback() - // } - - // to, err := s.ExecutionAt(tx) - // if err != nil { - // return err - // } - - // if err := s.Update(tx, to); err != nil { - // return err - // } - - // if freshTx { - // if err = tx.Commit(); err != nil { - // return err - // } - // } - return nil } diff --git a/zk/stages/stages.go b/zk/stages/stages.go index 92b236b1835..583d0c9eaf0 100644 --- a/zk/stages/stages.go +++ b/zk/stages/stages.go @@ -17,11 +17,9 @@ func SequencerZkStages( l1InfoTreeCfg L1InfoTreeCfg, sequencerL1BlockSyncCfg SequencerL1BlockSyncCfg, dataStreamCatchupCfg DataStreamCatchupCfg, - // sequencerInterhashesCfg SequencerInterhashesCfg, exec SequenceBlockCfg, hashState stages.HashStateCfg, zkInterHashesCfg ZkInterHashesCfg, - // sequencerExecutorVerifyCfg SequencerExecutorVerifyCfg, history stages.HistoryCfg, logIndex stages.LogIndexCfg, callTraces stages.CallTracesCfg, @@ -128,19 +126,6 @@ func SequencerZkStages( return PruneSequencerInterhashesStage(p, tx, zkInterHashesCfg, ctx) }, }, - // { - // ID: stages2.SequenceExecutorVerify, - // Description: "Sequencer, check batch with legacy executor", - // Forward: func(firstCycle bool, badBlockUnwind bool, s *stages.StageState, u stages.Unwinder, tx kv.RwTx, quiet bool) error { - // return SpawnSequencerExecutorVerifyStage(s, u, tx, ctx, sequencerExecutorVerifyCfg, quiet) - // }, - // Unwind: func(firstCycle bool, u *stages.UnwindState, s *stages.StageState, tx kv.RwTx) error { - // return UnwindSequencerExecutorVerifyStage(u, s, tx, ctx, sequencerExecutorVerifyCfg) - // }, - // Prune: func(firstCycle bool, p *stages.PruneState, tx kv.RwTx) error { - // return PruneSequencerExecutorVerifyStage(p, tx, sequencerExecutorVerifyCfg, ctx) - // }, - // }, { ID: stages2.HashState, Description: "Hash the key in the state", @@ -175,8 +160,9 @@ func SequencerZkStages( Description: "Generate account history index", Disabled: false, Forward: func(firstCycle bool, badBlockUnwind bool, s *stages.StageState, u stages.Unwinder, tx kv.RwTx, quiet bool) error { - return nil // return stages.SpawnAccountHistoryIndex(s, tx, history, ctx) + // only forward part of this stage is part of execution stage + return nil }, Unwind: func(firstCycle bool, u *stages.UnwindState, s *stages.StageState, tx kv.RwTx) error { return stages.UnwindAccountHistoryIndex(u, s, tx, history, ctx) @@ -190,8 +176,9 @@ func SequencerZkStages( Description: "Generate storage history index", Disabled: false, Forward: func(firstCycle bool, badBlockUnwind bool, s *stages.StageState, u stages.Unwinder, tx kv.RwTx, quiet bool) error { - return nil // return 
stages.SpawnStorageHistoryIndex(s, tx, history, ctx) + // only forward part of this stage is part of execution stage + return nil }, Unwind: func(firstCycle bool, u *stages.UnwindState, s *stages.StageState, tx kv.RwTx) error { return stages.UnwindStorageHistoryIndex(u, s, tx, history, ctx) diff --git a/zk/txpool/pool_zk_limbo_processor.go b/zk/txpool/pool_zk_limbo_processor.go index 667d63c51eb..c74e47aefae 100644 --- a/zk/txpool/pool_zk_limbo_processor.go +++ b/zk/txpool/pool_zk_limbo_processor.go @@ -2,12 +2,16 @@ package txpool import ( "context" + "math" "time" "github.com/gateway-fm/cdk-erigon-lib/kv" "github.com/ledgerwatch/erigon/chain" + "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" + "github.com/ledgerwatch/log/v3" + "github.com/status-im/keycard-go/hexutils" ) type LimboSubPoolProcessor struct { @@ -47,54 +51,54 @@ func (_this *LimboSubPoolProcessor) StartWork() { } func (_this *LimboSubPoolProcessor) run() { - // log.Info("[Limbo pool processor] Starting") - // defer log.Info("[Limbo pool processor] End") + log.Info("[Limbo pool processor] Starting") + defer log.Info("[Limbo pool processor] End") - // ctx := context.Background() - // limboBatchDetails := _this.txPool.GetLimboDetailsCloned() + ctx := context.Background() + limboBatchDetails := _this.txPool.GetLimboDetailsCloned() - // size := len(limboBatchDetails) - // if size == 0 { - // return - // } + size := len(limboBatchDetails) + if size == 0 { + return + } - // for _, limboBatch := range limboBatchDetails { - // for _, limboTx := range limboBatch.Transactions { - // if !limboTx.hasRoot() { - // return - // } - // } - // } + for _, limboBatch := range limboBatchDetails { + for _, limboTx := range limboBatch.Transactions { + if !limboTx.hasRoot() { + return + } + } + } - // tx, err := _this.db.BeginRo(ctx) - // if err != nil { - // return - // } - // defer tx.Rollback() + tx, err := _this.db.BeginRo(ctx) + if err != nil { + return + } + defer tx.Rollback() - // // we just need some counter variable with large used values in order verify not to complain - // batchCounters := vm.NewBatchCounterCollector(256, 1, _this.zkCfg.VirtualCountersSmtReduction, true, nil) - // unlimitedCounters := batchCounters.NewCounters().UsedAsMap() - // for k := range unlimitedCounters { - // unlimitedCounters[k] = math.MaxInt32 - // } + // we just need some counter variable with large used values in order verify not to complain + batchCounters := vm.NewBatchCounterCollector(256, 1, _this.zkCfg.VirtualCountersSmtReduction, true, nil) + unlimitedCounters := batchCounters.NewCounters().UsedAsMap() + for k := range unlimitedCounters { + unlimitedCounters[k] = math.MaxInt32 + } - // invalidTxs := []*string{} + invalidTxs := []*string{} - // for _, limboBatch := range limboBatchDetails { - // for _, limboTx := range limboBatch.Transactions { - // request := legacy_executor_verifier.NewVerifierRequest(limboBatch.BatchNumber, limboBatch.ForkId, limboTx.Root, unlimitedCounters) - // err := _this.verifier.VerifySync(tx, request, limboBatch.Witness, limboTx.StreamBytes, limboBatch.TimestampLimit, limboBatch.FirstBlockNumber, limboBatch.L1InfoTreeMinTimestamps) - // if err != nil { - // idHash := hexutils.BytesToHex(limboTx.Hash[:]) - // invalidTxs = append(invalidTxs, &idHash) - // log.Info("[Limbo pool processor]", "invalid tx", limboTx.Hash, "err", err) - // continue - // } + for _, limboBatch := range limboBatchDetails { + for _, limboTx := range 
limboBatch.Transactions { + request := legacy_executor_verifier.NewVerifierRequest(limboBatch.ForkId, limboBatch.BatchNumber, []uint64{1}, limboTx.Root, unlimitedCounters) // let's assume that there is a just single block number 1 + err := _this.verifier.VerifySync(tx, request, limboBatch.Witness, limboTx.StreamBytes, limboBatch.TimestampLimit, limboBatch.FirstBlockNumber, limboBatch.L1InfoTreeMinTimestamps) + if err != nil { + idHash := hexutils.BytesToHex(limboTx.Hash[:]) + invalidTxs = append(invalidTxs, &idHash) + log.Info("[Limbo pool processor]", "invalid tx", limboTx.Hash, "err", err) + continue + } - // log.Info("[Limbo pool processor]", "valid tx", limboTx.Hash) - // } - // } + log.Info("[Limbo pool processor]", "valid tx", limboTx.Hash) + } + } - // _this.txPool.MarkProcessedLimboDetails(size, invalidTxs) + _this.txPool.MarkProcessedLimboDetails(size, invalidTxs) } From af2d4fec3ad007715e25fb027d4782b328daa78b Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Thu, 1 Aug 2024 15:21:39 +0000 Subject: [PATCH 15/33] add limbo for single block in a batch only --- zk/hermez_db/db.go | 35 +++++--- .../legacy_executor_verifier.go | 4 +- zk/stages/stage_sequence_execute.go | 12 ++- zk/stages/stage_sequence_execute_batch.go | 76 ++++++++-------- zk/stages/stage_sequence_execute_limbo.go | 89 ++++++++++--------- zk/stages/stage_sequence_execute_unwind.go | 9 ++ 6 files changed, 123 insertions(+), 102 deletions(-) diff --git a/zk/hermez_db/db.go b/zk/hermez_db/db.go index 4f291665429..f6b00ec2d0a 100644 --- a/zk/hermez_db/db.go +++ b/zk/hermez_db/db.go @@ -1533,6 +1533,15 @@ func (db *HermezDbReader) GetIsBatchPartiallyProcessed(batchNo uint64) (bool, er return len(v) > 0, nil } +func (db *HermezDb) TruncateIsBatchPartiallyProcessed(fromBatch, toBatch uint64) error { + for batch := fromBatch; batch <= toBatch; batch++ { + if err := db.DeleteIsBatchPartiallyProcessed(batch); err != nil { + return err + } + } + return nil +} + func (db *HermezDb) WriteLocalExitRootForBatchNo(batchNo uint64, root common.Hash) error { return db.tx.Put(LOCAL_EXIT_ROOTS, Uint64ToBytes(batchNo), root.Bytes()) } @@ -1614,18 +1623,18 @@ func (db *HermezDbReader) GetAllForkHistory() ([]uint64, []uint64, error) { return forks, batches, nil } -func (db *HermezDb) WriteJustUnwound(batch uint64) error { - return db.tx.Put(JUST_UNWOUND, Uint64ToBytes(batch), []byte{1}) -} +// func (db *HermezDb) WriteJustUnwound(batch uint64) error { +// return db.tx.Put(JUST_UNWOUND, Uint64ToBytes(batch), []byte{1}) +// } -func (db *HermezDb) DeleteJustUnwound(batch uint64) error { - return db.tx.Delete(JUST_UNWOUND, Uint64ToBytes(batch)) -} +// func (db *HermezDb) DeleteJustUnwound(batch uint64) error { +// return db.tx.Delete(JUST_UNWOUND, Uint64ToBytes(batch)) +// } -func (db *HermezDb) GetJustUnwound(batch uint64) (bool, error) { - v, err := db.tx.GetOne(JUST_UNWOUND, Uint64ToBytes(batch)) - if err != nil { - return false, err - } - return len(v) > 0, nil -} +// func (db *HermezDb) GetJustUnwound(batch uint64) (bool, error) { +// v, err := db.tx.GetOne(JUST_UNWOUND, Uint64ToBytes(batch)) +// if err != nil { +// return false, err +// } +// return len(v) > 0, nil +// } diff --git a/zk/legacy_executor_verifier/legacy_executor_verifier.go b/zk/legacy_executor_verifier/legacy_executor_verifier.go index 3797e89d002..981fe66cc14 100644 --- a/zk/legacy_executor_verifier/legacy_executor_verifier.go +++ b/zk/legacy_executor_verifier/legacy_executor_verifier.go @@ -253,8 +253,8 @@ func (v *LegacyExecutorVerifier) VerifyAsync(request 
*VerifierRequest, blockNumb ok, executorResponse, executorErr := e.Verify(payload, request, previousBlock.Root()) - if request.GetLastBlockNumber() == 9 && counter == 0 { - ok = false + if request.GetLastBlockNumber() == 7 && counter == 0 { + // ok = false counter = 1 } diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index 3f8b97a7ed7..b52e19364cb 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -85,13 +85,11 @@ func SpawnSequencingStage( return err } - if !batchState.isL1Recovery() { - // check if we just unwound from a bad executor response and if we did just close the batch here - handled, err := doInstantCloseAfterUnwindOfBatchVerificationErrorIfNeeded(batchContext, batchState, batchCounters) - if err != nil || handled { - return err - } - } + // // check if we just unwound from a bad executor response and if we did just close the batch here + // handled, err := doInstantCloseAfterUnwindOfBatchVerificationErrorIfNeeded(batchContext, batchState, isLastBatchPariallyProcessed, batchCounters) + // if err != nil || handled { + // return err + // } if !isLastBatchPariallyProcessed { // handle case where batch wasn't closed properly diff --git a/zk/stages/stage_sequence_execute_batch.go b/zk/stages/stage_sequence_execute_batch.go index aca2c63b3f4..5218909ab60 100644 --- a/zk/stages/stage_sequence_execute_batch.go +++ b/zk/stages/stage_sequence_execute_batch.go @@ -37,38 +37,42 @@ func prepareBatchCounters(batchContext *BatchContext, batchState *BatchState, is return vm.NewBatchCounterCollector(batchContext.sdb.smt.GetDepth(), uint16(batchState.forkId), batchContext.cfg.zk.VirtualCountersSmtReduction, batchContext.cfg.zk.ShouldCountersBeUnlimited(batchState.isL1Recovery()), intermediateUsedCounters), nil } -func doInstantCloseAfterUnwindOfBatchVerificationErrorIfNeeded(batchContext *BatchContext, batchState *BatchState, batchCounters *vm.BatchCounterCollector) (bool, error) { - instantClose, err := batchContext.sdb.hermezDb.GetJustUnwound(batchState.batchNumber) - if err != nil || !instantClose { - return false, err // err here could be nil as well - } - - if err = batchContext.sdb.hermezDb.DeleteJustUnwound(batchState.batchNumber); err != nil { - return false, err - } - - // lets first check if we actually wrote any blocks in this batch - blocks, err := batchContext.sdb.hermezDb.GetL2BlockNosByBatch(batchState.batchNumber) - if err != nil { - return false, err - } - - // only close this batch down if we actually made any progress in it, otherwise - // just continue processing as normal and recreate the batch from scratch - if len(blocks) > 0 { - if err = runBatchLastSteps(batchContext, batchState.batchNumber, blocks[len(blocks)-1], batchCounters); err != nil { - return false, err - } - if err = updateSequencerProgress(batchContext.sdb.tx, blocks[len(blocks)-1], batchState.batchNumber, 1, false); err != nil { - return false, err - } - - err = batchContext.sdb.tx.Commit() - return err == nil, err - } - - return false, nil -} +// func doInstantCloseAfterUnwindOfBatchVerificationErrorIfNeeded(batchContext *BatchContext, batchState *BatchState, isLastBatchPariallyProcessed bool, batchCounters *vm.BatchCounterCollector) (bool, error) { +// // instantClose, err := batchContext.sdb.hermezDb.GetJustUnwound(blockNumber) +// // if err != nil || !instantClose { +// // return false, err // err here could be nil as well +// // } + +// // if err = batchContext.sdb.hermezDb.DeleteJustUnwound(blockNumber); err != nil { +// // 
return false, err +// // } + +// if !isLastBatchPariallyProcessed || !batchState.isLimboRecovery() { +// return false, nil +// } + +// // lets first check if we actually wrote any blocks in this batch +// blocks, err := batchContext.sdb.hermezDb.GetL2BlockNosByBatch(batchState.batchNumber) +// if err != nil { +// return false, err +// } + +// // only close this batch down if we actually made any progress in it, otherwise +// // just continue processing as normal and recreate the batch from scratch +// if len(blocks) > 0 { +// if err = runBatchLastSteps(batchContext, batchState.batchNumber, blocks[len(blocks)-1], batchCounters); err != nil { +// return false, err +// } +// if err = updateSequencerProgress(batchContext.sdb.tx, blocks[len(blocks)-1], batchState.batchNumber, 1, false); err != nil { +// return false, err +// } + +// err = batchContext.sdb.tx.Commit() +// return err == nil, err +// } + +// return false, nil +// } func doCheckForBadBatch(batchContext *BatchContext, batchState *BatchState, thisBlock uint64) (bool, error) { infoTreeIndex, err := batchState.batchL1RecoveryData.getInfoTreeIndex(batchContext.sdb) @@ -147,10 +151,10 @@ func updateStreamAndCheckRollback( return false, err } - // we are about to unwind so place the marker ready for this to happen - if err = batchContext.sdb.hermezDb.WriteJustUnwound(batchState.batchNumber); err != nil { - return false, err - } + // // we are about to unwind so place the marker ready for this to happen + // if err = batchContext.sdb.hermezDb.WriteJustUnwound(verifierBundle.Request.GetLastBlockNumber()); err != nil { + // return false, err + // } unwindTo := verifierBundle.Request.GetLastBlockNumber() - 1 diff --git a/zk/stages/stage_sequence_execute_limbo.go b/zk/stages/stage_sequence_execute_limbo.go index bfbd66f4f2e..6e46f2fbb70 100644 --- a/zk/stages/stage_sequence_execute_limbo.go +++ b/zk/stages/stage_sequence_execute_limbo.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "math" - "sort" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" @@ -88,10 +87,10 @@ func handleLimbo( // now we need to figure out the highest block number in the batch // and grab all the transaction hashes along the way to inform the // pool of hashes to avoid - blockNumbers := request.BlockNumbers - sort.Slice(blockNumbers, func(i, j int) bool { - return blockNumbers[i] < blockNumbers[j] - }) + // blockNumbers := request.BlockNumbers + // sort.Slice(blockNumbers, func(i, j int) bool { + // return blockNumbers[i] < blockNumbers[j] + // }) var lowestBlock, highestBlock *types.Block forkId, err := batchContext.sdb.hermezDb.GetForkId(request.BatchNumber) @@ -100,7 +99,8 @@ func handleLimbo( } l1InfoTreeMinTimestamps := make(map[uint64]uint64) - if _, err = legacyVerifier.GetWholeBatchStreamBytes(request.BatchNumber, batchContext.sdb.tx, blockNumbers, batchContext.sdb.hermezDb.HermezDbReader, l1InfoTreeMinTimestamps, nil); err != nil { + // if _, err = legacyVerifier.GetWholeBatchStreamBytes(request.BatchNumber, batchContext.sdb.tx, blockNumbers, batchContext.sdb.hermezDb.HermezDbReader, l1InfoTreeMinTimestamps, nil); err != nil { + if _, err = legacyVerifier.GetWholeBatchStreamBytes(request.BatchNumber, batchContext.sdb.tx, []uint64{request.GetLastBlockNumber()}, batchContext.sdb.hermezDb.HermezDbReader, l1InfoTreeMinTimestamps, nil); err != nil { return err } @@ -113,51 +113,52 @@ func handleLimbo( limboDetails.BatchNumber = request.BatchNumber limboDetails.ForkId = forkId - for _, blockNumber := range blockNumbers { - block, err := 
rawdb.ReadBlockByNumber(batchContext.sdb.tx, blockNumber) + // for _, blockNumber := range blockNumbers { + blockNumber := request.GetLastBlockNumber() + block, err := rawdb.ReadBlockByNumber(batchContext.sdb.tx, blockNumber) + if err != nil { + return err + } + highestBlock = block + if lowestBlock == nil { + // capture the first block, then we can set the bad block hash in the unwind to terminate the + // stage loop and broadcast the accumulator changes to the txpool before the next stage loop run + lowestBlock = block + } + + for i, transaction := range block.Transactions() { + var b []byte + buffer := bytes.NewBuffer(b) + err = transaction.EncodeRLP(buffer) + if err != nil { + return err + } + + signer := types.MakeSigner(batchContext.cfg.chainConfig, blockNumber) + sender, err := transaction.Sender(*signer) if err != nil { return err } - highestBlock = block - if lowestBlock == nil { - // capture the first block, then we can set the bad block hash in the unwind to terminate the - // stage loop and broadcast the accumulator changes to the txpool before the next stage loop run - lowestBlock = block + senderMapKey := sender.Hex() + + blocksForStreamBytes, transactionsToIncludeByIndex := limboStreamBytesBuilderHelper.append(senderMapKey, blockNumber, i) + streamBytes, err := legacyVerifier.GetWholeBatchStreamBytes(request.BatchNumber, batchContext.sdb.tx, blocksForStreamBytes, batchContext.sdb.hermezDb.HermezDbReader, l1InfoTreeMinTimestamps, transactionsToIncludeByIndex) + if err != nil { + return err } - for i, transaction := range block.Transactions() { - var b []byte - buffer := bytes.NewBuffer(b) - err = transaction.EncodeRLP(buffer) - if err != nil { - return err - } - - signer := types.MakeSigner(batchContext.cfg.chainConfig, blockNumber) - sender, err := transaction.Sender(*signer) - if err != nil { - return err - } - senderMapKey := sender.Hex() - - blocksForStreamBytes, transactionsToIncludeByIndex := limboStreamBytesBuilderHelper.append(senderMapKey, blockNumber, i) - streamBytes, err := legacyVerifier.GetWholeBatchStreamBytes(request.BatchNumber, batchContext.sdb.tx, blocksForStreamBytes, batchContext.sdb.hermezDb.HermezDbReader, l1InfoTreeMinTimestamps, transactionsToIncludeByIndex) - if err != nil { - return err - } - - previousTxIndex, ok := limboSendersToPreviousTxMap[senderMapKey] - if !ok { - previousTxIndex = math.MaxUint32 - } - - hash := transaction.Hash() - limboTxCount := limboDetails.AppendTransaction(buffer.Bytes(), streamBytes, hash, sender, previousTxIndex) - limboSendersToPreviousTxMap[senderMapKey] = limboTxCount - 1 - - log.Info(fmt.Sprintf("[%s] adding transaction to limbo", batchContext.s.LogPrefix()), "hash", hash) + previousTxIndex, ok := limboSendersToPreviousTxMap[senderMapKey] + if !ok { + previousTxIndex = math.MaxUint32 } + + hash := transaction.Hash() + limboTxCount := limboDetails.AppendTransaction(buffer.Bytes(), streamBytes, hash, sender, previousTxIndex) + limboSendersToPreviousTxMap[senderMapKey] = limboTxCount - 1 + + log.Info(fmt.Sprintf("[%s] adding transaction to limbo", batchContext.s.LogPrefix()), "hash", hash) } + // } limboDetails.TimestampLimit = highestBlock.Time() limboDetails.FirstBlockNumber = lowestBlock.NumberU64() diff --git a/zk/stages/stage_sequence_execute_unwind.go b/zk/stages/stage_sequence_execute_unwind.go index bced0c57dc1..3072944bebf 100644 --- a/zk/stages/stage_sequence_execute_unwind.go +++ b/zk/stages/stage_sequence_execute_unwind.go @@ -133,6 +133,15 @@ func UnwindSequenceExecutionStageDbWrites(ctx context.Context, 
u *stagedsync.Unw if err = hermezDb.TruncateForkId(fromBatchForForkIdDeletion, toBatch); err != nil { return fmt.Errorf("truncate fork id error: %v", err) } + // only seq + if err = hermezDb.TruncateIsBatchPartiallyProcessed(fromBatch, toBatch); err != nil { + return fmt.Errorf("truncate fork id error: %v", err) + } + if lastBatchToKeepBeforeFrom == fromBatch { + if err = hermezDb.WriteIsBatchPartiallyProcessed(lastBatchToKeepBeforeFrom); err != nil { + return fmt.Errorf("truncate fork id error: %v", err) + } + } return nil } From 4062ae896b9762b2175c4513717eaaf1f9d914fe Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Fri, 2 Aug 2024 06:48:36 +0000 Subject: [PATCH 16/33] code cleanup --- zk/hermez_db/db.go | 16 --------- zk/stages/stage_sequence_execute.go | 6 ---- zk/stages/stage_sequence_execute_batch.go | 42 ---------------------- zk/stages/stage_sequence_execute_limbo.go | 43 +++-------------------- 4 files changed, 5 insertions(+), 102 deletions(-) diff --git a/zk/hermez_db/db.go b/zk/hermez_db/db.go index f6b00ec2d0a..45ca6becd46 100644 --- a/zk/hermez_db/db.go +++ b/zk/hermez_db/db.go @@ -1622,19 +1622,3 @@ func (db *HermezDbReader) GetAllForkHistory() ([]uint64, []uint64, error) { return forks, batches, nil } - -// func (db *HermezDb) WriteJustUnwound(batch uint64) error { -// return db.tx.Put(JUST_UNWOUND, Uint64ToBytes(batch), []byte{1}) -// } - -// func (db *HermezDb) DeleteJustUnwound(batch uint64) error { -// return db.tx.Delete(JUST_UNWOUND, Uint64ToBytes(batch)) -// } - -// func (db *HermezDb) GetJustUnwound(batch uint64) (bool, error) { -// v, err := db.tx.GetOne(JUST_UNWOUND, Uint64ToBytes(batch)) -// if err != nil { -// return false, err -// } -// return len(v) > 0, nil -// } diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index b52e19364cb..e6a32a6329d 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -85,12 +85,6 @@ func SpawnSequencingStage( return err } - // // check if we just unwound from a bad executor response and if we did just close the batch here - // handled, err := doInstantCloseAfterUnwindOfBatchVerificationErrorIfNeeded(batchContext, batchState, isLastBatchPariallyProcessed, batchCounters) - // if err != nil || handled { - // return err - // } - if !isLastBatchPariallyProcessed { // handle case where batch wasn't closed properly // close it before starting a new one diff --git a/zk/stages/stage_sequence_execute_batch.go b/zk/stages/stage_sequence_execute_batch.go index 5218909ab60..ba96d273a1a 100644 --- a/zk/stages/stage_sequence_execute_batch.go +++ b/zk/stages/stage_sequence_execute_batch.go @@ -37,43 +37,6 @@ func prepareBatchCounters(batchContext *BatchContext, batchState *BatchState, is return vm.NewBatchCounterCollector(batchContext.sdb.smt.GetDepth(), uint16(batchState.forkId), batchContext.cfg.zk.VirtualCountersSmtReduction, batchContext.cfg.zk.ShouldCountersBeUnlimited(batchState.isL1Recovery()), intermediateUsedCounters), nil } -// func doInstantCloseAfterUnwindOfBatchVerificationErrorIfNeeded(batchContext *BatchContext, batchState *BatchState, isLastBatchPariallyProcessed bool, batchCounters *vm.BatchCounterCollector) (bool, error) { -// // instantClose, err := batchContext.sdb.hermezDb.GetJustUnwound(blockNumber) -// // if err != nil || !instantClose { -// // return false, err // err here could be nil as well -// // } - -// // if err = batchContext.sdb.hermezDb.DeleteJustUnwound(blockNumber); err != nil { -// // return false, err -// // } - -// if 
!isLastBatchPariallyProcessed || !batchState.isLimboRecovery() { -// return false, nil -// } - -// // lets first check if we actually wrote any blocks in this batch -// blocks, err := batchContext.sdb.hermezDb.GetL2BlockNosByBatch(batchState.batchNumber) -// if err != nil { -// return false, err -// } - -// // only close this batch down if we actually made any progress in it, otherwise -// // just continue processing as normal and recreate the batch from scratch -// if len(blocks) > 0 { -// if err = runBatchLastSteps(batchContext, batchState.batchNumber, blocks[len(blocks)-1], batchCounters); err != nil { -// return false, err -// } -// if err = updateSequencerProgress(batchContext.sdb.tx, blocks[len(blocks)-1], batchState.batchNumber, 1, false); err != nil { -// return false, err -// } - -// err = batchContext.sdb.tx.Commit() -// return err == nil, err -// } - -// return false, nil -// } - func doCheckForBadBatch(batchContext *BatchContext, batchState *BatchState, thisBlock uint64) (bool, error) { infoTreeIndex, err := batchState.batchL1RecoveryData.getInfoTreeIndex(batchContext.sdb) if err != nil { @@ -151,11 +114,6 @@ func updateStreamAndCheckRollback( return false, err } - // // we are about to unwind so place the marker ready for this to happen - // if err = batchContext.sdb.hermezDb.WriteJustUnwound(verifierBundle.Request.GetLastBlockNumber()); err != nil { - // return false, err - // } - unwindTo := verifierBundle.Request.GetLastBlockNumber() - 1 // for unwind we supply the block number X-1 of the block we want to remove, but supply the hash of the block diff --git a/zk/stages/stage_sequence_execute_limbo.go b/zk/stages/stage_sequence_execute_limbo.go index 6e46f2fbb70..47f5208d643 100644 --- a/zk/stages/stage_sequence_execute_limbo.go +++ b/zk/stages/stage_sequence_execute_limbo.go @@ -67,40 +67,15 @@ func (_this *limboStreamBytesBuilderHelper) add(senderMapKey string, blockNumber return limboStreamBytesGroups } -func handleLimbo( - batchContext *BatchContext, - batchState *BatchState, - verifierBundle *legacy_executor_verifier.VerifierBundle, - // pool *txpool.TxPool, - // chainConfig *chain.Config, -) error { +func handleLimbo(batchContext *BatchContext, batchState *BatchState, verifierBundle *legacy_executor_verifier.VerifierBundle) error { request := verifierBundle.Request response := verifierBundle.Response legacyVerifier := batchContext.cfg.legacyVerifier log.Info(fmt.Sprintf("[%s] identified an invalid batch, entering limbo", batchContext.s.LogPrefix()), "batch", request.BatchNumber) - // we have an invalid batch, so we need to notify the txpool that these transactions are spurious - // and need to go into limbo and then trigger a rewind. 
The rewind will put all TX back into the - // pool, but as it knows about these limbo transactions it will place them into limbo instead - // of queueing them again - - // now we need to figure out the highest block number in the batch - // and grab all the transaction hashes along the way to inform the - // pool of hashes to avoid - // blockNumbers := request.BlockNumbers - // sort.Slice(blockNumbers, func(i, j int) bool { - // return blockNumbers[i] < blockNumbers[j] - // }) - - var lowestBlock, highestBlock *types.Block - forkId, err := batchContext.sdb.hermezDb.GetForkId(request.BatchNumber) - if err != nil { - return err - } l1InfoTreeMinTimestamps := make(map[uint64]uint64) - // if _, err = legacyVerifier.GetWholeBatchStreamBytes(request.BatchNumber, batchContext.sdb.tx, blockNumbers, batchContext.sdb.hermezDb.HermezDbReader, l1InfoTreeMinTimestamps, nil); err != nil { - if _, err = legacyVerifier.GetWholeBatchStreamBytes(request.BatchNumber, batchContext.sdb.tx, []uint64{request.GetLastBlockNumber()}, batchContext.sdb.hermezDb.HermezDbReader, l1InfoTreeMinTimestamps, nil); err != nil { + if _, err := legacyVerifier.GetWholeBatchStreamBytes(request.BatchNumber, batchContext.sdb.tx, []uint64{request.GetLastBlockNumber()}, batchContext.sdb.hermezDb.HermezDbReader, l1InfoTreeMinTimestamps, nil); err != nil { return err } @@ -111,20 +86,13 @@ func handleLimbo( limboDetails.Witness = response.Witness limboDetails.L1InfoTreeMinTimestamps = l1InfoTreeMinTimestamps limboDetails.BatchNumber = request.BatchNumber - limboDetails.ForkId = forkId + limboDetails.ForkId = request.ForkId - // for _, blockNumber := range blockNumbers { blockNumber := request.GetLastBlockNumber() block, err := rawdb.ReadBlockByNumber(batchContext.sdb.tx, blockNumber) if err != nil { return err } - highestBlock = block - if lowestBlock == nil { - // capture the first block, then we can set the bad block hash in the unwind to terminate the - // stage loop and broadcast the accumulator changes to the txpool before the next stage loop run - lowestBlock = block - } for i, transaction := range block.Transactions() { var b []byte @@ -158,10 +126,9 @@ func handleLimbo( log.Info(fmt.Sprintf("[%s] adding transaction to limbo", batchContext.s.LogPrefix()), "hash", hash) } - // } - limboDetails.TimestampLimit = highestBlock.Time() - limboDetails.FirstBlockNumber = lowestBlock.NumberU64() + limboDetails.TimestampLimit = block.Time() + limboDetails.FirstBlockNumber = block.NumberU64() batchContext.cfg.txPool.ProcessLimboBatchDetails(limboDetails) return nil } From e98d8d6105924badf3f0dba2a10dcd90ef9e41a3 Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Fri, 2 Aug 2024 06:52:43 +0000 Subject: [PATCH 17/33] remove batch instant close --- cmd/utils/flags.go | 5 ----- eth/ethconfig/config_zkevm.go | 1 - turbo/cli/default_flags.go | 1 - turbo/cli/flags_zkevm.go | 9 +-------- zk/stages/stage_sequence_execute.go | 13 +------------ zk/stages/stage_sequence_execute_utils.go | 11 +++++++++++ 6 files changed, 13 insertions(+), 27 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 4ca7d6e693b..364958b87ba 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -471,11 +471,6 @@ var ( Usage: "Batch seal time. Defaults to 12s", Value: "12s", } - SequencerNonEmptyBatchSealTime = cli.StringFlag{ - Name: "zkevm.sequencer-non-empty-batch-seal-time", - Usage: "Batch seal time. 
Defaults to 3s", - Value: "3s", - } ExecutorUrls = cli.StringFlag{ Name: "zkevm.executor-urls", Usage: "A comma separated list of grpc addresses that host executors", diff --git a/eth/ethconfig/config_zkevm.go b/eth/ethconfig/config_zkevm.go index 4941420e9fc..8cd3b534bec 100644 --- a/eth/ethconfig/config_zkevm.go +++ b/eth/ethconfig/config_zkevm.go @@ -30,7 +30,6 @@ type Zk struct { DatastreamVersion int SequencerBlockSealTime time.Duration SequencerBatchSealTime time.Duration - SequencerNonEmptyBatchSealTime time.Duration ExecutorUrls []string ExecutorStrictMode bool ExecutorRequestTimeout time.Duration diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index 12b8595369a..0d57fc0a6ff 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -190,7 +190,6 @@ var DefaultFlags = []cli.Flag{ &utils.SmtRegenerateInMemory, &utils.SequencerBlockSealTime, &utils.SequencerBatchSealTime, - &utils.SequencerNonEmptyBatchSealTime, &utils.ExecutorUrls, &utils.ExecutorStrictMode, &utils.ExecutorRequestTimeout, diff --git a/turbo/cli/flags_zkevm.go b/turbo/cli/flags_zkevm.go index 5a29a1f6017..f3114700fac 100644 --- a/turbo/cli/flags_zkevm.go +++ b/turbo/cli/flags_zkevm.go @@ -12,8 +12,8 @@ import ( "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/zk/sequencer" - "github.com/urfave/cli/v2" utils2 "github.com/ledgerwatch/erigon/zk/utils" + "github.com/urfave/cli/v2" ) func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { @@ -76,12 +76,6 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { panic(fmt.Sprintf("could not parse sequencer batch seal time timeout value %s", sequencerBatchSealTimeVal)) } - sequencerNonEmptyBatchSealTimeVal := ctx.String(utils.SequencerNonEmptyBatchSealTime.Name) - sequencerNonEmptyBatchSealTime, err := time.ParseDuration(sequencerNonEmptyBatchSealTimeVal) - if err != nil { - panic(fmt.Sprintf("could not parse sequencer batch seal time timeout value %s", sequencerNonEmptyBatchSealTimeVal)) - } - effectiveGasPriceForEthTransferVal := ctx.Float64(utils.EffectiveGasPriceForEthTransfer.Name) effectiveGasPriceForErc20TransferVal := ctx.Float64(utils.EffectiveGasPriceForErc20Transfer.Name) effectiveGasPriceForContractInvocationVal := ctx.Float64(utils.EffectiveGasPriceForContractInvocation.Name) @@ -126,7 +120,6 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { SmtRegenerateInMemory: ctx.Bool(utils.SmtRegenerateInMemory.Name), SequencerBlockSealTime: sequencerBlockSealTime, SequencerBatchSealTime: sequencerBatchSealTime, - SequencerNonEmptyBatchSealTime: sequencerNonEmptyBatchSealTime, ExecutorUrls: strings.Split(ctx.String(utils.ExecutorUrls.Name), ","), ExecutorStrictMode: ctx.Bool(utils.ExecutorStrictMode.Name), ExecutorRequestTimeout: ctx.Duration(utils.ExecutorRequestTimeout.Name), diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index e6a32a6329d..e2b27061eb0 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -118,13 +118,9 @@ func SpawnSequencingStage( } } - batchTicker := time.NewTicker(cfg.zk.SequencerBatchSealTime) + batchTicker, logTicker, blockTicker := prepareTickers(batchContext.cfg) defer batchTicker.Stop() - nonEmptyBatchTimer := time.NewTicker(cfg.zk.SequencerNonEmptyBatchSealTime) - defer nonEmptyBatchTimer.Stop() - logTicker := time.NewTicker(10 * time.Second) defer logTicker.Stop() - blockTicker := 
time.NewTicker(cfg.zk.SequencerBlockSealTime) defer blockTicker.Stop() log.Info(fmt.Sprintf("[%s] Starting batch %d...", logPrefix, batchState.batchNumber)) @@ -205,11 +201,6 @@ func SpawnSequencingStage( runLoopBlocks = false break LOOP_TRANSACTIONS } - case <-nonEmptyBatchTimer.C: - if !batchState.isAnyRecovery() && batchState.hasAnyTransactionsInThisBatch { - runLoopBlocks = false - break LOOP_TRANSACTIONS - } default: if batchState.isLimboRecovery() { batchState.blockState.transactionsForInclusion, err = getLimboTransaction(ctx, cfg, batchState.limboRecoveryData.limboTxHash) @@ -288,8 +279,6 @@ func SpawnSequencingStage( if err == nil { blockDataSizeChecker = &backupDataSizeChecker batchState.onAddedTransaction(transaction, receipt, execResult, effectiveGas) - - nonEmptyBatchTimer.Reset(cfg.zk.SequencerNonEmptyBatchSealTime) } } diff --git a/zk/stages/stage_sequence_execute_utils.go b/zk/stages/stage_sequence_execute_utils.go index 124d92d2f81..01c23e4ce8f 100644 --- a/zk/stages/stage_sequence_execute_utils.go +++ b/zk/stages/stage_sequence_execute_utils.go @@ -287,6 +287,17 @@ func prepareL1AndInfoTreeRelatedStuff(sdb *stageDb, batchState *BatchState, prop return } +func prepareTickers(cfg *SequenceBlockCfg) (*time.Ticker, *time.Ticker, *time.Ticker) { + batchTicker := time.NewTicker(cfg.zk.SequencerBatchSealTime) + defer batchTicker.Stop() + logTicker := time.NewTicker(10 * time.Second) + defer logTicker.Stop() + blockTicker := time.NewTicker(cfg.zk.SequencerBlockSealTime) + defer blockTicker.Stop() + + return batchTicker, logTicker, blockTicker +} + // will be called at the start of every new block created within a batch to figure out if there is a new GER // we can use or not. In the special case that this is the first block we just return 0 as we need to use the // 0 index first before we can use 1+ From d37d92cd1a84d751400a82e5291f1557297933aa Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Fri, 2 Aug 2024 07:24:14 +0000 Subject: [PATCH 18/33] remove defer --- zk/stages/stage_sequence_execute_utils.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/zk/stages/stage_sequence_execute_utils.go b/zk/stages/stage_sequence_execute_utils.go index 01c23e4ce8f..56840378e0d 100644 --- a/zk/stages/stage_sequence_execute_utils.go +++ b/zk/stages/stage_sequence_execute_utils.go @@ -289,11 +289,8 @@ func prepareL1AndInfoTreeRelatedStuff(sdb *stageDb, batchState *BatchState, prop func prepareTickers(cfg *SequenceBlockCfg) (*time.Ticker, *time.Ticker, *time.Ticker) { batchTicker := time.NewTicker(cfg.zk.SequencerBatchSealTime) - defer batchTicker.Stop() logTicker := time.NewTicker(10 * time.Second) - defer logTicker.Stop() blockTicker := time.NewTicker(cfg.zk.SequencerBlockSealTime) - defer blockTicker.Stop() return batchTicker, logTicker, blockTicker } From 7ee3982c60023db44419d0384bd74989d9851bfb Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Fri, 2 Aug 2024 08:56:02 +0000 Subject: [PATCH 19/33] fix limbo witness generation --- .../legacy_executor_verifier.go | 12 +++++++++--- zk/stages/stage_sequence_execute_limbo.go | 2 -- zk/txpool/pool_zk_limbo.go | 11 ----------- zk/txpool/pool_zk_limbo_processor.go | 5 +++-- 4 files changed, 12 insertions(+), 18 deletions(-) diff --git a/zk/legacy_executor_verifier/legacy_executor_verifier.go b/zk/legacy_executor_verifier/legacy_executor_verifier.go index 981fe66cc14..c454df1e50c 100644 --- a/zk/legacy_executor_verifier/legacy_executor_verifier.go +++ b/zk/legacy_executor_verifier/legacy_executor_verifier.go @@ -141,7 +141,13 @@ func (v 
*LegacyExecutorVerifier) appendPromise(promise *Promise[*VerifierBundle] v.promises = append(v.promises, promise) } -func (v *LegacyExecutorVerifier) VerifySync(tx kv.Tx, request *VerifierRequest, witness, streamBytes []byte, timestampLimit, firstBlockNumber uint64, l1InfoTreeMinTimestamps map[uint64]uint64) error { +func (v *LegacyExecutorVerifier) VerifySync(ctx context.Context, tx kv.Tx, request *VerifierRequest, streamBytes []byte, timestampLimit, firstBlockNumber uint64, l1InfoTreeMinTimestamps map[uint64]uint64) error { + blockNumbers := []uint64{firstBlockNumber} + witness, err := v.witnessGenerator.GetWitnessByBlockRange(tx, ctx, blockNumbers[0], blockNumbers[len(blockNumbers)-1], false, v.cfg.WitnessFull) + if err != nil { + return err + } + oldAccInputHash := common.HexToHash("0x0") payload := &Payload{ Witness: witness, @@ -253,8 +259,8 @@ func (v *LegacyExecutorVerifier) VerifyAsync(request *VerifierRequest, blockNumb ok, executorResponse, executorErr := e.Verify(payload, request, previousBlock.Root()) - if request.GetLastBlockNumber() == 7 && counter == 0 { - // ok = false + if request.GetLastBlockNumber() == 8 && counter == 0 { + ok = false counter = 1 } diff --git a/zk/stages/stage_sequence_execute_limbo.go b/zk/stages/stage_sequence_execute_limbo.go index 47f5208d643..6e64afc8c8c 100644 --- a/zk/stages/stage_sequence_execute_limbo.go +++ b/zk/stages/stage_sequence_execute_limbo.go @@ -69,7 +69,6 @@ func (_this *limboStreamBytesBuilderHelper) add(senderMapKey string, blockNumber func handleLimbo(batchContext *BatchContext, batchState *BatchState, verifierBundle *legacy_executor_verifier.VerifierBundle) error { request := verifierBundle.Request - response := verifierBundle.Response legacyVerifier := batchContext.cfg.legacyVerifier log.Info(fmt.Sprintf("[%s] identified an invalid batch, entering limbo", batchContext.s.LogPrefix()), "batch", request.BatchNumber) @@ -83,7 +82,6 @@ func handleLimbo(batchContext *BatchContext, batchState *BatchState, verifierBun limboStreamBytesBuilderHelper := newLimboStreamBytesBuilderHelper() limboDetails := txpool.NewLimboBatchDetails() - limboDetails.Witness = response.Witness limboDetails.L1InfoTreeMinTimestamps = l1InfoTreeMinTimestamps limboDetails.BatchNumber = request.BatchNumber limboDetails.ForkId = request.ForkId diff --git a/zk/txpool/pool_zk_limbo.go b/zk/txpool/pool_zk_limbo.go index 061accbb009..fb4cef08b6a 100644 --- a/zk/txpool/pool_zk_limbo.go +++ b/zk/txpool/pool_zk_limbo.go @@ -22,7 +22,6 @@ const ( DbKeyBatchesPrefix = uint8(3) DbKeyAwaitingBlockHandlingPrefix = uint8(4) - DbKeyBatchesWitnessPrefix = uint8(5) DbKeyBatchesL1InfoTreePrefix = uint8(6) DbKeyBatchesTimestampLimitPrefix = uint8(7) DbKeyBatchesFirstBlockNumberPrefix = uint8(8) @@ -146,7 +145,6 @@ func (_this *Limbo) getLimboTxDetailsByTxHash(txHash *common.Hash) (*LimboBatchD } type LimboBatchDetails struct { - Witness []byte L1InfoTreeMinTimestamps map[uint64]uint64 TimestampLimit uint64 FirstBlockNumber uint64 @@ -396,13 +394,6 @@ func (p *TxPool) flushLockedLimbo(tx kv.RwTx) (err error) { for i, limboBatch := range p.limbo.limboBatches { binary.LittleEndian.PutUint32(keyBytes[1:5], uint32(i)) - // Witness - keyBytes[5] = DbKeyBatchesWitnessPrefix - binary.LittleEndian.PutUint64(keyBytes[6:14], 0) - if err := tx.Put(TablePoolLimbo, keyBytes, limboBatch.Witness); err != nil { - return err - } - // L1InfoTreeMinTimestamps keyBytes[5] = DbKeyBatchesL1InfoTreePrefix for k, v := range limboBatch.L1InfoTreeMinTimestamps { @@ -542,8 +533,6 @@ func (p *TxPool) 
fromDBLimbo(ctx context.Context, tx kv.Tx, cacheView kvcache.Ca p.limbo.resizeBatches(int(batchesI) + 1) switch k[5] { - case DbKeyBatchesWitnessPrefix: - p.limbo.limboBatches[batchesI].Witness = v case DbKeyBatchesL1InfoTreePrefix: p.limbo.limboBatches[batchesI].L1InfoTreeMinTimestamps[batchesJ] = binary.LittleEndian.Uint64(v) case DbKeyBatchesTimestampLimitPrefix: diff --git a/zk/txpool/pool_zk_limbo_processor.go b/zk/txpool/pool_zk_limbo_processor.go index c74e47aefae..989e42b9e00 100644 --- a/zk/txpool/pool_zk_limbo_processor.go +++ b/zk/txpool/pool_zk_limbo_processor.go @@ -83,12 +83,13 @@ func (_this *LimboSubPoolProcessor) run() { unlimitedCounters[k] = math.MaxInt32 } + blockNumbers := []uint64{1} // let's assume that there is a just single block number 1, because the number itself does not matter invalidTxs := []*string{} for _, limboBatch := range limboBatchDetails { for _, limboTx := range limboBatch.Transactions { - request := legacy_executor_verifier.NewVerifierRequest(limboBatch.ForkId, limboBatch.BatchNumber, []uint64{1}, limboTx.Root, unlimitedCounters) // let's assume that there is a just single block number 1 - err := _this.verifier.VerifySync(tx, request, limboBatch.Witness, limboTx.StreamBytes, limboBatch.TimestampLimit, limboBatch.FirstBlockNumber, limboBatch.L1InfoTreeMinTimestamps) + request := legacy_executor_verifier.NewVerifierRequest(limboBatch.ForkId, limboBatch.BatchNumber, blockNumbers, limboTx.Root, unlimitedCounters) + err := _this.verifier.VerifySync(ctx, tx, request, limboTx.StreamBytes, limboBatch.TimestampLimit, limboBatch.FirstBlockNumber, limboBatch.L1InfoTreeMinTimestamps) if err != nil { idHash := hexutils.BytesToHex(limboTx.Hash[:]) invalidTxs = append(invalidTxs, &idHash) From fb3d16788bb1176e3f9319d1e96ace2a39a12ca4 Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Fri, 2 Aug 2024 09:01:21 +0000 Subject: [PATCH 20/33] restoring blockEnd in DS --- zk/datastream/server/data_stream_server.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/zk/datastream/server/data_stream_server.go b/zk/datastream/server/data_stream_server.go index 0d495e60bc1..c734dbbd0c9 100644 --- a/zk/datastream/server/data_stream_server.go +++ b/zk/datastream/server/data_stream_server.go @@ -227,8 +227,7 @@ func createFullBlockStreamEntriesProto( batchNumber uint64, l1InfoTreeMinTimestamps map[uint64]uint64, ) (*DataStreamEntries, error) { - // entries := NewDataStreamEntries(len(filteredTransactions) + 3) // block bookmark + block + block end - entries := NewDataStreamEntries(len(filteredTransactions) + 2) // block bookmark + block + block end + entries := NewDataStreamEntries(len(filteredTransactions) + 3) // block bookmark + block + block end blockNum := block.NumberU64() // L2 BLOCK BOOKMARK entries.Add(newL2BlockBookmarkEntryProto(blockNum)) @@ -275,7 +274,7 @@ func createFullBlockStreamEntriesProto( entries.Add(transaction) } - // entries.Add(newL2BlockEndProto(blockNum)) + entries.Add(newL2BlockEndProto(blockNum)) return entries, nil } From 01d859346ed2d65e7016ae04f85162584e379cfa Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Fri, 2 Aug 2024 10:01:26 +0000 Subject: [PATCH 21/33] update witness generation in case of limbo --- .../legacy_executor_verifier.go | 14 ++++---------- zk/stages/stage_sequence_execute_limbo.go | 8 +++++++- zk/txpool/pool_zk_limbo.go | 11 +++++++++++ zk/txpool/pool_zk_limbo_processor.go | 2 +- 4 files changed, 23 insertions(+), 12 deletions(-) diff --git a/zk/legacy_executor_verifier/legacy_executor_verifier.go 
b/zk/legacy_executor_verifier/legacy_executor_verifier.go index c454df1e50c..66d5041b5ad 100644 --- a/zk/legacy_executor_verifier/legacy_executor_verifier.go +++ b/zk/legacy_executor_verifier/legacy_executor_verifier.go @@ -87,7 +87,7 @@ type LegacyExecutorVerifier struct { cancelAllVerifications atomic.Bool streamServer *server.DataStreamServer - witnessGenerator WitnessGenerator + WitnessGenerator WitnessGenerator promises []*Promise[*VerifierBundle] mtxPromises *sync.Mutex @@ -109,7 +109,7 @@ func NewLegacyExecutorVerifier( executorNumber: 0, cancelAllVerifications: atomic.Bool{}, streamServer: streamServer, - witnessGenerator: witnessGenerator, + WitnessGenerator: witnessGenerator, promises: make([]*Promise[*VerifierBundle], 0), mtxPromises: &sync.Mutex{}, } @@ -141,13 +141,7 @@ func (v *LegacyExecutorVerifier) appendPromise(promise *Promise[*VerifierBundle] v.promises = append(v.promises, promise) } -func (v *LegacyExecutorVerifier) VerifySync(ctx context.Context, tx kv.Tx, request *VerifierRequest, streamBytes []byte, timestampLimit, firstBlockNumber uint64, l1InfoTreeMinTimestamps map[uint64]uint64) error { - blockNumbers := []uint64{firstBlockNumber} - witness, err := v.witnessGenerator.GetWitnessByBlockRange(tx, ctx, blockNumbers[0], blockNumbers[len(blockNumbers)-1], false, v.cfg.WitnessFull) - if err != nil { - return err - } - +func (v *LegacyExecutorVerifier) VerifySync(tx kv.Tx, request *VerifierRequest, witness, streamBytes []byte, timestampLimit, firstBlockNumber uint64, l1InfoTreeMinTimestamps map[uint64]uint64) error { oldAccInputHash := common.HexToHash("0x0") payload := &Payload{ Witness: witness, @@ -221,7 +215,7 @@ func (v *LegacyExecutorVerifier) VerifyAsync(request *VerifierRequest, blockNumb return verifierBundle, err } - witness, err := v.witnessGenerator.GetWitnessByBlockRange(tx, innerCtx, blockNumbers[0], blockNumbers[len(blockNumbers)-1], false, v.cfg.WitnessFull) + witness, err := v.WitnessGenerator.GetWitnessByBlockRange(tx, innerCtx, blockNumbers[0], blockNumbers[len(blockNumbers)-1], false, v.cfg.WitnessFull) if err != nil { return verifierBundle, err } diff --git a/zk/stages/stage_sequence_execute_limbo.go b/zk/stages/stage_sequence_execute_limbo.go index 6e64afc8c8c..64a9b6ae6e1 100644 --- a/zk/stages/stage_sequence_execute_limbo.go +++ b/zk/stages/stage_sequence_execute_limbo.go @@ -78,15 +78,21 @@ func handleLimbo(batchContext *BatchContext, batchState *BatchState, verifierBun return err } + blockNumber := request.GetLastBlockNumber() + witness, err := legacyVerifier.WitnessGenerator.GetWitnessByBlockRange(batchContext.sdb.tx, batchContext.ctx, blockNumber, blockNumber, false, batchContext.cfg.zk.WitnessFull) + if err != nil { + return err + } + limboSendersToPreviousTxMap := make(map[string]uint32) limboStreamBytesBuilderHelper := newLimboStreamBytesBuilderHelper() limboDetails := txpool.NewLimboBatchDetails() + limboDetails.Witness = witness limboDetails.L1InfoTreeMinTimestamps = l1InfoTreeMinTimestamps limboDetails.BatchNumber = request.BatchNumber limboDetails.ForkId = request.ForkId - blockNumber := request.GetLastBlockNumber() block, err := rawdb.ReadBlockByNumber(batchContext.sdb.tx, blockNumber) if err != nil { return err diff --git a/zk/txpool/pool_zk_limbo.go b/zk/txpool/pool_zk_limbo.go index fb4cef08b6a..061accbb009 100644 --- a/zk/txpool/pool_zk_limbo.go +++ b/zk/txpool/pool_zk_limbo.go @@ -22,6 +22,7 @@ const ( DbKeyBatchesPrefix = uint8(3) DbKeyAwaitingBlockHandlingPrefix = uint8(4) + DbKeyBatchesWitnessPrefix = uint8(5) 
DbKeyBatchesL1InfoTreePrefix = uint8(6) DbKeyBatchesTimestampLimitPrefix = uint8(7) DbKeyBatchesFirstBlockNumberPrefix = uint8(8) @@ -145,6 +146,7 @@ func (_this *Limbo) getLimboTxDetailsByTxHash(txHash *common.Hash) (*LimboBatchD } type LimboBatchDetails struct { + Witness []byte L1InfoTreeMinTimestamps map[uint64]uint64 TimestampLimit uint64 FirstBlockNumber uint64 @@ -394,6 +396,13 @@ func (p *TxPool) flushLockedLimbo(tx kv.RwTx) (err error) { for i, limboBatch := range p.limbo.limboBatches { binary.LittleEndian.PutUint32(keyBytes[1:5], uint32(i)) + // Witness + keyBytes[5] = DbKeyBatchesWitnessPrefix + binary.LittleEndian.PutUint64(keyBytes[6:14], 0) + if err := tx.Put(TablePoolLimbo, keyBytes, limboBatch.Witness); err != nil { + return err + } + // L1InfoTreeMinTimestamps keyBytes[5] = DbKeyBatchesL1InfoTreePrefix for k, v := range limboBatch.L1InfoTreeMinTimestamps { @@ -533,6 +542,8 @@ func (p *TxPool) fromDBLimbo(ctx context.Context, tx kv.Tx, cacheView kvcache.Ca p.limbo.resizeBatches(int(batchesI) + 1) switch k[5] { + case DbKeyBatchesWitnessPrefix: + p.limbo.limboBatches[batchesI].Witness = v case DbKeyBatchesL1InfoTreePrefix: p.limbo.limboBatches[batchesI].L1InfoTreeMinTimestamps[batchesJ] = binary.LittleEndian.Uint64(v) case DbKeyBatchesTimestampLimitPrefix: diff --git a/zk/txpool/pool_zk_limbo_processor.go b/zk/txpool/pool_zk_limbo_processor.go index 989e42b9e00..ef46e18e5b1 100644 --- a/zk/txpool/pool_zk_limbo_processor.go +++ b/zk/txpool/pool_zk_limbo_processor.go @@ -89,7 +89,7 @@ func (_this *LimboSubPoolProcessor) run() { for _, limboBatch := range limboBatchDetails { for _, limboTx := range limboBatch.Transactions { request := legacy_executor_verifier.NewVerifierRequest(limboBatch.ForkId, limboBatch.BatchNumber, blockNumbers, limboTx.Root, unlimitedCounters) - err := _this.verifier.VerifySync(ctx, tx, request, limboTx.StreamBytes, limboBatch.TimestampLimit, limboBatch.FirstBlockNumber, limboBatch.L1InfoTreeMinTimestamps) + err := _this.verifier.VerifySync(tx, request, limboBatch.Witness, limboTx.StreamBytes, limboBatch.TimestampLimit, limboBatch.FirstBlockNumber, limboBatch.L1InfoTreeMinTimestamps) if err != nil { idHash := hexutils.BytesToHex(limboTx.Hash[:]) invalidTxs = append(invalidTxs, &idHash) From fb9ae1e8316908d65b060304b2733bf3f5aaba69 Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Fri, 2 Aug 2024 11:43:47 +0000 Subject: [PATCH 22/33] remove hardcoded forkid --- zk/stages/stage_sequence_execute_utils.go | 1 - 1 file changed, 1 deletion(-) diff --git a/zk/stages/stage_sequence_execute_utils.go b/zk/stages/stage_sequence_execute_utils.go index ebb1eb42eda..6cda5066d57 100644 --- a/zk/stages/stage_sequence_execute_utils.go +++ b/zk/stages/stage_sequence_execute_utils.go @@ -193,7 +193,6 @@ func prepareForkId(lastBatch, executionAt uint64, hermezDb forkDb) (uint64, erro } } - latest = 11 if latest == 0 { return 0, fmt.Errorf("could not find a suitable fork for batch %v, cannot start sequencer, check contract configuration", lastBatch+1) } From 1b8d293d6f836cb9017a1307e12b91d1be77cc90 Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Fri, 2 Aug 2024 12:33:07 +0000 Subject: [PATCH 23/33] create our buckets into txpool db --- eth/backend.go | 4 ---- zk/txpool/pool_zk_limbo.go | 8 ++++---- zk/txpool/txpooluitl/all_components.go | 8 +++++++- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/eth/backend.go b/eth/backend.go index 48354b99df1..9245df2bcf5 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -965,10 +965,6 @@ func createBuckets(tx kv.RwTx) 
error { return err } - if err := txpool.CreateTxPoolBuckets(tx); err != nil { - return err - } - return nil } diff --git a/zk/txpool/pool_zk_limbo.go b/zk/txpool/pool_zk_limbo.go index 9bb6f57719b..f79e827fe3c 100644 --- a/zk/txpool/pool_zk_limbo.go +++ b/zk/txpool/pool_zk_limbo.go @@ -499,15 +499,15 @@ func (p *TxPool) fromDBLimbo(ctx context.Context, tx kv.Tx, cacheView kvcache.Ca return nil } + p.limbo.limboSlots = &types.TxSlots{} + parseCtx := types.NewTxParseContext(p.chainID) + parseCtx.WithSender(false) + it, err := tx.Range(TablePoolLimbo, nil, nil) if err != nil { return err } - p.limbo.limboSlots = &types.TxSlots{} - parseCtx := types.NewTxParseContext(p.chainID) - parseCtx.WithSender(false) - for it.HasNext() { k, v, err := it.Next() if err != nil { diff --git a/zk/txpool/txpooluitl/all_components.go b/zk/txpool/txpooluitl/all_components.go index e7fba40e4e4..f98de551529 100644 --- a/zk/txpool/txpooluitl/all_components.go +++ b/zk/txpool/txpooluitl/all_components.go @@ -22,8 +22,8 @@ import ( "time" "github.com/c2h5oh/datasize" - "github.com/holiman/uint256" "github.com/gateway-fm/cdk-erigon-lib/txpool/txpoolcfg" + "github.com/holiman/uint256" "github.com/ledgerwatch/log/v3" mdbx2 "github.com/torquem-ch/mdbx-go/mdbx" @@ -134,6 +134,12 @@ func AllComponents(ctx context.Context, cfg txpoolcfg.Config, ethCfg *ethconfig. return nil, nil, nil, nil, nil, err } + if err = txPoolDB.Update(ctx, func(tx kv.RwTx) error { + return txpool.CreateTxPoolBuckets(tx) + }); err != nil { + return nil, nil, nil, nil, nil, err + } + fetch := txpool.NewFetch(ctx, sentryClients, txPool, stateChangesClient, chainDB, txPoolDB, *chainID) //fetch.ConnectCore() //fetch.ConnectSentries() From 5239d768356d4546a49ae94ee7f6296eea62db2c Mon Sep 17 00:00:00 2001 From: Valentin Staykov Date: Fri, 2 Aug 2024 13:47:12 +0000 Subject: [PATCH 24/33] optimizing datastream writes --- eth/backend.go | 24 +++++------ zk/datastream/server/data_stream_server.go | 44 +++++++++++++++------ zk/datastream/server/datastream_populate.go | 27 +++++++------ zk/stages/stage_sequence_execute_utils.go | 6 --- 4 files changed, 59 insertions(+), 42 deletions(-) diff --git a/eth/backend.go b/eth/backend.go index 9245df2bcf5..e11a15fc582 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -635,7 +635,19 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { }() + tx, err := backend.chainDB.BeginRw(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + if !config.DeprecatedTxPool.Disable { + // we need to start the pool before stage loop itself + // the pool holds the info about how execution stage should work - as regular or as limbo recovery + if err := backend.txPool2.StartIfNotStarted(ctx, backend.txPool2DB, tx); err != nil { + return nil, err + } + backend.txPool2Fetch.ConnectCore() backend.txPool2Fetch.ConnectSentries() var newTxsBroadcaster *txpool2.NewSlotsStreams @@ -696,12 +708,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { backend.ethBackendRPC, backend.miningRPC, backend.stateChangesClient = ethBackendRPC, miningRPC, stateDiffClient - tx, err := backend.chainDB.BeginRw(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - // create buckets if err := createBuckets(tx); err != nil { return nil, err @@ -864,12 +870,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { // we switch context from being an RPC node to a sequencer backend.txPool2.ForceUpdateLatestBlock(executionProgress) - // we need to start the pool before 
stage loop itself // the pool holds the info about how execution stage should work - as regular or as limbo recovery if err := backend.txPool2.StartIfNotStarted(ctx, backend.txPool2DB, tx); err != nil { return nil, err } + backend.txPool2Fetch.ConnectCore() backend.txPool2Fetch.ConnectSentries() var newTxsBroadcaster *txpool2.NewSlotsStreams @@ -696,12 +708,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { backend.ethBackendRPC, backend.miningRPC, backend.stateChangesClient = ethBackendRPC, miningRPC, stateDiffClient - tx, err := backend.chainDB.BeginRw(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - // create buckets if err := createBuckets(tx); err != nil { return nil, err @@ -864,12 +870,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { // we switch context from being an RPC node to a sequencer backend.txPool2.ForceUpdateLatestBlock(executionProgress) - // we need to start the pool before stage loop itself - // the pool holds the info about how execution stage should work - as regular or as limbo recovery - if err := backend.txPool2.StartIfNotStarted(ctx, backend.txPool2DB, tx); err != nil { - return nil, err - } - l1BlockSyncer := syncer.NewL1Syncer( ctx, ethermanClients, diff --git a/zk/datastream/server/data_stream_server.go b/zk/datastream/server/data_stream_server.go index c734dbbd0c9..0d3fd1129d5 100644 --- a/zk/datastream/server/data_stream_server.go +++ b/zk/datastream/server/data_stream_server.go @@ -43,6 +43,7 @@ type DataStreamServer struct { stream *datastreamer.StreamServer chainId uint64 highestBlockWritten, + highestClosedBatchWritten, highestBatchWritten *uint64 } @@ -118,7 +119,33 @@ func NewDataStreamEntries(size int) *DataStreamEntries { } } -func (srv *DataStreamServer) CommitEntriesToStreamProto(entries []DataStreamEntryProto, latestBlockNum, latestBatchNum *uint64) error { +func (srv *DataStreamServer) commitAtomicOp(latestBlockNum, latestBatchNum, latestClosedBatch *uint64) error { + if err := srv.stream.CommitAtomicOp(); err != nil { + return err + } + + // copy the values in case they are changed outside the function + // pointers are used to make it easier to check whether we should read them from the DS or not + // since 0 is a valid number, we can't use it + if latestBlockNum != nil { + a := *latestBlockNum + srv.highestBlockWritten = &a + } + + if latestBatchNum != nil { + a := *latestBatchNum + srv.highestBatchWritten = &a + } + + if latestClosedBatch != nil { + a := *latestClosedBatch + srv.highestClosedBatchWritten = &a + } + + return nil +} + +func (srv *DataStreamServer) commitEntriesToStreamProto(entries []DataStreamEntryProto) error { for _, entry := range entries { entryType := entry.Type() @@ -137,16 +164,6 @@ func (srv *DataStreamServer) CommitEntriesToStreamProto(entries []DataStreamEntr } } } - - if latestBlockNum != nil { - a := *latestBlockNum - srv.highestBlockWritten = &a - } - - if latestBatchNum != nil { - a := *latestBatchNum - srv.highestBatchWritten = &a - } return nil } @@ -473,6 +490,9 @@ func (srv *DataStreamServer) GetHighestBatchNumber() (uint64, error) { } func (srv *DataStreamServer) GetHighestClosedBatch() (uint64, error) { + if srv.highestClosedBatchWritten != nil { + return *srv.highestClosedBatchWritten, nil + } entry, found, err := srv.getLastEntryOfType(datastreamer.EntryType(types.EntryTypeBatchEnd)) if err != nil { return 0, err @@ -486,6 +506,8 @@ func (srv *DataStreamServer) GetHighestClosedBatch() (uint64, error) { return 0, err } + srv.highestClosedBatchWritten = &batch.Number + return batch.Number, nil } diff --git a/zk/datastream/server/datastream_populate.go b/zk/datastream/server/datastream_populate.go index 839cb9029c2..414e8826e96 100644 --- a/zk/datastream/server/datastream_populate.go +++ b/zk/datastream/server/datastream_populate.go @@ -75,11 +75,11 @@ func (srv *DataStreamServer) WriteWholeBatchToStream( return err } - if err = srv.CommitEntriesToStreamProto(entries.Entries(), &toBlockNum, &batchNum); err != nil { + if err = srv.commitEntriesToStreamProto(entries.Entries()); err != nil { return err } - if err = srv.stream.CommitAtomicOp(); err != nil { + if err = srv.commitAtomicOp(&toBlockNum, &batchNum, &batchNum); err != nil { return err } @@ -188,18 +188,18 @@ LOOP: // basically commit once 80% of the entries array is filled if len(entries) >= commitEntryCountLimit { log.Info(fmt.Sprintf("[%s] Commit count reached, committing entries", logPrefix), "block", currentBlockNumber) - if err =
srv.CommitEntriesToStreamProto(entries, &currentBlockNumber, &batchNum); err != nil { + if err = srv.commitEntriesToStreamProto(entries); err != nil { return err } entries = make([]DataStreamEntryProto, 0, insertEntryCount) } } - if err = srv.CommitEntriesToStreamProto(entries, &to, &latestbatchNum); err != nil { + if err = srv.commitEntriesToStreamProto(entries); err != nil { return err } - if err = srv.stream.CommitAtomicOp(); err != nil { + if err = srv.commitAtomicOp(&to, &batchNum, &latestbatchNum); err != nil { return err } @@ -257,16 +257,16 @@ func (srv *DataStreamServer) WriteBlockWithBatchStartToStream( } if batchStartEntries != nil { - if err = srv.CommitEntriesToStreamProto(batchStartEntries.Entries(), &blockNum, &batchNum); err != nil { + if err = srv.commitEntriesToStreamProto(batchStartEntries.Entries()); err != nil { return err } } - if err = srv.CommitEntriesToStreamProto(blockEntries.Entries(), &blockNum, &batchNum); err != nil { + if err = srv.commitEntriesToStreamProto(blockEntries.Entries()); err != nil { return err } - if err = srv.stream.CommitAtomicOp(); err != nil { + if err = srv.commitAtomicOp(&blockNum, &batchNum, nil); err != nil { return err } @@ -341,11 +341,12 @@ func (srv *DataStreamServer) WriteBatchEnd( return err } - if err = srv.CommitEntriesToStreamProto(batchEndEntries, nil, nil); err != nil { + if err = srv.commitEntriesToStreamProto(batchEndEntries); err != nil { return err } - if err = srv.stream.CommitAtomicOp(); err != nil { + // we write only the batch end, so don't update the latest block and batch + if err = srv.commitAtomicOp(nil, nil, &batchNumber); err != nil { return err } @@ -385,12 +386,12 @@ func (srv *DataStreamServer) WriteGenesisToStream( } batchEnd := newBatchEndProto(ler, genesis.Root(), 0) - blockNum := uint64(0) - if err = srv.CommitEntriesToStreamProto([]DataStreamEntryProto{batchBookmark, batchStart, l2BlockBookmark, l2Block, batchEnd}, &blockNum, &batchNo); err != nil { + if err = srv.commitEntriesToStreamProto([]DataStreamEntryProto{batchBookmark, batchStart, l2BlockBookmark, l2Block, batchEnd}); err != nil { return err } - err = srv.stream.CommitAtomicOp() + // should be okay to write just zeroes here, but this happens only once at node start, so there is no need to risk it + err = srv.commitAtomicOp(nil, nil, nil) if err != nil { return err } diff --git a/zk/stages/stage_sequence_execute_utils.go b/zk/stages/stage_sequence_execute_utils.go index 6cda5066d57..e76f1738ff9 100644 --- a/zk/stages/stage_sequence_execute_utils.go +++ b/zk/stages/stage_sequence_execute_utils.go @@ -42,12 +42,6 @@ import ( const ( logInterval = 20 * time.Second transactionGasLimit = 30000000 - yieldSize = 100 // arbitrary number defining how many transactions to yield from the pool at once - - // this is the max number of send transactions that can be included in a block without overflowing counters - // this is for simple send transactions, any other type would consume more counters - // - preForkId11TxLimit = 444 ) var ( From 7cce8264a556bdf6fd97e9f0765493259d33662e Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Mon, 5 Aug 2024 13:19:59 +0000 Subject: [PATCH 25/33] update comment --- zk/stages/stage_sequence_execute_batch.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/zk/stages/stage_sequence_execute_batch.go b/zk/stages/stage_sequence_execute_batch.go index 73b244b06bf..8923a0318fc 100644 --- a/zk/stages/stage_sequence_execute_batch.go +++ b/zk/stages/stage_sequence_execute_batch.go @@ -105,7 +105,11 @@ func updateStreamAndCheckRollback(
continue } - // updateStreamAndCheckRollback cannot be invoked during l1 recovery so no point to check it + // The sequencer can get to this point of the code only in L1Recovery mode or Default mode. + // There is no way to get here in LimboRecoveryMode. + // If we are here in L1RecoveryMode then stop everything with an infinite loop because something is quite wrong. + // If we are here in Default mode and limbo is disabled then do the same as in L1RecoveryMode. + // If we are here in Default mode and limbo is enabled then continue the normal flow. if batchState.isL1Recovery() || !batchContext.cfg.zk.Limbo { infiniteLoop(verifierBundle.Request.BatchNumber) } From 7e72b4fc9f5a288694dc4da74b24a9e05bf9987a Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Mon, 5 Aug 2024 13:33:35 +0000 Subject: [PATCH 26/33] add timer, remove old code and organize func's args --- core/blockchain_zkevm.go | 34 ------------------- .../legacy_executor_verifier.go | 3 ++ zk/stages/stage_sequence_execute.go | 2 +- zk/stages/stage_sequence_execute_state.go | 13 ++++--- 4 files changed, 12 insertions(+), 40 deletions(-) diff --git a/core/blockchain_zkevm.go b/core/blockchain_zkevm.go index 6c338eabab1..26ac082e7be 100644 --- a/core/blockchain_zkevm.go +++ b/core/blockchain_zkevm.go @@ -296,40 +296,6 @@ func PrepareBlockTxExecution( return &blockContextImpl, excessDataGas, &blockGer, &blockL1BlockHash, nil } -// func FinalizeBlockExecutionWithHistoryWrite( -// engine consensus.Engine, stateReader state.StateReader, -// header *types.Header, txs types.Transactions, uncles []*types.Header, -// stateWriter state.WriterWithChangeSets, cc *chain.Config, -// ibs *state.IntraBlockState, receipts types.Receipts, -// withdrawals []*types.Withdrawal, headerReader consensus.ChainHeaderReader, -// isMining bool, excessDataGas *big.Int, -// ) (newBlock *types.Block, newTxs types.Transactions, newReceipt types.Receipts, err error) { -// newBlock, newTxs, newReceipt, err = FinalizeBlockExecution( -// engine, -// stateReader, -// header, -// txs, -// uncles, -// stateWriter, -// cc, -// ibs, -// receipts, -// withdrawals, -// headerReader, -// isMining, -// excessDataGas, -// ) -// if err != nil { -// return nil, nil, nil, err -// } -// if err := stateWriter.WriteHistory(); err != nil { -// return nil, nil, nil, fmt.Errorf("writing history for block %d failed: %w", header.Number.Uint64(), err) -// } -// return newBlock, newTxs, newReceipt, nil -// } - func CreateReceiptForBlockInfoTree(receipt *types.Receipt, chainConfig *chain.Config, blockNum uint64, execResult *ExecutionResult) *types.Receipt { // [hack]TODO: remove this after bug is fixed localReceipt := receipt.Clone() diff --git a/zk/legacy_executor_verifier/legacy_executor_verifier.go b/zk/legacy_executor_verifier/legacy_executor_verifier.go index b5b04e2b00c..6dd3f663ddf 100644 --- a/zk/legacy_executor_verifier/legacy_executor_verifier.go +++ b/zk/legacy_executor_verifier/legacy_executor_verifier.go @@ -186,6 +186,9 @@ func (v *LegacyExecutorVerifier) VerifyAsync(request *VerifierRequest, blockNumb return verifierBundle, ErrNoExecutorAvailable } + t := utils.StartTimer("legacy-executor-verifier", "verify-async") + defer t.LogTimer() + e.AquireAccess() defer e.ReleaseAccess() if v.cancelAllVerifications.Load() { diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index f07a5ea3f93..95858c46eda 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -105,7 +105,7 @@ func
SpawnSequencingStage( } // let's check if we have any L1 data to recover - if err = batchState.batchL1RecoveryData.loadBatchData(sdb, batchState.batchNumber, batchState.forkId); err != nil { + if err = batchState.batchL1RecoveryData.loadBatchData(sdb); err != nil { return err } diff --git a/zk/stages/stage_sequence_execute_state.go b/zk/stages/stage_sequence_execute_state.go index 79853ab0098..20245564fdd 100644 --- a/zk/stages/stage_sequence_execute_state.go +++ b/zk/stages/stage_sequence_execute_state.go @@ -60,7 +60,7 @@ func newBatchState(forkId, batchNumber uint64, hasExecutorForThisBatch, l1Recove } if l1Recovery { - batchState.batchL1RecoveryData = newBatchL1RecoveryData() + batchState.batchL1RecoveryData = newBatchL1RecoveryData(batchState) } limboHeaderTimestamp, limboTxHash := txPool.GetLimboTxHash(batchState.batchNumber) @@ -127,14 +127,17 @@ func (bs *BatchState) onBuiltBlock(blockNumber uint64) { type BatchL1RecoveryData struct { recoveredBatchDataSize int recoveredBatchData *l1_data.DecodedL1Data + batchState *BatchState } -func newBatchL1RecoveryData() *BatchL1RecoveryData { - return &BatchL1RecoveryData{} +func newBatchL1RecoveryData(batchState *BatchState) *BatchL1RecoveryData { + return &BatchL1RecoveryData{ + batchState: batchState, + } } -func (batchL1RecoveryData *BatchL1RecoveryData) loadBatchData(sdb *stageDb, thisBatch, forkId uint64) (err error) { - batchL1RecoveryData.recoveredBatchData, err = l1_data.BreakDownL1DataByBatch(thisBatch, forkId, sdb.hermezDb.HermezDbReader) +func (batchL1RecoveryData *BatchL1RecoveryData) loadBatchData(sdb *stageDb) (err error) { + batchL1RecoveryData.recoveredBatchData, err = l1_data.BreakDownL1DataByBatch(batchL1RecoveryData.batchState.batchNumber, batchL1RecoveryData.batchState.forkId, sdb.hermezDb.HermezDbReader) if err != nil { return err } From 894cda9e03c19a56b07582544ee4c9ddb830bcc9 Mon Sep 17 00:00:00 2001 From: Kamen Stoykov <24619432+kstoykov@users.noreply.github.com> Date: Tue, 6 Aug 2024 15:51:49 +0300 Subject: [PATCH 27/33] l1infotree - highest seen + tiny refactor (#918) * l1infotree - highest seen + tiny refactor * l1infotreeindexprogress * remove useless param * update how l1infotreeindexprogress is stored --- eth/stagedsync/stages/stages_zk.go | 6 +- zk/hermez_db/db.go | 151 +++++++----------- zk/stages/stage_batches.go | 28 ++-- zk/stages/stage_sequence_execute.go | 11 +- zk/stages/stage_sequence_execute_blocks.go | 3 +- .../stage_sequence_execute_injected_batch.go | 2 +- zk/stages/stage_sequence_execute_unwind.go | 15 +- zk/stages/stage_sequence_execute_utils.go | 7 +- 8 files changed, 94 insertions(+), 129 deletions(-) diff --git a/eth/stagedsync/stages/stages_zk.go b/eth/stagedsync/stages/stages_zk.go index c5cb45b2d98..4ac4583fa82 100644 --- a/eth/stagedsync/stages/stages_zk.go +++ b/eth/stagedsync/stages/stages_zk.go @@ -28,7 +28,7 @@ var ( ForkId SyncStage = "ForkId" L1SequencerSync SyncStage = "L1SequencerSync" L1InfoTree SyncStage = "L1InfoTree" - HighestUsedL1InfoIndex SyncStage = "HighestUsedL1InfoTree" - SequenceExecutorVerify SyncStage = "SequenceExecutorVerify" - L1BlockSync SyncStage = "L1BlockSync" + // HighestUsedL1InfoIndex SyncStage = "HighestUsedL1InfoTree" + SequenceExecutorVerify SyncStage = "SequenceExecutorVerify" + L1BlockSync SyncStage = "L1BlockSync" ) diff --git a/zk/hermez_db/db.go b/zk/hermez_db/db.go index bcb70ea3fc5..f568dfd3a57 100644 --- a/zk/hermez_db/db.go +++ b/zk/hermez_db/db.go @@ -15,40 +15,41 @@ import ( "github.com/ledgerwatch/log/v3" ) -const L1VERIFICATIONS = 
"hermez_l1Verifications" // l1blockno, batchno -> l1txhash -const L1SEQUENCES = "hermez_l1Sequences" // l1blockno, batchno -> l1txhash -const FORKIDS = "hermez_forkIds" // batchNo -> forkId -const FORKID_BLOCK = "hermez_forkIdBlock" // forkId -> startBlock -const BLOCKBATCHES = "hermez_blockBatches" // l2blockno -> batchno -const GLOBAL_EXIT_ROOTS = "hermez_globalExitRootsSaved" // GER -> true -const BLOCK_GLOBAL_EXIT_ROOTS = "hermez_globalExitRoots" // l2blockno -> GER -const GLOBAL_EXIT_ROOTS_BATCHES = "hermez_globalExitRoots_batches" // batchkno -> GER -const TX_PRICE_PERCENTAGE = "hermez_txPricePercentage" // txHash -> txPricePercentage -const STATE_ROOTS = "hermez_stateRoots" // l2blockno -> stateRoot -const L1_INFO_TREE_UPDATES = "l1_info_tree_updates" // index -> L1InfoTreeUpdate -const L1_INFO_TREE_UPDATES_BY_GER = "l1_info_tree_updates_by_ger" // GER -> L1InfoTreeUpdate -const BLOCK_L1_INFO_TREE_INDEX = "block_l1_info_tree_index" // block number -> l1 info tree index -const L1_INJECTED_BATCHES = "l1_injected_batches" // index increasing by 1 -> injected batch for the start of the chain -const BLOCK_INFO_ROOTS = "block_info_roots" // block number -> block info root hash -const L1_BLOCK_HASHES = "l1_block_hashes" // l1 block hash -> true -const BLOCK_L1_BLOCK_HASHES = "block_l1_block_hashes" // block number -> l1 block hash -const L1_BLOCK_HASH_GER = "l1_block_hash_ger" // l1 block hash -> GER -const INTERMEDIATE_TX_STATEROOTS = "hermez_intermediate_tx_stateRoots" // l2blockno -> stateRoot -const BATCH_WITNESSES = "hermez_batch_witnesses" // batch number -> witness -const BATCH_COUNTERS = "hermez_batch_counters" // batch number -> counters -const L1_BATCH_DATA = "l1_batch_data" // batch number -> l1 batch data from transaction call data -const REUSED_L1_INFO_TREE_INDEX = "reused_l1_info_tree_index" // block number => const 1 -const LATEST_USED_GER = "latest_used_ger" // batch number -> GER latest used GER -const BATCH_BLOCKS = "batch_blocks" // batch number -> block numbers (concatenated together) -const SMT_DEPTHS = "smt_depths" // block number -> smt depth -const L1_INFO_LEAVES = "l1_info_leaves" // l1 info tree index -> l1 info tree leaf -const L1_INFO_ROOTS = "l1_info_roots" // root hash -> l1 info tree index -const INVALID_BATCHES = "invalid_batches" // batch number -> true -const BATCH_PARTIALLY_PROCESSED = "batch_partially_processed" // batch number -> true -const LOCAL_EXIT_ROOTS = "local_exit_roots" // l2 block number -> local exit root -const ROllUP_TYPES_FORKS = "rollup_types_forks" // rollup type id -> fork id -const FORK_HISTORY = "fork_history" // index -> fork id + last verified batch -const JUST_UNWOUND = "just_unwound" // batch number -> true +const L1VERIFICATIONS = "hermez_l1Verifications" // l1blockno, batchno -> l1txhash +const L1SEQUENCES = "hermez_l1Sequences" // l1blockno, batchno -> l1txhash +const FORKIDS = "hermez_forkIds" // batchNo -> forkId +const FORKID_BLOCK = "hermez_forkIdBlock" // forkId -> startBlock +const BLOCKBATCHES = "hermez_blockBatches" // l2blockno -> batchno +const GLOBAL_EXIT_ROOTS = "hermez_globalExitRootsSaved" // GER -> true +const BLOCK_GLOBAL_EXIT_ROOTS = "hermez_globalExitRoots" // l2blockno -> GER +const GLOBAL_EXIT_ROOTS_BATCHES = "hermez_globalExitRoots_batches" // batchkno -> GER +const TX_PRICE_PERCENTAGE = "hermez_txPricePercentage" // txHash -> txPricePercentage +const STATE_ROOTS = "hermez_stateRoots" // l2blockno -> stateRoot +const L1_INFO_TREE_UPDATES = "l1_info_tree_updates" // index -> L1InfoTreeUpdate +const 
L1_INFO_TREE_UPDATES_BY_GER = "l1_info_tree_updates_by_ger" // GER -> L1InfoTreeUpdate +const BLOCK_L1_INFO_TREE_INDEX = "block_l1_info_tree_index" // block number -> l1 info tree index +const BLOCK_L1_INFO_TREE_INDEX_PROGRESS = "block_l1_info_tree_progress" // block number -> l1 info tree progress +const L1_INJECTED_BATCHES = "l1_injected_batches" // index increasing by 1 -> injected batch for the start of the chain +const BLOCK_INFO_ROOTS = "block_info_roots" // block number -> block info root hash +const L1_BLOCK_HASHES = "l1_block_hashes" // l1 block hash -> true +const BLOCK_L1_BLOCK_HASHES = "block_l1_block_hashes" // block number -> l1 block hash +const L1_BLOCK_HASH_GER = "l1_block_hash_ger" // l1 block hash -> GER +const INTERMEDIATE_TX_STATEROOTS = "hermez_intermediate_tx_stateRoots" // l2blockno -> stateRoot +const BATCH_WITNESSES = "hermez_batch_witnesses" // batch number -> witness +const BATCH_COUNTERS = "hermez_batch_counters" // batch number -> counters +const L1_BATCH_DATA = "l1_batch_data" // batch number -> l1 batch data from transaction call data +const REUSED_L1_INFO_TREE_INDEX = "reused_l1_info_tree_index" // block number => const 1 +const LATEST_USED_GER = "latest_used_ger" // batch number -> GER latest used GER +const BATCH_BLOCKS = "batch_blocks" // batch number -> block numbers (concatenated together) +const SMT_DEPTHS = "smt_depths" // block number -> smt depth +const L1_INFO_LEAVES = "l1_info_leaves" // l1 info tree index -> l1 info tree leaf +const L1_INFO_ROOTS = "l1_info_roots" // root hash -> l1 info tree index +const INVALID_BATCHES = "invalid_batches" // batch number -> true +const BATCH_PARTIALLY_PROCESSED = "batch_partially_processed" // batch number -> true +const LOCAL_EXIT_ROOTS = "local_exit_roots" // l2 block number -> local exit root +const ROllUP_TYPES_FORKS = "rollup_types_forks" // rollup type id -> fork id +const FORK_HISTORY = "fork_history" // index -> fork id + last verified batch +const JUST_UNWOUND = "just_unwound" // batch number -> true var HermezDbTables = []string{ L1VERIFICATIONS, @@ -64,6 +65,7 @@ var HermezDbTables = []string{ L1_INFO_TREE_UPDATES, L1_INFO_TREE_UPDATES_BY_GER, BLOCK_L1_INFO_TREE_INDEX, + BLOCK_L1_INFO_TREE_INDEX_PROGRESS, L1_INJECTED_BATCHES, BLOCK_INFO_ROOTS, L1_BLOCK_HASHES, @@ -774,18 +776,6 @@ func (db *HermezDbReader) GetBlockGlobalExitRoot(l2BlockNo uint64) (common.Hash, return common.BytesToHash(bytes), nil } -func (db *HermezDb) TruncateBlockGlobalExitRoot(fromL2BlockNum, toL2BlockNum uint64) error { - for i := fromL2BlockNum; i <= toL2BlockNum; i++ { - err := db.tx.Delete(BLOCK_GLOBAL_EXIT_ROOTS, Uint64ToBytes(i)) - if err != nil { - return err - } - - } - - return nil -} - // from and to are inclusive func (db *HermezDbReader) GetBlockGlobalExitRoots(fromBlockNo, toBlockNo uint64) ([]common.Hash, error) { c, err := db.tx.Cursor(BLOCK_GLOBAL_EXIT_ROOTS) @@ -825,18 +815,6 @@ func (db *HermezDbReader) GetBlockL1BlockHash(l2BlockNo uint64) (common.Hash, er return common.BytesToHash(bytes), nil } -func (db *HermezDb) TruncateBlockL1BlockHash(fromL2BlockNum, toL2BlockNum uint64) error { - for i := fromL2BlockNum; i <= toL2BlockNum; i++ { - err := db.tx.Delete(BLOCK_L1_BLOCK_HASHES, Uint64ToBytes(i)) - if err != nil { - return err - } - - } - - return nil -} - // from and to are inclusive func (db *HermezDbReader) GetBlockL1BlockHashes(fromBlockNo, toBlockNo uint64) ([]common.Hash, error) { c, err := db.tx.Cursor(BLOCK_L1_BLOCK_HASHES) @@ -1100,18 +1078,6 @@ func (db *HermezDbReader) GetForkIdBlock(forkId uint64) 
(uint64, bool, error) { return blockNum, found, err } -func (db *HermezDb) TruncateForkId(fromBatchNum, toBatchNum uint64) error { - for i := fromBatchNum; i <= toBatchNum; i++ { - err := db.tx.Delete(FORKIDS, Uint64ToBytes(i)) - if err != nil { - return err - } - - } - - return nil -} - func (db *HermezDb) DeleteForkIdBlock(fromBlockNo, toBlockNo uint64) error { return db.deleteFromBucketWithUintKeysRange(FORKID_BLOCK, fromBlockNo, toBlockNo) } @@ -1295,38 +1261,39 @@ func (db *HermezDbReader) GetBlockL1InfoTreeIndex(blockNumber uint64) (uint64, e return BytesToUint64(v), nil } -func (db *HermezDb) TruncateBlockL1InfoTreeIndex(fromL2BlockNum, toL2BlockNum uint64) error { - for i := fromL2BlockNum; i <= toL2BlockNum; i++ { - err := db.tx.Delete(BLOCK_L1_INFO_TREE_INDEX, Uint64ToBytes(i)) - if err != nil { - return err - } - +func (db *HermezDb) WriteBlockL1InfoTreeIndexProgress(blockNumber uint64, l1Index uint64) error { + latestBlockNumber, latestL1Index, err := db.GetLatestBlockL1InfoTreeIndexProgress() + if err != nil { + return err + } + if latestBlockNumber > blockNumber { + return fmt.Errorf("unable to set l1index for block %d because it has already been set for block %d", blockNumber, latestBlockNumber) + } + if l1Index <= latestL1Index { + return nil } - return nil + k := Uint64ToBytes(blockNumber) + v := Uint64ToBytes(l1Index) + return db.tx.Put(BLOCK_L1_INFO_TREE_INDEX_PROGRESS, k, v) } -func (db *HermezDbReader) GetLatestL1InfoTreeIndex() (uint64, error) { - c, err := db.tx.Cursor(BLOCK_L1_INFO_TREE_INDEX) +func (db *HermezDbReader) GetLatestBlockL1InfoTreeIndexProgress() (uint64, uint64, error) { + c, err := db.tx.Cursor(BLOCK_L1_INFO_TREE_INDEX_PROGRESS) if err != nil { - return 0, err + return 0, 0, err } defer c.Close() - var k, v []byte - for k, v, err = c.Last(); k != nil; k, v, err = c.Prev() { - if err != nil { - break - } - - if len(v) != 0 && v[0] == 1 { - blockNum := BytesToUint64(k[:8]) - return blockNum, nil - } + k, v, err := c.Last() + if err != nil { + return 0, 0, err } + return BytesToUint64(k), BytesToUint64(v), nil +} - return 0, nil +func (db *HermezDb) DeleteBlockL1InfoTreeIndexesProgress(fromBlockNum, toBlockNum uint64) error { + return db.deleteFromBucketWithUintKeysRange(BLOCK_L1_INFO_TREE_INDEX_PROGRESS, fromBlockNum, toBlockNum) } func (db *HermezDb) WriteL1InjectedBatch(batch *types.L1InjectedBatch) error { diff --git a/zk/stages/stage_batches.go b/zk/stages/stage_batches.go index b58f0e8b75e..153d0473a5d 100644 --- a/zk/stages/stage_batches.go +++ b/zk/stages/stage_batches.go @@ -67,6 +67,7 @@ type HermezDb interface { WriteBatchGlobalExitRoot(batchNumber uint64, ger types.GerUpdate) error WriteIntermediateTxStateRoot(l2BlockNumber uint64, txHash common.Hash, rpcRoot common.Hash) error WriteBlockL1InfoTreeIndex(blockNumber uint64, l1Index uint64) error + WriteBlockL1InfoTreeIndexProgress(blockNumber uint64, l1Index uint64) error WriteLatestUsedGer(batchNo uint64, ger common.Hash) error WriteLocalExitRootForBatchNo(batchNo uint64, localExitRoot common.Hash) error } @@ -198,7 +199,7 @@ func SpawnStageBatches( blocksWritten := uint64(0) highestHashableL2BlockNo := uint64(0) - highestL1InfoTreeIndex, err := stages.GetStageProgress(tx, stages.HighestUsedL1InfoIndex) + _, highestL1InfoTreeIndex, err := hermezDb.GetLatestBlockL1InfoTreeIndexProgress() if err != nil { return fmt.Errorf("failed to get highest used l1 info index, %w", err) } @@ -424,7 +425,10 @@ LOOP: } if blocksWritten != prevAmountBlocksWritten && blocksWritten%STAGE_PROGRESS_SAVE == 0 { - 
if err = saveStageProgress(tx, logPrefix, highestHashableL2BlockNo, highestSeenBatchNo, highestL1InfoTreeIndex, lastBlockHeight, lastForkId); err != nil { + if err = saveStageProgress(tx, logPrefix, highestHashableL2BlockNo, highestSeenBatchNo, lastBlockHeight, lastForkId); err != nil { + return err + } + if err := hermezDb.WriteBlockL1InfoTreeIndexProgress(lastBlockHeight, highestL1InfoTreeIndex); err != nil { return err } @@ -451,7 +455,10 @@ LOOP: return nil } - if err = saveStageProgress(tx, logPrefix, highestHashableL2BlockNo, highestSeenBatchNo, highestL1InfoTreeIndex, lastBlockHeight, lastForkId); err != nil { + if err = saveStageProgress(tx, logPrefix, highestHashableL2BlockNo, highestSeenBatchNo, lastBlockHeight, lastForkId); err != nil { + return err + } + if err := hermezDb.WriteBlockL1InfoTreeIndexProgress(lastBlockHeight, highestL1InfoTreeIndex); err != nil { return err } @@ -468,7 +475,7 @@ LOOP: return nil } -func saveStageProgress(tx kv.RwTx, logPrefix string, highestHashableL2BlockNo, highestSeenBatchNo, highestL1InfoTreeIndex, lastBlockHeight, lastForkId uint64) error { +func saveStageProgress(tx kv.RwTx, logPrefix string, highestHashableL2BlockNo, highestSeenBatchNo, lastBlockHeight, lastForkId uint64) error { var err error // store the highest hashable block number if err := stages.SaveStageProgress(tx, stages.HighestHashableL2BlockNo, highestHashableL2BlockNo); err != nil { @@ -484,10 +491,6 @@ func saveStageProgress(tx kv.RwTx, logPrefix string, highestHashableL2BlockNo, h return fmt.Errorf("save stage progress error: %v", err) } - if err := stages.SaveStageProgress(tx, stages.HighestUsedL1InfoIndex, uint64(highestL1InfoTreeIndex)); err != nil { - return err - } - // save the latest verified batch number as well just in case this node is upgraded // to a sequencer in the future if err := stages.SaveStageProgress(tx, stages.SequenceExecutorVerify, highestSeenBatchNo); err != nil { @@ -696,13 +699,8 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c // store the highest used l1 info index// ///////////////////////////////////////// - highestL1InfoTreeIndex, err := hermezDb.GetLatestL1InfoTreeIndex() - if err != nil { - return fmt.Errorf("get latest l1 info tree index error: %v", err) - } - - if err := stages.SaveStageProgress(tx, stages.HighestUsedL1InfoIndex, highestL1InfoTreeIndex); err != nil { - return err + if err := hermezDb.DeleteBlockL1InfoTreeIndexesProgress(fromBlock, toBlock); err != nil { + return err } if err := hermezDb.DeleteBlockL1InfoTreeIndexes(fromBlock, toBlock); err != nil { diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index 95858c46eda..cc2a76268e9 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -171,7 +171,6 @@ func SpawnSequencingStage( return err } - var anyOverflow bool ibs := state.New(sdb.stateReader) getHashFn := core.GetHashFn(header, func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(sdb.tx, hash, number) }) blockContext := core.NewEVMBlockContext(header, getHashFn, cfg.engine, &cfg.zk.AddressSequencer, parentBlock.ExcessDataGas()) @@ -222,15 +221,14 @@ func SpawnSequencingStage( log.Trace(fmt.Sprintf("[%s] Yielded transactions from the pool", logPrefix), "txCount", len(batchState.blockState.transactionsForInclusion)) } - var receipt *types.Receipt - var execResult *core.ExecutionResult for i, transaction := range batchState.blockState.transactionsForInclusion { txHash := transaction.Hash()
effectiveGas := batchState.blockState.getL1EffectiveGases(cfg, i) // The copying of this structure is intentional backupDataSizeChecker := *blockDataSizeChecker - if receipt, execResult, anyOverflow, err = attemptAddTransaction(cfg, sdb, ibs, batchCounters, &blockContext, header, transaction, effectiveGas, batchState.isL1Recovery(), batchState.forkId, l1InfoIndex, &backupDataSizeChecker); err != nil { + receipt, execResult, anyOverflow, err := attemptAddTransaction(cfg, sdb, ibs, batchCounters, &blockContext, header, transaction, effectiveGas, batchState.isL1Recovery(), batchState.forkId, l1InfoIndex, &backupDataSizeChecker) + if err != nil { if batchState.isLimboRecovery() { panic("limbo transaction has already been executed once so they must not fail while re-executing") } @@ -304,8 +302,11 @@ func SpawnSequencingStage( if err = sdb.hermezDb.WriteBlockL1InfoTreeIndex(blockNumber, l1TreeUpdateIndex); err != nil { return err } + if err = sdb.hermezDb.WriteBlockL1InfoTreeIndexProgress(blockNumber, infoTreeIndexProgress); err != nil { + return err + } - block, err = doFinishBlockAndUpdateState(batchContext, ibs, header, parentBlock, batchState, ger, l1BlockHash, infoTreeIndexProgress) + block, err = doFinishBlockAndUpdateState(batchContext, ibs, header, parentBlock, batchState, ger, l1BlockHash) if err != nil { return err } diff --git a/zk/stages/stage_sequence_execute_blocks.go b/zk/stages/stage_sequence_execute_blocks.go index bfae01f81a6..aa7a5debfd1 100644 --- a/zk/stages/stage_sequence_execute_blocks.go +++ b/zk/stages/stage_sequence_execute_blocks.go @@ -73,7 +73,6 @@ func doFinishBlockAndUpdateState( batchState *BatchState, ger common.Hash, l1BlockHash common.Hash, - l1InfoIndex uint64, ) (*types.Block, error) { thisBlockNumber := header.Number.Uint64() @@ -86,7 +85,7 @@ func doFinishBlockAndUpdateState( return nil, err } - if err := updateSequencerProgress(batchContext.sdb.tx, thisBlockNumber, batchState.batchNumber, l1InfoIndex, false); err != nil { + if err := updateSequencerProgress(batchContext.sdb.tx, thisBlockNumber, batchState.batchNumber, false); err != nil { return nil, err } diff --git a/zk/stages/stage_sequence_execute_injected_batch.go b/zk/stages/stage_sequence_execute_injected_batch.go index 5c3100788ab..1a29a28c5b6 100644 --- a/zk/stages/stage_sequence_execute_injected_batch.go +++ b/zk/stages/stage_sequence_execute_injected_batch.go @@ -75,7 +75,7 @@ func processInjectedInitialBatch( effectiveGases: []uint8{effectiveGas}, } - _, err = doFinishBlockAndUpdateState(batchContext, ibs, header, parentBlock, batchState, injected.LastGlobalExitRoot, injected.L1ParentHash, 0) + _, err = doFinishBlockAndUpdateState(batchContext, ibs, header, parentBlock, batchState, injected.LastGlobalExitRoot, injected.L1ParentHash) return err } diff --git a/zk/stages/stage_sequence_execute_unwind.go b/zk/stages/stage_sequence_execute_unwind.go index 3072944bebf..5fb3f7d2458 100644 --- a/zk/stages/stage_sequence_execute_unwind.go +++ b/zk/stages/stage_sequence_execute_unwind.go @@ -63,8 +63,7 @@ func unwindSequenceExecutionStage(u *stagedsync.UnwindState, s *stagedsync.Stage return err } - //TODO: why l1infoindex is 1? 
- if err = updateSequencerProgress(tx, u.UnwindPoint, fromBatch, 1, true); err != nil { + if err = updateSequencerProgress(tx, u.UnwindPoint, fromBatch, true); err != nil { return err } @@ -114,15 +113,19 @@ func UnwindSequenceExecutionStageDbWrites(ctx context.Context, u *stagedsync.Unw return fmt.Errorf("truncate latest used gers error: %v", err) } // only seq - if err = hermezDb.TruncateBlockGlobalExitRoot(u.UnwindPoint+1, s.BlockNumber); err != nil { + if err = hermezDb.DeleteBlockGlobalExitRoots(u.UnwindPoint+1, s.BlockNumber); err != nil { return fmt.Errorf("truncate block ger error: %v", err) } // only seq - if err = hermezDb.TruncateBlockL1BlockHash(u.UnwindPoint+1, s.BlockNumber); err != nil { + if err = hermezDb.DeleteBlockL1BlockHashes(u.UnwindPoint+1, s.BlockNumber); err != nil { return fmt.Errorf("truncate block l1 block hash error: %v", err) } // only seq - if err = hermezDb.TruncateBlockL1InfoTreeIndex(u.UnwindPoint+1, s.BlockNumber); err != nil { + if err = hermezDb.DeleteBlockL1InfoTreeIndexes(u.UnwindPoint+1, s.BlockNumber); err != nil { + return fmt.Errorf("truncate block l1 info tree index error: %v", err) + } + // only seq + if err = hermezDb.DeleteBlockL1InfoTreeIndexesProgress(u.UnwindPoint+1, s.BlockNumber); err != nil { return fmt.Errorf("truncate block l1 info tree index error: %v", err) } // only seq @@ -130,7 +133,7 @@ func UnwindSequenceExecutionStageDbWrites(ctx context.Context, u *stagedsync.Unw return fmt.Errorf("truncate block batches error: %v", err) } // only seq - if err = hermezDb.TruncateForkId(fromBatchForForkIdDeletion, toBatch); err != nil { + if err = hermezDb.DeleteForkIds(fromBatchForForkIdDeletion, toBatch); err != nil { return fmt.Errorf("truncate fork id error: %v", err) } // only seq diff --git a/zk/stages/stage_sequence_execute_utils.go b/zk/stages/stage_sequence_execute_utils.go index e76f1738ff9..b23412c9801 100644 --- a/zk/stages/stage_sequence_execute_utils.go +++ b/zk/stages/stage_sequence_execute_utils.go @@ -253,7 +253,7 @@ func prepareL1AndInfoTreeRelatedStuff(sdb *stageDb, batchState *BatchState, prop // we keep track of this here shouldWriteGerToContract = true - if infoTreeIndexProgress, err = stages.GetStageProgress(sdb.tx, stages.HighestUsedL1InfoIndex); err != nil { + if _, infoTreeIndexProgress, err = sdb.hermezDb.GetLatestBlockL1InfoTreeIndexProgress(); err != nil { return } @@ -312,7 +312,7 @@ func calculateNextL1TreeUpdateToUse(lastInfoIndex uint64, hermezDb *hermez_db.He return nextL1Index, l1Info, nil } -func updateSequencerProgress(tx kv.RwTx, newHeight uint64, newBatch uint64, l1InfoIndex uint64, unwinding bool) error { +func updateSequencerProgress(tx kv.RwTx, newHeight uint64, newBatch uint64, unwinding bool) error { // now update stages that will be used later on in stageloop.go and other stages. 
As we're the sequencer // we won't have headers stage for example as we're already writing them here if err := stages.SaveStageProgress(tx, stages.Execution, newHeight); err != nil { @@ -324,9 +324,6 @@ func updateSequencerProgress(tx kv.RwTx, newHeight uint64, newBatch uint64, l1In if err := stages.SaveStageProgress(tx, stages.HighestSeenBatchNumber, newBatch); err != nil { return err } - if err := stages.SaveStageProgress(tx, stages.HighestUsedL1InfoIndex, l1InfoIndex); err != nil { - return err - } if !unwinding { if err := stages.SaveStageProgress(tx, stages.IntermediateHashes, newHeight); err != nil { From b372a7cb9dc45d35edec997093dbccbe1c0e2ea6 Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Tue, 6 Aug 2024 14:37:00 +0000 Subject: [PATCH 28/33] add flag for verification timeout --- cmd/utils/flags.go | 5 +++++ eth/ethconfig/config_zkevm.go | 1 + turbo/cli/default_flags.go | 1 + turbo/cli/flags_zkevm.go | 7 +++++++ .../legacy_executor_verifier.go | 15 +++++++++++++-- zk/stages/stage_sequence_execute.go | 2 +- 6 files changed, 28 insertions(+), 3 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 6df29a6d578..aa620255c38 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -471,6 +471,11 @@ var ( Usage: "Batch seal time. Defaults to 12s", Value: "12s", } + SequencerBatchVerificationTimeout = cli.StringFlag{ + Name: "zkevm.sequencer-batch-verification-timeout", + Usage: "The maximum time that batch verification can take, including retries. It can be interpreted as the maximum time the sequencer can run without an executor. Setting it to 0s means an infinite timeout. Defaults to 30m", + Value: "30m", + } SequencerHaltOnBatchNumber = cli.Uint64Flag{ Name: "zkevm.sequencer-halt-on-batch-number", Usage: "Halt the sequencer on this batch number", diff --git a/eth/ethconfig/config_zkevm.go b/eth/ethconfig/config_zkevm.go index dbe1d4fb48c..f960bece63d 100644 --- a/eth/ethconfig/config_zkevm.go +++ b/eth/ethconfig/config_zkevm.go @@ -31,6 +31,7 @@ type Zk struct { DatastreamVersion int SequencerBlockSealTime time.Duration SequencerBatchSealTime time.Duration + SequencerBatchVerificationTimeout time.Duration SequencerHaltOnBatchNumber uint64 ExecutorUrls []string ExecutorStrictMode bool diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index a482832f354..706c22a92c4 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -191,6 +191,7 @@ var DefaultFlags = []cli.Flag{ &utils.SmtRegenerateInMemory, &utils.SequencerBlockSealTime, &utils.SequencerBatchSealTime, + &utils.SequencerBatchVerificationTimeout, &utils.SequencerHaltOnBatchNumber, &utils.ExecutorUrls, &utils.ExecutorStrictMode, diff --git a/turbo/cli/flags_zkevm.go b/turbo/cli/flags_zkevm.go index c5a9e4ba175..d2ac35ed3bb 100644 --- a/turbo/cli/flags_zkevm.go +++ b/turbo/cli/flags_zkevm.go @@ -76,6 +76,12 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { panic(fmt.Sprintf("could not parse sequencer batch seal time timeout value %s", sequencerBatchSealTimeVal)) } + sequencerBatchVerificationTimeoutVal := ctx.String(utils.SequencerBatchVerificationTimeout.Name) + sequencerBatchVerificationTimeout, err := time.ParseDuration(sequencerBatchVerificationTimeoutVal) + if err != nil { + panic(fmt.Sprintf("could not parse sequencer batch verification timeout value %s", sequencerBatchVerificationTimeoutVal)) + } + effectiveGasPriceForEthTransferVal := ctx.Float64(utils.EffectiveGasPriceForEthTransfer.Name) effectiveGasPriceForErc20TransferVal :=
ctx.Float64(utils.EffectiveGasPriceForErc20Transfer.Name) effectiveGasPriceForContractInvocationVal := ctx.Float64(utils.EffectiveGasPriceForContractInvocation.Name) @@ -122,6 +128,7 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { SmtRegenerateInMemory: ctx.Bool(utils.SmtRegenerateInMemory.Name), SequencerBlockSealTime: sequencerBlockSealTime, SequencerBatchSealTime: sequencerBatchSealTime, + SequencerBatchVerificationTimeout: sequencerBatchVerificationTimeout, SequencerHaltOnBatchNumber: ctx.Uint64(utils.SequencerHaltOnBatchNumber.Name), ExecutorUrls: strings.Split(strings.ReplaceAll(ctx.String(utils.ExecutorUrls.Name), " ", ""), ","), ExecutorStrictMode: ctx.Bool(utils.ExecutorStrictMode.Name), diff --git a/zk/legacy_executor_verifier/legacy_executor_verifier.go b/zk/legacy_executor_verifier/legacy_executor_verifier.go index 6dd3f663ddf..4f48e58877d 100644 --- a/zk/legacy_executor_verifier/legacy_executor_verifier.go +++ b/zk/legacy_executor_verifier/legacy_executor_verifier.go @@ -34,9 +34,14 @@ type VerifierRequest struct { StateRoot common.Hash Counters map[string]int creationTime time.Time + timeout time.Duration } func NewVerifierRequest(forkId, batchNumber uint64, blockNumbers []uint64, stateRoot common.Hash, counters map[string]int) *VerifierRequest { + return NewVerifierRequestWithTimeout(forkId, batchNumber, blockNumbers, stateRoot, counters, 0) +} + +func NewVerifierRequestWithTimeout(forkId, batchNumber uint64, blockNumbers []uint64, stateRoot common.Hash, counters map[string]int, timeout time.Duration) *VerifierRequest { return &VerifierRequest{ BatchNumber: batchNumber, BlockNumbers: blockNumbers, @@ -44,11 +49,16 @@ func NewVerifierRequest(forkId, batchNumber uint64, blockNumbers []uint64, state StateRoot: stateRoot, Counters: counters, creationTime: time.Now(), + timeout: timeout, } } func (vr *VerifierRequest) IsOverdue() bool { - return time.Since(vr.creationTime) > time.Duration(30*time.Minute) + if vr.timeout == 0 { + return false + } + + return time.Since(vr.creationTime) > vr.timeout } func (vr *VerifierRequest) GetLastBlockNumber() uint64 { @@ -122,10 +132,11 @@ func (v *LegacyExecutorVerifier) StartAsyncVerification( counters map[string]int, blockNumbers []uint64, useRemoteExecutor bool, + requestTimeout time.Duration, ) { var promise *Promise[*VerifierBundle] - request := NewVerifierRequest(forkId, batchNumber, blockNumbers, stateRoot, counters) + request := NewVerifierRequestWithTimeout(forkId, batchNumber, blockNumbers, stateRoot, counters, requestTimeout) if useRemoteExecutor { promise = v.VerifyAsync(request, blockNumbers) } else { diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index cc2a76268e9..6b2c72f950e 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -349,7 +349,7 @@ func SpawnSequencingStage( } defer sdb.tx.Rollback() - cfg.legacyVerifier.StartAsyncVerification(batchState.forkId, batchState.batchNumber, block.Root(), batchCounters.CombineCollectorsNoChanges().UsedAsMap(), batchState.builtBlocks, batchState.hasExecutorForThisBatch) + cfg.legacyVerifier.StartAsyncVerification(batchState.forkId, batchState.batchNumber, block.Root(), batchCounters.CombineCollectorsNoChanges().UsedAsMap(), batchState.builtBlocks, batchState.hasExecutorForThisBatch, batchContext.cfg.zk.SequencerBatchVerificationTimeout) // check for new responses from the verifier needsUnwind, err := updateStreamAndCheckRollback(batchContext, batchState, streamWriter, u) From 
417f6038afb46b1bb434a191b663861dde91bf0d Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Wed, 7 Aug 2024 07:09:08 +0000 Subject: [PATCH 29/33] remove unused flag --- .github/workflows/ci_zkevm.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_zkevm.yml b/.github/workflows/ci_zkevm.yml index 555ea31ee5f..810e82579e9 100644 --- a/.github/workflows/ci_zkevm.yml +++ b/.github/workflows/ci_zkevm.yml @@ -94,7 +94,7 @@ jobs: - name: Remove unused flags working-directory: ./kurtosis-cdk run: | - sed -i '/zkevm.sequencer-batch-seal-time:/d' templates/cdk-erigon/config.yml + sed -i '/zkevm.sequencer-non-empty-batch-seal-time:/d' templates/cdk-erigon/config.yml - name: Configure Kurtosis CDK working-directory: ./kurtosis-cdk From 53af394d4414b1afe7a90c89dbce2725edf11ea8 Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Wed, 7 Aug 2024 07:56:19 +0000 Subject: [PATCH 30/33] update kurtosis params file --- .github/workflows/ci_zkevm.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci_zkevm.yml b/.github/workflows/ci_zkevm.yml index 810e82579e9..3b4dfcdcf32 100644 --- a/.github/workflows/ci_zkevm.yml +++ b/.github/workflows/ci_zkevm.yml @@ -99,13 +99,13 @@ jobs: - name: Configure Kurtosis CDK working-directory: ./kurtosis-cdk run: | - /usr/local/bin/yq -i '.args.data_availability_mode = "rollup"' cdk-erigon-sequencer-params.yml - /usr/local/bin/yq -i '.args.cdk_erigon_node_image = "cdk-erigon:local"' cdk-erigon-sequencer-params.yml - /usr/local/bin/yq -i '.args.zkevm_bridge_service_image = "hermeznetwork/zkevm-bridge-service:v0.5.0-RC8"' cdk-erigon-sequencer-params.yml + /usr/local/bin/yq -i '.args.data_availability_mode = "rollup"' params.yml + /usr/local/bin/yq -i '.args.cdk_erigon_node_image = "cdk-erigon:local"' params.yml + /usr/local/bin/yq -i '.args.zkevm_bridge_service_image = "hermeznetwork/zkevm-bridge-service:v0.5.0-RC8"' params.yml - name: Deploy Kurtosis CDK package working-directory: ./kurtosis-cdk - run: kurtosis run --enclave cdk-v1 --args-file cdk-erigon-sequencer-params.yml --image-download always . + run: kurtosis run --enclave cdk-v1 --args-file params.yml --image-download always . 
- name: Override gas limit for test transactions working-directory: ./kurtosis-cdk From 85dcc92b56cad17096cc216b170179e9a8925875 Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Wed, 7 Aug 2024 08:15:07 +0000 Subject: [PATCH 31/33] update kurtosis --- .github/workflows/ci_zkevm.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci_zkevm.yml b/.github/workflows/ci_zkevm.yml index 3b4dfcdcf32..015036965b0 100644 --- a/.github/workflows/ci_zkevm.yml +++ b/.github/workflows/ci_zkevm.yml @@ -72,7 +72,7 @@ jobs: uses: actions/checkout@v4 with: repository: 0xPolygon/kurtosis-cdk - ref: main + ref: v0.2.3 path: kurtosis-cdk - name: Install Kurtosis CDK tools @@ -99,13 +99,13 @@ jobs: - name: Configure Kurtosis CDK working-directory: ./kurtosis-cdk run: | - /usr/local/bin/yq -i '.args.data_availability_mode = "rollup"' params.yml - /usr/local/bin/yq -i '.args.cdk_erigon_node_image = "cdk-erigon:local"' params.yml - /usr/local/bin/yq -i '.args.zkevm_bridge_service_image = "hermeznetwork/zkevm-bridge-service:v0.5.0-RC8"' params.yml + /usr/local/bin/yq -i '.args.data_availability_mode = "rollup"' cdk-erigon-sequencer-params.yml + /usr/local/bin/yq -i '.args.cdk_erigon_node_image = "cdk-erigon:local"' cdk-erigon-sequencer-params.yml + /usr/local/bin/yq -i '.args.zkevm_bridge_service_image = "hermeznetwork/zkevm-bridge-service:v0.5.0-RC8"' cdk-erigon-sequencer-params.yml - name: Deploy Kurtosis CDK package working-directory: ./kurtosis-cdk - run: kurtosis run --enclave cdk-v1 --args-file params.yml --image-download always . + run: kurtosis run --enclave cdk-v1 --args-file cdk-erigon-sequencer-params.yml --image-download always . - name: Override gas limit for test transactions working-directory: ./kurtosis-cdk From 8bb914144490b8b614774e6c395b882631e6a5d7 Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Wed, 7 Aug 2024 08:30:20 +0000 Subject: [PATCH 32/33] update kurtosis tests --- .github/workflows/ci_zkevm.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci_zkevm.yml b/.github/workflows/ci_zkevm.yml index 015036965b0..ca346f713a6 100644 --- a/.github/workflows/ci_zkevm.yml +++ b/.github/workflows/ci_zkevm.yml @@ -72,7 +72,7 @@ jobs: uses: actions/checkout@v4 with: repository: 0xPolygon/kurtosis-cdk - ref: v0.2.3 + ref: 7643bbc4b6bfc44e499015ab229fba087bf79d4c path: kurtosis-cdk - name: Install Kurtosis CDK tools @@ -94,18 +94,18 @@ jobs: - name: Remove unused flags working-directory: ./kurtosis-cdk run: | - sed -i '/zkevm.sequencer-non-empty-batch-seal-time:/d' templates/cdk-erigon/config.yml + sed -i '/zkevm.sequencer-batch-seal-time:/d' templates/cdk-erigon/config.yml - name: Configure Kurtosis CDK working-directory: ./kurtosis-cdk run: | - /usr/local/bin/yq -i '.args.data_availability_mode = "rollup"' cdk-erigon-sequencer-params.yml - /usr/local/bin/yq -i '.args.cdk_erigon_node_image = "cdk-erigon:local"' cdk-erigon-sequencer-params.yml - /usr/local/bin/yq -i '.args.zkevm_bridge_service_image = "hermeznetwork/zkevm-bridge-service:v0.5.0-RC8"' cdk-erigon-sequencer-params.yml + /usr/local/bin/yq -i '.args.data_availability_mode = "rollup"' params.yml + /usr/local/bin/yq -i '.args.cdk_erigon_node_image = "cdk-erigon:local"' params.yml + /usr/local/bin/yq -i '.args.zkevm_bridge_service_image = "hermeznetwork/zkevm-bridge-service:v0.5.0-RC8"' params.yml - name: Deploy Kurtosis CDK package working-directory: ./kurtosis-cdk - run: kurtosis run --enclave cdk-v1 --args-file cdk-erigon-sequencer-params.yml 
--image-download always . + run: kurtosis run --enclave cdk-v1 --args-file params.yml --image-download always . - name: Override gas limit for test transactions working-directory: ./kurtosis-cdk From ac60be32f21bb9f024019b9252292b28372054ba Mon Sep 17 00:00:00 2001 From: Kamen Stoykov Date: Wed, 7 Aug 2024 12:26:58 +0000 Subject: [PATCH 33/33] make Counter & GER to be per-block stored --- zk/hermez_db/db.go | 39 ++++++++----------- zk/stages/stage_batches.go | 6 +-- zk/stages/stage_sequence_execute.go | 19 +-------- zk/stages/stage_sequence_execute_batch.go | 10 ++--- zk/stages/stage_sequence_execute_blocks.go | 31 ++++++++++++++- .../stage_sequence_execute_injected_batch.go | 9 ++++- zk/stages/stage_sequence_execute_unwind.go | 19 +++------ 7 files changed, 67 insertions(+), 66 deletions(-) diff --git a/zk/hermez_db/db.go b/zk/hermez_db/db.go index f568dfd3a57..1ce820654a2 100644 --- a/zk/hermez_db/db.go +++ b/zk/hermez_db/db.go @@ -1361,16 +1361,21 @@ func (db *HermezDbReader) GetWitness(batchNumber uint64) ([]byte, error) { return v, nil } -func (db *HermezDb) WriteBatchCounters(batchNumber uint64, counters map[string]int) error { +func (db *HermezDb) WriteBatchCounters(blockNumber uint64, counters map[string]int) error { countersJson, err := json.Marshal(counters) if err != nil { return err } - return db.tx.Put(BATCH_COUNTERS, Uint64ToBytes(batchNumber), countersJson) + return db.tx.Put(BATCH_COUNTERS, Uint64ToBytes(blockNumber), countersJson) } -func (db *HermezDbReader) GetBatchCounters(batchNumber uint64) (countersMap map[string]int, found bool, err error) { - v, err := db.tx.GetOne(BATCH_COUNTERS, Uint64ToBytes(batchNumber)) +func (db *HermezDbReader) GetLatestBatchCounters(batchNumber uint64) (countersMap map[string]int, found bool, err error) { + batchBlockNumbers, err := db.GetL2BlockNosByBatch(batchNumber) + if err != nil { + return nil, false, err + } + + v, err := db.tx.GetOne(BATCH_COUNTERS, Uint64ToBytes(batchBlockNumbers[len(batchBlockNumbers)-1])) if err != nil { return nil, false, err } @@ -1385,6 +1390,10 @@ func (db *HermezDbReader) GetBatchCounters(batchNumber uint64) (countersMap map[ return countersMap, found, nil } +func (db *HermezDb) DeleteBatchCounters(fromBlockNum, toBlockNum uint64) error { + return db.deleteFromBucketWithUintKeysRange(BATCH_COUNTERS, fromBlockNum, toBlockNum) +} + // WriteL1BatchData stores the data for a given L1 batch number // coinbase = 20 bytes // batchL2Data = remaining @@ -1416,9 +1425,8 @@ func (db *HermezDbReader) GetLastL1BatchData() (uint64, error) { return BytesToUint64(k), nil } -func (db *HermezDb) WriteLatestUsedGer(batchNo uint64, ger common.Hash) error { - batchBytes := Uint64ToBytes(batchNo) - return db.tx.Put(LATEST_USED_GER, batchBytes, ger.Bytes()) +func (db *HermezDb) WriteLatestUsedGer(blockNumber uint64, ger common.Hash) error { + return db.tx.Put(LATEST_USED_GER, Uint64ToBytes(blockNumber), ger.Bytes()) } func (db *HermezDbReader) GetLatestUsedGer() (uint64, common.Hash, error) { @@ -1439,21 +1447,8 @@ func (db *HermezDbReader) GetLatestUsedGer() (uint64, common.Hash, error) { return batchNo, ger, nil } -func (db *HermezDb) TruncateLatestUsedGers(fromBatch uint64) error { - latestBatch, _, err := db.GetLatestUsedGer() - if err != nil { - return err - } - - for i := fromBatch; i <= latestBatch; i++ { - err := db.tx.Delete(LATEST_USED_GER, Uint64ToBytes(i)) - if err != nil { - return err - } - - } - - return nil +func (db *HermezDb) DeleteLatestUsedGers(fromBlockNum, toBlockNum uint64) error { + return 
db.deleteFromBucketWithUintKeysRange(LATEST_USED_GER, fromBlockNum, toBlockNum) } func (db *HermezDb) WriteSmtDepth(l2BlockNo, depth uint64) error { diff --git a/zk/stages/stage_batches.go b/zk/stages/stage_batches.go index 153d0473a5d..ca74f4cf681 100644 --- a/zk/stages/stage_batches.go +++ b/zk/stages/stage_batches.go @@ -68,7 +68,7 @@ type HermezDb interface { WriteIntermediateTxStateRoot(l2BlockNumber uint64, txHash common.Hash, rpcRoot common.Hash) error WriteBlockL1InfoTreeIndex(blockNumber uint64, l1Index uint64) error WriteBlockL1InfoTreeIndexProgress(blockNumber uint64, l1Index uint64) error - WriteLatestUsedGer(batchNo uint64, ger common.Hash) error + WriteLatestUsedGer(blockNo uint64, ger common.Hash) error WriteLocalExitRootForBatchNo(batchNo uint64, localExitRoot common.Hash) error } @@ -621,7 +621,7 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c return fmt.Errorf("delete global exit roots error: %v", err) } - if err = hermezDb.TruncateLatestUsedGers(fromBatch); err != nil { + if err = hermezDb.DeleteLatestUsedGers(fromBlock, toBlock); err != nil { return fmt.Errorf("delete latest used gers error: %v", err) } @@ -875,7 +875,7 @@ func writeL2Block(eriDb ErigonDb, hermezDb HermezDb, l2Block *types.FullL2Block, // we always want the last written GER in this table as it's at the batch level, so it can and should // be overwritten if !l1InfoTreeIndexReused && didStoreGer { - if err := hermezDb.WriteLatestUsedGer(l2Block.BatchNumber, l2Block.GlobalExitRoot); err != nil { + if err := hermezDb.WriteLatestUsedGer(l2Block.L2BlockNumber, l2Block.GlobalExitRoot); err != nil { return fmt.Errorf("write latest used ger error: %w", err) } } diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index 6b2c72f950e..1bbf298a5ec 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -299,14 +299,7 @@ func SpawnSequencingStage( } } - if err = sdb.hermezDb.WriteBlockL1InfoTreeIndex(blockNumber, l1TreeUpdateIndex); err != nil { - return err - } - if err = sdb.hermezDb.WriteBlockL1InfoTreeIndexProgress(blockNumber, infoTreeIndexProgress); err != nil { - return err - } - - block, err = doFinishBlockAndUpdateState(batchContext, ibs, header, parentBlock, batchState, ger, l1BlockHash) + block, err = doFinishBlockAndUpdateState(batchContext, ibs, header, parentBlock, batchState, ger, l1BlockHash, l1TreeUpdateIndex, infoTreeIndexProgress, batchCounters) if err != nil { return err } @@ -330,16 +323,6 @@ func SpawnSequencingStage( log.Info(fmt.Sprintf("[%s] Finish block %d with %d transactions...", logPrefix, blockNumber, len(batchState.blockState.builtBlockElements.transactions))) } - err = sdb.hermezDb.WriteBatchCounters(batchState.batchNumber, batchCounters.CombineCollectorsNoChanges().UsedAsMap()) - if err != nil { - return err - } - - err = sdb.hermezDb.WriteIsBatchPartiallyProcessed(batchState.batchNumber) - if err != nil { - return err - } - // add a check to the verifier and also check for responses batchState.onBuiltBlock(blockNumber) diff --git a/zk/stages/stage_sequence_execute_batch.go b/zk/stages/stage_sequence_execute_batch.go index 8923a0318fc..5bc6bf3c7e1 100644 --- a/zk/stages/stage_sequence_execute_batch.go +++ b/zk/stages/stage_sequence_execute_batch.go @@ -23,7 +23,7 @@ func prepareBatchNumber(lastBatch uint64, isLastBatchPariallyProcessed bool) uin func prepareBatchCounters(batchContext *BatchContext, batchState *BatchState, isLastBatchPariallyProcessed bool) 
(*vm.BatchCounterCollector, error) { var intermediateUsedCounters *vm.Counters if isLastBatchPariallyProcessed { - intermediateCountersMap, found, err := batchContext.sdb.hermezDb.GetBatchCounters(batchState.batchNumber) + intermediateCountersMap, found, err := batchContext.sdb.hermezDb.GetLatestBatchCounters(batchState.batchNumber) if err != nil { return nil, err } @@ -63,7 +63,7 @@ func doCheckForBadBatch(batchContext *BatchContext, batchState *BatchState, this if err = batchContext.sdb.hermezDb.WriteInvalidBatch(batchState.batchNumber); err != nil { return false, err } - if err = batchContext.sdb.hermezDb.WriteBatchCounters(batchState.batchNumber, map[string]int{}); err != nil { + if err = batchContext.sdb.hermezDb.WriteBatchCounters(currentBlock.NumberU64(), map[string]int{}); err != nil { return false, err } if err = batchContext.sdb.hermezDb.DeleteIsBatchPartiallyProcessed(batchState.batchNumber); err != nil { @@ -140,10 +140,10 @@ func updateStreamAndCheckRollback( func runBatchLastSteps( batchContext *BatchContext, thisBatch uint64, - lastStartedBn uint64, + blockNumber uint64, batchCounters *vm.BatchCounterCollector, ) error { - l1InfoIndex, err := batchContext.sdb.hermezDb.GetBlockL1InfoTreeIndex(lastStartedBn) + l1InfoIndex, err := batchContext.sdb.hermezDb.GetBlockL1InfoTreeIndex(blockNumber) if err != nil { return err } @@ -155,7 +155,7 @@ func runBatchLastSteps( log.Info(fmt.Sprintf("[%s] counters consumed", batchContext.s.LogPrefix()), "batch", thisBatch, "counts", counters.UsedAsString()) - if err = batchContext.sdb.hermezDb.WriteBatchCounters(thisBatch, counters.UsedAsMap()); err != nil { + if err = batchContext.sdb.hermezDb.WriteBatchCounters(blockNumber, counters.UsedAsMap()); err != nil { return err } if err := batchContext.sdb.hermezDb.DeleteIsBatchPartiallyProcessed(thisBatch); err != nil { diff --git a/zk/stages/stage_sequence_execute_blocks.go b/zk/stages/stage_sequence_execute_blocks.go index aa7a5debfd1..495e9114846 100644 --- a/zk/stages/stage_sequence_execute_blocks.go +++ b/zk/stages/stage_sequence_execute_blocks.go @@ -12,6 +12,7 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/smt/pkg/blockinfo" "github.com/ledgerwatch/erigon/zk/erigon_db" @@ -55,7 +56,7 @@ func handleStateForNewBlockStarting( if l1BlockHash == (common.Hash{}) { // not in the contract so let's write it! 
ibs.WriteGerManagerL1BlockHash(l1info.GER, l1info.ParentHash) - if err := hermezDb.WriteLatestUsedGer(batchNumber, l1info.GER); err != nil { + if err := hermezDb.WriteLatestUsedGer(blockNumber, l1info.GER); err != nil { return err } } @@ -73,6 +74,9 @@ func doFinishBlockAndUpdateState( batchState *BatchState, ger common.Hash, l1BlockHash common.Hash, + l1TreeUpdateIndex uint64, + infoTreeIndexProgress uint64, + batchCounters *vm.BatchCounterCollector, ) (*types.Block, error) { thisBlockNumber := header.Number.Uint64() @@ -80,7 +84,7 @@ func doFinishBlockAndUpdateState( batchContext.cfg.accumulator.StartChange(thisBlockNumber, header.Hash(), nil, false) } - block, err := finaliseBlock(batchContext, ibs, header, parentBlock, batchState, ger, l1BlockHash) + block, err := finaliseBlock(batchContext, ibs, header, parentBlock, batchState, ger, l1BlockHash, l1TreeUpdateIndex, infoTreeIndexProgress, batchCounters) if err != nil { return nil, err } @@ -108,7 +112,18 @@ func finaliseBlock( batchState *BatchState, ger common.Hash, l1BlockHash common.Hash, + l1TreeUpdateIndex uint64, + infoTreeIndexProgress uint64, + batchCounters *vm.BatchCounterCollector, ) (*types.Block, error) { + thisBlockNumber := newHeader.Number.Uint64() + if err := batchContext.sdb.hermezDb.WriteBlockL1InfoTreeIndex(thisBlockNumber, l1TreeUpdateIndex); err != nil { + return nil, err + } + if err := batchContext.sdb.hermezDb.WriteBlockL1InfoTreeIndexProgress(thisBlockNumber, infoTreeIndexProgress); err != nil { + return nil, err + } + stateWriter := state.NewPlainStateWriter(batchContext.sdb.tx, batchContext.sdb.tx, newHeader.Number.Uint64()).SetAccumulator(batchContext.cfg.accumulator) chainReader := stagedsync.ChainReader{ Cfg: *batchContext.cfg.chainConfig, @@ -226,6 +241,18 @@ func finaliseBlock( return nil, fmt.Errorf("write block batch error: %v", err) } + // write batch counters + err = batchContext.sdb.hermezDb.WriteBatchCounters(newNum.Uint64(), batchCounters.CombineCollectorsNoChanges().UsedAsMap()) + if err != nil { + return nil, err + } + + // write partially processed + err = batchContext.sdb.hermezDb.WriteIsBatchPartiallyProcessed(batchState.batchNumber) + if err != nil { + return nil, err + } + // this is actually account + storage indices stages quitCh := batchContext.ctx.Done() from := newNum.Uint64() diff --git a/zk/stages/stage_sequence_execute_injected_batch.go b/zk/stages/stage_sequence_execute_injected_batch.go index 1a29a28c5b6..323b7a0f2f9 100644 --- a/zk/stages/stage_sequence_execute_injected_batch.go +++ b/zk/stages/stage_sequence_execute_injected_batch.go @@ -74,9 +74,14 @@ func processInjectedInitialBatch( executionResults: []*core.ExecutionResult{execResult}, effectiveGases: []uint8{effectiveGas}, } + batchCounters := vm.NewBatchCounterCollector(batchContext.sdb.smt.GetDepth(), uint16(batchState.forkId), batchContext.cfg.zk.VirtualCountersSmtReduction, batchContext.cfg.zk.ShouldCountersBeUnlimited(batchState.isL1Recovery()), nil) - _, err = doFinishBlockAndUpdateState(batchContext, ibs, header, parentBlock, batchState, injected.LastGlobalExitRoot, injected.L1ParentHash) - return err + if _, err = doFinishBlockAndUpdateState(batchContext, ibs, header, parentBlock, batchState, injected.LastGlobalExitRoot, injected.L1ParentHash, 0, 0, batchCounters); err != nil { + return err + } + + // deleting the partially processed flag + return batchContext.sdb.hermezDb.DeleteIsBatchPartiallyProcessed(injectedBatchBatchNumber) } func handleInjectedBatch( diff --git a/zk/stages/stage_sequence_execute_unwind.go 
b/zk/stages/stage_sequence_execute_unwind.go index 5fb3f7d2458..46c0a58846f 100644 --- a/zk/stages/stage_sequence_execute_unwind.go +++ b/zk/stages/stage_sequence_execute_unwind.go @@ -4,13 +4,10 @@ import ( "context" "fmt" - "github.com/gateway-fm/cdk-erigon-lib/common" "github.com/gateway-fm/cdk-erigon-lib/common/hexutility" "github.com/gateway-fm/cdk-erigon-lib/kv" "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/zk/hermez_db" ) @@ -109,7 +106,7 @@ func UnwindSequenceExecutionStageDbWrites(ctx context.Context, u *stagedsync.Unw } // only seq - if err = hermezDb.TruncateLatestUsedGers(fromBatch); err != nil { + if err = hermezDb.DeleteLatestUsedGers(u.UnwindPoint+1, s.BlockNumber); err != nil { return fmt.Errorf("truncate latest used gers error: %v", err) } // only seq @@ -137,6 +134,10 @@ func UnwindSequenceExecutionStageDbWrites(ctx context.Context, u *stagedsync.Unw return fmt.Errorf("truncate fork id error: %v", err) } // only seq + if err = hermezDb.DeleteBatchCounters(u.UnwindPoint+1, s.BlockNumber); err != nil { + return fmt.Errorf("truncate block batches error: %v", err) + } + // only seq if err = hermezDb.TruncateIsBatchPartiallyProcessed(fromBatch, toBatch); err != nil { return fmt.Errorf("truncate fork id error: %v", err) } @@ -148,13 +149,3 @@ func UnwindSequenceExecutionStageDbWrites(ctx context.Context, u *stagedsync.Unw return nil } - -func recoverCodeHashPlain(acc *accounts.Account, db kv.Tx, key []byte) { - var address common.Address - copy(address[:], key) - if acc.Incarnation > 0 && acc.IsEmptyCodeHash() { - if codeHash, err2 := db.GetOne(kv.PlainContractCode, dbutils.PlainGenerateStoragePrefix(address[:], acc.Incarnation)); err2 == nil { - copy(acc.CodeHash[:], codeHash) - } - } -}
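Note on the helper used above: both DeleteBatchCounters and DeleteLatestUsedGers introduced in the final patch delegate to a deleteFromBucketWithUintKeysRange helper whose body is not part of these hunks. Because batch counters and the latest-used GER are now keyed by block number rather than batch number, an unwind can clear them with a plain key-range delete over the unwound block range. A minimal sketch of what such a helper could look like in zk/hermez_db/db.go, assuming keys are big-endian uint64 values produced by Uint64ToBytes; this is an illustration of the pattern, not the commit's actual implementation:

func (db *HermezDb) deleteFromBucketWithUintKeysRange(bucket string, fromBlockNum, toBlockNum uint64) error {
	// Hypothetical sketch: walk the inclusive block range and drop each entry keyed by its block number.
	for blockNum := fromBlockNum; blockNum <= toBlockNum; blockNum++ {
		if err := db.tx.Delete(bucket, Uint64ToBytes(blockNum)); err != nil {
			return err
		}
	}
	return nil
}

With a helper along these lines, the unwind path shown above (e.g. hermezDb.DeleteBatchCounters(u.UnwindPoint+1, s.BlockNumber)) reduces to a single range delete per table instead of resolving batches back to blocks first.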