diff --git a/.github/workflows/ci_zkevm.yml b/.github/workflows/ci_zkevm.yml index 555ea31ee5f..ca346f713a6 100644 --- a/.github/workflows/ci_zkevm.yml +++ b/.github/workflows/ci_zkevm.yml @@ -72,7 +72,7 @@ jobs: uses: actions/checkout@v4 with: repository: 0xPolygon/kurtosis-cdk - ref: main + ref: 7643bbc4b6bfc44e499015ab229fba087bf79d4c path: kurtosis-cdk - name: Install Kurtosis CDK tools @@ -99,13 +99,13 @@ jobs: - name: Configure Kurtosis CDK working-directory: ./kurtosis-cdk run: | - /usr/local/bin/yq -i '.args.data_availability_mode = "rollup"' cdk-erigon-sequencer-params.yml - /usr/local/bin/yq -i '.args.cdk_erigon_node_image = "cdk-erigon:local"' cdk-erigon-sequencer-params.yml - /usr/local/bin/yq -i '.args.zkevm_bridge_service_image = "hermeznetwork/zkevm-bridge-service:v0.5.0-RC8"' cdk-erigon-sequencer-params.yml + /usr/local/bin/yq -i '.args.data_availability_mode = "rollup"' params.yml + /usr/local/bin/yq -i '.args.cdk_erigon_node_image = "cdk-erigon:local"' params.yml + /usr/local/bin/yq -i '.args.zkevm_bridge_service_image = "hermeznetwork/zkevm-bridge-service:v0.5.0-RC8"' params.yml - name: Deploy Kurtosis CDK package working-directory: ./kurtosis-cdk - run: kurtosis run --enclave cdk-v1 --args-file cdk-erigon-sequencer-params.yml --image-download always . + run: kurtosis run --enclave cdk-v1 --args-file params.yml --image-download always . - name: Override gas limit for test transactions working-directory: ./kurtosis-cdk diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 144a8c8de10..aa620255c38 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -471,10 +471,10 @@ var ( Usage: "Batch seal time. Defaults to 12s", Value: "12s", } - SequencerNonEmptyBatchSealTime = cli.StringFlag{ - Name: "zkevm.sequencer-non-empty-batch-seal-time", - Usage: "Batch seal time. Defaults to 3s", - Value: "3s", + SequencerBatchVerificationTimeout = cli.StringFlag{ + Name: "zkevm.sequencer-batch-verification-timeout", + Usage: "The maximum time that a batch verification may take, including retries. This can be interpreted as the maximum time the sequencer can run without an executor. Setting it to 0s means an infinite timeout. 
Defaults to 30min", + Value: "30m", } SequencerHaltOnBatchNumber = cli.Uint64Flag{ Name: "zkevm.sequencer-halt-on-batch-number", diff --git a/core/blockchain_zkevm.go b/core/blockchain_zkevm.go index 1208cb6782a..26ac082e7be 100644 --- a/core/blockchain_zkevm.go +++ b/core/blockchain_zkevm.go @@ -296,37 +296,6 @@ func PrepareBlockTxExecution( return &blockContextImpl, excessDataGas, &blockGer, &blockL1BlockHash, nil } -func FinalizeBlockExecutionWithHistoryWrite( - engine consensus.Engine, stateReader state.StateReader, - header *types.Header, txs types.Transactions, uncles []*types.Header, - stateWriter state.WriterWithChangeSets, cc *chain.Config, - ibs *state.IntraBlockState, receipts types.Receipts, - withdrawals []*types.Withdrawal, headerReader consensus.ChainHeaderReader, - isMining bool, excessDataGas *big.Int, -) (newBlock *types.Block, newTxs types.Transactions, newReceipt types.Receipts, err error) { - newBlock, newTxs, newReceipt, err = FinalizeBlockExecution( - engine, - stateReader, - header, - txs, - uncles, - stateWriter, - cc, - ibs, - receipts, - withdrawals, - headerReader, - isMining, - excessDataGas, - ) - - if err := stateWriter.WriteHistory(); err != nil { - return nil, nil, nil, fmt.Errorf("writing history for block %d failed: %w", header.Number.Uint64(), err) - } - - return newBlock, newTxs, newReceipt, nil -} - func CreateReceiptForBlockInfoTree(receipt *types.Receipt, chainConfig *chain.Config, blockNum uint64, execResult *ExecutionResult) *types.Receipt { // [hack]TODO: remove this after bug is fixed localReceipt := receipt.Clone() diff --git a/core/vm/zk_batch_counters.go b/core/vm/zk_batch_counters.go index 49bd91261e8..3c868137208 100644 --- a/core/vm/zk_batch_counters.go +++ b/core/vm/zk_batch_counters.go @@ -234,7 +234,7 @@ func (bcc *BatchCounterCollector) CombineCollectors(verifyMerkleProof bool) (Cou // rlp level counters and execution level counters // this one returns the counters as they are so far, without adding processBatchLevelData, processChangeL2Block and decodeChangeL2BlockTx // used to save batch counter progress without adding the said counters twice -func (bcc *BatchCounterCollector) CombineCollectorsNoChanges(verifyMerkleProof bool) Counters { +func (bcc *BatchCounterCollector) CombineCollectorsNoChanges() Counters { // combine all the counters we have so far // if we have external coutners use them, otherwise create new diff --git a/eth/backend.go b/eth/backend.go index 1a79ad0900b..cc7ca08233a 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -635,7 +635,19 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { }() + tx, err := backend.chainDB.BeginRw(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + if !config.DeprecatedTxPool.Disable { + // we need to start the pool before stage loop itself + // the pool holds the info about how execution stage should work - as regular or as limbo recovery + if err := backend.txPool2.StartIfNotStarted(ctx, backend.txPool2DB, tx); err != nil { + return nil, err + } + backend.txPool2Fetch.ConnectCore() backend.txPool2Fetch.ConnectSentries() var newTxsBroadcaster *txpool2.NewSlotsStreams @@ -696,12 +708,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { backend.ethBackendRPC, backend.miningRPC, backend.stateChangesClient = ethBackendRPC, miningRPC, stateDiffClient - tx, err := backend.chainDB.BeginRw(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - // create buckets if err := createBuckets(tx); err != nil { return 
nil, err @@ -833,7 +839,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { backend.engine, ) - var legacyExecutors []legacy_executor_verifier.ILegacyExecutor + var legacyExecutors []*legacy_executor_verifier.Executor = make([]*legacy_executor_verifier.Executor, 0, len(cfg.ExecutorUrls)) if len(cfg.ExecutorUrls) > 0 && cfg.ExecutorUrls[0] != "" { levCfg := legacy_executor_verifier.Config{ GrpcUrls: cfg.ExecutorUrls, @@ -853,7 +859,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { backend.chainConfig, backend.chainDB, witnessGenerator, - backend.l1Syncer, backend.dataStream, ) @@ -866,12 +871,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { // we switch context from being an RPC node to a sequencer backend.txPool2.ForceUpdateLatestBlock(executionProgress) - // we need to start the pool before stage loop itself - // the pool holds the info about how execution stage should work - as regular or as limbo recovery - if err := backend.txPool2.StartIfNotStarted(ctx, backend.txPool2DB, tx); err != nil { - return nil, err - } - l1BlockSyncer := syncer.NewL1Syncer( ctx, ethermanClients, @@ -967,10 +966,6 @@ func createBuckets(tx kv.RwTx) error { return err } - if err := txpool.CreateTxPoolBuckets(tx); err != nil { - return err - } - return nil } diff --git a/eth/ethconfig/config_zkevm.go b/eth/ethconfig/config_zkevm.go index 5efc0421224..f960bece63d 100644 --- a/eth/ethconfig/config_zkevm.go +++ b/eth/ethconfig/config_zkevm.go @@ -31,7 +31,7 @@ type Zk struct { DatastreamVersion int SequencerBlockSealTime time.Duration SequencerBatchSealTime time.Duration - SequencerNonEmptyBatchSealTime time.Duration + SequencerBatchVerificationTimeout time.Duration SequencerHaltOnBatchNumber uint64 ExecutorUrls []string ExecutorStrictMode bool diff --git a/eth/stagedsync/stage_indexes_zkevm.go b/eth/stagedsync/stage_indexes_zkevm.go new file mode 100644 index 00000000000..716153d949d --- /dev/null +++ b/eth/stagedsync/stage_indexes_zkevm.go @@ -0,0 +1,9 @@ +package stagedsync + +import ( + "github.com/gateway-fm/cdk-erigon-lib/kv" +) + +func PromoteHistory(logPrefix string, tx kv.RwTx, changesetBucket string, start, stop uint64, cfg HistoryCfg, quit <-chan struct{}) error { + return promoteHistory(logPrefix, tx, changesetBucket, start, stop, cfg, quit) +} diff --git a/eth/stagedsync/stages/stages_zk.go b/eth/stagedsync/stages/stages_zk.go index c5cb45b2d98..4ac4583fa82 100644 --- a/eth/stagedsync/stages/stages_zk.go +++ b/eth/stagedsync/stages/stages_zk.go @@ -28,7 +28,7 @@ var ( ForkId SyncStage = "ForkId" L1SequencerSync SyncStage = "L1SequencerSync" L1InfoTree SyncStage = "L1InfoTree" - HighestUsedL1InfoIndex SyncStage = "HighestUsedL1InfoTree" - SequenceExecutorVerify SyncStage = "SequenceExecutorVerify" - L1BlockSync SyncStage = "L1BlockSync" + // HighestUsedL1InfoIndex SyncStage = "HighestUsedL1InfoTree" + SequenceExecutorVerify SyncStage = "SequenceExecutorVerify" + L1BlockSync SyncStage = "L1BlockSync" ) diff --git a/smt/pkg/db/mdbx.go b/smt/pkg/db/mdbx.go index 6060a09274c..a034cdf5aa1 100644 --- a/smt/pkg/db/mdbx.go +++ b/smt/pkg/db/mdbx.go @@ -30,6 +30,8 @@ const TableAccountValues = "HermezSmtAccountValues" const TableMetadata = "HermezSmtMetadata" const TableHashKey = "HermezSmtHashKey" +var HermezSmtTables = []string{TableSmt, TableStats, TableAccountValues, TableMetadata, TableHashKey} + type EriDb struct { kvTx kv.RwTx tx SmtDbTx diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index 
d9b77ca91d0..706c22a92c4 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -191,7 +191,7 @@ var DefaultFlags = []cli.Flag{ &utils.SmtRegenerateInMemory, &utils.SequencerBlockSealTime, &utils.SequencerBatchSealTime, - &utils.SequencerNonEmptyBatchSealTime, + &utils.SequencerBatchVerificationTimeout, &utils.SequencerHaltOnBatchNumber, &utils.ExecutorUrls, &utils.ExecutorStrictMode, diff --git a/turbo/cli/flags_zkevm.go b/turbo/cli/flags_zkevm.go index caa6a8d88b9..d2ac35ed3bb 100644 --- a/turbo/cli/flags_zkevm.go +++ b/turbo/cli/flags_zkevm.go @@ -76,10 +76,10 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { panic(fmt.Sprintf("could not parse sequencer batch seal time timeout value %s", sequencerBatchSealTimeVal)) } - sequencerNonEmptyBatchSealTimeVal := ctx.String(utils.SequencerNonEmptyBatchSealTime.Name) - sequencerNonEmptyBatchSealTime, err := time.ParseDuration(sequencerNonEmptyBatchSealTimeVal) + sequencerBatchVerificationTimeoutVal := ctx.String(utils.SequencerBatchVerificationTimeout.Name) + sequencerBatchVerificationTimeout, err := time.ParseDuration(sequencerBatchVerificationTimeoutVal) if err != nil { - panic(fmt.Sprintf("could not parse sequencer batch seal time timeout value %s", sequencerNonEmptyBatchSealTimeVal)) + panic(fmt.Sprintf("could not parse sequencer batch seal time timeout value %s", sequencerBatchSealTimeVal)) } effectiveGasPriceForEthTransferVal := ctx.Float64(utils.EffectiveGasPriceForEthTransfer.Name) @@ -128,7 +128,7 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { SmtRegenerateInMemory: ctx.Bool(utils.SmtRegenerateInMemory.Name), SequencerBlockSealTime: sequencerBlockSealTime, SequencerBatchSealTime: sequencerBatchSealTime, - SequencerNonEmptyBatchSealTime: sequencerNonEmptyBatchSealTime, + SequencerBatchVerificationTimeout: sequencerBatchVerificationTimeout, SequencerHaltOnBatchNumber: ctx.Uint64(utils.SequencerHaltOnBatchNumber.Name), ExecutorUrls: strings.Split(strings.ReplaceAll(ctx.String(utils.ExecutorUrls.Name), " ", ""), ","), ExecutorStrictMode: ctx.Bool(utils.ExecutorStrictMode.Name), diff --git a/turbo/stages/zk_stages.go b/turbo/stages/zk_stages.go index 8e712cd244a..f1a77cb86d3 100644 --- a/turbo/stages/zk_stages.go +++ b/turbo/stages/zk_stages.go @@ -117,7 +117,6 @@ func NewSequencerZkStages(ctx context.Context, zkStages.StageL1InfoTreeCfg(db, cfg.Zk, l1InfoTreeSyncer), zkStages.StageSequencerL1BlockSyncCfg(db, cfg.Zk, l1BlockSyncer), zkStages.StageDataStreamCatchupCfg(datastreamServer, db, cfg.Genesis.Config.ChainID.Uint64(), cfg.DatastreamVersion, cfg.HasExecutors()), - zkStages.StageSequencerInterhashesCfg(db, notifications.Accumulator), zkStages.StageSequenceBlocksCfg( db, cfg.Prune, @@ -139,11 +138,11 @@ func NewSequencerZkStages(ctx context.Context, cfg.Zk, txPool, txPoolDb, + verifier, uint16(cfg.YieldSize), ), stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3, agg), zkStages.StageZkInterHashesCfg(db, true, true, false, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg, cfg.Zk), - zkStages.StageSequencerExecutorVerifyCfg(db, verifier, txPool, controlServer.ChainConfig, cfg.Zk), stagedsync.StageHistoryCfg(db, cfg.Prune, dirs.Tmp), stagedsync.StageLogIndexCfg(db, cfg.Prune, dirs.Tmp), stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, dirs.Tmp), diff --git a/zk/datastream/server/data_stream_server.go b/zk/datastream/server/data_stream_server.go index 47a357c6fe8..0d3fd1129d5 100644 --- a/zk/datastream/server/data_stream_server.go +++ 
b/zk/datastream/server/data_stream_server.go @@ -43,6 +43,7 @@ type DataStreamServer struct { stream *datastreamer.StreamServer chainId uint64 highestBlockWritten, + highestClosedBatchWritten, highestBatchWritten *uint64 } @@ -118,7 +119,33 @@ func NewDataStreamEntries(size int) *DataStreamEntries { } } -func (srv *DataStreamServer) CommitEntriesToStreamProto(entries []DataStreamEntryProto, latestBlockNum, latestBatchNum *uint64) error { +func (srv *DataStreamServer) commitAtomicOp(latestBlockNum, latestBatchNum, latestClosedBatch *uint64) error { + if err := srv.stream.CommitAtomicOp(); err != nil { + return err + } + + // copy the values in case they are changed outside the function + // pointers are used so we can easily check whether the values should be read from the DS or not, + // since 0 is a valid number and cannot be used as a "not set" marker + if latestBlockNum != nil { + a := *latestBlockNum + srv.highestBlockWritten = &a + } + + if latestBatchNum != nil { + a := *latestBatchNum + srv.highestBatchWritten = &a + } + + if latestClosedBatch != nil { + a := *latestClosedBatch + srv.highestClosedBatchWritten = &a + } + + return nil +} + +func (srv *DataStreamServer) commitEntriesToStreamProto(entries []DataStreamEntryProto) error { for _, entry := range entries { entryType := entry.Type() @@ -137,16 +164,6 @@ func (srv *DataStreamServer) CommitEntriesToStreamProto(entries []DataStreamEntr } } } - - if latestBlockNum != nil { - a := *latestBlockNum - srv.highestBlockWritten = &a - } - - if latestBatchNum != nil { - a := *latestBatchNum - srv.highestBatchWritten = &a - } return nil } @@ -454,24 +471,12 @@ func (srv *DataStreamServer) GetHighestBatchNumber() (uint64, error) { return *srv.highestBatchWritten, nil } - header := srv.stream.GetHeader() - - if header.TotalEntries == 0 { - return 0, nil + entry, found, err := srv.getLastEntryOfType(datastreamer.EntryType(types.EntryTypeBatchStart)) + if err != nil { + return 0, err } - - entryNum := header.TotalEntries - 1 - var err error - var entry datastreamer.FileEntry - for { - entry, err = srv.stream.GetEntry(entryNum) - if err != nil { - return 0, err - } - if entry.Type == datastreamer.EntryType(1) { - break - } - entryNum -= 1 + if !found { + return 0, nil } batch, err := types.UnmarshalBatchStart(entry.Data) @@ -484,6 +489,28 @@ func (srv *DataStreamServer) GetHighestBatchNumber() (uint64, error) { return batch.Number, nil } +func (srv *DataStreamServer) GetHighestClosedBatch() (uint64, error) { + if srv.highestClosedBatchWritten != nil { + return *srv.highestClosedBatchWritten, nil + } + entry, found, err := srv.getLastEntryOfType(datastreamer.EntryType(types.EntryTypeBatchEnd)) + if err != nil { + return 0, err + } + if !found { + return 0, nil + } + + batch, err := types.UnmarshalBatchEnd(entry.Data) + if err != nil { + return 0, err + } + + srv.highestClosedBatchWritten = &batch.Number + + return batch.Number, nil +} + // must be done on offline server // finds the position of the block bookmark entry and deletes from it onward // blockNumber 10 would return the stream to before block 10 bookmark @@ -523,3 +550,21 @@ func (srv *DataStreamServer) UnwindToBatchStart(batchNumber uint64) error { return srv.stream.TruncateFile(entryNum) } + +func (srv *DataStreamServer) getLastEntryOfType(entryType datastreamer.EntryType) (datastreamer.FileEntry, bool, error) { + header := srv.stream.GetHeader() + emtryEntry := datastreamer.FileEntry{} + + // loop will become infinite if using unsigned type + for entryNum := int64(header.TotalEntries - 1); entryNum >= 0; entryNum-- { 
+ entry, err := srv.stream.GetEntry(uint64(entryNum)) + if err != nil { + return emtryEntry, false, err + } + if entry.Type == entryType { + return entry, true, nil + } + } + + return emtryEntry, false, nil +} diff --git a/zk/datastream/server/datastream_populate.go b/zk/datastream/server/datastream_populate.go index 0a4b8ddbc68..a7ca647754f 100644 --- a/zk/datastream/server/datastream_populate.go +++ b/zk/datastream/server/datastream_populate.go @@ -75,11 +75,11 @@ func (srv *DataStreamServer) WriteWholeBatchToStream( return err } - if err = srv.CommitEntriesToStreamProto(entries.Entries(), &toBlockNum, &batchNum); err != nil { + if err = srv.commitEntriesToStreamProto(entries.Entries()); err != nil { return err } - if err = srv.stream.CommitAtomicOp(); err != nil { + if err = srv.commitAtomicOp(&toBlockNum, &batchNum, &batchNum); err != nil { return err } @@ -188,18 +188,18 @@ LOOP: // basically commit once 80% of the entries array is filled if len(entries) >= commitEntryCountLimit { log.Info(fmt.Sprintf("[%s] Commit count reached, committing entries", logPrefix), "block", currentBlockNumber) - if err = srv.CommitEntriesToStreamProto(entries, ¤tBlockNumber, &batchNum); err != nil { + if err = srv.commitEntriesToStreamProto(entries); err != nil { return err } entries = make([]DataStreamEntryProto, 0, insertEntryCount) } } - if err = srv.CommitEntriesToStreamProto(entries, &to, &latestbatchNum); err != nil { + if err = srv.commitEntriesToStreamProto(entries); err != nil { return err } - if err = srv.stream.CommitAtomicOp(); err != nil { + if err = srv.commitAtomicOp(&to, &batchNum, &latestbatchNum); err != nil { return err } @@ -257,16 +257,16 @@ func (srv *DataStreamServer) WriteBlockWithBatchStartToStream( } if batchStartEntries != nil { - if err = srv.CommitEntriesToStreamProto(batchStartEntries.Entries(), &blockNum, &batchNum); err != nil { + if err = srv.commitEntriesToStreamProto(batchStartEntries.Entries()); err != nil { return err } } - if err = srv.CommitEntriesToStreamProto(blockEntries.Entries(), &blockNum, &batchNum); err != nil { + if err = srv.commitEntriesToStreamProto(blockEntries.Entries()); err != nil { return err } - if err = srv.stream.CommitAtomicOp(); err != nil { + if err = srv.commitAtomicOp(&blockNum, &batchNum, nil); err != nil { return err } @@ -317,11 +317,15 @@ func (srv *DataStreamServer) UnwindIfNecessary(logPrefix string, reader DbReader func (srv *DataStreamServer) WriteBatchEnd( reader DbReader, - batchNumber, - lastBatchNumber uint64, + batchNumber uint64, stateRoot *common.Hash, localExitRoot *common.Hash, ) (err error) { + lastBatchNumber, err := srv.GetHighestClosedBatch() + if err != nil { + return err + } + gers, err := reader.GetBatchGlobalExitRootsProto(lastBatchNumber, batchNumber) if err != nil { return err @@ -337,11 +341,12 @@ func (srv *DataStreamServer) WriteBatchEnd( return err } - if err = srv.CommitEntriesToStreamProto(batchEndEntries, nil, nil); err != nil { + if err = srv.commitEntriesToStreamProto(batchEndEntries); err != nil { return err } - if err = srv.stream.CommitAtomicOp(); err != nil { + // we write only batch end, so dont't update latest block and batch + if err = srv.commitAtomicOp(nil, nil, &batchNumber); err != nil { return err } @@ -381,12 +386,12 @@ func (srv *DataStreamServer) WriteGenesisToStream( } batchEnd := newBatchEndProto(ler, genesis.Root(), 0) - blockNum := uint64(0) - if err = srv.CommitEntriesToStreamProto([]DataStreamEntryProto{batchBookmark, batchStart, l2BlockBookmark, l2Block, batchEnd}, &blockNum, 
&batchNo); err != nil { + if err = srv.commitEntriesToStreamProto([]DataStreamEntryProto{batchBookmark, batchStart, l2BlockBookmark, l2Block, batchEnd}); err != nil { return err } - err = srv.stream.CommitAtomicOp() + // should be okay to write just zeroes here, but it is a single time in a node start, so no use to risk + err = srv.commitAtomicOp(nil, nil, nil) if err != nil { return err } diff --git a/zk/hermez_db/db.go b/zk/hermez_db/db.go index 2690affc4a0..1ce820654a2 100644 --- a/zk/hermez_db/db.go +++ b/zk/hermez_db/db.go @@ -15,39 +15,79 @@ import ( "github.com/ledgerwatch/log/v3" ) -const L1VERIFICATIONS = "hermez_l1Verifications" // l1blockno, batchno -> l1txhash -const L1SEQUENCES = "hermez_l1Sequences" // l1blockno, batchno -> l1txhash -const FORKIDS = "hermez_forkIds" // batchNo -> forkId -const FORKID_BLOCK = "hermez_forkIdBlock" // forkId -> startBlock -const BLOCKBATCHES = "hermez_blockBatches" // l2blockno -> batchno -const GLOBAL_EXIT_ROOTS = "hermez_globalExitRootsSaved" // GER -> true -const BLOCK_GLOBAL_EXIT_ROOTS = "hermez_globalExitRoots" // l2blockno -> GER -const GLOBAL_EXIT_ROOTS_BATCHES = "hermez_globalExitRoots_batches" // batchkno -> GER -const TX_PRICE_PERCENTAGE = "hermez_txPricePercentage" // txHash -> txPricePercentage -const STATE_ROOTS = "hermez_stateRoots" // l2blockno -> stateRoot -const L1_INFO_TREE_UPDATES = "l1_info_tree_updates" // index -> L1InfoTreeUpdate -const L1_INFO_TREE_UPDATES_BY_GER = "l1_info_tree_updates_by_ger" // GER -> L1InfoTreeUpdate -const BLOCK_L1_INFO_TREE_INDEX = "block_l1_info_tree_index" // block number -> l1 info tree index -const L1_INJECTED_BATCHES = "l1_injected_batches" // index increasing by 1 -> injected batch for the start of the chain -const BLOCK_INFO_ROOTS = "block_info_roots" // block number -> block info root hash -const L1_BLOCK_HASHES = "l1_block_hashes" // l1 block hash -> true -const BLOCK_L1_BLOCK_HASHES = "block_l1_block_hashes" // block number -> l1 block hash -const L1_BLOCK_HASH_GER = "l1_block_hash_ger" // l1 block hash -> GER -const INTERMEDIATE_TX_STATEROOTS = "hermez_intermediate_tx_stateRoots" // l2blockno -> stateRoot -const BATCH_WITNESSES = "hermez_batch_witnesses" // batch number -> witness -const BATCH_COUNTERS = "hermez_batch_counters" // batch number -> counters -const L1_BATCH_DATA = "l1_batch_data" // batch number -> l1 batch data from transaction call data -const REUSED_L1_INFO_TREE_INDEX = "reused_l1_info_tree_index" // block number => const 1 -const LATEST_USED_GER = "latest_used_ger" // batch number -> GER latest used GER -const BATCH_BLOCKS = "batch_blocks" // batch number -> block numbers (concatenated together) -const SMT_DEPTHS = "smt_depths" // block number -> smt depth -const L1_INFO_LEAVES = "l1_info_leaves" // l1 info tree index -> l1 info tree leaf -const L1_INFO_ROOTS = "l1_info_roots" // root hash -> l1 info tree index -const INVALID_BATCHES = "invalid_batches" // batch number -> true -const BATCH_PARTIALLY_PROCESSED = "batch_partially_processed" // batch number -> true -const LOCAL_EXIT_ROOTS = "local_exit_roots" // l2 block number -> local exit root -const ROllUP_TYPES_FORKS = "rollup_types_forks" // rollup type id -> fork id -const FORK_HISTORY = "fork_history" // index -> fork id + last verified batch +const L1VERIFICATIONS = "hermez_l1Verifications" // l1blockno, batchno -> l1txhash +const L1SEQUENCES = "hermez_l1Sequences" // l1blockno, batchno -> l1txhash +const FORKIDS = "hermez_forkIds" // batchNo -> forkId +const FORKID_BLOCK = "hermez_forkIdBlock" // forkId -> 
startBlock +const BLOCKBATCHES = "hermez_blockBatches" // l2blockno -> batchno +const GLOBAL_EXIT_ROOTS = "hermez_globalExitRootsSaved" // GER -> true +const BLOCK_GLOBAL_EXIT_ROOTS = "hermez_globalExitRoots" // l2blockno -> GER +const GLOBAL_EXIT_ROOTS_BATCHES = "hermez_globalExitRoots_batches" // batchkno -> GER +const TX_PRICE_PERCENTAGE = "hermez_txPricePercentage" // txHash -> txPricePercentage +const STATE_ROOTS = "hermez_stateRoots" // l2blockno -> stateRoot +const L1_INFO_TREE_UPDATES = "l1_info_tree_updates" // index -> L1InfoTreeUpdate +const L1_INFO_TREE_UPDATES_BY_GER = "l1_info_tree_updates_by_ger" // GER -> L1InfoTreeUpdate +const BLOCK_L1_INFO_TREE_INDEX = "block_l1_info_tree_index" // block number -> l1 info tree index +const BLOCK_L1_INFO_TREE_INDEX_PROGRESS = "block_l1_info_tree_progress" // block number -> l1 info tree progress +const L1_INJECTED_BATCHES = "l1_injected_batches" // index increasing by 1 -> injected batch for the start of the chain +const BLOCK_INFO_ROOTS = "block_info_roots" // block number -> block info root hash +const L1_BLOCK_HASHES = "l1_block_hashes" // l1 block hash -> true +const BLOCK_L1_BLOCK_HASHES = "block_l1_block_hashes" // block number -> l1 block hash +const L1_BLOCK_HASH_GER = "l1_block_hash_ger" // l1 block hash -> GER +const INTERMEDIATE_TX_STATEROOTS = "hermez_intermediate_tx_stateRoots" // l2blockno -> stateRoot +const BATCH_WITNESSES = "hermez_batch_witnesses" // batch number -> witness +const BATCH_COUNTERS = "hermez_batch_counters" // batch number -> counters +const L1_BATCH_DATA = "l1_batch_data" // batch number -> l1 batch data from transaction call data +const REUSED_L1_INFO_TREE_INDEX = "reused_l1_info_tree_index" // block number => const 1 +const LATEST_USED_GER = "latest_used_ger" // batch number -> GER latest used GER +const BATCH_BLOCKS = "batch_blocks" // batch number -> block numbers (concatenated together) +const SMT_DEPTHS = "smt_depths" // block number -> smt depth +const L1_INFO_LEAVES = "l1_info_leaves" // l1 info tree index -> l1 info tree leaf +const L1_INFO_ROOTS = "l1_info_roots" // root hash -> l1 info tree index +const INVALID_BATCHES = "invalid_batches" // batch number -> true +const BATCH_PARTIALLY_PROCESSED = "batch_partially_processed" // batch number -> true +const LOCAL_EXIT_ROOTS = "local_exit_roots" // l2 block number -> local exit root +const ROllUP_TYPES_FORKS = "rollup_types_forks" // rollup type id -> fork id +const FORK_HISTORY = "fork_history" // index -> fork id + last verified batch +const JUST_UNWOUND = "just_unwound" // batch number -> true + +var HermezDbTables = []string{ + L1VERIFICATIONS, + L1SEQUENCES, + FORKIDS, + FORKID_BLOCK, + BLOCKBATCHES, + GLOBAL_EXIT_ROOTS, + BLOCK_GLOBAL_EXIT_ROOTS, + GLOBAL_EXIT_ROOTS_BATCHES, + TX_PRICE_PERCENTAGE, + STATE_ROOTS, + L1_INFO_TREE_UPDATES, + L1_INFO_TREE_UPDATES_BY_GER, + BLOCK_L1_INFO_TREE_INDEX, + BLOCK_L1_INFO_TREE_INDEX_PROGRESS, + L1_INJECTED_BATCHES, + BLOCK_INFO_ROOTS, + L1_BLOCK_HASHES, + BLOCK_L1_BLOCK_HASHES, + L1_BLOCK_HASH_GER, + INTERMEDIATE_TX_STATEROOTS, + BATCH_WITNESSES, + BATCH_COUNTERS, + L1_BATCH_DATA, + REUSED_L1_INFO_TREE_INDEX, + LATEST_USED_GER, + BATCH_BLOCKS, + SMT_DEPTHS, + L1_INFO_LEAVES, + L1_INFO_ROOTS, + INVALID_BATCHES, + BATCH_PARTIALLY_PROCESSED, + LOCAL_EXIT_ROOTS, + ROllUP_TYPES_FORKS, + FORK_HISTORY, + JUST_UNWOUND, +} type HermezDb struct { tx kv.RwTx @@ -72,42 +112,7 @@ func NewHermezDb(tx kv.RwTx) *HermezDb { } func CreateHermezBuckets(tx kv.RwTx) error { - tables := []string{ - L1VERIFICATIONS, - 
L1SEQUENCES, - FORKIDS, - FORKID_BLOCK, - BLOCKBATCHES, - GLOBAL_EXIT_ROOTS, - BLOCK_GLOBAL_EXIT_ROOTS, - GLOBAL_EXIT_ROOTS_BATCHES, - TX_PRICE_PERCENTAGE, - STATE_ROOTS, - L1_INFO_TREE_UPDATES, - L1_INFO_TREE_UPDATES_BY_GER, - BLOCK_L1_INFO_TREE_INDEX, - L1_INJECTED_BATCHES, - BLOCK_INFO_ROOTS, - L1_BLOCK_HASHES, - BLOCK_L1_BLOCK_HASHES, - L1_BLOCK_HASH_GER, - INTERMEDIATE_TX_STATEROOTS, - BATCH_WITNESSES, - BATCH_COUNTERS, - L1_BATCH_DATA, - REUSED_L1_INFO_TREE_INDEX, - LATEST_USED_GER, - BATCH_BLOCKS, - SMT_DEPTHS, - L1_INFO_LEAVES, - L1_INFO_ROOTS, - INVALID_BATCHES, - BATCH_PARTIALLY_PROCESSED, - LOCAL_EXIT_ROOTS, - ROllUP_TYPES_FORKS, - FORK_HISTORY, - } - for _, t := range tables { + for _, t := range HermezDbTables { if err := tx.CreateBucket(t); err != nil { return err } @@ -169,6 +174,14 @@ func (db *HermezDbReader) GetL2BlockNosByBatch(batchNo uint64) ([]uint64, error) return blocks, nil } +func concatenateBlockNumbers(blocks []uint64) []byte { + v := make([]byte, len(blocks)*8) + for i, block := range blocks { + copy(v[i*8:(i+1)*8], Uint64ToBytes(block)) + } + return v +} + func parseConcatenatedBlockNumbers(v []byte) []uint64 { count := len(v) / 8 blocks := make([]uint64, count) @@ -763,18 +776,6 @@ func (db *HermezDbReader) GetBlockGlobalExitRoot(l2BlockNo uint64) (common.Hash, return common.BytesToHash(bytes), nil } -func (db *HermezDb) TruncateBlockGlobalExitRoot(fromL2BlockNum, toL2BlockNum uint64) error { - for i := fromL2BlockNum; i <= toL2BlockNum; i++ { - err := db.tx.Delete(BLOCK_GLOBAL_EXIT_ROOTS, Uint64ToBytes(i)) - if err != nil { - return err - } - - } - - return nil -} - // from and to are inclusive func (db *HermezDbReader) GetBlockGlobalExitRoots(fromBlockNo, toBlockNo uint64) ([]common.Hash, error) { c, err := db.tx.Cursor(BLOCK_GLOBAL_EXIT_ROOTS) @@ -814,18 +815,6 @@ func (db *HermezDbReader) GetBlockL1BlockHash(l2BlockNo uint64) (common.Hash, er return common.BytesToHash(bytes), nil } -func (db *HermezDb) TruncateBlockL1BlockHash(fromL2BlockNum, toL2BlockNum uint64) error { - for i := fromL2BlockNum; i <= toL2BlockNum; i++ { - err := db.tx.Delete(BLOCK_L1_BLOCK_HASHES, Uint64ToBytes(i)) - if err != nil { - return err - } - - } - - return nil -} - // from and to are inclusive func (db *HermezDbReader) GetBlockL1BlockHashes(fromBlockNo, toBlockNo uint64) ([]common.Hash, error) { c, err := db.tx.Cursor(BLOCK_L1_BLOCK_HASHES) @@ -980,28 +969,42 @@ func (db *HermezDb) DeleteBlockL1InfoTreeIndexes(fromBlockNum, toBlockNum uint64 // from and to are inclusive func (db *HermezDb) DeleteBlockBatches(fromBlockNum, toBlockNum uint64) error { // first, gather batch numbers related to the blocks we're about to delete - batchNos := make([]uint64, 0) - c, err := db.tx.Cursor(BLOCKBATCHES) - if err != nil { - return err + batchNumbersMap := map[uint64]struct{}{} + + // find all the batches involved + for i := fromBlockNum; i <= toBlockNum; i++ { + batch, err := db.GetBatchNoByL2Block(i) + if err != nil { + return err + } + batchNumbersMap[batch] = struct{}{} } - defer c.Close() - var k, v []byte - for k, v, err = c.First(); k != nil; k, v, err = c.Next() { + // now for each batch go and get the block numbers and remove them from the batch to block records + for batchNumber := range batchNumbersMap { + data, err := db.tx.GetOne(BATCH_BLOCKS, Uint64ToBytes(batchNumber)) if err != nil { - break + return err } - blockNum := BytesToUint64(k) - if blockNum >= fromBlockNum && blockNum <= toBlockNum { - batchNo := BytesToUint64(v) - batchNos = append(batchNos, batchNo) + blockNos := 
parseConcatenatedBlockNumbers(data) + + // make a new list excluding the blocks in our range + newBlockNos := make([]uint64, 0, len(blockNos)) + for _, blockNo := range blockNos { + if blockNo < fromBlockNum || blockNo > toBlockNum { + newBlockNos = append(newBlockNos, blockNo) + } } - } - // now delete the batch -> block records - for _, batchNo := range batchNos { - err := db.tx.Delete(BATCH_BLOCKS, Uint64ToBytes(batchNo)) + // concatenate the block numbers back again + newData := concatenateBlockNumbers(newBlockNos) + + // now delete/store it back + if len(newData) == 0 { + err = db.tx.Delete(BATCH_BLOCKS, Uint64ToBytes(batchNumber)) + } else { + err = db.tx.Put(BATCH_BLOCKS, Uint64ToBytes(batchNumber), newData) + } if err != nil { return err } @@ -1075,18 +1078,6 @@ func (db *HermezDbReader) GetForkIdBlock(forkId uint64) (uint64, bool, error) { return blockNum, found, err } -func (db *HermezDb) TruncateForkId(fromBatchNum, toBatchNum uint64) error { - for i := fromBatchNum; i <= toBatchNum; i++ { - err := db.tx.Delete(FORKIDS, Uint64ToBytes(i)) - if err != nil { - return err - } - - } - - return nil -} - func (db *HermezDb) DeleteForkIdBlock(fromBlockNo, toBlockNo uint64) error { return db.deleteFromBucketWithUintKeysRange(FORKID_BLOCK, fromBlockNo, toBlockNo) } @@ -1270,38 +1261,39 @@ func (db *HermezDbReader) GetBlockL1InfoTreeIndex(blockNumber uint64) (uint64, e return BytesToUint64(v), nil } -func (db *HermezDb) TruncateBlockL1InfoTreeIndex(fromL2BlockNum, toL2BlockNum uint64) error { - for i := fromL2BlockNum; i <= toL2BlockNum; i++ { - err := db.tx.Delete(BLOCK_L1_INFO_TREE_INDEX, Uint64ToBytes(i)) - if err != nil { - return err - } - +func (db *HermezDb) WriteBlockL1InfoTreeIndexProgress(blockNumber uint64, l1Index uint64) error { + latestBlockNumber, latestL1Index, err := db.GetLatestBlockL1InfoTreeIndexProgress() + if err != nil { + return err + } + if latestBlockNumber > blockNumber { + return fmt.Errorf("unable to set l1index for block %d because it has already been set for block %d", blockNumber, latestBlockNumber) + } + if l1Index <= latestL1Index { + return nil } - return nil + k := Uint64ToBytes(blockNumber) + v := Uint64ToBytes(l1Index) + return db.tx.Put(BLOCK_L1_INFO_TREE_INDEX_PROGRESS, k, v) } -func (db *HermezDbReader) GetLatestL1InfoTreeIndex() (uint64, error) { - c, err := db.tx.Cursor(BLOCK_L1_INFO_TREE_INDEX) +func (db *HermezDbReader) GetLatestBlockL1InfoTreeIndexProgress() (uint64, uint64, error) { + c, err := db.tx.Cursor(BLOCK_L1_INFO_TREE_INDEX_PROGRESS) if err != nil { - return 0, err + return 0, 0, err } defer c.Close() - var k, v []byte - for k, v, err = c.Last(); k != nil; k, v, err = c.Prev() { - if err != nil { - break - } - - if len(v) != 0 && v[0] == 1 { - blockNum := BytesToUint64(k[:8]) - return blockNum, nil - } + k, v, err := c.Last() + if err != nil { + return 0, 0, err } + return BytesToUint64(k), BytesToUint64(v), nil +} - return 0, nil +func (db *HermezDb) DeleteBlockL1InfoTreeIndexesProgress(fromBlockNum, toBlockNum uint64) error { + return db.deleteFromBucketWithUintKeysRange(BLOCK_L1_INFO_TREE_INDEX_PROGRESS, fromBlockNum, toBlockNum) } func (db *HermezDb) WriteL1InjectedBatch(batch *types.L1InjectedBatch) error { @@ -1369,16 +1361,21 @@ func (db *HermezDbReader) GetWitness(batchNumber uint64) ([]byte, error) { return v, nil } -func (db *HermezDb) WriteBatchCounters(batchNumber uint64, counters map[string]int) error { +func (db *HermezDb) WriteBatchCounters(blockNumber uint64, counters map[string]int) error { countersJson, err := 
json.Marshal(counters) if err != nil { return err } - return db.tx.Put(BATCH_COUNTERS, Uint64ToBytes(batchNumber), countersJson) + return db.tx.Put(BATCH_COUNTERS, Uint64ToBytes(blockNumber), countersJson) } -func (db *HermezDbReader) GetBatchCounters(batchNumber uint64) (countersMap map[string]int, found bool, err error) { - v, err := db.tx.GetOne(BATCH_COUNTERS, Uint64ToBytes(batchNumber)) +func (db *HermezDbReader) GetLatestBatchCounters(batchNumber uint64) (countersMap map[string]int, found bool, err error) { + batchBlockNumbers, err := db.GetL2BlockNosByBatch(batchNumber) + if err != nil { + return nil, false, err + } + + v, err := db.tx.GetOne(BATCH_COUNTERS, Uint64ToBytes(batchBlockNumbers[len(batchBlockNumbers)-1])) if err != nil { return nil, false, err } @@ -1393,6 +1390,10 @@ func (db *HermezDbReader) GetBatchCounters(batchNumber uint64) (countersMap map[ return countersMap, found, nil } +func (db *HermezDb) DeleteBatchCounters(fromBlockNum, toBlockNum uint64) error { + return db.deleteFromBucketWithUintKeysRange(BATCH_COUNTERS, fromBlockNum, toBlockNum) +} + // WriteL1BatchData stores the data for a given L1 batch number // coinbase = 20 bytes // batchL2Data = remaining @@ -1424,9 +1425,8 @@ func (db *HermezDbReader) GetLastL1BatchData() (uint64, error) { return BytesToUint64(k), nil } -func (db *HermezDb) WriteLatestUsedGer(batchNo uint64, ger common.Hash) error { - batchBytes := Uint64ToBytes(batchNo) - return db.tx.Put(LATEST_USED_GER, batchBytes, ger.Bytes()) +func (db *HermezDb) WriteLatestUsedGer(blockNumber uint64, ger common.Hash) error { + return db.tx.Put(LATEST_USED_GER, Uint64ToBytes(blockNumber), ger.Bytes()) } func (db *HermezDbReader) GetLatestUsedGer() (uint64, common.Hash, error) { @@ -1447,21 +1447,8 @@ func (db *HermezDbReader) GetLatestUsedGer() (uint64, common.Hash, error) { return batchNo, ger, nil } -func (db *HermezDb) TruncateLatestUsedGers(fromBatch uint64) error { - latestBatch, _, err := db.GetLatestUsedGer() - if err != nil { - return err - } - - for i := fromBatch; i <= latestBatch; i++ { - err := db.tx.Delete(LATEST_USED_GER, Uint64ToBytes(i)) - if err != nil { - return err - } - - } - - return nil +func (db *HermezDb) DeleteLatestUsedGers(fromBlockNum, toBlockNum uint64) error { + return db.deleteFromBucketWithUintKeysRange(LATEST_USED_GER, fromBlockNum, toBlockNum) } func (db *HermezDb) WriteSmtDepth(l2BlockNo, depth uint64) error { @@ -1629,6 +1616,15 @@ func (db *HermezDbReader) GetIsBatchPartiallyProcessed(batchNo uint64) (bool, er return len(v) > 0, nil } +func (db *HermezDb) TruncateIsBatchPartiallyProcessed(fromBatch, toBatch uint64) error { + for batch := fromBatch; batch <= toBatch; batch++ { + if err := db.DeleteIsBatchPartiallyProcessed(batch); err != nil { + return err + } + } + return nil +} + func (db *HermezDb) WriteLocalExitRootForBatchNo(batchNo uint64, root common.Hash) error { return db.tx.Put(LOCAL_EXIT_ROOTS, Uint64ToBytes(batchNo), root.Bytes()) } diff --git a/zk/l1_data/l1_decoder.go b/zk/l1_data/l1_decoder.go index a9fcaf378ba..661a5e11f5e 100644 --- a/zk/l1_data/l1_decoder.go +++ b/zk/l1_data/l1_decoder.go @@ -6,15 +6,16 @@ import ( "fmt" "strings" + "encoding/binary" + "github.com/gateway-fm/cdk-erigon-lib/common" + "github.com/gateway-fm/cdk-erigon-lib/common/length" "github.com/ledgerwatch/erigon/accounts/abi" "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/zk/contracts" "github.com/ledgerwatch/erigon/zk/da" "github.com/ledgerwatch/erigon/zk/hermez_db" zktx "github.com/ledgerwatch/erigon/zk/tx" - 
"github.com/gateway-fm/cdk-erigon-lib/common/length" - "encoding/binary" ) type RollupBaseEtrogBatchData struct { @@ -168,8 +169,8 @@ type DecodedL1Data struct { LimitTimestamp uint64 } -func BreakDownL1DataByBatch(batchNo uint64, forkId uint64, reader *hermez_db.HermezDbReader) (DecodedL1Data, error) { - decoded := DecodedL1Data{} +func BreakDownL1DataByBatch(batchNo uint64, forkId uint64, reader *hermez_db.HermezDbReader) (*DecodedL1Data, error) { + decoded := &DecodedL1Data{} // we expect that the batch we're going to load in next should be in the db already because of the l1 block sync // stage, if it is not there we need to panic as we're in a bad state batchData, err := reader.GetL1BatchData(batchNo) diff --git a/zk/legacy_executor_verifier/executor.go b/zk/legacy_executor_verifier/executor.go index 5bb839af274..f60b16a6471 100644 --- a/zk/legacy_executor_verifier/executor.go +++ b/zk/legacy_executor_verifier/executor.go @@ -230,12 +230,12 @@ func (e *Executor) Verify(p *Payload, request *VerifierRequest, oldStateRoot com "match", match, "grpcUrl", e.grpcUrl, "batch", request.BatchNumber, + "blocks-count", len(resp.BlockResponses), "counters", counters, "exec-root", common.BytesToHash(resp.NewStateRoot), "our-root", request.StateRoot, "exec-old-root", common.BytesToHash(resp.OldStateRoot), - "our-old-root", oldStateRoot, - "blocks-count", len(resp.BlockResponses)) + "our-old-root", oldStateRoot) for addr, all := range resp.ReadWriteAddresses { log.Debug("executor result", diff --git a/zk/legacy_executor_verifier/legacy_executor_verifier.go b/zk/legacy_executor_verifier/legacy_executor_verifier.go index a0631c2bc25..4f48e58877d 100644 --- a/zk/legacy_executor_verifier/legacy_executor_verifier.go +++ b/zk/legacy_executor_verifier/legacy_executor_verifier.go @@ -2,6 +2,7 @@ package legacy_executor_verifier import ( "context" + "sync" "sync/atomic" "time" @@ -10,8 +11,6 @@ import ( "fmt" "strconv" - "sync" - "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" "github.com/gateway-fm/cdk-erigon-lib/common" "github.com/gateway-fm/cdk-erigon-lib/kv" @@ -22,7 +21,6 @@ import ( "github.com/ledgerwatch/erigon/zk/datastream/server" "github.com/ledgerwatch/erigon/zk/hermez_db" "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier/proto/github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" - "github.com/ledgerwatch/erigon/zk/syncer" "github.com/ledgerwatch/erigon/zk/utils" "github.com/ledgerwatch/log/v3" ) @@ -31,54 +29,62 @@ var ErrNoExecutorAvailable = fmt.Errorf("no executor available") type VerifierRequest struct { BatchNumber uint64 + BlockNumbers []uint64 ForkId uint64 StateRoot common.Hash Counters map[string]int creationTime time.Time + timeout time.Duration } -func NewVerifierRequest(batchNumber, forkId uint64, stateRoot common.Hash, counters map[string]int) *VerifierRequest { +func NewVerifierRequest(forkId, batchNumber uint64, blockNumbers []uint64, stateRoot common.Hash, counters map[string]int) *VerifierRequest { + return NewVerifierRequestWithTimeout(forkId, batchNumber, blockNumbers, stateRoot, counters, 0) +} + +func NewVerifierRequestWithTimeout(forkId, batchNumber uint64, blockNumbers []uint64, stateRoot common.Hash, counters map[string]int, timeout time.Duration) *VerifierRequest { return &VerifierRequest{ BatchNumber: batchNumber, + BlockNumbers: blockNumbers, ForkId: forkId, StateRoot: stateRoot, Counters: counters, creationTime: time.Now(), + timeout: timeout, + } +} + +func (vr *VerifierRequest) IsOverdue() bool { + if vr.timeout == 0 { + return false 
} + + return time.Since(vr.creationTime) > vr.timeout } -func (vr *VerifierRequest) isOverdue() bool { - return time.Since(vr.creationTime) > time.Duration(30*time.Minute) +func (vr *VerifierRequest) GetLastBlockNumber() uint64 { + return vr.BlockNumbers[len(vr.BlockNumbers)-1] } type VerifierResponse struct { - BatchNumber uint64 Valid bool Witness []byte ExecutorResponse *executor.ProcessBatchResponseV2 + OriginalCounters map[string]int Error error } type VerifierBundle struct { - request *VerifierRequest - response *VerifierResponse + Request *VerifierRequest + Response *VerifierResponse } func NewVerifierBundle(request *VerifierRequest, response *VerifierResponse) *VerifierBundle { return &VerifierBundle{ - request: request, - response: response, + Request: request, + Response: response, } } -type ILegacyExecutor interface { - Verify(*Payload, *VerifierRequest, common.Hash) (bool, *executor.ProcessBatchResponseV2, error) - CheckOnline() bool - QueueLength() int - AquireAccess() - ReleaseAccess() -} - type WitnessGenerator interface { GetWitnessByBlockRange(tx kv.Tx, ctx context.Context, startBlock, endBlock uint64, debug, witnessFull bool) ([]byte, error) } @@ -86,35 +92,23 @@ type WitnessGenerator interface { type LegacyExecutorVerifier struct { db kv.RwDB cfg ethconfig.Zk - executors []ILegacyExecutor + executors []*Executor executorNumber int cancelAllVerifications atomic.Bool - quit chan struct{} - streamServer *server.DataStreamServer - stream *datastreamer.StreamServer - witnessGenerator WitnessGenerator - l1Syncer *syncer.L1Syncer - - promises []*Promise[*VerifierBundle] - addedBatches map[uint64]struct{} - - // these three items are used to keep track of where the datastream is at - // compared with the executor checks. It allows for data to arrive in strange - // orders and will backfill the stream as needed. 
- lowestWrittenBatch uint64 - responsesToWrite map[uint64]struct{} - responsesMtx *sync.Mutex + WitnessGenerator WitnessGenerator + + promises []*Promise[*VerifierBundle] + mtxPromises *sync.Mutex } func NewLegacyExecutorVerifier( cfg ethconfig.Zk, - executors []ILegacyExecutor, + executors []*Executor, chainCfg *chain.Config, db kv.RwDB, witnessGenerator WitnessGenerator, - l1Syncer *syncer.L1Syncer, stream *datastreamer.StreamServer, ) *LegacyExecutorVerifier { streamServer := server.NewDataStreamServer(stream, chainCfg.ChainID.Uint64()) @@ -124,18 +118,40 @@ func NewLegacyExecutorVerifier( executors: executors, executorNumber: 0, cancelAllVerifications: atomic.Bool{}, - quit: make(chan struct{}), streamServer: streamServer, - stream: stream, - witnessGenerator: witnessGenerator, - l1Syncer: l1Syncer, + WitnessGenerator: witnessGenerator, promises: make([]*Promise[*VerifierBundle], 0), - addedBatches: make(map[uint64]struct{}), - responsesToWrite: map[uint64]struct{}{}, - responsesMtx: &sync.Mutex{}, + mtxPromises: &sync.Mutex{}, } } +func (v *LegacyExecutorVerifier) StartAsyncVerification( + forkId uint64, + batchNumber uint64, + stateRoot common.Hash, + counters map[string]int, + blockNumbers []uint64, + useRemoteExecutor bool, + requestTimeout time.Duration, +) { + var promise *Promise[*VerifierBundle] + + request := NewVerifierRequestWithTimeout(forkId, batchNumber, blockNumbers, stateRoot, counters, requestTimeout) + if useRemoteExecutor { + promise = v.VerifyAsync(request, blockNumbers) + } else { + promise = v.VerifyWithoutExecutor(request, blockNumbers) + } + + v.appendPromise(promise) +} + +func (v *LegacyExecutorVerifier) appendPromise(promise *Promise[*VerifierBundle]) { + v.mtxPromises.Lock() + defer v.mtxPromises.Unlock() + v.promises = append(v.promises, promise) +} + func (v *LegacyExecutorVerifier) VerifySync(tx kv.Tx, request *VerifierRequest, witness, streamBytes []byte, timestampLimit, firstBlockNumber uint64, l1InfoTreeMinTimestamps map[uint64]uint64) error { oldAccInputHash := common.HexToHash("0x0") payload := &Payload{ @@ -150,7 +166,7 @@ func (v *LegacyExecutorVerifier) VerifySync(tx kv.Tx, request *VerifierRequest, L1InfoTreeMinTimestamps: l1InfoTreeMinTimestamps, } - e := v.getNextOnlineAvailableExecutor() + e := v.GetNextOnlineAvailableExecutor() if e == nil { return ErrNoExecutorAvailable } @@ -170,19 +186,19 @@ func (v *LegacyExecutorVerifier) VerifySync(tx kv.Tx, request *VerifierRequest, return executorErr } -// Unsafe is not thread-safe so it MUST be invoked only from a single thread -func (v *LegacyExecutorVerifier) AddRequestUnsafe(request *VerifierRequest, sequencerBatchSealTime time.Duration) *Promise[*VerifierBundle] { +func (v *LegacyExecutorVerifier) VerifyAsync(request *VerifierRequest, blockNumbers []uint64) *Promise[*VerifierBundle] { // eager promise will do the work as soon as called in a goroutine, then we can retrieve the result later // ProcessResultsSequentiallyUnsafe relies on the fact that this function returns ALWAYS non-verifierBundle and error. The only exception is the case when verifications has been canceled. 
Only then the verifierBundle can be nil - promise := NewPromise[*VerifierBundle](func() (*VerifierBundle, error) { + return NewPromise[*VerifierBundle](func() (*VerifierBundle, error) { verifierBundle := NewVerifierBundle(request, nil) - e := v.getNextOnlineAvailableExecutor() + e := v.GetNextOnlineAvailableExecutor() if e == nil { return verifierBundle, ErrNoExecutorAvailable } - t := utils.StartTimer("legacy-executor-verifier", "add-request-unsafe") + t := utils.StartTimer("legacy-executor-verifier", "verify-async") + defer t.LogTimer() e.AquireAccess() defer e.ReleaseAccess() @@ -191,34 +207,12 @@ func (v *LegacyExecutorVerifier) AddRequestUnsafe(request *VerifierRequest, sequ } var err error - var blocks []uint64 - startTime := time.Now() ctx := context.Background() // mapmutation has some issue with us not having a quit channel on the context call to `Done` so // here we're creating a cancelable context and just deferring the cancel innerCtx, cancel := context.WithCancel(ctx) defer cancel() - // get the data stream bytes - for time.Since(startTime) < 3*sequencerBatchSealTime { - // we might not have blocks yet as the underlying stage loop might still be running and the tx hasn't been - // committed yet so just requeue the request - blocks, err = v.availableBlocksToProcess(innerCtx, request.BatchNumber) - if err != nil { - return verifierBundle, err - } - - if len(blocks) > 0 { - break - } - - time.Sleep(time.Second) - } - - if len(blocks) == 0 { - return verifierBundle, fmt.Errorf("still not blocks in this batch") - } - tx, err := v.db.BeginRo(innerCtx) if err != nil { return verifierBundle, err @@ -228,14 +222,14 @@ func (v *LegacyExecutorVerifier) AddRequestUnsafe(request *VerifierRequest, sequ hermezDb := hermez_db.NewHermezDbReader(tx) l1InfoTreeMinTimestamps := make(map[uint64]uint64) - streamBytes, err := v.GetWholeBatchStreamBytes(request.BatchNumber, tx, blocks, hermezDb, l1InfoTreeMinTimestamps, nil) + streamBytes, err := v.GetWholeBatchStreamBytes(request.BatchNumber, tx, blockNumbers, hermezDb, l1InfoTreeMinTimestamps, nil) if err != nil { return verifierBundle, err } - witness, err := v.witnessGenerator.GetWitnessByBlockRange(tx, ctx, blocks[0], blocks[len(blocks)-1], false, v.cfg.WitnessFull) + witness, err := v.WitnessGenerator.GetWitnessByBlockRange(tx, innerCtx, blockNumbers[0], blockNumbers[len(blockNumbers)-1], false, v.cfg.WitnessFull) if err != nil { - return nil, err + return verifierBundle, err } log.Debug("witness generated", "data", hex.EncodeToString(witness)) @@ -244,7 +238,7 @@ func (v *LegacyExecutorVerifier) AddRequestUnsafe(request *VerifierRequest, sequ // timestampLimit >= currentTimestamp (from batch pre-state) + deltaTimestamp // so to ensure we have a good value we can take the timestamp of the last block in the batch // and just add 5 minutes - lastBlock, err := rawdb.ReadBlockByNumber(tx, blocks[len(blocks)-1]) + lastBlock, err := rawdb.ReadBlockByNumber(tx, blockNumbers[len(blockNumbers)-1]) if err != nil { return verifierBundle, err } @@ -264,12 +258,13 @@ func (v *LegacyExecutorVerifier) AddRequestUnsafe(request *VerifierRequest, sequ L1InfoTreeMinTimestamps: l1InfoTreeMinTimestamps, } - previousBlock, err := rawdb.ReadBlockByNumber(tx, blocks[0]-1) + previousBlock, err := rawdb.ReadBlockByNumber(tx, blockNumbers[0]-1) if err != nil { return verifierBundle, err } ok, executorResponse, executorErr := e.Verify(payload, request, previousBlock.Root()) + if executorErr != nil { if errors.Is(executorErr, ErrExecutorStateRootMismatch) { 
log.Error("[Verifier] State root mismatch detected", "err", executorErr) @@ -280,17 +275,7 @@ func (v *LegacyExecutorVerifier) AddRequestUnsafe(request *VerifierRequest, sequ } } - // log timing w/o stream write - t.LogTimer() - - if ok { - if err = v.checkAndWriteToStream(tx, hermezDb, request.BatchNumber); err != nil { - log.Error("error writing data to stream", "err", err) - } - } - - verifierBundle.response = &VerifierResponse{ - BatchNumber: request.BatchNumber, + verifierBundle.Response = &VerifierResponse{ Valid: ok, Witness: witness, ExecutorResponse: executorResponse, @@ -298,70 +283,38 @@ func (v *LegacyExecutorVerifier) AddRequestUnsafe(request *VerifierRequest, sequ } return verifierBundle, nil }) - - // add batch to the list of batches we've added - v.addedBatches[request.BatchNumber] = struct{}{} - - // add the promise to the list of promises - v.promises = append(v.promises, promise) - return promise } -func (v *LegacyExecutorVerifier) checkAndWriteToStream(tx kv.Tx, hdb *hermez_db.HermezDbReader, newBatch uint64) error { - t := utils.StartTimer("legacy-executor-verifier", "check-and-write-to-stream") - defer t.LogTimer() - - v.responsesMtx.Lock() - defer v.responsesMtx.Unlock() - - v.responsesToWrite[newBatch] = struct{}{} - - // if we haven't written anything yet - cold start of the node - if v.lowestWrittenBatch == 0 { - // we haven't written anything yet so lets make sure there is no gap - // in the stream for this batch - latestBatch, err := v.streamServer.GetHighestBatchNumber() - if err != nil { - return err - } - log.Info("[Verifier] Initialising on cold start", "latestBatch", latestBatch, "newBatch", newBatch) - - v.lowestWrittenBatch = latestBatch - - // check if we have the next batch we're waiting for - if latestBatch == newBatch-1 { - if err := v.WriteBatchToStream(newBatch, hdb, tx); err != nil { - return err - } - v.lowestWrittenBatch = newBatch - delete(v.responsesToWrite, newBatch) +func (v *LegacyExecutorVerifier) VerifyWithoutExecutor(request *VerifierRequest, blockNumbers []uint64) *Promise[*VerifierBundle] { + promise := NewPromise[*VerifierBundle](func() (*VerifierBundle, error) { + response := &VerifierResponse{ + // BatchNumber: request.BatchNumber, + // BlockNumber: request.BlockNumber, + Valid: true, + OriginalCounters: request.Counters, + Witness: nil, + ExecutorResponse: nil, + Error: nil, } - } + return NewVerifierBundle(request, response), nil + }) + promise.Wait() - // now check if the batch we want next is good - for { - // check if we have the next batch to write - nextBatch := v.lowestWrittenBatch + 1 - if _, ok := v.responsesToWrite[nextBatch]; !ok { - break - } + return promise +} - if err := v.WriteBatchToStream(nextBatch, hdb, tx); err != nil { - return err - } - delete(v.responsesToWrite, nextBatch) - v.lowestWrittenBatch = nextBatch - } +func (v *LegacyExecutorVerifier) ProcessResultsSequentially() ([]*VerifierBundle, error) { + v.mtxPromises.Lock() + defer v.mtxPromises.Unlock() - return nil -} + var verifierResponse []*VerifierBundle -// Unsafe is not thread-safe so it MUST be invoked only from a single thread -func (v *LegacyExecutorVerifier) ProcessResultsSequentiallyUnsafe(tx kv.RwTx) ([]*VerifierResponse, error) { - results := make([]*VerifierResponse, 0, len(v.promises)) - for i := 0; i < len(v.promises); i++ { - verifierBundle, err := v.promises[i].TryGet() + // not a stop signal, so we can start to process our promises now + for idx, promise := range v.promises { + verifierBundle, err := promise.TryGet() if verifierBundle == 
nil && err == nil { + // If code enters here this means that this promise is not yet completed + // We must process responses sequentially, so if this one is not ready we can just break break } @@ -373,38 +326,35 @@ func (v *LegacyExecutorVerifier) ProcessResultsSequentiallyUnsafe(tx kv.RwTx) ([ } log.Error("error on our end while preparing the verification request, re-queueing the task", "err", err) - // this is an error on our end, so just re-create the promise at exact position where it was - if verifierBundle.request.isOverdue() { - return nil, fmt.Errorf("error: batch %d couldn't be processed in 30 minutes", verifierBundle.request.BatchNumber) - } - v.promises[i] = NewPromise[*VerifierBundle](v.promises[i].task) - break - } + if verifierBundle.Request.IsOverdue() { + // signal an error; the caller can check on this and stop the process if need be + return nil, fmt.Errorf("error: batch %d couldn't be processed in 30 minutes", verifierBundle.Request.BatchNumber) + } - verifierResponse := verifierBundle.response - results = append(results, verifierResponse) - delete(v.addedBatches, verifierResponse.BatchNumber) + // re-queue the task - it should be safe to replace the index of the slice here as we only add to it + v.promises[idx] = promise.CloneAndRerun() - // no point to process any further responses if we've found an invalid one - if !verifierResponse.Valid { + // break now as we know we can't proceed here until this promise is attempted again break } + + verifierResponse = append(verifierResponse, verifierBundle) } - // leave only non-processed promises - // v.promises = v.promises[len(results):] + // remove processed promises from the list + v.promises = v.promises[len(verifierResponse):] - return results, nil + return verifierResponse, nil } -func (v *LegacyExecutorVerifier) MarkTopResponseAsProcessed(batchNumber uint64) { - v.promises = v.promises[1:] - delete(v.addedBatches, batchNumber) +func (v *LegacyExecutorVerifier) Wait() { + for _, p := range v.promises { + p.Wait() + } } -// Unsafe is not thread-safe so it MUST be invoked only from a single thread -func (v *LegacyExecutorVerifier) CancelAllRequestsUnsafe() { +func (v *LegacyExecutorVerifier) CancelAllRequests() { // cancel all promises // all queued promises will return ErrPromiseCancelled while getting its result for _, p := range v.promises { @@ -425,31 +375,10 @@ func (v *LegacyExecutorVerifier) CancelAllRequestsUnsafe() { v.cancelAllVerifications.Store(false) v.promises = make([]*Promise[*VerifierBundle], 0) - v.addedBatches = map[uint64]struct{}{} } -// Unsafe is not thread-safe so it MUST be invoked only from a single thread -func (v *LegacyExecutorVerifier) HasExecutorsUnsafe() bool { - return len(v.executors) > 0 -} - -// Unsafe is not thread-safe so it MUST be invoked only from a single thread -func (v *LegacyExecutorVerifier) IsRequestAddedUnsafe(batch uint64) bool { - _, ok := v.addedBatches[batch] - return ok -} - -func (v *LegacyExecutorVerifier) WriteBatchToStream(batchNumber uint64, hdb *hermez_db.HermezDbReader, roTx kv.Tx) error { - log.Info("[Verifier] Writing batch to stream", "batch", batchNumber) - - if err := v.streamServer.WriteWholeBatchToStream("verifier", roTx, hdb, v.lowestWrittenBatch, batchNumber); err != nil { - return err - } - return nil -} - -func (v *LegacyExecutorVerifier) getNextOnlineAvailableExecutor() ILegacyExecutor { - var exec ILegacyExecutor +func (v *LegacyExecutorVerifier) GetNextOnlineAvailableExecutor() *Executor { + var exec *Executor // TODO: find executors with spare 
capacity @@ -469,32 +398,6 @@ func (v *LegacyExecutorVerifier) getNextOnlineAvailableExecutor() ILegacyExecuto return exec } -func (v *LegacyExecutorVerifier) availableBlocksToProcess(innerCtx context.Context, batchNumber uint64) ([]uint64, error) { - tx, err := v.db.BeginRo(innerCtx) - if err != nil { - return []uint64{}, err - } - defer tx.Rollback() - - hermezDb := hermez_db.NewHermezDbReader(tx) - blocks, err := hermezDb.GetL2BlockNosByBatch(batchNumber) - if err != nil { - return []uint64{}, err - } - - for _, blockNum := range blocks { - block, err := rawdb.ReadBlockByNumber(tx, blockNum) - if err != nil { - return []uint64{}, err - } - if block == nil { - return []uint64{}, nil - } - } - - return blocks, nil -} - func (v *LegacyExecutorVerifier) GetWholeBatchStreamBytes( batchNumber uint64, tx kv.Tx, diff --git a/zk/legacy_executor_verifier/promise.go b/zk/legacy_executor_verifier/promise.go index 33a6b2ab26c..49c22cb04f7 100644 --- a/zk/legacy_executor_verifier/promise.go +++ b/zk/legacy_executor_verifier/promise.go @@ -40,8 +40,12 @@ func NewPromise[T any](task func() (T, error)) *Promise[T] { return p } -func (p *Promise[T]) Get() (T, error) { +func (p *Promise[T]) Wait() { p.wg.Wait() // .Wait ensures that all memory operations before .Done are visible after .Wait => no need to lock/unlock the mutex +} + +func (p *Promise[T]) Get() (T, error) { + p.Wait() return p.result, p.err } @@ -56,3 +60,7 @@ func (p *Promise[T]) Cancel() { defer p.mutex.Unlock() p.cancelled = true } + +func (p *Promise[T]) CloneAndRerun() *Promise[T] { + return NewPromise[T](p.task) +} diff --git a/zk/sequencer/sequencer.go b/zk/sequencer/sequencer_env.go similarity index 100% rename from zk/sequencer/sequencer.go rename to zk/sequencer/sequencer_env.go diff --git a/zk/stages/stage_batches.go b/zk/stages/stage_batches.go index b58f0e8b75e..ca74f4cf681 100644 --- a/zk/stages/stage_batches.go +++ b/zk/stages/stage_batches.go @@ -67,7 +67,8 @@ type HermezDb interface { WriteBatchGlobalExitRoot(batchNumber uint64, ger types.GerUpdate) error WriteIntermediateTxStateRoot(l2BlockNumber uint64, txHash common.Hash, rpcRoot common.Hash) error WriteBlockL1InfoTreeIndex(blockNumber uint64, l1Index uint64) error - WriteLatestUsedGer(batchNo uint64, ger common.Hash) error + WriteBlockL1InfoTreeIndexProgress(blockNumber uint64, l1Index uint64) error + WriteLatestUsedGer(blockNo uint64, ger common.Hash) error WriteLocalExitRootForBatchNo(batchNo uint64, localExitRoot common.Hash) error } @@ -198,7 +199,7 @@ func SpawnStageBatches( blocksWritten := uint64(0) highestHashableL2BlockNo := uint64(0) - highestL1InfoTreeIndex, err := stages.GetStageProgress(tx, stages.HighestUsedL1InfoIndex) + _, highestL1InfoTreeIndex, err := hermezDb.GetLatestBlockL1InfoTreeIndexProgress() if err != nil { return fmt.Errorf("failed to get highest used l1 info index, %w", err) } @@ -424,7 +425,10 @@ LOOP: } if blocksWritten != prevAmountBlocksWritten && blocksWritten%STAGE_PROGRESS_SAVE == 0 { - if err = saveStageProgress(tx, logPrefix, highestHashableL2BlockNo, highestSeenBatchNo, highestL1InfoTreeIndex, lastBlockHeight, lastForkId); err != nil { + if err = saveStageProgress(tx, logPrefix, highestHashableL2BlockNo, highestSeenBatchNo, lastBlockHeight, lastForkId); err != nil { + return err + } + if err := hermezDb.WriteBlockL1InfoTreeIndexProgress(lastBlockHeight, highestL1InfoTreeIndex); err != nil { return err } @@ -451,7 +455,10 @@ LOOP: return nil } - if err = saveStageProgress(tx, logPrefix, highestHashableL2BlockNo, highestSeenBatchNo, 
highestL1InfoTreeIndex, lastBlockHeight, lastForkId); err != nil { + if err = saveStageProgress(tx, logPrefix, highestHashableL2BlockNo, highestSeenBatchNo, lastBlockHeight, lastForkId); err != nil { + return err + } + if err := hermezDb.WriteBlockL1InfoTreeIndexProgress(lastBlockHeight, highestL1InfoTreeIndex); err != nil { return err } @@ -468,7 +475,7 @@ LOOP: return nil } -func saveStageProgress(tx kv.RwTx, logPrefix string, highestHashableL2BlockNo, highestSeenBatchNo, highestL1InfoTreeIndex, lastBlockHeight, lastForkId uint64) error { +func saveStageProgress(tx kv.RwTx, logPrefix string, highestHashableL2BlockNo, highestSeenBatchNo, lastBlockHeight, lastForkId uint64) error { var err error // store the highest hashable block number if err := stages.SaveStageProgress(tx, stages.HighestHashableL2BlockNo, highestHashableL2BlockNo); err != nil { @@ -484,10 +491,6 @@ func saveStageProgress(tx kv.RwTx, logPrefix string, highestHashableL2BlockNo, h return fmt.Errorf("save stage progress error: %v", err) } - if err := stages.SaveStageProgress(tx, stages.HighestUsedL1InfoIndex, uint64(highestL1InfoTreeIndex)); err != nil { - return err - } - // save the latest verified batch number as well just in case this node is upgraded // to a sequencer in the future if err := stages.SaveStageProgress(tx, stages.SequenceExecutorVerify, highestSeenBatchNo); err != nil { @@ -618,7 +621,7 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c return fmt.Errorf("delete global exit roots error: %v", err) } - if err = hermezDb.TruncateLatestUsedGers(fromBatch); err != nil { + if err = hermezDb.DeleteLatestUsedGers(fromBlock, toBlock); err != nil { return fmt.Errorf("delete latest used gers error: %v", err) } @@ -696,13 +699,8 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c // store the highest used l1 info index// ///////////////////////////////////////// - highestL1InfoTreeIndex, err := hermezDb.GetLatestL1InfoTreeIndex() - if err != nil { - return fmt.Errorf("get latest l1 info tree index error: %v", err) - } - - if err := stages.SaveStageProgress(tx, stages.HighestUsedL1InfoIndex, highestL1InfoTreeIndex); err != nil { - return err + if err := hermezDb.DeleteBlockL1InfoTreeIndexesProgress(fromBlock, toBlock); err != nil { + return err } if err := hermezDb.DeleteBlockL1InfoTreeIndexes(fromBlock, toBlock); err != nil { @@ -877,7 +875,7 @@ func writeL2Block(eriDb ErigonDb, hermezDb HermezDb, l2Block *types.FullL2Block, // we always want the last written GER in this table as it's at the batch level, so it can and should // be overwritten if !l1InfoTreeIndexReused && didStoreGer { - if err := hermezDb.WriteLatestUsedGer(l2Block.BatchNumber, l2Block.GlobalExitRoot); err != nil { + if err := hermezDb.WriteLatestUsedGer(l2Block.L2BlockNumber, l2Block.GlobalExitRoot); err != nil { return fmt.Errorf("write latest used ger error: %w", err) } } diff --git a/zk/stages/stage_interhashes.go b/zk/stages/stage_interhashes.go index c22eee32ee4..ea24a1ed917 100644 --- a/zk/stages/stage_interhashes.go +++ b/zk/stages/stage_interhashes.go @@ -218,7 +218,7 @@ func UnwindZkIntermediateHashesStage(u *stagedsync.UnwindState, s *stagedsync.St expectedRootHash = syncHeadHeader.Root } - root, err := unwindZkSMT(ctx, s.LogPrefix(), s.BlockNumber, u.UnwindPoint, tx, true, &expectedRootHash, quit) + root, err := unwindZkSMT(ctx, s.LogPrefix(), s.BlockNumber, u.UnwindPoint, tx, cfg.checkRoot, &expectedRootHash, quit) if err != nil { return err } diff --git
a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index d407ea92e1e..1bbf298a5ec 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -6,57 +6,42 @@ import ( "time" "github.com/gateway-fm/cdk-erigon-lib/common" - "github.com/gateway-fm/cdk-erigon-lib/kv" "github.com/ledgerwatch/log/v3" - mapset "github.com/deckarep/golang-set/v2" - "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/zk" - "github.com/ledgerwatch/erigon/zk/l1_data" - zktx "github.com/ledgerwatch/erigon/zk/tx" "github.com/ledgerwatch/erigon/zk/utils" ) -var SpecialZeroIndexHash = common.HexToHash("0x27AE5BA08D7291C96C8CBDDCC148BF48A6D68C7974B94356F53754EF6171D757") - func SpawnSequencingStage( s *stagedsync.StageState, u stagedsync.Unwinder, - tx kv.RwTx, ctx context.Context, cfg SequenceBlockCfg, + historyCfg stagedsync.HistoryCfg, quiet bool, ) (err error) { logPrefix := s.LogPrefix() log.Info(fmt.Sprintf("[%s] Starting sequencing stage", logPrefix)) defer log.Info(fmt.Sprintf("[%s] Finished sequencing stage", logPrefix)) - freshTx := tx == nil - if freshTx { - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() + sdb, err := newStageDb(ctx, cfg.db) + if err != nil { + return err } + defer sdb.tx.Rollback() - sdb := newStageDb(tx) - - l1Recovery := cfg.zk.L1SyncStartBlock > 0 - - executionAt, err := s.ExecutionAt(tx) + executionAt, err := s.ExecutionAt(sdb.tx) if err != nil { return err } - lastBatch, err := stages.GetStageProgress(tx, stages.HighestSeenBatchNumber) + lastBatch, err := stages.GetStageProgress(sdb.tx, stages.HighestSeenBatchNumber) if err != nil { return err } @@ -71,245 +56,101 @@ func SpawnSequencingStage( return err } - getHeader := func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(sdb.tx, hash, number) } - hasExecutorForThisBatch := !isLastBatchPariallyProcessed && cfg.zk.HasExecutors() - - // handle case where batch wasn't closed properly - // close it before starting a new one - // this occurs when sequencer was switched from syncer or sequencer datastream files were deleted - // and datastream was regenerated - isLastEntryBatchEnd, err := cfg.datastreamServer.IsLastEntryBatchEnd() - if err != nil { - return err - } + var block *types.Block + runLoopBlocks := true + batchContext := newBatchContext(ctx, &cfg, &historyCfg, s, sdb) + batchState := newBatchState(forkId, prepareBatchNumber(lastBatch, isLastBatchPariallyProcessed), !isLastBatchPariallyProcessed && cfg.zk.HasExecutors(), cfg.zk.L1SyncStartBlock > 0, cfg.txPool) + blockDataSizeChecker := newBlockDataChecker() + streamWriter := newSequencerBatchStreamWriter(batchContext, batchState, lastBatch) // using lastBatch (rather than batchState.batchNumber) is not mistake // injected batch if executionAt == 0 { - // set the block height for the fork we're running at to ensure contract interactions are correct - if err = utils.RecoverySetBlockConfigForks(1, forkId, cfg.chainConfig, logPrefix); err != nil { + if err = processInjectedInitialBatch(batchContext, batchState); err != nil { return err } - header, parentBlock, err := prepareHeader(tx, executionAt, math.MaxUint64, math.MaxUint64, forkId, 
cfg.zk.AddressSequencer) - if err != nil { + if err = cfg.datastreamServer.WriteWholeBatchToStream(logPrefix, sdb.tx, sdb.hermezDb.HermezDbReader, lastBatch, injectedBatchBatchNumber); err != nil { return err } - getHashFn := core.GetHashFn(header, getHeader) - blockContext := core.NewEVMBlockContext(header, getHashFn, cfg.engine, &cfg.zk.AddressSequencer, parentBlock.ExcessDataGas()) - - if err = processInjectedInitialBatch(ctx, cfg, s, sdb, forkId, header, parentBlock, &blockContext, l1Recovery); err != nil { - return err - } - - if err = cfg.datastreamServer.WriteWholeBatchToStream(logPrefix, tx, sdb.hermezDb.HermezDbReader, lastBatch, injectedBatchNumber); err != nil { - return err - } - - if freshTx { - if err = tx.Commit(); err != nil { - return err - } - } - - return nil + return sdb.tx.Commit() } - if !isLastBatchPariallyProcessed && !isLastEntryBatchEnd { - log.Warn(fmt.Sprintf("[%s] Last batch %d was not closed properly, closing it now...", logPrefix, lastBatch)) - ler, err := utils.GetBatchLocalExitRootFromSCStorage(lastBatch, sdb.hermezDb.HermezDbReader, tx) - if err != nil { - return err - } - - lastBlock, err := rawdb.ReadBlockByNumber(sdb.tx, executionAt) - if err != nil { - return err - } - root := lastBlock.Root() - if err = cfg.datastreamServer.WriteBatchEnd(sdb.hermezDb, lastBatch, lastBatch-1, &root, &ler); err != nil { - return err - } - } + tryHaltSequencer(batchContext, batchState.batchNumber) if err := utils.UpdateZkEVMBlockCfg(cfg.chainConfig, sdb.hermezDb, logPrefix); err != nil { return err } - var header *types.Header - var parentBlock *types.Block - - var decodedBlock zktx.DecodedBatchL2Data - var deltaTimestamp uint64 = math.MaxUint64 - var blockTransactions []types.Transaction - var l1EffectiveGases, effectiveGases []uint8 - - batchTicker := time.NewTicker(cfg.zk.SequencerBatchSealTime) - defer batchTicker.Stop() - nonEmptyBatchTimer := time.NewTicker(cfg.zk.SequencerNonEmptyBatchSealTime) - defer nonEmptyBatchTimer.Stop() - - hasAnyTransactionsInThisBatch := false - - thisBatch := lastBatch - // if last batch finished - start a new one - if !isLastBatchPariallyProcessed { - thisBatch++ + batchCounters, err := prepareBatchCounters(batchContext, batchState, isLastBatchPariallyProcessed) + if err != nil { + return err } - var intermediateUsedCounters *vm.Counters - if isLastBatchPariallyProcessed { - intermediateCountersMap, found, err := sdb.hermezDb.GetBatchCounters(lastBatch) - if err != nil { + if !isLastBatchPariallyProcessed { + // handle case where batch wasn't closed properly + // close it before starting a new one + // this occurs when sequencer was switched from syncer or sequencer datastream files were deleted + // and datastream was regenerated + if err = finalizeLastBatchInDatastreamIfNotFinalized(batchContext, batchState, executionAt); err != nil { return err } - if !found { - return fmt.Errorf("intermediate counters not found for batch %d", lastBatch) - } - - intermediateUsedCounters = vm.NewCountersFromUsedMap(intermediateCountersMap) - } - - batchCounters := vm.NewBatchCounterCollector(sdb.smt.GetDepth(), uint16(forkId), cfg.zk.VirtualCountersSmtReduction, cfg.zk.ShouldCountersBeUnlimited(l1Recovery), intermediateUsedCounters) - runLoopBlocks := true - lastStartedBn := executionAt - 1 - yielded := mapset.NewSet[[32]byte]() - - nextBatchData := l1_data.DecodedL1Data{ - Coinbase: cfg.zk.AddressSequencer, - IsWorkRemaining: true, } - decodedBlocksSize := uint64(0) - limboHeaderTimestamp, limboTxHash := cfg.txPool.GetLimboTxHash(thisBatch) - 
limboRecovery := limboTxHash != nil - isAnyRecovery := l1Recovery || limboRecovery - - // if not limbo set the limboHeaderTimestamp to the "default" value for "prepareHeader" function - if !limboRecovery { - limboHeaderTimestamp = math.MaxUint64 - } - - if l1Recovery { - if cfg.zk.L1SyncStopBatch > 0 && thisBatch > cfg.zk.L1SyncStopBatch { - log.Info(fmt.Sprintf("[%s] L1 recovery has completed!", logPrefix), "batch", thisBatch) + if batchState.isL1Recovery() { + if cfg.zk.L1SyncStopBatch > 0 && batchState.batchNumber > cfg.zk.L1SyncStopBatch { + log.Info(fmt.Sprintf("[%s] L1 recovery has completed!", logPrefix), "batch", batchState.batchNumber) time.Sleep(1 * time.Second) return nil } // let's check if we have any L1 data to recover - nextBatchData, err = l1_data.BreakDownL1DataByBatch(thisBatch, forkId, sdb.hermezDb.HermezDbReader) - if err != nil { + if err = batchState.batchL1RecoveryData.loadBatchData(sdb); err != nil { return err } - decodedBlocksSize = uint64(len(nextBatchData.DecodedData)) - if decodedBlocksSize == 0 { - log.Info(fmt.Sprintf("[%s] L1 recovery has completed!", logPrefix), "batch", thisBatch) + if !batchState.batchL1RecoveryData.hasAnyDecodedBlocks() { + log.Info(fmt.Sprintf("[%s] L1 recovery has completed!", logPrefix), "batch", batchState.batchNumber) time.Sleep(1 * time.Second) return nil } - // now look up the index associated with this info root - var infoTreeIndex uint64 - if nextBatchData.L1InfoRoot == SpecialZeroIndexHash { - infoTreeIndex = 0 - } else { - found := false - infoTreeIndex, found, err = sdb.hermezDb.GetL1InfoTreeIndexByRoot(nextBatchData.L1InfoRoot) - if err != nil { - return err - } - if !found { - return fmt.Errorf("could not find L1 info tree index for root %s", nextBatchData.L1InfoRoot.String()) - } - } - - // now let's detect a bad batch and skip it if we have to - currentBlock, err := rawdb.ReadBlockByNumber(sdb.tx, executionAt) - if err != nil { - return err - } - badBatch, err := checkForBadBatch(thisBatch, sdb.hermezDb, currentBlock.Time(), infoTreeIndex, nextBatchData.LimitTimestamp, nextBatchData.DecodedData) - if err != nil { + if handled, err := doCheckForBadBatch(batchContext, batchState, executionAt); err != nil || handled { return err } - - if badBatch { - log.Info(fmt.Sprintf("[%s] Skipping bad batch %d...", logPrefix, thisBatch)) - // store the fact that this batch was invalid during recovery - will be used for the stream later - if err = sdb.hermezDb.WriteInvalidBatch(thisBatch); err != nil { - return err - } - if err = sdb.hermezDb.WriteBatchCounters(thisBatch, map[string]int{}); err != nil { - return err - } - if err = sdb.hermezDb.DeleteIsBatchPartiallyProcessed(thisBatch); err != nil { - return err - } - if err = stages.SaveStageProgress(tx, stages.HighestSeenBatchNumber, thisBatch); err != nil { - return err - } - if err = sdb.hermezDb.WriteForkId(thisBatch, forkId); err != nil { - return err - } - if freshTx { - if err = tx.Commit(); err != nil { - return err - } - } - return nil - } - } - - if !isLastBatchPariallyProcessed { - log.Info(fmt.Sprintf("[%s] Starting batch %d...", logPrefix, thisBatch)) - } else { - log.Info(fmt.Sprintf("[%s] Continuing unfinished batch %d from block %d", logPrefix, thisBatch, executionAt)) } - blockDataSizeChecker := NewBlockDataChecker() + batchTicker, logTicker, blockTicker := prepareTickers(batchContext.cfg) + defer batchTicker.Stop() + defer logTicker.Stop() + defer blockTicker.Stop() - prevHeader := rawdb.ReadHeaderByNumber(tx, executionAt) - batchDataOverflow := false - 
tryHaltSequencer(logPrefix, cfg, thisBatch) + log.Info(fmt.Sprintf("[%s] Starting batch %d...", logPrefix, batchState.batchNumber)) - var block *types.Block for blockNumber := executionAt + 1; runLoopBlocks; blockNumber++ { - if l1Recovery { - decodedBlocksIndex := blockNumber - (executionAt + 1) - if decodedBlocksIndex == decodedBlocksSize { + log.Info(fmt.Sprintf("[%s] Starting block %d (forkid %v)...", logPrefix, blockNumber, batchState.forkId)) + logTicker.Reset(10 * time.Second) + blockTicker.Reset(cfg.zk.SequencerBlockSealTime) + + if batchState.isL1Recovery() { + didLoadedAnyDataForRecovery := batchState.loadBlockL1RecoveryData(blockNumber - (executionAt + 1)) + if !didLoadedAnyDataForRecovery { runLoopBlocks = false break } - - decodedBlock = nextBatchData.DecodedData[decodedBlocksIndex] - deltaTimestamp = uint64(decodedBlock.DeltaTimestamp) - l1EffectiveGases = decodedBlock.EffectiveGasPricePercentages - blockTransactions = decodedBlock.Transactions } - l1InfoIndex, err := sdb.hermezDb.GetBlockL1InfoTreeIndex(lastStartedBn) + l1InfoIndex, err := sdb.hermezDb.GetBlockL1InfoTreeIndex(blockNumber - 1) if err != nil { return err } - log.Info(fmt.Sprintf("[%s] Starting block %d (forkid %v)...", logPrefix, blockNumber, forkId)) - - lastStartedBn = blockNumber - - addedTransactions := []types.Transaction{} - addedReceipts := []*types.Receipt{} - effectiveGases = []uint8{} - addedExecutionResults := []*core.ExecutionResult{} - - header, parentBlock, err = prepareHeader(tx, blockNumber-1, deltaTimestamp, limboHeaderTimestamp, forkId, nextBatchData.Coinbase) + header, parentBlock, err := prepareHeader(sdb.tx, blockNumber-1, batchState.blockState.getDeltaTimestamp(), batchState.getBlockHeaderForcedTimestamp(), batchState.forkId, batchState.getCoinbase(&cfg)) if err != nil { return err } - // run this only once the first time, do not add it on rerun - if batchDataOverflow = blockDataSizeChecker.AddBlockStartData(uint32(prevHeader.Time-header.Time), uint32(l1InfoIndex)); batchDataOverflow { + if batchDataOverflow := blockDataSizeChecker.AddBlockStartData(); batchDataOverflow { log.Info(fmt.Sprintf("[%s] BatchL2Data limit reached. 
Stopping.", logPrefix), "blockNumber", blockNumber) break } @@ -321,132 +162,99 @@ func SpawnSequencingStage( if err != nil { return err } - if !isAnyRecovery && overflowOnNewBlock { + if !batchState.isAnyRecovery() && overflowOnNewBlock { break } - infoTreeIndexProgress, l1TreeUpdate, l1TreeUpdateIndex, l1BlockHash, ger, shouldWriteGerToContract, err := prepareL1AndInfoTreeRelatedStuff(sdb, &decodedBlock, l1Recovery, header.Time) + infoTreeIndexProgress, l1TreeUpdate, l1TreeUpdateIndex, l1BlockHash, ger, shouldWriteGerToContract, err := prepareL1AndInfoTreeRelatedStuff(sdb, batchState, header.Time) if err != nil { return err } ibs := state.New(sdb.stateReader) - getHashFn := core.GetHashFn(header, getHeader) + getHashFn := core.GetHashFn(header, func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(sdb.tx, hash, number) }) blockContext := core.NewEVMBlockContext(header, getHashFn, cfg.engine, &cfg.zk.AddressSequencer, parentBlock.ExcessDataGas()) + batchState.blockState.builtBlockElements.resetBlockBuildingArrays() parentRoot := parentBlock.Root() - if err = handleStateForNewBlockStarting( - cfg.chainConfig, - sdb.hermezDb, - ibs, - blockNumber, - thisBatch, - header.Time, - &parentRoot, - l1TreeUpdate, - shouldWriteGerToContract, - ); err != nil { + if err = handleStateForNewBlockStarting(batchContext, ibs, blockNumber, batchState.batchNumber, header.Time, &parentRoot, l1TreeUpdate, shouldWriteGerToContract); err != nil { return err } // start waiting for a new transaction to arrive - if !isAnyRecovery { + if !batchState.isAnyRecovery() { log.Info(fmt.Sprintf("[%s] Waiting for txs from the pool...", logPrefix)) } - // we don't care about defer order here we just need to make sure the tickers are stopped to - // avoid a leak - logTicker := time.NewTicker(10 * time.Second) - defer logTicker.Stop() - blockTicker := time.NewTicker(cfg.zk.SequencerBlockSealTime) - defer blockTicker.Stop() - var anyOverflow bool - // start to wait for transactions to come in from the pool and attempt to add them to the current batch. 
Once we detect a counter - // overflow we revert the IBS back to the previous snapshot and don't add the transaction/receipt to the collection that will - // end up in the finalised block LOOP_TRANSACTIONS: for { select { case <-logTicker.C: - if !isAnyRecovery { + if !batchState.isAnyRecovery() { log.Info(fmt.Sprintf("[%s] Waiting some more for txs from the pool...", logPrefix)) } case <-blockTicker.C: - if !isAnyRecovery { + if !batchState.isAnyRecovery() { break LOOP_TRANSACTIONS } case <-batchTicker.C: - if !isAnyRecovery { - runLoopBlocks = false - break LOOP_TRANSACTIONS - } - case <-nonEmptyBatchTimer.C: - if !isAnyRecovery && hasAnyTransactionsInThisBatch { + if !batchState.isAnyRecovery() { runLoopBlocks = false break LOOP_TRANSACTIONS } default: - if limboRecovery { - cfg.txPool.LockFlusher() - blockTransactions, err = getLimboTransaction(ctx, cfg, limboTxHash) + if batchState.isLimboRecovery() { + batchState.blockState.transactionsForInclusion, err = getLimboTransaction(ctx, cfg, batchState.limboRecoveryData.limboTxHash) if err != nil { - cfg.txPool.UnlockFlusher() return err } - cfg.txPool.UnlockFlusher() - } else if !l1Recovery { - cfg.txPool.LockFlusher() - blockTransactions, err = getNextPoolTransactions(ctx, cfg, executionAt, forkId, yielded) + } else if !batchState.isL1Recovery() { + batchState.blockState.transactionsForInclusion, err = getNextPoolTransactions(ctx, cfg, executionAt, batchState.forkId, batchState.yieldedTransactions) if err != nil { - cfg.txPool.UnlockFlusher() return err } - cfg.txPool.UnlockFlusher() } - if len(blockTransactions) == 0 { + if len(batchState.blockState.transactionsForInclusion) == 0 { time.Sleep(250 * time.Millisecond) } else { - log.Trace(fmt.Sprintf("[%s] Yielded transactions from the pool", logPrefix), "txCount", len(blockTransactions)) + log.Trace(fmt.Sprintf("[%s] Yielded transactions from the pool", logPrefix), "txCount", len(batchState.blockState.transactionsForInclusion)) } - var receipt *types.Receipt - var execResult *core.ExecutionResult - for i, transaction := range blockTransactions { + for i, transaction := range batchState.blockState.transactionsForInclusion { txHash := transaction.Hash() + effectiveGas := batchState.blockState.getL1EffectiveGases(cfg, i) - var effectiveGas uint8 - - if l1Recovery { - effectiveGas = l1EffectiveGases[i] - } else { - effectiveGas = DeriveEffectiveGasPrice(cfg, transaction) - } - + // The copying of this structure is intentional backupDataSizeChecker := *blockDataSizeChecker - if receipt, execResult, anyOverflow, err = attemptAddTransaction(cfg, sdb, ibs, batchCounters, &blockContext, header, transaction, effectiveGas, l1Recovery, forkId, l1InfoIndex, &backupDataSizeChecker); err != nil { - if limboRecovery { + receipt, execResult, anyOverflow, err := attemptAddTransaction(cfg, sdb, ibs, batchCounters, &blockContext, header, transaction, effectiveGas, batchState.isL1Recovery(), batchState.forkId, l1InfoIndex, &backupDataSizeChecker) + if err != nil { + if batchState.isLimboRecovery() { panic("limbo transaction has already been executed once so they must not fail while re-executing") } // if we are in recovery just log the error as a warning. If the data is on the L1 then we should consider it as confirmed. // The executor/prover would simply skip a TX with an invalid nonce for example so we don't need to worry about that here. 
- if l1Recovery { + if batchState.isL1Recovery() { log.Warn(fmt.Sprintf("[%s] error adding transaction to batch during recovery: %v", logPrefix, err), "hash", txHash, "to", transaction.GetTo(), ) continue } + + // if running in normal operation mode and error != nil then just allow the code to continue + // It is safe because this approach ensures that the problematic transaction (the one that caused err != nil to be returned) is kept in yielded + // Each transaction in yielded will be reevaluated at the end of each batch } if anyOverflow { - if limboRecovery { + if batchState.isLimboRecovery() { panic("limbo transaction has already been executed once so they must not overflow counters while re-executing") } - if !l1Recovery { - log.Info(fmt.Sprintf("[%s] overflowed adding transaction to batch", logPrefix), "batch", thisBatch, "tx-hash", txHash, "has any transactions in this batch", hasAnyTransactionsInThisBatch) + if !batchState.isL1Recovery() { + log.Info(fmt.Sprintf("[%s] overflowed adding transaction to batch", logPrefix), "batch", batchState.batchNumber, "tx-hash", txHash, "has-any-transactions-in-this-batch", batchState.hasAnyTransactionsInThisBatch) /* There are two cases when overflow could occur. 1. The block DOES not contains any transactions. @@ -457,53 +265,49 @@ func SpawnSequencingStage( In this case, we just have to remove the transaction that overflowed the zk-counters and all transactions after it, from the yielded set. This removal will ensure that these transaction could be added in the next block(s) */ - if !hasAnyTransactionsInThisBatch { + if !batchState.hasAnyTransactionsInThisBatch { cfg.txPool.MarkForDiscardFromPendingBest(txHash) log.Trace(fmt.Sprintf("single transaction %s overflow counters", txHash)) } + + runLoopBlocks = false + break LOOP_TRANSACTIONS } - break LOOP_TRANSACTIONS } if err == nil { blockDataSizeChecker = &backupDataSizeChecker - yielded.Remove(txHash) - addedTransactions = append(addedTransactions, transaction) - addedReceipts = append(addedReceipts, receipt) - addedExecutionResults = append(addedExecutionResults, execResult) - effectiveGases = append(effectiveGases, effectiveGas) - - hasAnyTransactionsInThisBatch = true - nonEmptyBatchTimer.Reset(cfg.zk.SequencerNonEmptyBatchSealTime) - log.Debug(fmt.Sprintf("[%s] Finish block %d with %s transaction", logPrefix, blockNumber, txHash.Hex())) + batchState.onAddedTransaction(transaction, receipt, execResult, effectiveGas) } } - if l1Recovery { + if batchState.isL1Recovery() { // just go into the normal loop waiting for new transactions to signal that the recovery // has finished as far as it can go - if len(blockTransactions) == 0 && !nextBatchData.IsWorkRemaining { + if batchState.isThereAnyTransactionsToRecover() { log.Info(fmt.Sprintf("[%s] L1 recovery no more transactions to recover", logPrefix)) } break LOOP_TRANSACTIONS } - if limboRecovery { + if batchState.isLimboRecovery() { runLoopBlocks = false break LOOP_TRANSACTIONS } } } - if err = sdb.hermezDb.WriteBlockL1InfoTreeIndex(blockNumber, l1TreeUpdateIndex); err != nil { + block, err = doFinishBlockAndUpdateState(batchContext, ibs, header, parentBlock, batchState, ger, l1BlockHash, l1TreeUpdateIndex, infoTreeIndexProgress, batchCounters) + if err != nil { return err } - block, err = doFinishBlockAndUpdateState(ctx, cfg, s, sdb, ibs, header, parentBlock, forkId, thisBatch, ger, l1BlockHash, addedTransactions, addedReceipts, addedExecutionResults, effectiveGases, infoTreeIndexProgress, l1Recovery) - if err != nil { - return err + if 
batchState.isLimboRecovery() { + stateRoot := block.Root() + cfg.txPool.UpdateLimboRootByTxHash(batchState.limboRecoveryData.limboTxHash, &stateRoot) + return fmt.Errorf("[%s] %w: %s = %s", s.LogPrefix(), zk.ErrLimboState, batchState.limboRecoveryData.limboTxHash.Hex(), stateRoot.Hex()) } t.LogTimer() @@ -513,105 +317,47 @@ func SpawnSequencingStage( gasPerSecond = float64(block.GasUsed()) / elapsedSeconds } - if limboRecovery { - stateRoot := block.Root() - cfg.txPool.UpdateLimboRootByTxHash(limboTxHash, &stateRoot) - return fmt.Errorf("[%s] %w: %s = %s", s.LogPrefix(), zk.ErrLimboState, limboTxHash.Hex(), stateRoot.Hex()) - } else { - log.Debug(fmt.Sprintf("[%s] state root at block %d = %s", s.LogPrefix(), blockNumber, block.Root().Hex())) - } - if gasPerSecond != 0 { - log.Info(fmt.Sprintf("[%s] Finish block %d with %d transactions... (%d gas/s)", logPrefix, blockNumber, len(addedTransactions), int(gasPerSecond))) + log.Info(fmt.Sprintf("[%s] Finish block %d with %d transactions... (%d gas/s)", logPrefix, blockNumber, len(batchState.blockState.builtBlockElements.transactions), int(gasPerSecond))) } else { - log.Info(fmt.Sprintf("[%s] Finish block %d with %d transactions...", logPrefix, blockNumber, len(addedTransactions))) + log.Info(fmt.Sprintf("[%s] Finish block %d with %d transactions...", logPrefix, blockNumber, len(batchState.blockState.builtBlockElements.transactions))) } - if !hasExecutorForThisBatch { - // save counters midbatch - // here they shouldn't add more to counters other than what they already have - // because it would be later added twice - counters := batchCounters.CombineCollectorsNoChanges(l1InfoIndex != 0) - - if err = sdb.hermezDb.WriteBatchCounters(thisBatch, counters.UsedAsMap()); err != nil { - return err - } + // add a check to the verifier and also check for responses + batchState.onBuiltBlock(blockNumber) - if err = sdb.hermezDb.WriteIsBatchPartiallyProcessed(thisBatch); err != nil { - return err - } + // commit block data here so it is accessible in other threads + if errCommitAndStart := sdb.CommitAndStart(); errCommitAndStart != nil { + return errCommitAndStart + } + defer sdb.tx.Rollback() - if err = cfg.datastreamServer.WriteBlockWithBatchStartToStream(logPrefix, tx, sdb.hermezDb, forkId, thisBatch, lastBatch, *parentBlock, *block); err != nil { - return err - } + cfg.legacyVerifier.StartAsyncVerification(batchState.forkId, batchState.batchNumber, block.Root(), batchCounters.CombineCollectorsNoChanges().UsedAsMap(), batchState.builtBlocks, batchState.hasExecutorForThisBatch, batchContext.cfg.zk.SequencerBatchVerificationTimeout) - if err = tx.Commit(); err != nil { - return err - } - if tx, err = cfg.db.BeginRw(ctx); err != nil { - return err - } - // TODO: This creates stacked up deferrals - defer tx.Rollback() - sdb.SetTx(tx) + // check for new responses from the verifier + needsUnwind, err := updateStreamAndCheckRollback(batchContext, batchState, streamWriter, u) - lastBatch = thisBatch + // lets commit everything after updateStreamAndCheckRollback no matter of its result + if errCommitAndStart := sdb.CommitAndStart(); errCommitAndStart != nil { + return errCommitAndStart } - } + defer sdb.tx.Rollback() - l1InfoIndex, err := sdb.hermezDb.GetBlockL1InfoTreeIndex(lastStartedBn) - if err != nil { - return err - } - - counters, err := batchCounters.CombineCollectors(l1InfoIndex != 0) - if err != nil { - return err - } - - log.Info(fmt.Sprintf("[%s] counters consumed", logPrefix), "batch", thisBatch, "counts", counters.UsedAsString()) - if err = 
sdb.hermezDb.WriteBatchCounters(thisBatch, counters.UsedAsMap()); err != nil { - return err + // check the return values of updateStreamAndCheckRollback + if err != nil || needsUnwind { + return err + } } - if err = sdb.hermezDb.DeleteIsBatchPartiallyProcessed(thisBatch); err != nil { + cfg.legacyVerifier.Wait() + needsUnwind, err := updateStreamAndCheckRollback(batchContext, batchState, streamWriter, u) + if err != nil || needsUnwind { return err } - // Local Exit Root (ler): read s/c storage every batch to store the LER for the highest block in the batch - ler, err := utils.GetBatchLocalExitRootFromSCStorage(thisBatch, sdb.hermezDb.HermezDbReader, tx) - if err != nil { - return err - } - // write ler to hermezdb - if err = sdb.hermezDb.WriteLocalExitRootForBatchNo(thisBatch, ler); err != nil { + if err = runBatchLastSteps(batchContext, batchState.batchNumber, block.NumberU64(), batchCounters); err != nil { return err } - log.Info(fmt.Sprintf("[%s] Finish batch %d...", logPrefix, thisBatch)) - - if !hasExecutorForThisBatch { - blockRoot := block.Root() - if err = cfg.datastreamServer.WriteBatchEnd(sdb.hermezDb, thisBatch, lastBatch, &blockRoot, &ler); err != nil { - return err - } - } - - if freshTx { - if err = tx.Commit(); err != nil { - return err - } - } - - return nil -} - -func tryHaltSequencer(logPrefix string, cfg SequenceBlockCfg, thisBatch uint64) { - if cfg.zk.SequencerHaltOnBatchNumber != 0 && - cfg.zk.SequencerHaltOnBatchNumber == thisBatch { - for { - log.Info(fmt.Sprintf("[%s] Halt sequencer on batch %d...", logPrefix, thisBatch)) - time.Sleep(5 * time.Second) //nolint:gomnd - } - } + return sdb.tx.Commit() } diff --git a/zk/stages/stage_sequence_execute_batch.go b/zk/stages/stage_sequence_execute_batch.go new file mode 100644 index 00000000000..5bc6bf3c7e1 --- /dev/null +++ b/zk/stages/stage_sequence_execute_batch.go @@ -0,0 +1,191 @@ +package stages + +import ( + "fmt" + "time" + + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/eth/stagedsync" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/zk/utils" + "github.com/ledgerwatch/log/v3" +) + +func prepareBatchNumber(lastBatch uint64, isLastBatchPariallyProcessed bool) uint64 { + if isLastBatchPariallyProcessed { + return lastBatch + } + + return lastBatch + 1 +} + +func prepareBatchCounters(batchContext *BatchContext, batchState *BatchState, isLastBatchPariallyProcessed bool) (*vm.BatchCounterCollector, error) { + var intermediateUsedCounters *vm.Counters + if isLastBatchPariallyProcessed { + intermediateCountersMap, found, err := batchContext.sdb.hermezDb.GetLatestBatchCounters(batchState.batchNumber) + if err != nil { + return nil, err + } + if !found { + return nil, fmt.Errorf("intermediate counters not found for batch %d", batchState.batchNumber) + } + + intermediateUsedCounters = vm.NewCountersFromUsedMap(intermediateCountersMap) + } + + return vm.NewBatchCounterCollector(batchContext.sdb.smt.GetDepth(), uint16(batchState.forkId), batchContext.cfg.zk.VirtualCountersSmtReduction, batchContext.cfg.zk.ShouldCountersBeUnlimited(batchState.isL1Recovery()), intermediateUsedCounters), nil +} + +func doCheckForBadBatch(batchContext *BatchContext, batchState *BatchState, thisBlock uint64) (bool, error) { + infoTreeIndex, err := batchState.batchL1RecoveryData.getInfoTreeIndex(batchContext.sdb) + if err != nil { + return false, err + } + + // now let's detect a bad batch and skip it if we have to + currentBlock, err := 
rawdb.ReadBlockByNumber(batchContext.sdb.tx, thisBlock) + if err != nil { + return false, err + } + + badBatch, err := checkForBadBatch(batchState.batchNumber, batchContext.sdb.hermezDb, currentBlock.Time(), infoTreeIndex, batchState.batchL1RecoveryData.recoveredBatchData.LimitTimestamp, batchState.batchL1RecoveryData.recoveredBatchData.DecodedData) + if err != nil { + return false, err + } + + if !badBatch { + return false, nil + } + + log.Info(fmt.Sprintf("[%s] Skipping bad batch %d...", batchContext.s.LogPrefix(), batchState.batchNumber)) + // store the fact that this batch was invalid during recovery - will be used for the stream later + if err = batchContext.sdb.hermezDb.WriteInvalidBatch(batchState.batchNumber); err != nil { + return false, err + } + if err = batchContext.sdb.hermezDb.WriteBatchCounters(currentBlock.NumberU64(), map[string]int{}); err != nil { + return false, err + } + if err = batchContext.sdb.hermezDb.DeleteIsBatchPartiallyProcessed(batchState.batchNumber); err != nil { + return false, err + } + if err = stages.SaveStageProgress(batchContext.sdb.tx, stages.HighestSeenBatchNumber, batchState.batchNumber); err != nil { + return false, err + } + if err = batchContext.sdb.hermezDb.WriteForkId(batchState.batchNumber, batchState.forkId); err != nil { + return false, err + } + if err = batchContext.sdb.tx.Commit(); err != nil { + return false, err + } + return true, nil +} + +func updateStreamAndCheckRollback( + batchContext *BatchContext, + batchState *BatchState, + streamWriter *SequencerBatchStreamWriter, + u stagedsync.Unwinder, +) (bool, error) { + checkedVerifierBundles, err := streamWriter.CommitNewUpdates() + if err != nil { + return false, err + } + + infiniteLoop := func(batchNumber uint64) { + // this infinite loop will make the node print the error once every minute, therefore preventing it from creating new blocks + for { + log.Error(fmt.Sprintf("[%s] identified an invalid batch with number %d", batchContext.s.LogPrefix(), batchNumber)) + time.Sleep(time.Minute) + } + } + + for _, verifierBundle := range checkedVerifierBundles { + if verifierBundle.Response.Valid { + continue + } + + // The sequencer can get to this point of the code only in L1Recovery mode or Default Mode. + // There is no way to get here in LimboRecoveryMode + // If we are here in L1RecoveryMode then let's stop everything by using an infinite loop because something is quite wrong + // If we are here in Default mode and limbo is disabled then again do the same as in L1RecoveryMode + // If we are here in Default mode and limbo is enabled then continue normal flow + if batchState.isL1Recovery() || !batchContext.cfg.zk.Limbo { + infiniteLoop(verifierBundle.Request.BatchNumber) + } + + if err = handleLimbo(batchContext, batchState, verifierBundle); err != nil { + return false, err + } + + unwindTo := verifierBundle.Request.GetLastBlockNumber() - 1 + + // for unwind we supply the block number X-1 of the block we want to remove, but supply the hash of the block + // causing the unwind.
+ unwindHeader := rawdb.ReadHeaderByNumber(batchContext.sdb.tx, verifierBundle.Request.GetLastBlockNumber()) + if unwindHeader == nil { + return false, fmt.Errorf("could not find header for block %d", verifierBundle.Request.GetLastBlockNumber()) + } + + log.Warn(fmt.Sprintf("[%s] Block is invalid - rolling back", batchContext.s.LogPrefix()), "badBlock", verifierBundle.Request.GetLastBlockNumber(), "unwindTo", unwindTo, "root", unwindHeader.Root) + + u.UnwindTo(unwindTo, unwindHeader.Hash()) + streamWriter.legacyVerifier.CancelAllRequests() + return true, nil + } + + return false, nil +} + +func runBatchLastSteps( + batchContext *BatchContext, + thisBatch uint64, + blockNumber uint64, + batchCounters *vm.BatchCounterCollector, +) error { + l1InfoIndex, err := batchContext.sdb.hermezDb.GetBlockL1InfoTreeIndex(blockNumber) + if err != nil { + return err + } + + counters, err := batchCounters.CombineCollectors(l1InfoIndex != 0) + if err != nil { + return err + } + + log.Info(fmt.Sprintf("[%s] counters consumed", batchContext.s.LogPrefix()), "batch", thisBatch, "counts", counters.UsedAsString()) + + if err = batchContext.sdb.hermezDb.WriteBatchCounters(blockNumber, counters.UsedAsMap()); err != nil { + return err + } + if err := batchContext.sdb.hermezDb.DeleteIsBatchPartiallyProcessed(thisBatch); err != nil { + return err + } + + // Local Exit Root (ler): read s/c storage every batch to store the LER for the highest block in the batch + ler, err := utils.GetBatchLocalExitRootFromSCStorage(thisBatch, batchContext.sdb.hermezDb.HermezDbReader, batchContext.sdb.tx) + if err != nil { + return err + } + // write ler to hermezdb + if err = batchContext.sdb.hermezDb.WriteLocalExitRootForBatchNo(thisBatch, ler); err != nil { + return err + } + + lastBlock, err := batchContext.sdb.hermezDb.GetHighestBlockInBatch(thisBatch) + if err != nil { + return err + } + block, err := rawdb.ReadBlockByNumber(batchContext.sdb.tx, lastBlock) + if err != nil { + return err + } + blockRoot := block.Root() + if err = batchContext.cfg.datastreamServer.WriteBatchEnd(batchContext.sdb.hermezDb, thisBatch, &blockRoot, &ler); err != nil { + return err + } + + log.Info(fmt.Sprintf("[%s] Finish batch %d...", batchContext.s.LogPrefix(), thisBatch)) + + return nil +} diff --git a/zk/stages/stage_sequence_execute_blocks.go b/zk/stages/stage_sequence_execute_blocks.go index 23637883d4f..495e9114846 100644 --- a/zk/stages/stage_sequence_execute_blocks.go +++ b/zk/stages/stage_sequence_execute_blocks.go @@ -1,7 +1,6 @@ package stages import ( - "context" "fmt" "github.com/gateway-fm/cdk-erigon-lib/common" @@ -9,14 +8,13 @@ import ( "math/big" - "github.com/ledgerwatch/erigon/chain" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/smt/pkg/blockinfo" - "github.com/ledgerwatch/erigon/turbo/shards" "github.com/ledgerwatch/erigon/zk/erigon_db" "github.com/ledgerwatch/erigon/zk/hermez_db" zktypes "github.com/ledgerwatch/erigon/zk/types" @@ -25,8 +23,7 @@ import ( ) func handleStateForNewBlockStarting( - chainConfig *chain.Config, - hermezDb *hermez_db.HermezDb, + batchContext *BatchContext, ibs *state.IntraBlockState, blockNumber uint64, batchNumber uint64, @@ -35,6 +32,9 @@ func handleStateForNewBlockStarting( l1info *zktypes.L1InfoTreeUpdate, shouldWriteGerToContract bool, ) error { + chainConfig := 
batchContext.cfg.chainConfig + hermezDb := batchContext.sdb.hermezDb + ibs.PreExecuteStateSet(chainConfig, blockNumber, timestamp, stateRoot) // handle writing to the ger manager contract but only if the index is above 0 @@ -56,7 +56,7 @@ func handleStateForNewBlockStarting( if l1BlockHash == (common.Hash{}) { // not in the contract so let's write it! ibs.WriteGerManagerL1BlockHash(l1info.GER, l1info.ParentHash) - if err := hermezDb.WriteLatestUsedGer(batchNumber, l1info.GER); err != nil { + if err := hermezDb.WriteLatestUsedGer(blockNumber, l1info.GER); err != nil { return err } } @@ -66,29 +66,68 @@ func handleStateForNewBlockStarting( return nil } +func doFinishBlockAndUpdateState( + batchContext *BatchContext, + ibs *state.IntraBlockState, + header *types.Header, + parentBlock *types.Block, + batchState *BatchState, + ger common.Hash, + l1BlockHash common.Hash, + l1TreeUpdateIndex uint64, + infoTreeIndexProgress uint64, + batchCounters *vm.BatchCounterCollector, +) (*types.Block, error) { + thisBlockNumber := header.Number.Uint64() + + if batchContext.cfg.accumulator != nil { + batchContext.cfg.accumulator.StartChange(thisBlockNumber, header.Hash(), nil, false) + } + + block, err := finaliseBlock(batchContext, ibs, header, parentBlock, batchState, ger, l1BlockHash, l1TreeUpdateIndex, infoTreeIndexProgress, batchCounters) + if err != nil { + return nil, err + } + + if err := updateSequencerProgress(batchContext.sdb.tx, thisBlockNumber, batchState.batchNumber, false); err != nil { + return nil, err + } + + if batchContext.cfg.accumulator != nil { + txs, err := rawdb.RawTransactionsRange(batchContext.sdb.tx, thisBlockNumber, thisBlockNumber) + if err != nil { + return nil, err + } + batchContext.cfg.accumulator.ChangeTransactions(txs) + } + + return block, nil +} + func finaliseBlock( - ctx context.Context, - cfg SequenceBlockCfg, - s *stagedsync.StageState, - sdb *stageDb, + batchContext *BatchContext, ibs *state.IntraBlockState, newHeader *types.Header, parentBlock *types.Block, - forkId uint64, - batch uint64, - accumulator *shards.Accumulator, + batchState *BatchState, ger common.Hash, l1BlockHash common.Hash, - transactions []types.Transaction, - receipts types.Receipts, - execResults []*core.ExecutionResult, - effectiveGases []uint8, - l1Recovery bool, + l1TreeUpdateIndex uint64, + infoTreeIndexProgress uint64, + batchCounters *vm.BatchCounterCollector, ) (*types.Block, error) { - stateWriter := state.NewPlainStateWriter(sdb.tx, sdb.tx, newHeader.Number.Uint64()).SetAccumulator(accumulator) + thisBlockNumber := newHeader.Number.Uint64() + if err := batchContext.sdb.hermezDb.WriteBlockL1InfoTreeIndex(thisBlockNumber, l1TreeUpdateIndex); err != nil { + return nil, err + } + if err := batchContext.sdb.hermezDb.WriteBlockL1InfoTreeIndexProgress(thisBlockNumber, infoTreeIndexProgress); err != nil { + return nil, err + } + + stateWriter := state.NewPlainStateWriter(batchContext.sdb.tx, batchContext.sdb.tx, newHeader.Number.Uint64()).SetAccumulator(batchContext.cfg.accumulator) chainReader := stagedsync.ChainReader{ - Cfg: *cfg.chainConfig, - Db: sdb.tx, + Cfg: *batchContext.cfg.chainConfig, + Db: batchContext.sdb.tx, } var excessDataGas *big.Int @@ -97,48 +136,49 @@ func finaliseBlock( } txInfos := []blockinfo.ExecutedTxInfo{} - for i, tx := range transactions { + builtBlockElements := batchState.blockState.builtBlockElements + for i, tx := range builtBlockElements.transactions { var from common.Address var err error sender, ok := tx.GetSender() if ok { from = sender } else { - signer := 
types.MakeSigner(cfg.chainConfig, newHeader.Number.Uint64()) + signer := types.MakeSigner(batchContext.cfg.chainConfig, newHeader.Number.Uint64()) from, err = tx.Sender(*signer) if err != nil { return nil, err } } - localReceipt := core.CreateReceiptForBlockInfoTree(receipts[i], cfg.chainConfig, newHeader.Number.Uint64(), execResults[i]) + localReceipt := core.CreateReceiptForBlockInfoTree(builtBlockElements.receipts[i], batchContext.cfg.chainConfig, newHeader.Number.Uint64(), builtBlockElements.executionResults[i]) txInfos = append(txInfos, blockinfo.ExecutedTxInfo{ Tx: tx, - EffectiveGasPrice: effectiveGases[i], + EffectiveGasPrice: builtBlockElements.effectiveGases[i], Receipt: localReceipt, Signer: &from, }) } - if err := postBlockStateHandling(cfg, ibs, sdb.hermezDb, newHeader, ger, l1BlockHash, parentBlock.Root(), txInfos); err != nil { + if err := postBlockStateHandling(*batchContext.cfg, ibs, batchContext.sdb.hermezDb, newHeader, ger, l1BlockHash, parentBlock.Root(), txInfos); err != nil { return nil, err } - if l1Recovery { - for i, receipt := range receipts { - core.ProcessReceiptForBlockExecution(receipt, sdb.hermezDb.HermezDbReader, cfg.chainConfig, newHeader.Number.Uint64(), newHeader, transactions[i]) + if batchState.isL1Recovery() { + for i, receipt := range builtBlockElements.receipts { + core.ProcessReceiptForBlockExecution(receipt, batchContext.sdb.hermezDb.HermezDbReader, batchContext.cfg.chainConfig, newHeader.Number.Uint64(), newHeader, builtBlockElements.transactions[i]) } } - finalBlock, finalTransactions, finalReceipts, err := core.FinalizeBlockExecutionWithHistoryWrite( - cfg.engine, - sdb.stateReader, + finalBlock, finalTransactions, finalReceipts, err := core.FinalizeBlockExecution( + batchContext.cfg.engine, + batchContext.sdb.stateReader, newHeader, - transactions, + builtBlockElements.transactions, []*types.Header{}, // no uncles stateWriter, - cfg.chainConfig, + batchContext.cfg.chainConfig, ibs, - receipts, + builtBlockElements.receipts, nil, // no withdrawals chainReader, true, @@ -148,58 +188,85 @@ func finaliseBlock( return nil, err } - newRoot, err := zkIncrementIntermediateHashes(ctx, s.LogPrefix(), s, sdb.tx, sdb.eridb, sdb.smt, newHeader.Number.Uint64()-1, newHeader.Number.Uint64()) + // this is actually the interhashes stage + newRoot, err := zkIncrementIntermediateHashes(batchContext.ctx, batchContext.s.LogPrefix(), batchContext.s, batchContext.sdb.tx, batchContext.sdb.eridb, batchContext.sdb.smt, newHeader.Number.Uint64()-1, newHeader.Number.Uint64()) if err != nil { return nil, err } finalHeader := finalBlock.HeaderNoCopy() finalHeader.Root = newRoot - finalHeader.Coinbase = cfg.zk.AddressSequencer - finalHeader.GasLimit = utils.GetBlockGasLimitForFork(forkId) - finalHeader.ReceiptHash = types.DeriveSha(receipts) - finalHeader.Bloom = types.CreateBloom(receipts) + finalHeader.Coinbase = batchContext.cfg.zk.AddressSequencer + finalHeader.GasLimit = utils.GetBlockGasLimitForFork(batchState.forkId) + finalHeader.ReceiptHash = types.DeriveSha(builtBlockElements.receipts) + finalHeader.Bloom = types.CreateBloom(builtBlockElements.receipts) newNum := finalBlock.Number() - err = rawdb.WriteHeader_zkEvm(sdb.tx, finalHeader) + err = rawdb.WriteHeader_zkEvm(batchContext.sdb.tx, finalHeader) if err != nil { return nil, fmt.Errorf("failed to write header: %v", err) } - if err := rawdb.WriteHeadHeaderHash(sdb.tx, finalHeader.Hash()); err != nil { + if err := rawdb.WriteHeadHeaderHash(batchContext.sdb.tx, finalHeader.Hash()); err != nil { return nil, err } - 
err = rawdb.WriteCanonicalHash(sdb.tx, finalHeader.Hash(), newNum.Uint64()) + err = rawdb.WriteCanonicalHash(batchContext.sdb.tx, finalHeader.Hash(), newNum.Uint64()) if err != nil { return nil, fmt.Errorf("failed to write header: %v", err) } - erigonDB := erigon_db.NewErigonDb(sdb.tx) + erigonDB := erigon_db.NewErigonDb(batchContext.sdb.tx) err = erigonDB.WriteBody(newNum, finalHeader.Hash(), finalTransactions) if err != nil { return nil, fmt.Errorf("failed to write body: %v", err) } // write the new block lookup entries - rawdb.WriteTxLookupEntries(sdb.tx, finalBlock) + rawdb.WriteTxLookupEntries(batchContext.sdb.tx, finalBlock) - if err = rawdb.WriteReceipts(sdb.tx, newNum.Uint64(), finalReceipts); err != nil { + if err = rawdb.WriteReceipts(batchContext.sdb.tx, newNum.Uint64(), finalReceipts); err != nil { return nil, err } - if err = sdb.hermezDb.WriteForkId(batch, forkId); err != nil { + if err = batchContext.sdb.hermezDb.WriteForkId(batchState.batchNumber, batchState.forkId); err != nil { return nil, err } // now process the senders to avoid a stage by itself - if err := addSenders(cfg, newNum, finalTransactions, sdb.tx, finalHeader); err != nil { + if err := addSenders(*batchContext.cfg, newNum, finalTransactions, batchContext.sdb.tx, finalHeader); err != nil { return nil, err } // now add in the zk batch to block references - if err := sdb.hermezDb.WriteBlockBatch(newNum.Uint64(), batch); err != nil { + if err := batchContext.sdb.hermezDb.WriteBlockBatch(newNum.Uint64(), batchState.batchNumber); err != nil { return nil, fmt.Errorf("write block batch error: %v", err) } + // write batch counters + err = batchContext.sdb.hermezDb.WriteBatchCounters(newNum.Uint64(), batchCounters.CombineCollectorsNoChanges().UsedAsMap()) + if err != nil { + return nil, err + } + + // write partially processed + err = batchContext.sdb.hermezDb.WriteIsBatchPartiallyProcessed(batchState.batchNumber) + if err != nil { + return nil, err + } + + // this is actually account + storage indices stages + quitCh := batchContext.ctx.Done() + from := newNum.Uint64() + if from == 1 { + from = 0 + } + to := newNum.Uint64() + 1 + if err = stagedsync.PromoteHistory(batchContext.s.LogPrefix(), batchContext.sdb.tx, kv.AccountChangeSet, from, to, *batchContext.historyCfg, quitCh); err != nil { + return nil, err + } + if err = stagedsync.PromoteHistory(batchContext.s.LogPrefix(), batchContext.sdb.tx, kv.StorageChangeSet, from, to, *batchContext.historyCfg, quitCh); err != nil { + return nil, err + } + return finalBlock, nil } diff --git a/zk/stages/stage_sequence_execute_data_stream.go b/zk/stages/stage_sequence_execute_data_stream.go new file mode 100644 index 00000000000..373511a1212 --- /dev/null +++ b/zk/stages/stage_sequence_execute_data_stream.go @@ -0,0 +1,106 @@ +package stages + +import ( + "context" + "fmt" + + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/zk/datastream/server" + verifier "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" + "github.com/ledgerwatch/erigon/zk/utils" + "github.com/ledgerwatch/log/v3" +) + +type SequencerBatchStreamWriter struct { + ctx context.Context + logPrefix string + legacyVerifier *verifier.LegacyExecutorVerifier + sdb *stageDb + streamServer *server.DataStreamServer + hasExecutors bool + lastBatch uint64 +} + +func newSequencerBatchStreamWriter(batchContext *BatchContext, batchState *BatchState, lastBatch uint64) *SequencerBatchStreamWriter { + return &SequencerBatchStreamWriter{ + ctx: batchContext.ctx, + logPrefix: 
batchContext.s.LogPrefix(), + legacyVerifier: batchContext.cfg.legacyVerifier, + sdb: batchContext.sdb, + streamServer: batchContext.cfg.datastreamServer, + hasExecutors: batchState.hasExecutorForThisBatch, + lastBatch: lastBatch, + } +} + +func (sbc *SequencerBatchStreamWriter) CommitNewUpdates() ([]*verifier.VerifierBundle, error) { + verifierBundles, err := sbc.legacyVerifier.ProcessResultsSequentially() + if err != nil { + return nil, err + } + + return sbc.writeBlockDetailsToDatastream(verifierBundles) +} + +func (sbc *SequencerBatchStreamWriter) writeBlockDetailsToDatastream(verifiedBundles []*verifier.VerifierBundle) ([]*verifier.VerifierBundle, error) { + var checkedVerifierBundles []*verifier.VerifierBundle = make([]*verifier.VerifierBundle, 0, len(verifiedBundles)) + for _, bundle := range verifiedBundles { + request := bundle.Request + response := bundle.Response + + if response.Valid { + parentBlock, err := rawdb.ReadBlockByNumber(sbc.sdb.tx, request.GetLastBlockNumber()-1) + if err != nil { + return checkedVerifierBundles, err + } + block, err := rawdb.ReadBlockByNumber(sbc.sdb.tx, request.GetLastBlockNumber()) + if err != nil { + return checkedVerifierBundles, err + } + + if err := sbc.streamServer.WriteBlockWithBatchStartToStream(sbc.logPrefix, sbc.sdb.tx, sbc.sdb.hermezDb, request.ForkId, request.BatchNumber, sbc.lastBatch, *parentBlock, *block); err != nil { + return checkedVerifierBundles, err + } + + // once we have handled the very first block we can update the last batch to be the current batch safely so that + // we don't keep adding batch bookmarks in between blocks + sbc.lastBatch = request.BatchNumber + } + + checkedVerifierBundles = append(checkedVerifierBundles, bundle) + + // just break early if there is an invalid response as we don't want to process the remainder anyway + if !response.Valid { + break + } + } + + return checkedVerifierBundles, nil +} + +func finalizeLastBatchInDatastreamIfNotFinalized(batchContext *BatchContext, batchState *BatchState, thisBlock uint64) error { + isLastEntryBatchEnd, err := batchContext.cfg.datastreamServer.IsLastEntryBatchEnd() + if err != nil { + return err + } + + if isLastEntryBatchEnd { + return nil + } + + log.Warn(fmt.Sprintf("[%s] Last batch %d was not closed properly, closing it now...", batchContext.s.LogPrefix(), batchState.batchNumber)) + ler, err := utils.GetBatchLocalExitRootFromSCStorage(batchState.batchNumber, batchContext.sdb.hermezDb.HermezDbReader, batchContext.sdb.tx) + if err != nil { + return err + } + + lastBlock, err := rawdb.ReadBlockByNumber(batchContext.sdb.tx, thisBlock) + if err != nil { + return err + } + root := lastBlock.Root() + if err = batchContext.cfg.datastreamServer.WriteBatchEnd(batchContext.sdb.hermezDb, batchState.batchNumber-1, &root, &ler); err != nil { + return err + } + return nil +} diff --git a/zk/stages/stage_sequence_execute_injected_batch.go b/zk/stages/stage_sequence_execute_injected_batch.go index 852ecfd5b9b..323b7a0f2f9 100644 --- a/zk/stages/stage_sequence_execute_injected_batch.go +++ b/zk/stages/stage_sequence_execute_injected_batch.go @@ -1,38 +1,48 @@ package stages import ( - "context" + "math" "errors" + "github.com/gateway-fm/cdk-erigon-lib/common" "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/core/vm/evmtypes" - "github.com/ledgerwatch/erigon/eth/stagedsync" zktx 
"github.com/ledgerwatch/erigon/zk/tx" zktypes "github.com/ledgerwatch/erigon/zk/types" + "github.com/ledgerwatch/erigon/zk/utils" ) const ( - injectedBatchNumber = 1 injectedBatchBlockNumber = 1 injectedBatchBatchNumber = 1 ) func processInjectedInitialBatch( - ctx context.Context, - cfg SequenceBlockCfg, - s *stagedsync.StageState, - sdb *stageDb, - forkId uint64, - header *types.Header, - parentBlock *types.Block, - blockContext *evmtypes.BlockContext, - l1Recovery bool, + batchContext *BatchContext, + batchState *BatchState, ) error { - injected, err := sdb.hermezDb.GetL1InjectedBatch(0) + // set the block height for the fork we're running at to ensure contract interactions are correct + if err := utils.RecoverySetBlockConfigForks(injectedBatchBlockNumber, batchState.forkId, batchContext.cfg.chainConfig, batchContext.s.LogPrefix()); err != nil { + return err + } + + header, parentBlock, err := prepareHeader(batchContext.sdb.tx, 0, math.MaxUint64, math.MaxUint64, batchState.forkId, batchContext.cfg.zk.AddressSequencer) + if err != nil { + return err + } + + getHeader := func(hash common.Hash, number uint64) *types.Header { + return rawdb.ReadHeader(batchContext.sdb.tx, hash, number) + } + getHashFn := core.GetHashFn(header, getHeader) + blockContext := core.NewEVMBlockContext(header, getHashFn, batchContext.cfg.engine, &batchContext.cfg.zk.AddressSequencer, parentBlock.ExcessDataGas()) + + injected, err := batchContext.sdb.hermezDb.GetL1InjectedBatch(0) if err != nil { return err } @@ -43,43 +53,39 @@ func processInjectedInitialBatch( Timestamp: injected.Timestamp, } - ibs := state.New(sdb.stateReader) + ibs := state.New(batchContext.sdb.stateReader) // the injected batch block timestamp should also match that of the injected batch header.Time = injected.Timestamp parentRoot := parentBlock.Root() - if err = handleStateForNewBlockStarting( - cfg.chainConfig, - sdb.hermezDb, - ibs, - injectedBatchBlockNumber, - injectedBatchBatchNumber, - injected.Timestamp, - &parentRoot, - fakeL1TreeUpdate, - true, - ); err != nil { + if err = handleStateForNewBlockStarting(batchContext, ibs, injectedBatchBlockNumber, injectedBatchBatchNumber, injected.Timestamp, &parentRoot, fakeL1TreeUpdate, true); err != nil { return err } - txn, receipt, execResult, effectiveGas, err := handleInjectedBatch(cfg, sdb, ibs, blockContext, injected, header, parentBlock, forkId) + txn, receipt, execResult, effectiveGas, err := handleInjectedBatch(batchContext, ibs, &blockContext, injected, header, parentBlock, batchState.forkId) if err != nil { return err } - txns := types.Transactions{*txn} - receipts := types.Receipts{receipt} - execResults := []*core.ExecutionResult{execResult} - effectiveGases := []uint8{effectiveGas} + batchState.blockState.builtBlockElements = BuiltBlockElements{ + transactions: types.Transactions{*txn}, + receipts: types.Receipts{receipt}, + executionResults: []*core.ExecutionResult{execResult}, + effectiveGases: []uint8{effectiveGas}, + } + batchCounters := vm.NewBatchCounterCollector(batchContext.sdb.smt.GetDepth(), uint16(batchState.forkId), batchContext.cfg.zk.VirtualCountersSmtReduction, batchContext.cfg.zk.ShouldCountersBeUnlimited(batchState.isL1Recovery()), nil) + + if _, err = doFinishBlockAndUpdateState(batchContext, ibs, header, parentBlock, batchState, injected.LastGlobalExitRoot, injected.L1ParentHash, 0, 0, batchCounters); err != nil { + return err + } - _, err = doFinishBlockAndUpdateState(ctx, cfg, s, sdb, ibs, header, parentBlock, forkId, injectedBatchNumber, 
injected.LastGlobalExitRoot, injected.L1ParentHash, txns, receipts, execResults, effectiveGases, 0, l1Recovery) - return err + // deleting the partially processed flag + return batchContext.sdb.hermezDb.DeleteIsBatchPartiallyProcessed(injectedBatchBatchNumber) } func handleInjectedBatch( - cfg SequenceBlockCfg, - sdb *stageDb, + batchContext *BatchContext, ibs *state.IntraBlockState, blockContext *evmtypes.BlockContext, injected *zktypes.L1InjectedBatch, @@ -98,11 +104,11 @@ func handleInjectedBatch( return nil, nil, nil, 0, errors.New("expected 1 transaction in the injected batch") } - batchCounters := vm.NewBatchCounterCollector(sdb.smt.GetDepth(), uint16(forkId), cfg.zk.VirtualCountersSmtReduction, cfg.zk.ShouldCountersBeUnlimited(false), nil) + batchCounters := vm.NewBatchCounterCollector(batchContext.sdb.smt.GetDepth(), uint16(forkId), batchContext.cfg.zk.VirtualCountersSmtReduction, batchContext.cfg.zk.ShouldCountersBeUnlimited(false), nil) // process the tx and we can ignore the counters as an overflow at this stage means no network anyway - effectiveGas := DeriveEffectiveGasPrice(cfg, decodedBlocks[0].Transactions[0]) - receipt, execResult, _, err := attemptAddTransaction(cfg, sdb, ibs, batchCounters, blockContext, header, decodedBlocks[0].Transactions[0], effectiveGas, false, forkId, 0 /* use 0 for l1InfoIndex in injected batch */, nil) + effectiveGas := DeriveEffectiveGasPrice(*batchContext.cfg, decodedBlocks[0].Transactions[0]) + receipt, execResult, _, err := attemptAddTransaction(*batchContext.cfg, batchContext.sdb, ibs, batchCounters, blockContext, header, decodedBlocks[0].Transactions[0], effectiveGas, false, forkId, 0 /* use 0 for l1InfoIndex in injected batch */, nil) if err != nil { return nil, nil, nil, 0, err } diff --git a/zk/stages/stage_sequence_execute_limbo.go b/zk/stages/stage_sequence_execute_limbo.go new file mode 100644 index 00000000000..64a9b6ae6e1 --- /dev/null +++ b/zk/stages/stage_sequence_execute_limbo.go @@ -0,0 +1,138 @@ +package stages + +import ( + "bytes" + "fmt" + "math" + + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" + "github.com/ledgerwatch/erigon/zk/txpool" + "github.com/ledgerwatch/log/v3" +) + +type limboStreamBytesGroup struct { + blockNumber uint64 + transactionsIndicesInBlock []int +} + +func newLimboStreamBytesGroup(blockNumber uint64) *limboStreamBytesGroup { + return &limboStreamBytesGroup{ + blockNumber: blockNumber, + transactionsIndicesInBlock: make([]int, 0, 1), + } +} + +type limboStreamBytesBuilderHelper struct { + sendersToGroupMap map[string][]*limboStreamBytesGroup +} + +func newLimboStreamBytesBuilderHelper() *limboStreamBytesBuilderHelper { + return &limboStreamBytesBuilderHelper{ + sendersToGroupMap: make(map[string][]*limboStreamBytesGroup), + } +} + +func (_this *limboStreamBytesBuilderHelper) append(senderMapKey string, blockNumber uint64, transactionIndex int) ([]uint64, [][]int) { + limboStreamBytesGroups := _this.add(senderMapKey, blockNumber, transactionIndex) + + size := len(limboStreamBytesGroups) + resultBlocks := make([]uint64, size) + resultTransactionsSet := make([][]int, size) + + for i := 0; i < size; i++ { + group := limboStreamBytesGroups[i] + resultBlocks[i] = group.blockNumber + resultTransactionsSet[i] = group.transactionsIndicesInBlock + } + + return resultBlocks, resultTransactionsSet +} + +func (_this *limboStreamBytesBuilderHelper) add(senderMapKey string, blockNumber uint64, transactionIndex int) 
[]*limboStreamBytesGroup { + limboStreamBytesGroups, ok := _this.sendersToGroupMap[senderMapKey] + if !ok { + limboStreamBytesGroups = []*limboStreamBytesGroup{newLimboStreamBytesGroup(blockNumber)} + _this.sendersToGroupMap[senderMapKey] = limboStreamBytesGroups + } + group := limboStreamBytesGroups[len(limboStreamBytesGroups)-1] + if group.blockNumber != blockNumber { + group = newLimboStreamBytesGroup(blockNumber) + limboStreamBytesGroups = append(limboStreamBytesGroups, group) + _this.sendersToGroupMap[senderMapKey] = limboStreamBytesGroups + } + group.transactionsIndicesInBlock = append(group.transactionsIndicesInBlock, transactionIndex) + + return limboStreamBytesGroups +} + +func handleLimbo(batchContext *BatchContext, batchState *BatchState, verifierBundle *legacy_executor_verifier.VerifierBundle) error { + request := verifierBundle.Request + legacyVerifier := batchContext.cfg.legacyVerifier + + log.Info(fmt.Sprintf("[%s] identified an invalid batch, entering limbo", batchContext.s.LogPrefix()), "batch", request.BatchNumber) + + l1InfoTreeMinTimestamps := make(map[uint64]uint64) + if _, err := legacyVerifier.GetWholeBatchStreamBytes(request.BatchNumber, batchContext.sdb.tx, []uint64{request.GetLastBlockNumber()}, batchContext.sdb.hermezDb.HermezDbReader, l1InfoTreeMinTimestamps, nil); err != nil { + return err + } + + blockNumber := request.GetLastBlockNumber() + witness, err := legacyVerifier.WitnessGenerator.GetWitnessByBlockRange(batchContext.sdb.tx, batchContext.ctx, blockNumber, blockNumber, false, batchContext.cfg.zk.WitnessFull) + if err != nil { + return err + } + + limboSendersToPreviousTxMap := make(map[string]uint32) + limboStreamBytesBuilderHelper := newLimboStreamBytesBuilderHelper() + + limboDetails := txpool.NewLimboBatchDetails() + limboDetails.Witness = witness + limboDetails.L1InfoTreeMinTimestamps = l1InfoTreeMinTimestamps + limboDetails.BatchNumber = request.BatchNumber + limboDetails.ForkId = request.ForkId + + block, err := rawdb.ReadBlockByNumber(batchContext.sdb.tx, blockNumber) + if err != nil { + return err + } + + for i, transaction := range block.Transactions() { + var b []byte + buffer := bytes.NewBuffer(b) + err = transaction.EncodeRLP(buffer) + if err != nil { + return err + } + + signer := types.MakeSigner(batchContext.cfg.chainConfig, blockNumber) + sender, err := transaction.Sender(*signer) + if err != nil { + return err + } + senderMapKey := sender.Hex() + + blocksForStreamBytes, transactionsToIncludeByIndex := limboStreamBytesBuilderHelper.append(senderMapKey, blockNumber, i) + streamBytes, err := legacyVerifier.GetWholeBatchStreamBytes(request.BatchNumber, batchContext.sdb.tx, blocksForStreamBytes, batchContext.sdb.hermezDb.HermezDbReader, l1InfoTreeMinTimestamps, transactionsToIncludeByIndex) + if err != nil { + return err + } + + previousTxIndex, ok := limboSendersToPreviousTxMap[senderMapKey] + if !ok { + previousTxIndex = math.MaxUint32 + } + + hash := transaction.Hash() + limboTxCount := limboDetails.AppendTransaction(buffer.Bytes(), streamBytes, hash, sender, previousTxIndex) + limboSendersToPreviousTxMap[senderMapKey] = limboTxCount - 1 + + log.Info(fmt.Sprintf("[%s] adding transaction to limbo", batchContext.s.LogPrefix()), "hash", hash) + } + + limboDetails.TimestampLimit = block.Time() + limboDetails.FirstBlockNumber = block.NumberU64() + batchContext.cfg.txPool.ProcessLimboBatchDetails(limboDetails) + return nil +} diff --git a/zk/stages/stage_sequence_execute_state.go b/zk/stages/stage_sequence_execute_state.go new file mode 100644 
index 00000000000..20245564fdd --- /dev/null +++ b/zk/stages/stage_sequence_execute_state.go @@ -0,0 +1,253 @@ +package stages + +import ( + "context" + "fmt" + "math" + + mapset "github.com/deckarep/golang-set/v2" + "github.com/gateway-fm/cdk-erigon-lib/common" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/stagedsync" + "github.com/ledgerwatch/erigon/zk/l1_data" + zktx "github.com/ledgerwatch/erigon/zk/tx" + "github.com/ledgerwatch/erigon/zk/txpool" +) + +type BatchContext struct { + ctx context.Context + cfg *SequenceBlockCfg + historyCfg *stagedsync.HistoryCfg + s *stagedsync.StageState + sdb *stageDb +} + +func newBatchContext(ctx context.Context, cfg *SequenceBlockCfg, historyCfg *stagedsync.HistoryCfg, s *stagedsync.StageState, sdb *stageDb) *BatchContext { + return &BatchContext{ + ctx: ctx, + cfg: cfg, + historyCfg: historyCfg, + s: s, + sdb: sdb, + } +} + +// TYPE BATCH STATE +type BatchState struct { + forkId uint64 + batchNumber uint64 + hasExecutorForThisBatch bool + hasAnyTransactionsInThisBatch bool + builtBlocks []uint64 + yieldedTransactions mapset.Set[[32]byte] + blockState *BlockState + batchL1RecoveryData *BatchL1RecoveryData + limboRecoveryData *LimboRecoveryData +} + +func newBatchState(forkId, batchNumber uint64, hasExecutorForThisBatch, l1Recovery bool, txPool *txpool.TxPool) *BatchState { + batchState := &BatchState{ + forkId: forkId, + batchNumber: batchNumber, + hasExecutorForThisBatch: hasExecutorForThisBatch, + hasAnyTransactionsInThisBatch: false, + builtBlocks: make([]uint64, 0, 128), + yieldedTransactions: mapset.NewSet[[32]byte](), + blockState: newBlockState(), + batchL1RecoveryData: nil, + limboRecoveryData: nil, + } + + if l1Recovery { + batchState.batchL1RecoveryData = newBatchL1RecoveryData(batchState) + } + + limboHeaderTimestamp, limboTxHash := txPool.GetLimboTxHash(batchState.batchNumber) + if limboTxHash != nil { + batchState.limboRecoveryData = newLimboRecoveryData(limboHeaderTimestamp, limboTxHash) + } + + return batchState +} + +func (bs *BatchState) isL1Recovery() bool { + return bs.batchL1RecoveryData != nil +} + +func (bs *BatchState) isLimboRecovery() bool { + return bs.limboRecoveryData != nil +} + +func (bs *BatchState) isAnyRecovery() bool { + return bs.isL1Recovery() || bs.isLimboRecovery() +} + +func (bs *BatchState) isThereAnyTransactionsToRecover() bool { + if !bs.isL1Recovery() { + return false + } + + return bs.blockState.hasAnyTransactionForInclusion() || bs.batchL1RecoveryData.recoveredBatchData.IsWorkRemaining +} + +func (bs *BatchState) loadBlockL1RecoveryData(decodedBlocksIndex uint64) bool { + decodedBatchL2Data, found := bs.batchL1RecoveryData.getDecodedL1RecoveredBatchDataByIndex(decodedBlocksIndex) + bs.blockState.setBlockL1RecoveryData(decodedBatchL2Data) + return found +} + +// if not limbo set the limboHeaderTimestamp to the "default" value for "prepareHeader" function +func (bs *BatchState) getBlockHeaderForcedTimestamp() uint64 { + if bs.isLimboRecovery() { + return bs.limboRecoveryData.limboHeaderTimestamp + } + + return math.MaxUint64 +} + +func (bs *BatchState) getCoinbase(cfg *SequenceBlockCfg) common.Address { + if bs.isL1Recovery() { + return bs.batchL1RecoveryData.recoveredBatchData.Coinbase + } + + return cfg.zk.AddressSequencer +} + +func (bs *BatchState) onAddedTransaction(transaction types.Transaction, receipt *types.Receipt, execResult *core.ExecutionResult, effectiveGas uint8) { + 
bs.blockState.builtBlockElements.onFinishAddingTransaction(transaction, receipt, execResult, effectiveGas) + bs.hasAnyTransactionsInThisBatch = true +} + +func (bs *BatchState) onBuiltBlock(blockNumber uint64) { + bs.builtBlocks = append(bs.builtBlocks, blockNumber) +} + +// TYPE BATCH L1 RECOVERY DATA +type BatchL1RecoveryData struct { + recoveredBatchDataSize int + recoveredBatchData *l1_data.DecodedL1Data + batchState *BatchState +} + +func newBatchL1RecoveryData(batchState *BatchState) *BatchL1RecoveryData { + return &BatchL1RecoveryData{ + batchState: batchState, + } +} + +func (batchL1RecoveryData *BatchL1RecoveryData) loadBatchData(sdb *stageDb) (err error) { + batchL1RecoveryData.recoveredBatchData, err = l1_data.BreakDownL1DataByBatch(batchL1RecoveryData.batchState.batchNumber, batchL1RecoveryData.batchState.forkId, sdb.hermezDb.HermezDbReader) + if err != nil { + return err + } + + batchL1RecoveryData.recoveredBatchDataSize = len(batchL1RecoveryData.recoveredBatchData.DecodedData) + return nil +} + +func (batchL1RecoveryData *BatchL1RecoveryData) hasAnyDecodedBlocks() bool { + return batchL1RecoveryData.recoveredBatchDataSize > 0 +} + +func (batchL1RecoveryData *BatchL1RecoveryData) getInfoTreeIndex(sdb *stageDb) (uint64, error) { + var infoTreeIndex uint64 + + if batchL1RecoveryData.recoveredBatchData.L1InfoRoot == SpecialZeroIndexHash { + return uint64(0), nil + } + + infoTreeIndex, found, err := sdb.hermezDb.GetL1InfoTreeIndexByRoot(batchL1RecoveryData.recoveredBatchData.L1InfoRoot) + if err != nil { + return uint64(0), err + } + if !found { + return uint64(0), fmt.Errorf("could not find L1 info tree index for root %s", batchL1RecoveryData.recoveredBatchData.L1InfoRoot.String()) + } + + return infoTreeIndex, nil +} + +func (batchL1RecoveryData *BatchL1RecoveryData) getDecodedL1RecoveredBatchDataByIndex(decodedBlocksIndex uint64) (*zktx.DecodedBatchL2Data, bool) { + if decodedBlocksIndex == uint64(batchL1RecoveryData.recoveredBatchDataSize) { + return nil, false + } + + return &batchL1RecoveryData.recoveredBatchData.DecodedData[decodedBlocksIndex], true +} + +// TYPE LIMBO RECOVERY DATA +type LimboRecoveryData struct { + limboHeaderTimestamp uint64 + limboTxHash *common.Hash +} + +func newLimboRecoveryData(limboHeaderTimestamp uint64, limboTxHash *common.Hash) *LimboRecoveryData { + return &LimboRecoveryData{ + limboHeaderTimestamp: limboHeaderTimestamp, + limboTxHash: limboTxHash, + } +} + +// TYPE BLOCK STATE +type BlockState struct { + transactionsForInclusion []types.Transaction + builtBlockElements BuiltBlockElements + blockL1RecoveryData *zktx.DecodedBatchL2Data +} + +func newBlockState() *BlockState { + return &BlockState{} +} + +func (bs *BlockState) hasAnyTransactionForInclusion() bool { + return len(bs.transactionsForInclusion) > 0 +} + +func (bs *BlockState) setBlockL1RecoveryData(blockL1RecoveryData *zktx.DecodedBatchL2Data) { + bs.blockL1RecoveryData = blockL1RecoveryData + + if bs.blockL1RecoveryData != nil { + bs.transactionsForInclusion = bs.blockL1RecoveryData.Transactions + } else { + bs.transactionsForInclusion = []types.Transaction{} + } +} + +func (bs *BlockState) getDeltaTimestamp() uint64 { + if bs.blockL1RecoveryData != nil { + return uint64(bs.blockL1RecoveryData.DeltaTimestamp) + } + + return math.MaxUint64 +} + +func (bs *BlockState) getL1EffectiveGases(cfg SequenceBlockCfg, i int) uint8 { + if bs.blockL1RecoveryData != nil { + return bs.blockL1RecoveryData.EffectiveGasPricePercentages[i] + } + + return DeriveEffectiveGasPrice(cfg, 
bs.transactionsForInclusion[i]) +} + +// TYPE BLOCK ELEMENTS +type BuiltBlockElements struct { + transactions []types.Transaction + receipts types.Receipts + effectiveGases []uint8 + executionResults []*core.ExecutionResult +} + +func (bbe *BuiltBlockElements) resetBlockBuildingArrays() { + bbe.transactions = []types.Transaction{} + bbe.receipts = types.Receipts{} + bbe.effectiveGases = []uint8{} + bbe.executionResults = []*core.ExecutionResult{} +} + +func (bbe *BuiltBlockElements) onFinishAddingTransaction(transaction types.Transaction, receipt *types.Receipt, execResult *core.ExecutionResult, effectiveGas uint8) { + bbe.transactions = append(bbe.transactions, transaction) + bbe.receipts = append(bbe.receipts, receipt) + bbe.executionResults = append(bbe.executionResults, execResult) + bbe.effectiveGases = append(bbe.effectiveGases, effectiveGas) +} diff --git a/zk/stages/stage_sequence_execute_transactions.go b/zk/stages/stage_sequence_execute_transactions.go index 88877075bc1..5348eace90d 100644 --- a/zk/stages/stage_sequence_execute_transactions.go +++ b/zk/stages/stage_sequence_execute_transactions.go @@ -2,7 +2,6 @@ package stages import ( "context" - "encoding/binary" "github.com/gateway-fm/cdk-erigon-lib/common" "github.com/gateway-fm/cdk-erigon-lib/kv" @@ -11,7 +10,6 @@ import ( "io" mapset "github.com/deckarep/golang-set/v2" - "github.com/gateway-fm/cdk-erigon-lib/common/length" types2 "github.com/gateway-fm/cdk-erigon-lib/types" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/state" @@ -19,13 +17,14 @@ import ( "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/core/vm/evmtypes" "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/erigon/zk/hermez_db" - zktx "github.com/ledgerwatch/erigon/zk/tx" "github.com/ledgerwatch/erigon/zk/utils" "github.com/ledgerwatch/log/v3" ) func getNextPoolTransactions(ctx context.Context, cfg SequenceBlockCfg, executionAt, forkId uint64, alreadyYielded mapset.Set[[32]byte]) ([]types.Transaction, error) { + cfg.txPool.LockFlusher() + defer cfg.txPool.UnlockFlusher() + var transactions []types.Transaction var err error @@ -50,6 +49,9 @@ func getNextPoolTransactions(ctx context.Context, cfg SequenceBlockCfg, executio } func getLimboTransaction(ctx context.Context, cfg SequenceBlockCfg, txHash *common.Hash) ([]types.Transaction, error) { + cfg.txPool.LockFlusher() + defer cfg.txPool.UnlockFlusher() + var transactions []types.Transaction // ensure we don't spin forever looking for transactions, attempt for a while then exit up to the caller if err := cfg.txPoolDb.View(ctx, func(poolTx kv.Tx) error { @@ -73,56 +75,6 @@ func getLimboTransaction(ctx context.Context, cfg SequenceBlockCfg, txHash *comm return transactions, nil } -func getNextL1BatchData(batchNumber uint64, forkId uint64, hermezDb *hermez_db.HermezDb) (*nextBatchL1Data, error) { - nextData := &nextBatchL1Data{} - // we expect that the batch we're going to load in next should be in the db already because of the l1 block sync - // stage, if it is not there we need to panic as we're in a bad state - batchL2Data, err := hermezDb.GetL1BatchData(batchNumber) - if err != nil { - return nextData, err - } - - if len(batchL2Data) == 0 { - // end of the line for batch recovery so return empty - return nextData, nil - } - - nextData.Coinbase = common.BytesToAddress(batchL2Data[:length.Addr]) - nextData.L1InfoRoot = common.BytesToHash(batchL2Data[length.Addr : length.Addr+length.Hash]) - tsBytes := batchL2Data[length.Addr+length.Hash : 
length.Addr+length.Hash+8] - nextData.LimitTimestamp = binary.BigEndian.Uint64(tsBytes) - batchL2Data = batchL2Data[length.Addr+length.Hash+8:] - - nextData.DecodedData, err = zktx.DecodeBatchL2Blocks(batchL2Data, forkId) - if err != nil { - return nextData, err - } - - // no data means no more work to do - end of the line - if len(nextData.DecodedData) == 0 { - return nextData, nil - } - - nextData.IsWorkRemaining = true - transactionsInBatch := 0 - for _, batch := range nextData.DecodedData { - transactionsInBatch += len(batch.Transactions) - } - if transactionsInBatch == 0 { - // we need to check if this batch should simply be empty or not so we need to check against the - // highest known batch number to see if we have work to do still - highestKnown, err := hermezDb.GetLastL1BatchData() - if err != nil { - return nextData, err - } - if batchNumber >= highestKnown { - nextData.IsWorkRemaining = false - } - } - - return nextData, err -} - func extractTransactionsFromSlot(slot *types2.TxsRlp) ([]types.Transaction, error) { transactions := make([]types.Transaction, 0, len(slot.Txs)) reader := bytes.NewReader([]byte{}) @@ -226,7 +178,7 @@ func attemptAddTransaction( if overflow { ibs.RevertToSnapshot(snapshot) - return nil, nil, true, err + return nil, nil, true, nil } // add the gas only if not reverted. This should not be moved above the overflow check @@ -240,5 +192,5 @@ func attemptAddTransaction( ibs.FinalizeTx(evm.ChainRules(), noop) - return receipt, execResult, overflow, err + return receipt, execResult, false, nil } diff --git a/zk/stages/stage_sequence_execute_unwind.go b/zk/stages/stage_sequence_execute_unwind.go index dca61c79a2b..46c0a58846f 100644 --- a/zk/stages/stage_sequence_execute_unwind.go +++ b/zk/stages/stage_sequence_execute_unwind.go @@ -4,13 +4,10 @@ import ( "context" "fmt" - "github.com/gateway-fm/cdk-erigon-lib/common" "github.com/gateway-fm/cdk-erigon-lib/common/hexutility" "github.com/gateway-fm/cdk-erigon-lib/kv" "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/zk/hermez_db" ) @@ -47,6 +44,9 @@ func UnwindSequenceExecutionStage(u *stagedsync.UnwindState, s *stagedsync.Stage func unwindSequenceExecutionStage(u *stagedsync.UnwindState, s *stagedsync.StageState, tx kv.RwTx, ctx context.Context, cfg SequenceBlockCfg, initialCycle bool) error { hermezDb := hermez_db.NewHermezDb(tx) fromBatch, err := hermezDb.GetBatchNoByL2Block(u.UnwindPoint) + if err != nil { + return err + } if err := stagedsync.UnwindExecutionStageErigon(u, s, tx, ctx, cfg.toErigonExecuteBlockCfg(), initialCycle); err != nil { return err @@ -60,7 +60,7 @@ func unwindSequenceExecutionStage(u *stagedsync.UnwindState, s *stagedsync.Stage return err } - if err = updateSequencerProgress(tx, u.UnwindPoint, fromBatch, 1); err != nil { + if err = updateSequencerProgress(tx, u.UnwindPoint, fromBatch, true); err != nil { return err } @@ -96,20 +96,33 @@ func UnwindSequenceExecutionStageDbWrites(ctx context.Context, u *stagedsync.Unw return fmt.Errorf("get toBatch no by l2 block error: %v", err) } + lastBatchToKeepBeforeFrom, err := hermezDb.GetBatchNoByL2Block(u.UnwindPoint) + if err != nil { + return fmt.Errorf("get fromBatch no by l2 block error: %v", err) + } + fromBatchForForkIdDeletion := fromBatch + if lastBatchToKeepBeforeFrom == fromBatch { + fromBatchForForkIdDeletion++ + } + // only seq - if err = 
hermezDb.TruncateLatestUsedGers(fromBatch); err != nil { + if err = hermezDb.DeleteLatestUsedGers(u.UnwindPoint+1, s.BlockNumber); err != nil { return fmt.Errorf("truncate latest used gers error: %v", err) } // only seq - if err = hermezDb.TruncateBlockGlobalExitRoot(u.UnwindPoint+1, s.BlockNumber); err != nil { + if err = hermezDb.DeleteBlockGlobalExitRoots(u.UnwindPoint+1, s.BlockNumber); err != nil { return fmt.Errorf("truncate block ger error: %v", err) } // only seq - if err = hermezDb.TruncateBlockL1BlockHash(u.UnwindPoint+1, s.BlockNumber); err != nil { + if err = hermezDb.DeleteBlockL1BlockHashes(u.UnwindPoint+1, s.BlockNumber); err != nil { return fmt.Errorf("truncate block l1 block hash error: %v", err) } // only seq - if err = hermezDb.TruncateBlockL1InfoTreeIndex(u.UnwindPoint+1, s.BlockNumber); err != nil { + if err = hermezDb.DeleteBlockL1InfoTreeIndexes(u.UnwindPoint+1, s.BlockNumber); err != nil { + return fmt.Errorf("truncate block l1 info tree index error: %v", err) + } + // only seq + if err = hermezDb.DeleteBlockL1InfoTreeIndexesProgress(u.UnwindPoint+1, s.BlockNumber); err != nil { return fmt.Errorf("truncate block l1 info tree index error: %v", err) } // only seq @@ -117,19 +130,22 @@ func UnwindSequenceExecutionStageDbWrites(ctx context.Context, u *stagedsync.Unw return fmt.Errorf("truncate block batches error: %v", err) } // only seq - if err = hermezDb.TruncateForkId(fromBatch, toBatch); err != nil { + if err = hermezDb.DeleteForkIds(fromBatchForForkIdDeletion, toBatch); err != nil { return fmt.Errorf("truncate fork id error: %v", err) } - - return nil -} - -func recoverCodeHashPlain(acc *accounts.Account, db kv.Tx, key []byte) { - var address common.Address - copy(address[:], key) - if acc.Incarnation > 0 && acc.IsEmptyCodeHash() { - if codeHash, err2 := db.GetOne(kv.PlainContractCode, dbutils.PlainGenerateStoragePrefix(address[:], acc.Incarnation)); err2 == nil { - copy(acc.CodeHash[:], codeHash) + // only seq + if err = hermezDb.DeleteBatchCounters(u.UnwindPoint+1, s.BlockNumber); err != nil { + return fmt.Errorf("delete batch counters error: %v", err) + } + // only seq + if err = hermezDb.TruncateIsBatchPartiallyProcessed(fromBatch, toBatch); err != nil { + return fmt.Errorf("truncate is batch partially processed error: %v", err) + } + if lastBatchToKeepBeforeFrom == fromBatch { + if err = hermezDb.WriteIsBatchPartiallyProcessed(lastBatchToKeepBeforeFrom); err != nil { + return fmt.Errorf("write is batch partially processed error: %v", err) } } + + return nil } diff --git a/zk/stages/stage_sequence_execute_utils.go b/zk/stages/stage_sequence_execute_utils.go index 0443419993d..b23412c9801 100644 --- a/zk/stages/stage_sequence_execute_utils.go +++ b/zk/stages/stage_sequence_execute_utils.go @@ -1,7 +1,6 @@ package stages import ( - "context" "time" "github.com/c2h5oh/datasize" @@ -18,7 +17,6 @@ import ( "github.com/ledgerwatch/erigon/chain" "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" @@ -27,13 +25,12 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/ethdb/prune" - db2 "github.com/ledgerwatch/erigon/smt/pkg/db" - smtNs "github.com/ledgerwatch/erigon/smt/pkg/smt" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/shards"
"github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/ledgerwatch/erigon/zk/datastream/server" "github.com/ledgerwatch/erigon/zk/hermez_db" + verifier "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" "github.com/ledgerwatch/erigon/zk/tx" zktx "github.com/ledgerwatch/erigon/zk/tx" "github.com/ledgerwatch/erigon/zk/txpool" @@ -43,22 +40,14 @@ import ( ) const ( - logInterval = 20 * time.Second - - // stateStreamLimit - don't accumulate state changes if jump is bigger than this amount of blocks - stateStreamLimit uint64 = 1_000 - + logInterval = 20 * time.Second transactionGasLimit = 30000000 - - // this is the max number of send transactions that can be included in a block without overflowing counters - // this is for simple send transactions, any other type would consume more counters - // - preForkId11TxLimit = 444 ) var ( - noop = state.NewNoopWriter() - blockDifficulty = new(big.Int).SetUint64(0) + noop = state.NewNoopWriter() + blockDifficulty = new(big.Int).SetUint64(0) + SpecialZeroIndexHash = common.HexToHash("0x27AE5BA08D7291C96C8CBDDCC148BF48A6D68C7974B94356F53754EF6171D757") ) type HasChangeSetWriter interface { @@ -90,7 +79,8 @@ type SequenceBlockCfg struct { txPool *txpool.TxPool txPoolDb kv.RwDB - yieldSize uint16 + legacyVerifier *verifier.LegacyExecutorVerifier + yieldSize uint16 } func StageSequenceBlocksCfg( @@ -116,6 +106,7 @@ func StageSequenceBlocksCfg( txPool *txpool.TxPool, txPoolDb kv.RwDB, + legacyVerifier *verifier.LegacyExecutorVerifier, yieldSize uint16, ) SequenceBlockCfg { @@ -141,6 +132,7 @@ func StageSequenceBlocksCfg( zk: zk, txPool: txPool, txPoolDb: txPoolDb, + legacyVerifier: legacyVerifier, yieldSize: yieldSize, } } @@ -168,36 +160,6 @@ func (sCfg *SequenceBlockCfg) toErigonExecuteBlockCfg() stagedsync.ExecuteBlockC ) } -type stageDb struct { - tx kv.RwTx - hermezDb *hermez_db.HermezDb - eridb *db2.EriDb - stateReader *state.PlainStateReader - smt *smtNs.SMT -} - -func newStageDb(tx kv.RwTx) *stageDb { - sdb := &stageDb{} - sdb.SetTx(tx) - return sdb -} - -func (sdb *stageDb) SetTx(tx kv.RwTx) { - sdb.tx = tx - sdb.hermezDb = hermez_db.NewHermezDb(tx) - sdb.eridb = db2.NewEriDb(tx) - sdb.stateReader = state.NewPlainStateReader(tx) - sdb.smt = smtNs.NewSMT(sdb.eridb, false) -} - -type nextBatchL1Data struct { - DecodedData []zktx.DecodedBatchL2Data - Coinbase common.Address - L1InfoRoot common.Hash - IsWorkRemaining bool - LimitTimestamp uint64 -} - type forkDb interface { GetAllForkHistory() ([]uint64, []uint64, error) GetLatestForkHistory() (uint64, uint64, error) @@ -277,7 +239,7 @@ func prepareHeader(tx kv.RwTx, previousBlockNumber, deltaTimestamp, forcedTimest }, parentBlock, nil } -func prepareL1AndInfoTreeRelatedStuff(sdb *stageDb, decodedBlock *zktx.DecodedBatchL2Data, l1Recovery bool, proposedTimestamp uint64) ( +func prepareL1AndInfoTreeRelatedStuff(sdb *stageDb, batchState *BatchState, proposedTimestamp uint64) ( infoTreeIndexProgress uint64, l1TreeUpdate *zktypes.L1InfoTreeUpdate, l1TreeUpdateIndex uint64, @@ -291,12 +253,12 @@ func prepareL1AndInfoTreeRelatedStuff(sdb *stageDb, decodedBlock *zktx.DecodedBa // we keep track of this here shouldWriteGerToContract = true - if infoTreeIndexProgress, err = stages.GetStageProgress(sdb.tx, stages.HighestUsedL1InfoIndex); err != nil { + if _, infoTreeIndexProgress, err = sdb.hermezDb.GetLatestBlockL1InfoTreeIndexProgress(); err != nil { return } - if l1Recovery { - l1TreeUpdateIndex = uint64(decodedBlock.L1InfoTreeIndex) + if batchState.isL1Recovery() { + l1TreeUpdateIndex 
= uint64(batchState.blockState.blockL1RecoveryData.L1InfoTreeIndex) if l1TreeUpdate, err = sdb.hermezDb.GetL1InfoTreeUpdate(l1TreeUpdateIndex); err != nil { return } @@ -321,6 +283,14 @@ func prepareL1AndInfoTreeRelatedStuff(sdb *stageDb, decodedBlock *zktx.DecodedBa return } +func prepareTickers(cfg *SequenceBlockCfg) (*time.Ticker, *time.Ticker, *time.Ticker) { + batchTicker := time.NewTicker(cfg.zk.SequencerBatchSealTime) + logTicker := time.NewTicker(10 * time.Second) + blockTicker := time.NewTicker(cfg.zk.SequencerBlockSealTime) + + return batchTicker, logTicker, blockTicker +} + // will be called at the start of every new block created within a batch to figure out if there is a new GER // we can use or not. In the special case that this is the first block we just return 0 as we need to use the // 0 index first before we can use 1+ @@ -342,7 +312,7 @@ func calculateNextL1TreeUpdateToUse(lastInfoIndex uint64, hermezDb *hermez_db.He return nextL1Index, l1Info, nil } -func updateSequencerProgress(tx kv.RwTx, newHeight uint64, newBatch uint64, l1InfoIndex uint64) error { +func updateSequencerProgress(tx kv.RwTx, newHeight uint64, newBatch uint64, unwinding bool) error { // now update stages that will be used later on in stageloop.go and other stages. As we're the sequencer // we won't have headers stage for example as we're already writing them here if err := stages.SaveStageProgress(tx, stages.Execution, newHeight); err != nil { @@ -354,56 +324,31 @@ func updateSequencerProgress(tx kv.RwTx, newHeight uint64, newBatch uint64, l1In if err := stages.SaveStageProgress(tx, stages.HighestSeenBatchNumber, newBatch); err != nil { return err } - if err := stages.SaveStageProgress(tx, stages.HighestUsedL1InfoIndex, l1InfoIndex); err != nil { - return err - } - return nil -} + if !unwinding { + if err := stages.SaveStageProgress(tx, stages.IntermediateHashes, newHeight); err != nil { + return err + } -func doFinishBlockAndUpdateState( - ctx context.Context, - cfg SequenceBlockCfg, - s *stagedsync.StageState, - sdb *stageDb, - ibs *state.IntraBlockState, - header *types.Header, - parentBlock *types.Block, - forkId uint64, - thisBatch uint64, - ger common.Hash, - l1BlockHash common.Hash, - transactions []types.Transaction, - receipts types.Receipts, - execResults []*core.ExecutionResult, - effectiveGases []uint8, - l1InfoIndex uint64, - l1Recovery bool, -) (*types.Block, error) { - thisBlockNumber := header.Number.Uint64() - - if cfg.accumulator != nil { - cfg.accumulator.StartChange(thisBlockNumber, header.Hash(), nil, false) - } + if err := stages.SaveStageProgress(tx, stages.AccountHistoryIndex, newHeight); err != nil { + return err + } - block, err := finaliseBlock(ctx, cfg, s, sdb, ibs, header, parentBlock, forkId, thisBatch, cfg.accumulator, ger, l1BlockHash, transactions, receipts, execResults, effectiveGases, l1Recovery) - if err != nil { - return nil, err + if err := stages.SaveStageProgress(tx, stages.StorageHistoryIndex, newHeight); err != nil { + return err + } } - if err := updateSequencerProgress(sdb.tx, thisBlockNumber, thisBatch, l1InfoIndex); err != nil { - return nil, err - } + return nil +} - if cfg.accumulator != nil { - txs, err := rawdb.RawTransactionsRange(sdb.tx, thisBlockNumber, thisBlockNumber) - if err != nil { - return nil, err +func tryHaltSequencer(batchContext *BatchContext, thisBatch uint64) { + if batchContext.cfg.zk.SequencerHaltOnBatchNumber != 0 && batchContext.cfg.zk.SequencerHaltOnBatchNumber == thisBatch { + for { + log.Info(fmt.Sprintf("[%s] Halt sequencer on 
batch %d...", batchContext.s.LogPrefix(), thisBatch)) + time.Sleep(5 * time.Second) //nolint:gomnd } - cfg.accumulator.ChangeTransactions(txs) } - - return block, nil } type batchChecker interface { @@ -467,7 +412,7 @@ type BlockDataChecker struct { counter uint64 // counter amount of bytes } -func NewBlockDataChecker() *BlockDataChecker { +func newBlockDataChecker() *BlockDataChecker { return &BlockDataChecker{ limit: LIMIT_120_KB, counter: 0, @@ -476,7 +421,7 @@ func NewBlockDataChecker() *BlockDataChecker { // adds bytes amounting to the block data and checks if the limit is reached // if the limit is reached, the data is not added, so this can be reused again for next check -func (bdc *BlockDataChecker) AddBlockStartData(deltaTimestamp, l1InfoTreeIndex uint32) bool { +func (bdc *BlockDataChecker) AddBlockStartData() bool { blockStartBytesAmount := tx.START_BLOCK_BATCH_L2_DATA_SIZE // tx.GenerateStartBlockBatchL2Data(deltaTimestamp, l1InfoTreeIndex) returns 65 long byte array // add in the changeL2Block transaction if bdc.counter+blockStartBytesAmount > bdc.limit { diff --git a/zk/stages/stage_sequence_execute_utils_db.go b/zk/stages/stage_sequence_execute_utils_db.go new file mode 100644 index 00000000000..4484d9dbd72 --- /dev/null +++ b/zk/stages/stage_sequence_execute_utils_db.go @@ -0,0 +1,59 @@ +package stages + +import ( + "context" + + "github.com/gateway-fm/cdk-erigon-lib/kv" + + "github.com/ledgerwatch/erigon/core/state" + db2 "github.com/ledgerwatch/erigon/smt/pkg/db" + smtNs "github.com/ledgerwatch/erigon/smt/pkg/smt" + "github.com/ledgerwatch/erigon/zk/hermez_db" +) + +type stageDb struct { + ctx context.Context + db kv.RwDB + + tx kv.RwTx + hermezDb *hermez_db.HermezDb + eridb *db2.EriDb + stateReader *state.PlainStateReader + smt *smtNs.SMT +} + +func newStageDb(ctx context.Context, db kv.RwDB) (sdb *stageDb, err error) { + var tx kv.RwTx + if tx, err = db.BeginRw(ctx); err != nil { + return nil, err + } + + sdb = &stageDb{ + ctx: ctx, + db: db, + } + sdb.SetTx(tx) + return sdb, nil +} + +func (sdb *stageDb) SetTx(tx kv.RwTx) { + sdb.tx = tx + sdb.hermezDb = hermez_db.NewHermezDb(tx) + sdb.eridb = db2.NewEriDb(tx) + sdb.stateReader = state.NewPlainStateReader(tx) + sdb.smt = smtNs.NewSMT(sdb.eridb, false) +} + +func (sdb *stageDb) CommitAndStart() (err error) { + if err = sdb.tx.Commit(); err != nil { + return err + } + + tx, err := sdb.db.BeginRw(sdb.ctx) + if err != nil { + return err + } + + sdb.SetTx(tx) + return nil +} diff --git a/zk/stages/stage_sequencer_executor_verify.go b/zk/stages/stage_sequencer_executor_verify.go deleted file mode 100644 index 68299035c7d..00000000000 --- a/zk/stages/stage_sequencer_executor_verify.go +++ /dev/null @@ -1,381 +0,0 @@ -package stages - -import ( - "context" - "math" - "time" - - "bytes" - "errors" - "sort" - - "fmt" - - "github.com/gateway-fm/cdk-erigon-lib/kv" - "github.com/ledgerwatch/erigon/chain" - "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/eth/ethconfig" - "github.com/ledgerwatch/erigon/eth/stagedsync" - "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/zk/hermez_db" - "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" - "github.com/ledgerwatch/erigon/zk/txpool" - "github.com/ledgerwatch/log/v3" -) - -type SequencerExecutorVerifyCfg struct { - db kv.RwDB - verifier *legacy_executor_verifier.LegacyExecutorVerifier - txPool *txpool.TxPool - chainConfig *chain.Config - cfgZk *ethconfig.Zk -} - -func 
StageSequencerExecutorVerifyCfg( - db kv.RwDB, - verifier *legacy_executor_verifier.LegacyExecutorVerifier, - pool *txpool.TxPool, - chainConfig *chain.Config, - cfgZk *ethconfig.Zk, -) SequencerExecutorVerifyCfg { - return SequencerExecutorVerifyCfg{ - db: db, - verifier: verifier, - txPool: pool, - chainConfig: chainConfig, - cfgZk: cfgZk, - } -} - -func SpawnSequencerExecutorVerifyStage( - s *stagedsync.StageState, - u stagedsync.Unwinder, - tx kv.RwTx, - ctx context.Context, - cfg SequencerExecutorVerifyCfg, - quiet bool, -) error { - logPrefix := s.LogPrefix() - log.Info(fmt.Sprintf("[%s] Starting sequencer verify stage", logPrefix)) - defer log.Info(fmt.Sprintf("[%s] Finished sequencer verify stage", logPrefix)) - - var err error - freshTx := tx == nil - if freshTx { - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - } - - hermezDb := hermez_db.NewHermezDb(tx) - hermezDbReader := hermez_db.NewHermezDbReader(tx) - - // progress here is at the batch level - progress, err := stages.GetStageProgress(tx, stages.SequenceExecutorVerify) - if err != nil { - return err - } - - // progress here is at the block level - executeProgress, err := stages.GetStageProgress(tx, stages.Execution) - if err != nil { - return err - } - - // we need to get the batch number for the latest block, so we can search for new batches to send for - // verification - latestBatch, err := hermezDb.GetBatchNoByL2Block(executeProgress) - if err != nil { - return err - } - - isBatchPartial, err := hermezDb.GetIsBatchPartiallyProcessed(latestBatch) - if err != nil { - return err - } - // we could be running in a state with no executors so we need instant response that we are in an - // ok state to save lag in the data stream !!Dragons: there will be no witnesses stored running in - // this mode of operation - canVerify := cfg.verifier.HasExecutorsUnsafe() - - // if batch was stopped intermediate and is not finished - we need to finish it first - // this shouldn't occur since exec stage is before that and should finish the batch - // but just in case something unexpected happens - if isBatchPartial { - log.Error(fmt.Sprintf("[%s] batch %d is not fully processed in stage_execute", logPrefix, latestBatch)) - canVerify = false - } - - if !canVerify { - if latestBatch == injectedBatchNumber { - return nil - } - - if err = stages.SaveStageProgress(tx, stages.SequenceExecutorVerify, latestBatch); err != nil { - return err - } - if freshTx { - if err = tx.Commit(); err != nil { - return err - } - } - return nil - } - - // get ordered promises from the verifier - // NB: this call is where the stream write happens (so it will be delayed until this stage is run) - responses, err := cfg.verifier.ProcessResultsSequentiallyUnsafe(tx) - if err != nil { - //TODO: what happen with promises if this request returns here? 
- return err - } - - for _, response := range responses { - // ensure that the first response is the next batch based on the current stage progress - // otherwise just return early until we get it - if response.BatchNumber != progress+1 { - if freshTx { - if err = tx.Commit(); err != nil { - return err - } - } - return nil - } - - // now check that we are indeed in a good state to continue - if !response.Valid { - if cfg.cfgZk.Limbo { - log.Info(fmt.Sprintf("[%s] identified an invalid batch, entering limbo", s.LogPrefix()), "batch", response.BatchNumber) - // we have an invalid batch, so we need to notify the txpool that these transactions are spurious - // and need to go into limbo and then trigger a rewind. The rewind will put all TX back into the - // pool, but as it knows about these limbo transactions it will place them into limbo instead - // of queueing them again - - // now we need to figure out the highest block number in the batch - // and grab all the transaction hashes along the way to inform the - // pool of hashes to avoid - blockNumbers, err := hermezDb.GetL2BlockNosByBatch(response.BatchNumber) - if err != nil { - return err - } - if len(blockNumbers) == 0 { - panic("failing to verify a batch without blocks") - } - sort.Slice(blockNumbers, func(i, j int) bool { - return blockNumbers[i] < blockNumbers[j] - }) - - var lowestBlock, highestBlock *types.Block - forkId, err := hermezDb.GetForkId(response.BatchNumber) - if err != nil { - return err - } - - l1InfoTreeMinTimestamps := make(map[uint64]uint64) - if _, err = cfg.verifier.GetWholeBatchStreamBytes(response.BatchNumber, tx, blockNumbers, hermezDbReader, l1InfoTreeMinTimestamps, nil); err != nil { - return err - } - - limboSendersToPreviousTxMap := make(map[string]uint32) - limboStreamBytesBuilderHelper := newLimboStreamBytesBuilderHelper() - - limboDetails := txpool.NewLimboBatchDetails() - limboDetails.Witness = response.Witness - limboDetails.L1InfoTreeMinTimestamps = l1InfoTreeMinTimestamps - limboDetails.BatchNumber = response.BatchNumber - limboDetails.ForkId = forkId - - for _, blockNumber := range blockNumbers { - block, err := rawdb.ReadBlockByNumber(tx, blockNumber) - if err != nil { - return err - } - highestBlock = block - if lowestBlock == nil { - // capture the first block, then we can set the bad block hash in the unwind to terminate the - // stage loop and broadcast the accumulator changes to the txpool before the next stage loop run - lowestBlock = block - } - - for i, transaction := range block.Transactions() { - var b []byte - buffer := bytes.NewBuffer(b) - err = transaction.EncodeRLP(buffer) - if err != nil { - return err - } - - signer := types.MakeSigner(cfg.chainConfig, blockNumber) - sender, err := transaction.Sender(*signer) - if err != nil { - return err - } - senderMapKey := sender.Hex() - - blocksForStreamBytes, transactionsToIncludeByIndex := limboStreamBytesBuilderHelper.append(senderMapKey, blockNumber, i) - streamBytes, err := cfg.verifier.GetWholeBatchStreamBytes(response.BatchNumber, tx, blocksForStreamBytes, hermezDbReader, l1InfoTreeMinTimestamps, transactionsToIncludeByIndex) - if err != nil { - return err - } - - previousTxIndex, ok := limboSendersToPreviousTxMap[senderMapKey] - if !ok { - previousTxIndex = math.MaxUint32 - } - - hash := transaction.Hash() - limboTxCount := limboDetails.AppendTransaction(buffer.Bytes(), streamBytes, hash, sender, previousTxIndex) - limboSendersToPreviousTxMap[senderMapKey] = limboTxCount - 1 - - log.Info(fmt.Sprintf("[%s] adding transaction to limbo", 
s.LogPrefix()), "hash", hash) - } - } - - limboDetails.TimestampLimit = highestBlock.Time() - limboDetails.FirstBlockNumber = lowestBlock.NumberU64() - cfg.txPool.ProcessLimboBatchDetails(limboDetails) - - u.UnwindTo(lowestBlock.NumberU64()-1, lowestBlock.Hash()) - cfg.verifier.CancelAllRequestsUnsafe() - return nil - } else { - // this infinite loop will make the node to print the error once every minute therefore preventing it for creating new blocks - for { - time.Sleep(time.Minute) - log.Error(fmt.Sprintf("[%s] identified an invalid batch with number %d", s.LogPrefix(), response.BatchNumber)) - } - } - } - - // all good so just update the stage progress for now - if err = stages.SaveStageProgress(tx, stages.SequenceExecutorVerify, response.BatchNumber); err != nil { - return err - } - - // we know that if the batch has been marked as OK we can update the datastream progress to match - // as the verifier will have handled writing to the stream - highestBlock, err := hermezDb.GetHighestBlockInBatch(response.BatchNumber) - if err != nil { - return err - } - - if err = stages.SaveStageProgress(tx, stages.DataStream, highestBlock); err != nil { - return err - } - - // store the witness - errWitness := hermezDb.WriteWitness(response.BatchNumber, response.Witness) - if errWitness != nil { - log.Warn("Failed to write witness", "batch", response.BatchNumber, "err", errWitness) - } - - cfg.verifier.MarkTopResponseAsProcessed(response.BatchNumber) - progress = response.BatchNumber - } - - // send off the new batches to the verifier to be processed - for batch := progress + 1; batch <= latestBatch; batch++ { - // we do not need to verify batch 1 as this is the injected batch so just updated progress and move on - if batch == injectedBatchNumber { - if err = stages.SaveStageProgress(tx, stages.SequenceExecutorVerify, injectedBatchNumber); err != nil { - return err - } - } else { - if cfg.verifier.IsRequestAddedUnsafe(batch) { - continue - } - - // we need the state root of the last block in the batch to send to the executor - highestBlock, err := hermezDb.GetHighestBlockInBatch(batch) - if err != nil { - return err - } - if highestBlock == 0 { - // maybe nothing in this batch and we know we don't handle batch 0 (genesis) - continue - } - block, err := rawdb.ReadBlockByNumber(tx, highestBlock) - if err != nil { - return err - } - - counters, found, err := hermezDb.GetBatchCounters(batch) - if err != nil { - return err - } - if !found { - return errors.New("batch counters not found") - } - - forkId, err := hermezDb.GetForkId(batch) - if err != nil { - return err - } - - if forkId == 0 { - return errors.New("the network cannot have a 0 fork id") - } - - cfg.verifier.AddRequestUnsafe(legacy_executor_verifier.NewVerifierRequest(batch, forkId, block.Root(), counters), cfg.cfgZk.SequencerBatchSealTime) - } - } - - if freshTx { - if err = tx.Commit(); err != nil { - return err - } - } - - return nil -} - -func UnwindSequencerExecutorVerifyStage( - u *stagedsync.UnwindState, - s *stagedsync.StageState, - tx kv.RwTx, - ctx context.Context, - cfg SequencerExecutorVerifyCfg, -) (err error) { - /* - The "Unwinder" keeps stage's progress in blocks. 
- If a stage's current progress is <= unwindPoint then the unwind is not invoked for this stage (sync.go line 386) - For this particular case, the progress is in batches => its progress is always <= unwindPoint, because unwindPoint is in blocks - This is not a problem, because this stage's progress actually keeps the number of last verified batch and we never unwind the last verified batch - */ - - // freshTx := tx == nil - // if freshTx { - // tx, err = cfg.db.BeginRw(ctx) - // if err != nil { - // return err - // } - // defer tx.Rollback() - // } - - // logPrefix := u.LogPrefix() - // log.Info(fmt.Sprintf("[%s] Unwind Executor Verify", logPrefix), "from", s.BlockNumber, "to", u.UnwindPoint) - - // if err = u.Done(tx); err != nil { - // return err - // } - - // if freshTx { - // if err = tx.Commit(); err != nil { - // return err - // } - // } - - return nil -} - -func PruneSequencerExecutorVerifyStage( - s *stagedsync.PruneState, - tx kv.RwTx, - cfg SequencerExecutorVerifyCfg, - ctx context.Context, -) error { - return nil -} diff --git a/zk/stages/stage_sequencer_executor_verify_limbo.go b/zk/stages/stage_sequencer_executor_verify_limbo.go deleted file mode 100644 index a1328dd9dcc..00000000000 --- a/zk/stages/stage_sequencer_executor_verify_limbo.go +++ /dev/null @@ -1,56 +0,0 @@ -package stages - -type limboStreamBytesGroup struct { - blockNumber uint64 - transactionsIndicesInBlock []int -} - -func newLimboStreamBytesGroup(blockNumber uint64) *limboStreamBytesGroup { - return &limboStreamBytesGroup{ - blockNumber: blockNumber, - transactionsIndicesInBlock: make([]int, 0, 1), - } -} - -type limboStreamBytesBuilderHelper struct { - sendersToGroupMap map[string][]*limboStreamBytesGroup -} - -func newLimboStreamBytesBuilderHelper() *limboStreamBytesBuilderHelper { - return &limboStreamBytesBuilderHelper{ - sendersToGroupMap: make(map[string][]*limboStreamBytesGroup), - } -} - -func (_this *limboStreamBytesBuilderHelper) append(senderMapKey string, blockNumber uint64, transactionIndex int) ([]uint64, [][]int) { - limboStreamBytesGroups := _this.add(senderMapKey, blockNumber, transactionIndex) - - size := len(limboStreamBytesGroups) - resultBlocks := make([]uint64, size) - resultTransactionsSet := make([][]int, size) - - for i := 0; i < size; i++ { - group := limboStreamBytesGroups[i] - resultBlocks[i] = group.blockNumber - resultTransactionsSet[i] = group.transactionsIndicesInBlock - } - - return resultBlocks, resultTransactionsSet -} - -func (_this *limboStreamBytesBuilderHelper) add(senderMapKey string, blockNumber uint64, transactionIndex int) []*limboStreamBytesGroup { - limboStreamBytesGroups, ok := _this.sendersToGroupMap[senderMapKey] - if !ok { - limboStreamBytesGroups = []*limboStreamBytesGroup{newLimboStreamBytesGroup(blockNumber)} - _this.sendersToGroupMap[senderMapKey] = limboStreamBytesGroups - } - group := limboStreamBytesGroups[len(limboStreamBytesGroups)-1] - if group.blockNumber != blockNumber { - group = newLimboStreamBytesGroup(blockNumber) - limboStreamBytesGroups = append(limboStreamBytesGroups, group) - _this.sendersToGroupMap[senderMapKey] = limboStreamBytesGroups - } - group.transactionsIndicesInBlock = append(group.transactionsIndicesInBlock, transactionIndex) - - return limboStreamBytesGroups -} diff --git a/zk/stages/stage_sequencer_interhashes.go b/zk/stages/stage_sequencer_interhashes.go index 2a1d0883f6d..ddef9f0d9ba 100644 --- a/zk/stages/stage_sequencer_interhashes.go +++ b/zk/stages/stage_sequencer_interhashes.go @@ -5,59 +5,18 @@ import ( 
"github.com/gateway-fm/cdk-erigon-lib/kv" "github.com/ledgerwatch/erigon/eth/stagedsync" - "github.com/ledgerwatch/erigon/turbo/shards" ) -type SequencerInterhashesCfg struct { - db kv.RwDB - accumulator *shards.Accumulator -} - -func StageSequencerInterhashesCfg( - db kv.RwDB, - accumulator *shards.Accumulator, -) SequencerInterhashesCfg { - return SequencerInterhashesCfg{ - db: db, - accumulator: accumulator, - } -} - // This stages does NOTHING while going forward, because its done during execution +// Even this stage progress is updated in execution stage func SpawnSequencerInterhashesStage( s *stagedsync.StageState, u stagedsync.Unwinder, tx kv.RwTx, ctx context.Context, - cfg SequencerInterhashesCfg, + cfg ZkInterHashesCfg, quiet bool, ) error { - var err error - - freshTx := tx == nil - if freshTx { - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - } - - to, err := s.ExecutionAt(tx) - if err != nil { - return err - } - - if err := s.Update(tx, to); err != nil { - return err - } - - if freshTx { - if err = tx.Commit(); err != nil { - return err - } - } - return nil } @@ -68,15 +27,15 @@ func UnwindSequencerInterhashsStage( s *stagedsync.StageState, tx kv.RwTx, ctx context.Context, - cfg SequencerInterhashesCfg, + cfg ZkInterHashesCfg, ) error { - return UnwindZkIntermediateHashesStage(u, s, tx, ZkInterHashesCfg{}, ctx) + return UnwindZkIntermediateHashesStage(u, s, tx, cfg, ctx) } func PruneSequencerInterhashesStage( s *stagedsync.PruneState, tx kv.RwTx, - cfg SequencerInterhashesCfg, + cfg ZkInterHashesCfg, ctx context.Context, ) error { return nil diff --git a/zk/stages/stages.go b/zk/stages/stages.go index a422ea52888..583d0c9eaf0 100644 --- a/zk/stages/stages.go +++ b/zk/stages/stages.go @@ -17,11 +17,9 @@ func SequencerZkStages( l1InfoTreeCfg L1InfoTreeCfg, sequencerL1BlockSyncCfg SequencerL1BlockSyncCfg, dataStreamCatchupCfg DataStreamCatchupCfg, - sequencerInterhashesCfg SequencerInterhashesCfg, exec SequenceBlockCfg, hashState stages.HashStateCfg, zkInterHashesCfg ZkInterHashesCfg, - sequencerExecutorVerifyCfg SequencerExecutorVerifyCfg, history stages.HistoryCfg, logIndex stages.LogIndexCfg, callTraces stages.CallTracesCfg, @@ -106,7 +104,7 @@ func SequencerZkStages( ID: stages2.Execution, Description: "Sequence transactions", Forward: func(firstCycle bool, badBlockUnwind bool, s *stages.StageState, u stages.Unwinder, tx kv.RwTx, quiet bool) error { - return SpawnSequencingStage(s, u, tx, ctx, exec, quiet) + return SpawnSequencingStage(s, u, ctx, exec, history, quiet) }, Unwind: func(firstCycle bool, u *stages.UnwindState, s *stages.StageState, tx kv.RwTx) error { return UnwindSequenceExecutionStage(u, s, tx, ctx, exec, firstCycle) @@ -119,26 +117,13 @@ func SequencerZkStages( ID: stages2.IntermediateHashes, Description: "Sequencer Intermediate Hashes", Forward: func(firstCycle bool, badBlockUnwind bool, s *stages.StageState, u stages.Unwinder, tx kv.RwTx, quiet bool) error { - return SpawnSequencerInterhashesStage(s, u, tx, ctx, sequencerInterhashesCfg, quiet) + return SpawnSequencerInterhashesStage(s, u, tx, ctx, zkInterHashesCfg, quiet) }, Unwind: func(firstCycle bool, u *stages.UnwindState, s *stages.StageState, tx kv.RwTx) error { - return UnwindSequencerInterhashsStage(u, s, tx, ctx, sequencerInterhashesCfg) + return UnwindSequencerInterhashsStage(u, s, tx, ctx, zkInterHashesCfg) }, Prune: func(firstCycle bool, p *stages.PruneState, tx kv.RwTx) error { - return PruneSequencerInterhashesStage(p, tx, sequencerInterhashesCfg, ctx) 
- }, - }, - { - ID: stages2.SequenceExecutorVerify, - Description: "Sequencer, check batch with legacy executor", - Forward: func(firstCycle bool, badBlockUnwind bool, s *stages.StageState, u stages.Unwinder, tx kv.RwTx, quiet bool) error { - return SpawnSequencerExecutorVerifyStage(s, u, tx, ctx, sequencerExecutorVerifyCfg, quiet) - }, - Unwind: func(firstCycle bool, u *stages.UnwindState, s *stages.StageState, tx kv.RwTx) error { - return UnwindSequencerExecutorVerifyStage(u, s, tx, ctx, sequencerExecutorVerifyCfg) - }, - Prune: func(firstCycle bool, p *stages.PruneState, tx kv.RwTx) error { - return PruneSequencerExecutorVerifyStage(p, tx, sequencerExecutorVerifyCfg, ctx) + return PruneSequencerInterhashesStage(p, tx, zkInterHashesCfg, ctx) }, }, { @@ -175,7 +160,9 @@ func SequencerZkStages( Description: "Generate account history index", Disabled: false, Forward: func(firstCycle bool, badBlockUnwind bool, s *stages.StageState, u stages.Unwinder, tx kv.RwTx, quiet bool) error { - return stages.SpawnAccountHistoryIndex(s, tx, history, ctx) + // return stages.SpawnAccountHistoryIndex(s, tx, history, ctx) + // only the forward part of this stage is needed; it is handled in the execution stage + return nil }, Unwind: func(firstCycle bool, u *stages.UnwindState, s *stages.StageState, tx kv.RwTx) error { return stages.UnwindAccountHistoryIndex(u, s, tx, history, ctx) @@ -189,7 +176,9 @@ func SequencerZkStages( Description: "Generate storage history index", Disabled: false, Forward: func(firstCycle bool, badBlockUnwind bool, s *stages.StageState, u stages.Unwinder, tx kv.RwTx, quiet bool) error { - return stages.SpawnStorageHistoryIndex(s, tx, history, ctx) + // return stages.SpawnStorageHistoryIndex(s, tx, history, ctx) + // only the forward part of this stage is needed; it is handled in the execution stage + return nil }, Unwind: func(firstCycle bool, u *stages.UnwindState, s *stages.StageState, tx kv.RwTx) error { return stages.UnwindStorageHistoryIndex(u, s, tx, history, ctx) diff --git a/zk/txpool/pool_zk_limbo.go b/zk/txpool/pool_zk_limbo.go index 9bb6f57719b..f79e827fe3c 100644 --- a/zk/txpool/pool_zk_limbo.go +++ b/zk/txpool/pool_zk_limbo.go @@ -499,15 +499,15 @@ func (p *TxPool) fromDBLimbo(ctx context.Context, tx kv.Tx, cacheView kvcache.Ca return nil } + p.limbo.limboSlots = &types.TxSlots{} + parseCtx := types.NewTxParseContext(p.chainID) + parseCtx.WithSender(false) + it, err := tx.Range(TablePoolLimbo, nil, nil) if err != nil { return err } - p.limbo.limboSlots = &types.TxSlots{} - parseCtx := types.NewTxParseContext(p.chainID) - parseCtx.WithSender(false) - for it.HasNext() { k, v, err := it.Next() if err != nil { diff --git a/zk/txpool/pool_zk_limbo_processor.go b/zk/txpool/pool_zk_limbo_processor.go index d38b88ee2ee..ef46e18e5b1 100644 --- a/zk/txpool/pool_zk_limbo_processor.go +++ b/zk/txpool/pool_zk_limbo_processor.go @@ -83,11 +83,12 @@ func (_this *LimboSubPoolProcessor) run() { unlimitedCounters[k] = math.MaxInt32 } + blockNumbers := []uint64{1} // assume a single block number 1, because the actual number does not matter here invalidTxs := []*string{} for _, limboBatch := range limboBatchDetails { for _, limboTx := range limboBatch.Transactions { - request := legacy_executor_verifier.NewVerifierRequest(limboBatch.BatchNumber, limboBatch.ForkId, limboTx.Root, unlimitedCounters) + request := legacy_executor_verifier.NewVerifierRequest(limboBatch.ForkId, limboBatch.BatchNumber, blockNumbers, limboTx.Root, unlimitedCounters) err := _this.verifier.VerifySync(tx, request, limboBatch.Witness,
limboTx.StreamBytes, limboBatch.TimestampLimit, limboBatch.FirstBlockNumber, limboBatch.L1InfoTreeMinTimestamps) if err != nil { idHash := hexutils.BytesToHex(limboTx.Hash[:]) @@ -101,5 +102,4 @@ func (_this *LimboSubPoolProcessor) run() { } _this.txPool.MarkProcessedLimboDetails(size, invalidTxs) - } diff --git a/zk/txpool/txpooluitl/all_components.go b/zk/txpool/txpooluitl/all_components.go index e7fba40e4e4..f98de551529 100644 --- a/zk/txpool/txpooluitl/all_components.go +++ b/zk/txpool/txpooluitl/all_components.go @@ -22,8 +22,8 @@ import ( "time" "github.com/c2h5oh/datasize" - "github.com/holiman/uint256" "github.com/gateway-fm/cdk-erigon-lib/txpool/txpoolcfg" + "github.com/holiman/uint256" "github.com/ledgerwatch/log/v3" mdbx2 "github.com/torquem-ch/mdbx-go/mdbx" @@ -134,6 +134,12 @@ func AllComponents(ctx context.Context, cfg txpoolcfg.Config, ethCfg *ethconfig. return nil, nil, nil, nil, nil, err } + if err = txPoolDB.Update(ctx, func(tx kv.RwTx) error { + return txpool.CreateTxPoolBuckets(tx) + }); err != nil { + return nil, nil, nil, nil, nil, err + } + fetch := txpool.NewFetch(ctx, sentryClients, txPool, stateChangesClient, chainDB, txPoolDB, *chainID) //fetch.ConnectCore() //fetch.ConnectSentries() diff --git a/zk/utils/zk_tables.go b/zk/utils/zk_tables.go new file mode 100644 index 00000000000..028697628fc --- /dev/null +++ b/zk/utils/zk_tables.go @@ -0,0 +1,29 @@ +package utils + +import ( + "github.com/gateway-fm/cdk-erigon-lib/kv" + "github.com/ledgerwatch/erigon/smt/pkg/db" + "github.com/ledgerwatch/erigon/zk/hermez_db" +) + +func PopulateMemoryMutationTables(batch kv.RwTx) error { + for _, table := range hermez_db.HermezDbTables { + if err := batch.CreateBucket(table); err != nil { + return err + } + } + + for _, table := range db.HermezSmtTables { + if err := batch.CreateBucket(table); err != nil { + return err + } + } + + for _, table := range kv.ChaindataTables { + if err := batch.CreateBucket(table); err != nil { + return err + } + } + + return nil +} diff --git a/zk/witness/witness.go b/zk/witness/witness.go index 20cd7652b27..37503147d71 100644 --- a/zk/witness/witness.go +++ b/zk/witness/witness.go @@ -192,7 +192,7 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, blocks []*eri batch := memdb.NewMemoryBatchWithSize(tx, g.dirs.Tmp, g.zkConfig.WitnessMemdbSize) defer batch.Rollback() - if err = populateDbTables(batch); err != nil { + if err = zkUtils.PopulateMemoryMutationTables(batch); err != nil { return nil, err } @@ -350,36 +350,3 @@ func getWitnessBytes(witness *trie.Witness, debug bool) ([]byte, error) { } return buf.Bytes(), nil } - -func populateDbTables(batch kv.RwTx) error { - tables := []string{ - db2.TableSmt, - db2.TableAccountValues, - db2.TableMetadata, - db2.TableHashKey, - db2.TableStats, - hermez_db.TX_PRICE_PERCENTAGE, - hermez_db.BLOCKBATCHES, - hermez_db.BATCH_BLOCKS, - hermez_db.BLOCK_GLOBAL_EXIT_ROOTS, - hermez_db.GLOBAL_EXIT_ROOTS_BATCHES, - hermez_db.STATE_ROOTS, - hermez_db.BATCH_WITNESSES, - hermez_db.L1_BLOCK_HASHES, - hermez_db.BLOCK_L1_BLOCK_HASHES, - hermez_db.INTERMEDIATE_TX_STATEROOTS, - hermez_db.REUSED_L1_INFO_TREE_INDEX, - hermez_db.LATEST_USED_GER, - hermez_db.L1_INFO_TREE_UPDATES_BY_GER, - hermez_db.SMT_DEPTHS, - hermez_db.INVALID_BATCHES, - } - - for _, t := range tables { - if err := batch.CreateBucket(t); err != nil { - return err - } - } - - return nil -}
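
Illustrative sketch (not part of the patch): the limboStreamBytesBuilderHelper added in zk/stages/stage_sequence_execute_limbo.go groups transaction indices per sender and per block so that handleLimbo can rebuild stream bytes one sender at a time. The snippet below shows the expected grouping behaviour using only identifiers defined in this diff; the function name exampleLimboGrouping and the sender key are hypothetical and exist purely for illustration.

package stages

// Illustrative sketch only, not part of this PR. It exercises the
// limboStreamBytesBuilderHelper introduced in stage_sequence_execute_limbo.go.
func exampleLimboGrouping() ([]uint64, [][]int) {
	helper := newLimboStreamBytesBuilderHelper()

	// Two transactions from the same sender land in block 10; both indices are
	// appended to the same group because the block number has not changed.
	helper.append("0xSenderA", 10, 0)
	helper.append("0xSenderA", 10, 2)

	// A third transaction in block 11 opens a new group for that sender.
	blocks, txIndicesPerBlock := helper.append("0xSenderA", 11, 1)

	// blocks            == []uint64{10, 11}
	// txIndicesPerBlock == [][]int{{0, 2}, {1}}
	return blocks, txIndicesPerBlock
}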