From 8e15470125ba3689dd7288e2aa7bbdca8730701b Mon Sep 17 00:00:00 2001
From: Sebastian Stammler
Date: Tue, 19 Dec 2023 14:50:06 +0100
Subject: [PATCH] miner: Add block building interruption on payload resolution (getPayload) (#186)

* miner: Add block building interruption on payload resolution (getPayload)

* miner: Change full payload resolution, fix and add test

* miner: Add parameter validation if skipping empty block

We only build the empty block if we don't use the tx pool. If we do use the
tx pool, a forkchoiceUpdated call would miss the implicit validation that
happens during empty block building, so we need to add it back explicitly.

* miner: Always wait for block builder result after interrupting

This commit changes the way the block builder/update routine and the
resolution functions Resolve and ResolveFull synchronize. Resolve and
ResolveFull now signal the payload builder to pause and set the interrupt
signal in case any block building is ongoing. They then wait for the
interrupted block building to complete.

This allowed us to simplify the Payload implementation somewhat, because the
builder routine is now guaranteed to return before the resulting fields
(full, fullFees etc.) are read, and closing of the `stop` channel is now
synchronized with a sync.Once. So the mutex and condition variable could be
removed and we only use two simple signalling channels, `stop` and `done`,
for synchronization.

* miner: Add testing mode to module

Some tests in the miner and catalyst packages assume that getPayload can be
called immediately after forkchoiceUpdated and then return some built block.
Because payload resolution now interrupts any ongoing payload building
process, this creates a race condition on block building. The new testing
mode, which can be enabled by setting the package variable
IsPayloadBuildingTest to true, guarantees that at least one full block is
always built. It's hacky, but it seems to be the easiest and least intrusive
way to enable the new payload resolution behavior while still keeping all
tests happy.

* miner: Further improve block building interruption

- Prioritize the stop signal over recommit.
- Don't start a payload building update if the last update's duration doesn't
  fit before the slot timeout.

* miner: Partially revert rework of payload build stopping

When resolving, we don't want to wait for the latest update. If a full block
is available, we just return that one, as before. Payload building is still
interrupted, but exits in the background.

* miner: Return early when building interrupted payload updates

* Remove global variable to change miner behaviour. Use a longer wait in
  tests for the payload to build.

* miner: Interrupt first payload building job

Also added an interrupt test. Had to add a sleep to make the non-interrupt
test work.

* eth/catalyst: Add even more sleeps to make tests get over payload
  interruption

* Deterministically wait for payloads to build the first full block

* eth/catalyst,miner: Improve payload full block waiting in tests

Also fix a bug in TestNilWithdrawals where the withdrawals weren't added to
the ephemeral BuildPayloadArgs instance used for re-calculating the payload
id.

* miner: Calculate sane block building time in validateParams

Also always stop the interrupt timer after fillTransactions in generateWork.
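The synchronization described above can be illustrated with a minimal, self-contained Go sketch. It is not the code from the diff below: names are simplified stand-ins, the builder loop is faked with a goroutine, and WaitFull here waits on an explicit `built` predicate, whereas the real implementation broadcasts on every update result.

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
        "time"
    )

    // Interrupt reasons, loosely mirroring the worker's commitInterrupt* constants.
    const (
        interruptNone int32 = iota
        interruptResolve
    )

    // payload is a simplified stand-in for miner.Payload, reduced to the
    // synchronization pattern: a shared atomic interrupt, a stop channel
    // closed exactly once, and a condition variable used to wait for the
    // first build result.
    type payload struct {
        lock      sync.Mutex
        cond      *sync.Cond
        built     bool          // set once the first build result has arrived
        stop      chan struct{} // tells the background update loop to exit
        stopOnce  sync.Once     // Resolve may be called more than once
        interrupt *atomic.Int32 // shared with the routine filling transactions
    }

    func newPayload() *payload {
        p := &payload{stop: make(chan struct{}), interrupt: new(atomic.Int32)}
        p.cond = sync.NewCond(&p.lock)
        return p
    }

    // update records a build result and wakes anyone blocked in WaitFull.
    func (p *payload) update() {
        p.lock.Lock()
        defer p.lock.Unlock()
        p.built = true
        p.cond.Broadcast()
    }

    // WaitFull blocks until the builder has delivered its first result.
    func (p *payload) WaitFull() {
        p.lock.Lock()
        defer p.lock.Unlock()
        for !p.built {
            p.cond.Wait()
        }
    }

    // Resolve interrupts any in-flight build so it stops adding transactions,
    // then signals the update loop to exit. The real method additionally
    // returns the best block seen so far.
    func (p *payload) Resolve() {
        p.interrupt.CompareAndSwap(interruptNone, interruptResolve)
        p.stopOnce.Do(func() { close(p.stop) })
    }

    func main() {
        p := newPayload()
        go func() { // stands in for the background payload update loop
            time.Sleep(10 * time.Millisecond) // pretend to build a full block
            p.update()
        }()
        p.WaitFull() // the deterministic wait the reworked tests rely on
        p.Resolve()
        fmt.Println("payload resolved after first full build")
    }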
--------- Co-authored-by: Adrian Sutton --- eth/catalyst/api_test.go | 34 ++++- eth/catalyst/queue.go | 19 +++ eth/catalyst/simulated_beacon.go | 3 + miner/payload_building.go | 222 +++++++++++++++++++++++-------- miner/payload_building_test.go | 64 ++++++++- miner/worker.go | 56 +++++++- 6 files changed, 322 insertions(+), 76 deletions(-) diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go index 116bca1af9..7a8330f4df 100644 --- a/eth/catalyst/api_test.go +++ b/eth/catalyst/api_test.go @@ -28,6 +28,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/ethereum/go-ethereum/beacon/engine" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -198,12 +200,10 @@ func TestEth2PrepareAndGetPayload(t *testing.T) { SafeBlockHash: common.Hash{}, FinalizedBlockHash: common.Hash{}, } - _, err := api.ForkchoiceUpdatedV1(fcState, &blockParams) + resp, err := api.ForkchoiceUpdatedV1(fcState, &blockParams) if err != nil { t.Fatalf("error preparing payload, err=%v", err) } - // give the payload some time to be built - time.Sleep(100 * time.Millisecond) payloadID := (&miner.BuildPayloadArgs{ Parent: fcState.HeadBlockHash, Timestamp: blockParams.Timestamp, @@ -211,6 +211,8 @@ func TestEth2PrepareAndGetPayload(t *testing.T) { Random: blockParams.Random, BeaconRoot: blockParams.BeaconRoot, }).Id() + require.Equal(t, payloadID, *resp.PayloadID) + require.NoError(t, waitForApiPayloadToBuild(api, *resp.PayloadID)) execData, err := api.GetPayloadV1(payloadID) if err != nil { t.Fatalf("error getting payload, err=%v", err) @@ -635,8 +637,7 @@ func TestNewPayloadOnInvalidChain(t *testing.T) { if resp.PayloadStatus.Status != engine.VALID { t.Fatalf("error preparing payload, invalid status: %v", resp.PayloadStatus.Status) } - // give the payload some time to be built - time.Sleep(50 * time.Millisecond) + require.NoError(t, waitForApiPayloadToBuild(api, *resp.PayloadID)) if payload, err = api.GetPayloadV1(*resp.PayloadID); err != nil { t.Fatalf("can't get payload: %v", err) } @@ -684,6 +685,7 @@ func assembleBlock(api *ConsensusAPI, parentHash common.Hash, params *engine.Pay if err != nil { return nil, err } + waitForPayloadToBuild(payload) return payload.ResolveFull().ExecutionPayload, nil } @@ -922,6 +924,7 @@ func TestNewPayloadOnInvalidTerminalBlock(t *testing.T) { if err != nil { t.Fatalf("error preparing payload, err=%v", err) } + waitForPayloadToBuild(payload) data := *payload.Resolve().ExecutionPayload // We need to recompute the blockhash, since the miner computes a wrong (correct) blockhash txs, _ := decodeTransactions(data.Transactions) @@ -1082,6 +1085,8 @@ func TestWithdrawals(t *testing.T) { Withdrawals: blockParams.Withdrawals, BeaconRoot: blockParams.BeaconRoot, }).Id() + require.Equal(t, payloadID, *resp.PayloadID) + require.NoError(t, waitForApiPayloadToBuild(api, payloadID)) execData, err := api.GetPayloadV2(payloadID) if err != nil { t.Fatalf("error getting payload, err=%v", err) @@ -1116,7 +1121,7 @@ func TestWithdrawals(t *testing.T) { }, } fcState.HeadBlockHash = execData.ExecutionPayload.BlockHash - _, err = api.ForkchoiceUpdatedV2(fcState, &blockParams) + resp, err = api.ForkchoiceUpdatedV2(fcState, &blockParams) if err != nil { t.Fatalf("error preparing payload, err=%v", err) } @@ -1130,6 +1135,8 @@ func TestWithdrawals(t *testing.T) { Withdrawals: blockParams.Withdrawals, BeaconRoot: blockParams.BeaconRoot, }).Id() + require.Equal(t, payloadID, *resp.PayloadID) + require.NoError(t, 
waitForApiPayloadToBuild(api, payloadID)) execData, err = api.GetPayloadV2(payloadID) if err != nil { t.Fatalf("error getting payload, err=%v", err) @@ -1242,7 +1249,7 @@ func TestNilWithdrawals(t *testing.T) { } for _, test := range tests { - _, err := api.ForkchoiceUpdatedV2(fcState, &test.blockParams) + resp, err := api.ForkchoiceUpdatedV2(fcState, &test.blockParams) if test.wantErr { if err == nil { t.Fatal("wanted error on fcuv2 with invalid withdrawals") @@ -1260,7 +1267,10 @@ func TestNilWithdrawals(t *testing.T) { FeeRecipient: test.blockParams.SuggestedFeeRecipient, Random: test.blockParams.Random, BeaconRoot: test.blockParams.BeaconRoot, + Withdrawals: test.blockParams.Withdrawals, }).Id() + require.Equal(t, payloadID, *resp.PayloadID) + require.NoError(t, waitForApiPayloadToBuild(api, payloadID)) execData, err := api.GetPayloadV2(payloadID) if err != nil { t.Fatalf("error getting payload, err=%v", err) @@ -1609,6 +1619,8 @@ func TestParentBeaconBlockRoot(t *testing.T) { Withdrawals: blockParams.Withdrawals, BeaconRoot: blockParams.BeaconRoot, }).Id() + require.Equal(t, payloadID, *resp.PayloadID) + require.NoError(t, waitForApiPayloadToBuild(api, *resp.PayloadID)) execData, err := api.GetPayloadV3(payloadID) if err != nil { t.Fatalf("error getting payload, err=%v", err) @@ -1647,3 +1659,11 @@ func TestParentBeaconBlockRoot(t *testing.T) { t.Fatalf("incorrect root stored: want %s, got %s", *blockParams.BeaconRoot, root) } } + +func waitForPayloadToBuild(payload *miner.Payload) { + payload.WaitFull() +} + +func waitForApiPayloadToBuild(api *ConsensusAPI, id engine.PayloadID) error { + return api.localBlocks.waitFull(id) +} diff --git a/eth/catalyst/queue.go b/eth/catalyst/queue.go index 634dc1b2e6..d42904843b 100644 --- a/eth/catalyst/queue.go +++ b/eth/catalyst/queue.go @@ -17,6 +17,7 @@ package catalyst import ( + "errors" "sync" "github.com/ethereum/go-ethereum/beacon/engine" @@ -91,6 +92,24 @@ func (q *payloadQueue) get(id engine.PayloadID, full bool) *engine.ExecutionPayl return nil } +// waitFull waits until the first full payload has been built for the specified payload id +// The method returns immediately if the payload is unknown. +func (q *payloadQueue) waitFull(id engine.PayloadID) error { + q.lock.RLock() + defer q.lock.RUnlock() + + for _, item := range q.payloads { + if item == nil { + return errors.New("unknown payload") + } + if item.id == id { + item.payload.WaitFull() + return nil + } + } + return errors.New("unknown payload") +} + // has checks if a particular payload is already tracked. 
func (q *payloadQueue) has(id engine.PayloadID) bool { q.lock.RLock() diff --git a/eth/catalyst/simulated_beacon.go b/eth/catalyst/simulated_beacon.go index a9a2bb4a9a..dfbb42878c 100644 --- a/eth/catalyst/simulated_beacon.go +++ b/eth/catalyst/simulated_beacon.go @@ -165,6 +165,9 @@ func (c *SimulatedBeacon) sealBlock(withdrawals []*types.Withdrawal) error { return errors.New("chain rewind prevented invocation of payload creation") } + if err := c.engineAPI.localBlocks.waitFull(*fcResponse.PayloadID); err != nil { + return err + } envelope, err := c.engineAPI.getPayload(*fcResponse.PayloadID, true) if err != nil { return err diff --git a/miner/payload_building.go b/miner/payload_building.go index 1510ee5a82..935329f57e 100644 --- a/miner/payload_building.go +++ b/miner/payload_building.go @@ -19,8 +19,10 @@ package miner import ( "crypto/sha256" "encoding/binary" + "errors" "math/big" "sync" + "sync/atomic" "time" "github.com/ethereum/go-ethereum/beacon/engine" @@ -91,6 +93,10 @@ type Payload struct { stop chan struct{} lock sync.Mutex cond *sync.Cond + + err error + stopOnce sync.Once + interrupt *atomic.Int32 // interrupt signal shared with worker } // newPayload initializes the payload object. @@ -99,12 +105,16 @@ func newPayload(empty *types.Block, id engine.PayloadID) *Payload { id: id, empty: empty, stop: make(chan struct{}), + + interrupt: new(atomic.Int32), } log.Info("Starting work on payload", "id", payload.id) payload.cond = sync.NewCond(&payload.lock) return payload } +var errInterruptedUpdate = errors.New("interrupted payload update") + // update updates the full-block with latest built version. func (payload *Payload) update(r *newPayloadResult, elapsed time.Duration) { payload.lock.Lock() @@ -115,6 +125,19 @@ func (payload *Payload) update(r *newPayloadResult, elapsed time.Duration) { return // reject stale update default: } + + defer payload.cond.Broadcast() // fire signal for notifying any full block result + + if errors.Is(r.err, errInterruptedUpdate) { + log.Debug("Ignoring interrupted payload update", "id", payload.id) + return + } else if r.err != nil { + log.Warn("Error building payload update", "id", payload.id, "err", r.err) + payload.err = r.err // record latest error + return + } + log.Debug("New payload update", "id", payload.id, "elapsed", common.PrettyDuration(elapsed)) + // Ensure the newly provided full block has a higher transaction fee. // In post-merge stage, there is no uncle reward anymore and transaction // fee(apart from the mev revenue) is the only indicator for comparison. @@ -136,24 +159,12 @@ func (payload *Payload) update(r *newPayloadResult, elapsed time.Duration) { "elapsed", common.PrettyDuration(elapsed), ) } - payload.cond.Broadcast() // fire signal for notifying full block } // Resolve returns the latest built payload and also terminates the background // thread for updating payload. It's safe to be called multiple times. func (payload *Payload) Resolve() *engine.ExecutionPayloadEnvelope { - payload.lock.Lock() - defer payload.lock.Unlock() - - select { - case <-payload.stop: - default: - close(payload.stop) - } - if payload.full != nil { - return engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars) - } - return engine.BlockToExecutableData(payload.empty, big.NewInt(0), nil) + return payload.resolve(false) } // ResolveEmpty is basically identical to Resolve, but it expects empty block only. 
@@ -168,10 +179,24 @@ func (payload *Payload) ResolveEmpty() *engine.ExecutionPayloadEnvelope { // ResolveFull is basically identical to Resolve, but it expects full block only. // Don't call Resolve until ResolveFull returns, otherwise it might block forever. func (payload *Payload) ResolveFull() *engine.ExecutionPayloadEnvelope { + return payload.resolve(true) +} + +func (payload *Payload) WaitFull() { + payload.lock.Lock() + defer payload.lock.Unlock() + payload.cond.Wait() +} + +func (payload *Payload) resolve(onlyFull bool) *engine.ExecutionPayloadEnvelope { payload.lock.Lock() defer payload.lock.Unlock() - if payload.full == nil { + // We interrupt any active building block to prevent it from adding more transactions, + // and if it is an update, don't attempt to seal the block. + payload.interruptBuilding() + + if payload.full == nil && (onlyFull || payload.empty == nil) { select { case <-payload.stop: return nil @@ -182,22 +207,82 @@ func (payload *Payload) ResolveFull() *engine.ExecutionPayloadEnvelope { // terminates the background construction process. payload.cond.Wait() } - // Terminate the background payload construction - select { - case <-payload.stop: - default: - close(payload.stop) + + // Now we can signal the building routine to stop. + payload.stopBuilding() + + if payload.full != nil { + return engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars) + } else if !onlyFull && payload.empty != nil { + return engine.BlockToExecutableData(payload.empty, big.NewInt(0), nil) + } else if err := payload.err; err != nil { + log.Error("Error building any payload", "id", payload.id, "err", err) } - return engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars) + return nil +} + +// interruptBuilding sets an interrupt for a potentially ongoing +// block building process. +// This will prevent it from adding new transactions to the block, and if it is +// building an update, the block will also not be sealed, as we would discard +// the update anyways. +// interruptBuilding is safe to be called concurrently. +func (payload *Payload) interruptBuilding() { + // Set the interrupt if not interrupted already. + // It's ok if it has either already been interrupted by payload resolution earlier, + // or by the timeout timer set to commitInterruptTimeout. + if payload.interrupt.CompareAndSwap(commitInterruptNone, commitInterruptResolve) { + log.Debug("Interrupted payload building.", "id", payload.id) + } else { + log.Debug("Payload building already interrupted.", + "id", payload.id, "interrupt", payload.interrupt.Load()) + } +} + +// stopBuilding signals to the block updating routine to stop. An ongoing payload +// building job will still complete. It can be interrupted to stop filling new +// transactions with interruptBuilding. +// stopBuilding is safe to be called concurrently. +func (payload *Payload) stopBuilding() { + // Concurrent Resolve calls should only stop once. + payload.stopOnce.Do(func() { + log.Debug("Stop payload building.", "id", payload.id) + close(payload.stop) + }) } // buildPayload builds the payload according to the provided parameters. func (w *worker) buildPayload(args *BuildPayloadArgs) (*Payload, error) { - // Build the initial version with no transaction included. It should be fast - // enough to run. The empty payload can at least make sure there is something - // to deliver for not missing slot. - // In OP-Stack, the "empty" block is constructed from provided txs only, i.e. no tx-pool usage. 
- emptyParams := &generateParams{ + if args.NoTxPool { // don't start the background payload updating job if there is no tx pool to pull from + // Build the initial version with no transaction included. It should be fast + // enough to run. The empty payload can at least make sure there is something + // to deliver for not missing slot. + // In OP-Stack, the "empty" block is constructed from provided txs only, i.e. no tx-pool usage. + emptyParams := &generateParams{ + timestamp: args.Timestamp, + forceTime: true, + parentHash: args.Parent, + coinbase: args.FeeRecipient, + random: args.Random, + withdrawals: args.Withdrawals, + beaconRoot: args.BeaconRoot, + noTxs: true, + txs: args.Transactions, + gasLimit: args.GasLimit, + } + empty := w.getSealingBlock(emptyParams) + if empty.err != nil { + return nil, empty.err + } + payload := newPayload(empty.block, args.Id()) + // make sure to make it appear as full, otherwise it will wait indefinitely for payload building to complete. + payload.full = empty.block + payload.fullFees = empty.fees + payload.cond.Broadcast() // unblocks Resolve + return payload, nil + } + + fullParams := &generateParams{ timestamp: args.Timestamp, forceTime: true, parentHash: args.Parent, @@ -205,24 +290,22 @@ func (w *worker) buildPayload(args *BuildPayloadArgs) (*Payload, error) { random: args.Random, withdrawals: args.Withdrawals, beaconRoot: args.BeaconRoot, - noTxs: true, + noTxs: false, txs: args.Transactions, gasLimit: args.GasLimit, } - empty := w.getSealingBlock(emptyParams) - if empty.err != nil { - return nil, empty.err - } - // Construct a payload object for return. - payload := newPayload(empty.block, args.Id()) - if args.NoTxPool { // don't start the background payload updating job if there is no tx pool to pull from - // make sure to make it appear as full, otherwise it will wait indefinitely for payload building to complete. - payload.full = empty.block - payload.fullFees = empty.fees - return payload, nil + // Since we skip building the empty block when using the tx pool, we need to explicitly + // validate the BuildPayloadArgs here. + blockTime, err := w.validateParams(fullParams) + if err != nil { + return nil, err } + payload := newPayload(nil, args.Id()) + // set shared interrupt + fullParams.interrupt = payload.interrupt + // Spin up a routine for updating the payload in background. This strategy // can maximum the revenue for including transactions with highest fee. go func() { @@ -231,38 +314,59 @@ func (w *worker) buildPayload(args *BuildPayloadArgs) (*Payload, error) { timer := time.NewTimer(0) defer timer.Stop() - // Setup the timer for terminating the process if SECONDS_PER_SLOT (12s in - // the Mainnet configuration) have passed since the point in time identified - // by the timestamp parameter. - endTimer := time.NewTimer(time.Second * 12) + start := time.Now() + // Setup the timer for terminating the payload building process as determined + // by validateParams. 
+ endTimer := time.NewTimer(blockTime) + defer endTimer.Stop() - fullParams := &generateParams{ - timestamp: args.Timestamp, - forceTime: true, - parentHash: args.Parent, - coinbase: args.FeeRecipient, - random: args.Random, - withdrawals: args.Withdrawals, - beaconRoot: args.BeaconRoot, - noTxs: false, - txs: args.Transactions, - gasLimit: args.GasLimit, + timeout := time.Now().Add(blockTime) + + stopReason := "delivery" + defer func() { + log.Info("Stopping work on payload", + "id", payload.id, + "reason", stopReason, + "elapsed", time.Since(start).Milliseconds()) + }() + + updatePayload := func() time.Duration { + start := time.Now() + // getSealingBlock is interrupted by shared interrupt + r := w.getSealingBlock(fullParams) + dur := time.Since(start) + // update handles error case + payload.update(r, dur) + if r.err == nil { + // after first successful pass, we're updating + fullParams.isUpdate = true + } + timer.Reset(w.recommit) + return dur } + var lastDuration time.Duration for { select { case <-timer.C: - start := time.Now() - r := w.getSealingBlock(fullParams) - if r.err == nil { - payload.update(r, time.Since(start)) + // We have to prioritize the stop signal because the recommit timer + // might have fired while stop also got closed. + select { + case <-payload.stop: + return + default: + } + // Assuming last payload building duration as lower bound for next one, + // skip new update if we're too close to the timeout anyways. + if lastDuration > 0 && time.Now().Add(lastDuration).After(timeout) { + stopReason = "near-timeout" + return } - timer.Reset(w.recommit) + lastDuration = updatePayload() case <-payload.stop: - log.Info("Stopping work on payload", "id", payload.id, "reason", "delivery") return case <-endTimer.C: - log.Info("Stopping work on payload", "id", payload.id, "reason", "timeout") + stopReason = "timeout" return } } diff --git a/miner/payload_building_test.go b/miner/payload_building_test.go index 6f57363441..d9208e7bc2 100644 --- a/miner/payload_building_test.go +++ b/miner/payload_building_test.go @@ -17,6 +17,7 @@ package miner import ( + "math/big" "reflect" "testing" "time" @@ -30,6 +31,14 @@ import ( ) func TestBuildPayload(t *testing.T) { + t.Run("no-tx-pool", func(t *testing.T) { testBuildPayload(t, true, false) }) + // no-tx-pool case with interrupt not interesting because no-tx-pool doesn't run + // the builder routine + t.Run("with-tx-pool", func(t *testing.T) { testBuildPayload(t, false, false) }) + t.Run("with-tx-pool-interrupt", func(t *testing.T) { testBuildPayload(t, false, true) }) +} + +func testBuildPayload(t *testing.T, noTxPool, interrupt bool) { var ( db = rawdb.NewMemoryDatabase() recipient = common.HexToAddress("0xdeadbeef") @@ -37,18 +46,33 @@ func TestBuildPayload(t *testing.T) { w, b := newTestWorker(t, params.TestChainConfig, ethash.NewFaker(), db, 0) defer w.close() + const numInterruptTxs = 256 + if interrupt { + // when doing interrupt testing, create a large pool so interruption will + // definitely be visible. 
+ txs := genTxs(1, numInterruptTxs) + b.txPool.Add(txs, true, false) + } + timestamp := uint64(time.Now().Unix()) args := &BuildPayloadArgs{ Parent: b.chain.CurrentBlock().Hash(), Timestamp: timestamp, Random: common.Hash{}, FeeRecipient: recipient, + NoTxPool: noTxPool, } + // payload resolution now interrupts block building, so we have to + // wait for the payloading building process to build its first block payload, err := w.buildPayload(args) if err != nil { t.Fatalf("Failed to build payload %v", err) } verify := func(outer *engine.ExecutionPayloadEnvelope, txs int) { + t.Helper() + if outer == nil { + t.Fatal("ExecutionPayloadEnvelope is nil") + } payload := outer.ExecutionPayload if payload.ParentHash != b.chain.CurrentBlock().Hash() { t.Fatal("Unexpect parent hash") @@ -62,15 +86,27 @@ func TestBuildPayload(t *testing.T) { if payload.FeeRecipient != recipient { t.Fatal("Unexpect fee recipient") } - if len(payload.Transactions) != txs { - t.Fatal("Unexpect transaction set") + if !interrupt && len(payload.Transactions) != txs { + t.Fatalf("Unexpect transaction set: got %d, expected %d", len(payload.Transactions), txs) + } else if interrupt && len(payload.Transactions) >= txs { + t.Fatalf("Unexpect transaction set: got %d, expected less than %d", len(payload.Transactions), txs) } } - empty := payload.ResolveEmpty() - verify(empty, 0) - full := payload.ResolveFull() - verify(full, len(pendingTxs)) + if noTxPool { + // we only build the empty block when ignoring the tx pool + empty := payload.ResolveEmpty() + verify(empty, 0) + full := payload.ResolveFull() + verify(full, 0) + } else if interrupt { + full := payload.ResolveFull() + verify(full, len(pendingTxs)+numInterruptTxs) + } else { // tx-pool and no interrupt + payload.WaitFull() + full := payload.ResolveFull() + verify(full, len(pendingTxs)) + } // Ensure resolve can be called multiple times and the // result should be unchanged @@ -81,6 +117,22 @@ func TestBuildPayload(t *testing.T) { } } +func genTxs(startNonce, count uint64) types.Transactions { + txs := make(types.Transactions, 0, count) + signer := types.LatestSigner(params.TestChainConfig) + for nonce := startNonce; nonce < startNonce+count; nonce++ { + txs = append(txs, types.MustSignNewTx(testBankKey, signer, &types.AccessListTx{ + ChainID: params.TestChainConfig.ChainID, + Nonce: nonce, + To: &testUserAddress, + Value: big.NewInt(1000), + Gas: params.TxGas, + GasPrice: big.NewInt(params.InitialBaseFee), + })) + } + return txs +} + func TestPayloadId(t *testing.T) { ids := make(map[string]int) for i, tt := range []*BuildPayloadArgs{ diff --git a/miner/worker.go b/miner/worker.go index 4a73c1b1b6..87e1373192 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -80,6 +80,7 @@ var ( errBlockInterruptedByNewHead = errors.New("new head arrived while building block") errBlockInterruptedByRecommit = errors.New("recommit interrupt while building block") errBlockInterruptedByTimeout = errors.New("timeout while building block") + errBlockInterruptedByResolve = errors.New("payload resolution while building block") ) // environment is the worker's current environment and holds all @@ -144,6 +145,7 @@ const ( commitInterruptNewHead commitInterruptResubmit commitInterruptTimeout + commitInterruptResolve ) // newWorkReq represents a request for new sealing work submitting with relative interrupt notifier. @@ -937,8 +939,42 @@ type generateParams struct { beaconRoot *common.Hash // The beacon root (cancun field). 
noTxs bool // Flag whether an empty block without any transaction is expected - txs types.Transactions // Deposit transactions to include at the start of the block - gasLimit *uint64 // Optional gas limit override + txs types.Transactions // Deposit transactions to include at the start of the block + gasLimit *uint64 // Optional gas limit override + interrupt *atomic.Int32 // Optional interruption signal to pass down to worker.generateWork + isUpdate bool // Optional flag indicating that this is building a discardable update +} + +// validateParams validates the given parameters. +// It currently checks that the parent block is known and that the timestamp is valid, +// i.e., after the parent block's timestamp. +// It returns an upper bound of the payload building duration as computed +// by the difference in block timestamps between the parent and genParams. +func (w *worker) validateParams(genParams *generateParams) (time.Duration, error) { + w.mu.RLock() + defer w.mu.RUnlock() + + // Find the parent block for sealing task + parent := w.chain.CurrentBlock() + if genParams.parentHash != (common.Hash{}) { + block := w.chain.GetBlockByHash(genParams.parentHash) + if block == nil { + return 0, fmt.Errorf("missing parent %v", genParams.parentHash) + } + parent = block.Header() + } + + // Sanity check the timestamp correctness + blockTime := int64(genParams.timestamp) - int64(parent.Time) + if blockTime <= 0 && genParams.forceTime { + return 0, fmt.Errorf("invalid timestamp, parent %d given %d", parent.Time, genParams.timestamp) + } + + // minimum payload build time of 2s + if blockTime < 2 { + blockTime = 2 + } + return time.Duration(blockTime) * time.Second, nil } // prepareWork constructs the sealing task according to the given parameters, @@ -1086,17 +1122,27 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { // forced transactions done, fill rest of block with transactions if !genParams.noTxs { - interrupt := new(atomic.Int32) + // use shared interrupt if present + interrupt := genParams.interrupt + if interrupt == nil { + interrupt = new(atomic.Int32) + } timer := time.AfterFunc(w.newpayloadTimeout, func() { interrupt.Store(commitInterruptTimeout) }) - defer timer.Stop() err := w.fillTransactions(interrupt, work) + timer.Stop() // don't need timeout interruption any more if errors.Is(err, errBlockInterruptedByTimeout) { log.Warn("Block building is interrupted", "allowance", common.PrettyDuration(w.newpayloadTimeout)) + } else if errors.Is(err, errBlockInterruptedByResolve) { + log.Info("Block building got interrupted by payload resolution") } } + if intr := genParams.interrupt; intr != nil && genParams.isUpdate && intr.Load() != commitInterruptNone { + return &newPayloadResult{err: errInterruptedUpdate} + } + block, err := w.engine.FinalizeAndAssemble(w.chain, work.header, work.state, work.txs, nil, work.receipts, genParams.withdrawals) if err != nil { return &newPayloadResult{err: err} @@ -1264,6 +1310,8 @@ func signalToErr(signal int32) error { return errBlockInterruptedByRecommit case commitInterruptTimeout: return errBlockInterruptedByTimeout + case commitInterruptResolve: + return errBlockInterruptedByResolve default: panic(fmt.Errorf("undefined signal %d", signal)) }
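For a quick, standalone illustration of the block-time calculation that validateParams introduces above, the following Go sketch clamps the payload build window to a 2 second minimum. The helper name buildTime is hypothetical; the real code also looks up the parent header via the chain and only rejects a non-positive gap when forceTime is set.

    package main

    import (
        "fmt"
        "time"
    )

    // buildTime returns the payload building window as the gap between the
    // parent's timestamp and the requested timestamp, clamped to 2 seconds.
    func buildTime(parentTime, requestedTime uint64) (time.Duration, error) {
        blockTime := int64(requestedTime) - int64(parentTime)
        if blockTime <= 0 {
            return 0, fmt.Errorf("invalid timestamp, parent %d given %d", parentTime, requestedTime)
        }
        if blockTime < 2 { // minimum payload build time of 2s
            blockTime = 2
        }
        return time.Duration(blockTime) * time.Second, nil
    }

    func main() {
        d, err := buildTime(1700000000, 1700000001) // 1s gap between blocks
        if err != nil {
            panic(err)
        }
        fmt.Println(d) // prints 2s: clamped to the minimum build window
    }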