From 92eb71e0a2271383cf23555e43895c57faae15d5 Mon Sep 17 00:00:00 2001
From: Yacov Manevich
Date: Thu, 2 Jan 2025 01:26:49 +0100
Subject: [PATCH] Build blocks asynchronously

When it is a node's turn to propose or verify a block, it calls the
BlockBuilder's BuildBlock() or the block's Verify() method, respectively.
However, these methods may take a long time to complete, and during this
time the Epoch instance cannot process messages (since proposing a new
block may be called indirectly via HandleMessage).

This commit adds an asynchronous task scheduler which the Epoch uses to
register a callback that builds or verifies the block asynchronously and
then continues the program flow from that point onwards. While the block
is being built or verified, Simplex is free to handle other messages.

Signed-off-by: Yacov Manevich
---
 epoch.go                | 223 +++++++++++++++++++++++++++++++++-------
 epoch_multinode_test.go |  17 ++-
 epoch_test.go           |   4 +-
 recovery_test.go        |  17 ++-
 sched.go                | 143 ++++++++++++++++++++++++++
 sched_test.go           | 126 +++++++++++++++++++++++
 6 files changed, 488 insertions(+), 42 deletions(-)
 create mode 100644 sched.go
 create mode 100644 sched_test.go

diff --git a/epoch.go b/epoch.go
index 74cff59..c348124 100644
--- a/epoch.go
+++ b/epoch.go
@@ -6,17 +6,22 @@ package simplex
 import (
 	"bytes"
 	"context"
+	"crypto/sha256"
 	"encoding/binary"
 	"errors"
 	"fmt"
 	"simplex/record"
+	"sync"
 	"sync/atomic"
 	"time"
 
 	"go.uber.org/zap"
 )
 
-const defaultMaxRoundWindow = 10
+const (
+	defaultMaxRoundWindow   = 10
+	defaultMaxPendingBlocks = 10
+)
 
 type Round struct {
 	num uint64
@@ -55,6 +60,8 @@ type EpochConfig struct {
 type Epoch struct {
 	EpochConfig
 	// Runtime
+	sched              *scheduler
+	lock               sync.Mutex
 	lastBlock          Block // latest block commited
 	canReceiveMessages atomic.Bool
 	finishCtx          context.Context
@@ -66,6 +73,7 @@ type Epoch struct {
 	futureMessages messagesFromNode
 	round          uint64 // The current round we notarize
 	maxRoundWindow uint64
+	maxPendingBlocks int
 }
 
 func NewEpoch(conf EpochConfig) (*Epoch, error) {
@@ -82,6 +90,9 @@ func (e *Epoch) AdvanceTime(t time.Duration) {
 
 // HandleMessage notifies the engine about a reception of a message.
 func (e *Epoch) HandleMessage(msg *Message, from NodeID) error {
+	e.lock.Lock()
+	defer e.lock.Unlock()
+
 	// Guard against receiving messages before we are ready to handle them.
 	if !e.canReceiveMessages.Load() {
 		e.Logger.Warn("Cannot receive a message")
@@ -112,11 +123,13 @@ func (e *Epoch) HandleMessage(msg *Message, from NodeID) error {
 }
 
 func (e *Epoch) init() error {
+	e.sched = NewScheduler()
 	e.finishCtx, e.finishFn = context.WithCancel(context.Background())
 	e.nodes = e.Comm.ListNodes()
 	e.quorumSize = Quorum(len(e.nodes))
 	e.rounds = make(map[uint64]*Round)
 	e.maxRoundWindow = defaultMaxRoundWindow
+	e.maxPendingBlocks = defaultMaxPendingBlocks
 	e.eligibleNodeIDs = make(map[string]struct{}, len(e.nodes))
 	e.futureMessages = make(messagesFromNode, len(e.nodes))
 	for _, node := range e.nodes {
@@ -406,10 +419,31 @@ func (e *Epoch) handleFinalizationMessage(message *Finalization, from NodeID) er
 		return nil
 	}
 
+	// If we have not received the proposal yet, we won't have a Round object in e.rounds,
+	// yet we may receive the corresponding finalization.
+	// This may happen if we're asynchronously verifying the proposal at the moment.
+	var pendingProposal bool
+	if _, exists := e.rounds[finalization.Round]; !exists && e.round == finalization.Round {
+		pendingProposal = true
+	}
+
+	// This finalization may correspond to a proposal from a future round, or to the proposal of the current round
+	// which we are still verifying.
+	if (e.round < finalization.Round && finalization.Round-e.round < e.maxRoundWindow) || pendingProposal {
+		e.Logger.Debug("Got finalization from round too far in the future", zap.Uint64("round", finalization.Round), zap.Uint64("my round", e.round))
+		msgsForRound, exists := e.futureMessages[string(from)][finalization.Round]
+		if !exists {
+			msgsForRound = &messagesForRound{}
+			e.futureMessages[string(from)][finalization.Round] = msgsForRound
+		}
+		msgsForRound.finalization = message
+		return nil
+	}
+
 	// Have we already finalized this round?
 	round, exists := e.rounds[finalization.Round]
 	if !exists {
-		e.Logger.Debug("Received finalization for an unknown round", zap.Uint64("round", finalization.Round))
+		e.Logger.Debug("Received finalization for an unknown round", zap.Uint64("ourRound", e.round), zap.Uint64("round", finalization.Round))
 		return nil
 	}
 
@@ -427,13 +461,46 @@ func (e *Epoch) handleFinalizationMessage(message *Finalization, from NodeID) er
 	return e.maybeCollectFinalizationCertificate(round)
 }
 
-func (e *Epoch) handleVoteMessage(message *Vote, _ NodeID) error {
+func (e *Epoch) handleVoteMessage(message *Vote, from NodeID) error {
 	vote := message.Vote
 
+	// Only process point to point votes.
+	// This is needed to prevent a malicious node from sending us a vote of a different node for a future round.
+	// Since we only verify the vote when it's due time, this will effectively block us from saving the real vote
+	// from the real node for a future round.
+	if !from.Equals(message.Signature.Signer) {
+		e.Logger.Debug("Received a vote signed by a different party than sent it",
+			zap.Stringer("signer", message.Signature.Signer), zap.Stringer("sender", from),
+			zap.Stringer("digest", vote.Digest))
+		return nil
+	}
+
+	// If we have not received the proposal yet, we won't have a Round object in e.rounds,
+	// yet we may receive the corresponding vote.
+	// This may happen if we're asynchronously verifying the proposal at the moment.
+	var pendingProposal bool
+	if _, exists := e.rounds[vote.Round]; !exists && e.round == vote.Round {
+		pendingProposal = true
+	}
+
+	// This vote may correspond to a proposal from a future round, or to the proposal of the current round
+	// which we are still verifying.
+	if (e.round < vote.Round && vote.Round-e.round < e.maxRoundWindow) || pendingProposal {
+		e.Logger.Debug("Got vote from round too far in the future", zap.Uint64("round", vote.Round), zap.Uint64("my round", e.round))
+		msgsForRound, exists := e.futureMessages[string(from)][vote.Round]
+		if !exists {
+			msgsForRound = &messagesForRound{}
+			e.futureMessages[string(from)][vote.Round] = msgsForRound
+		}
+		msgsForRound.vote = message
+		return nil
+	}
+
 	// TODO: what if we've received a vote for a round we didn't instantiate yet?
 	round, exists := e.rounds[vote.Round]
 	if !exists {
-		e.Logger.Debug("Received a vote for a non existent round", zap.Uint64("round", vote.Round))
+		e.Logger.Debug("Received a vote for a non existent round",
+			zap.Uint64("round", vote.Round), zap.Uint64("our round", e.round))
 		return nil
 	}
 
@@ -705,11 +772,24 @@ func (e *Epoch) handleBlockMessage(message *BlockMessage, _ NodeID) error {
 		return nil
 	}
 
+	pendingBlocks := e.sched.Size()
+	if pendingBlocks > e.maxPendingBlocks {
+		e.Logger.Warn("Too many blocks being verified to ingest another one", zap.Int("pendingBlocks", pendingBlocks))
+		return nil
+	}
+
 	vote := message.Vote
 	from := vote.Signature.Signer
 
 	md := block.BlockHeader()
 
+	e.Logger.Debug("Handling block message", zap.Stringer("digest", md.Digest), zap.Uint64("round", md.Round))
+
+	// Don't bother processing blocks from the past
+	if e.round > md.Round {
+		return nil
+	}
+
 	// Ignore block messages sent by us
 	if e.ID.Equals(from) {
 		e.Logger.Debug("Got a BlockMessage from ourselves or created by us")
@@ -762,31 +842,70 @@ func (e *Epoch) handleBlockMessage(message *BlockMessage, _ NodeID) error {
 		return nil
 	}
 
-	if !e.storeProposal(block) {
-		e.Logger.Warn("Unable to store proposed block for the round", zap.Stringer("NodeID", from), zap.Uint64("round", md.Round))
-		// TODO: timeout
-	}
+	// Create a task that will verify the block in the future, after its predecessors have also been verified.
+	task := e.scheduleBlockVerification(block, md, from, vote)
 
-	// Once we have stored the proposal, we have a Round object for the round.
-	// We store the vote to prevent verifying its signature again.
-	round, exists := e.rounds[md.Round]
-	if !exists {
-		// This shouldn't happen, but in case it does, return an error
-		return fmt.Errorf("programming error: round %d not found", md.Round)
-	}
-	round.votes[string(vote.Signature.Signer)] = &vote
+	// isBlockReadyToBeScheduled checks if the block's predecessor is known to us either from some previous round,
+	// or from storage. If so, then we have verified it in the past, since only verified blocks are saved in memory.
+	canBeImmediatelyVerified := e.isBlockReadyToBeScheduled(md.Seq, md.Prev)
 
-	if err := block.Verify(); err != nil {
-		e.Logger.Debug("Failed verifying block", zap.Error(err))
-		return nil
-	}
-	record := BlockRecord(md, block.Bytes())
-	if err := e.WAL.Append(record); err != nil {
-		e.Logger.Error("Failed appending block to WAL", zap.Error(err))
-		return err
+	// Schedule the block to be verified once its direct predecessor has been verified,
+	// or if it can be verified immediately.
+	e.Logger.Debug("Scheduling block verification", zap.Uint64("round", md.Round))
+	e.sched.Schedule(md.Digest, task, md.Prev, canBeImmediatelyVerified)
+
+	return nil
+}
+
+func (e *Epoch) scheduleBlockVerification(block Block, md BlockHeader, from NodeID, vote Vote) func() {
+	return func() {
+		e.lock.Lock()
+		defer e.lock.Unlock()
+
+		if err := block.Verify(); err != nil {
+			e.Logger.Debug("Failed verifying block", zap.Error(err))
+			return
+		}
+		record := BlockRecord(md, block.Bytes())
+		e.WAL.Append(record)
+
+		if !e.storeProposal(block) {
+			e.Logger.Warn("Unable to store proposed block for the round", zap.Stringer("NodeID", from), zap.Uint64("round", md.Round))
+			return
+			// TODO: timeout
+		}
+
+		// Once we have stored the proposal, we have a Round object for the round.
+		// We store the vote to prevent verifying its signature again.
+		round, exists := e.rounds[md.Round]
+		if !exists {
+			// This shouldn't happen, but in case it does, return an error
+			e.Logger.Error("programming error: round not found", zap.Uint64("round", md.Round))
+			return
+		}
+		round.votes[string(vote.Signature.Signer)] = &vote
+
+		if err := e.doProposed(block, vote, from); err != nil {
+			e.Logger.Debug("Failed voting on block", zap.Error(err))
+		}
 	}
+}
 
-	return e.doProposed(block, vote)
+func (e *Epoch) isBlockReadyToBeScheduled(seq uint64, prev Digest) bool {
+	var ready bool
+
+	if seq > 0 {
+		// A block can be scheduled if its predecessor either exists in storage,
+		// or there exists a round object for it.
+		// Since we only create a round object after we verify the block,
+		// it means we have verified this block in the past.
+		_, ok := e.locateBlock(seq-1, prev[:])
+		ready = ok
+	} else {
+		// The first block is always ready to be scheduled
+		ready = true
+	}
+	return ready
 }
 
 func (e *Epoch) wasBlockAlreadyVerified(from NodeID, md BlockHeader) bool {
@@ -897,12 +1016,39 @@ func (e *Epoch) locateBlock(seq uint64, digest []byte) (Block, bool) {
 	return nil, false
 }
 
-func (e *Epoch) proposeBlock() error {
-	block, ok := e.BlockBuilder.BuildBlock(e.finishCtx, e.Metadata())
-	if !ok {
-		return errors.New("failed to build block")
+func (e *Epoch) buildBlock() {
+	metadata := e.metadata()
+
+	task := e.scheduleBlockBuilding(metadata)
+
+	// We set the task ID to be the hash of the previous block, since we don't know the digest
+	// of the block before it is built.
+	// We know, however, that any block before or after the previous block won't
+	// have this digest, under the assumption that the hash function is collision resistant.
+	// TODO: Inject this hash as a dependency and don't use SHA256 as a hardcoded function
+	taskID := sha256.Sum256(metadata.Prev[:])
+
+	e.Logger.Debug("Scheduling block building", zap.Uint64("round", metadata.Round))
+	canBeImmediatelyVerified := e.isBlockReadyToBeScheduled(metadata.Seq, metadata.Prev)
+	e.sched.Schedule(taskID, task, metadata.Prev, canBeImmediatelyVerified)
+}
+
+func (e *Epoch) scheduleBlockBuilding(metadata ProtocolMetadata) func() {
+	return func() {
+		block, ok := e.BlockBuilder.BuildBlock(e.finishCtx, metadata)
+		if !ok {
+			e.Logger.Warn("Failed building block")
+			return
+		}
+
+		e.lock.Lock()
+		defer e.lock.Unlock()
+
+		e.proposeBlock(block)
 	}
+}
 
+func (e *Epoch) proposeBlock(block Block) error {
 	md := block.BlockHeader()
 
 	// Write record to WAL before broadcasting it, so that
@@ -945,6 +1091,13 @@ func (e *Epoch) proposeBlock() error {
 }
 
 func (e *Epoch) Metadata() ProtocolMetadata {
+	e.lock.Lock()
+	defer e.lock.Unlock()
+
+	return e.metadata()
+}
+
+func (e *Epoch) metadata() ProtocolMetadata {
 	var prev Digest
 	seq := e.Storage.Height()
 	if e.lastBlock != nil {
@@ -968,7 +1121,8 @@ func (e *Epoch) startRound() error {
 	leaderForCurrentRound := LeaderForRound(e.nodes, e.round)
 
 	if e.ID.Equals(leaderForCurrentRound) {
-		return e.proposeBlock()
+		e.buildBlock()
+		return nil
 	}
 
 	// If we're not the leader, check if we have received a proposal earlier for this round
@@ -980,7 +1134,7 @@ func (e *Epoch) startRound() error {
 	return e.handleBlockMessage(msgsForRound.proposal, leaderForCurrentRound)
 }
 
-func (e *Epoch) doProposed(block Block, voteFromLeader Vote) error {
+func (e *Epoch) doProposed(block Block, voteFromLeader Vote, from NodeID) error {
 	vote, err := e.voteOnBlock(block)
 	if err != nil {
 		return err
@@ -1075,6 +1229,8 @@ func (e *Epoch) storeNotarization(notarization Notarization) error {
 }
 
 func (e *Epoch) maybeLoadFutureMessages(round uint64) {
+	e.Logger.Debug("Loading messages received for this round in the past", zap.Uint64("round", round))
+
 	for from, messagesFromNode := range e.futureMessages {
 		if msgs, exists := messagesFromNode[round]; exists {
 			if msgs.proposal != nil {
@@ -1102,11 +1258,6 @@ func (e *Epoch) maybeLoadFutureMessages(round uint64) {
 func (e *Epoch) storeProposal(block Block) bool {
 	md := block.BlockHeader()
 
-	// Don't bother processing blocks from the past
-	if e.round > md.Round {
-		return false
-	}
-
 	// Have we already received a block from that node?
 	// If so, it cannot change its mind and send us a different block.
 	if _, exists := e.rounds[md.Round]; exists {
diff --git a/epoch_multinode_test.go b/epoch_multinode_test.go
index 0cc03d2..37ff8f9 100644
--- a/epoch_multinode_test.go
+++ b/epoch_multinode_test.go
@@ -46,7 +46,6 @@ func TestSimplexMultiNodeSimple(t *testing.T) {
 		for _, n := range instances {
 			n.ledger.waitForBlockCommit(uint64(seq))
 		}
-		bb.triggerNewBlock()
 	}
 }
 
@@ -147,6 +146,22 @@ func (tw *testWAL) Append(b []byte) error {
 	return err
 }
 
+func (tw *testWAL) assertWALSize(n int) {
+	tw.lock.Lock()
+	defer tw.lock.Unlock()
+
+	for {
+		rawRecords, err := tw.WriteAheadLog.ReadAll()
+		require.NoError(tw.t, err)
+
+		if len(rawRecords) == n {
+			return
+		}
+
+		tw.signal.Wait()
+	}
+}
+
 func (tw *testWAL) assertNotarization(round uint64) {
 	tw.lock.Lock()
 	defer tw.lock.Unlock()
diff --git a/epoch_test.go b/epoch_test.go
index 538a8d5..232a4dd 100644
--- a/epoch_test.go
+++ b/epoch_test.go
@@ -81,7 +81,9 @@ func TestEpochSimpleFlow(t *testing.T) {
 			injectTestFinalization(t, e, block, nodes[i], conf.Signer)
 		}
 
-		committedData := storage.data[i].Block.Bytes()
+		storage.waitForBlockCommit(uint64(i))
+
+		committedData := storage.data[uint64(i)].Block.Bytes()
 		require.Equal(t, block.Bytes(), committedData)
 	}
 }
diff --git a/recovery_test.go b/recovery_test.go
index 786f98e..644edfb 100644
--- a/recovery_test.go
+++ b/recovery_test.go
@@ -19,7 +19,7 @@ import (
 func TestRecoverFromWALProposed(t *testing.T) {
 	l := makeLogger(t, 1)
 	bb := make(testBlockBuilder, 1)
-	wal := wal.NewMemWAL(t)
+	wal := newTestWAL(t)
 	storage := newInMemStorage()
 	ctx := context.Background()
 	nodes := []NodeID{{1}, {2}, {3}, {4}}
@@ -96,6 +96,8 @@ func TestRecoverFromWALProposed(t *testing.T) {
 			injectTestFinalization(t, e, block, nodes[i], conf.Signer)
 		}
 
+		storage.waitForBlockCommit(i)
+
 		committedData := storage.data[i].Block.Bytes()
 		require.Equal(t, block.Bytes(), committedData)
 	}
@@ -246,11 +248,12 @@ func TestWalCreatedProperly(t *testing.T) {
 	quorum := Quorum(len(nodes))
 	signatureAggregator := &testSignatureAggregator{}
 	qd := &testQCDeserializer{t: t}
+	wal := newTestWAL(t)
 	conf := EpochConfig{
 		Logger:   l,
 		ID:       nodes[0],
 		Signer:   &testSigner{},
-		WAL:      wal.NewMemWAL(t),
+		WAL:      wal,
 		Verifier: &testVerifier{},
 		Storage:  storage,
 		Comm:     noopComm(nodes),
@@ -271,6 +274,7 @@ func TestWalCreatedProperly(t *testing.T) {
 	require.NoError(t, e.Start())
 
 	// ensure a block record is written to the WAL
+	wal.assertWALSize(1)
 	records, err = e.WAL.ReadAll()
 	require.NoError(t, err)
 	require.Len(t, records, 1)
@@ -312,11 +316,12 @@ func TestWalWritesBlockRecord(t *testing.T) {
 	storage := newInMemStorage()
 	blockDeserializer := &blockDeserializer{}
 	nodes := []NodeID{{1}, {2}, {3}, {4}}
+	wal := newTestWAL(t)
 	conf := EpochConfig{
 		Logger:   l,
 		ID:       nodes[1], // nodes[1] is not the leader for the first round
 		Signer:   &testSigner{},
-		WAL:      wal.NewMemWAL(t),
+		WAL:      wal,
 		Verifier: &testVerifier{},
 		Storage:  storage,
 		Comm:     noopComm(nodes),
@@ -356,6 +361,7 @@ func TestWalWritesBlockRecord(t *testing.T) {
 	require.NoError(t, err)
 
 	// ensure a block record is written to the WAL
+	wal.assertWALSize(1)
 	records, err = e.WAL.ReadAll()
 	require.NoError(t, err)
 	require.Len(t, records, 1)
@@ -371,11 +377,12 @@ func TestWalWritesFinalizationCert(t *testing.T) {
 	sigAggregrator := &testSignatureAggregator{}
 	nodes := []NodeID{{1}, {2}, {3}, {4}}
 	quorum := Quorum(len(nodes))
+	wal := newTestWAL(t)
 	conf := EpochConfig{
 		Logger:   l,
 		ID:       nodes[0],
 		Signer:   &testSigner{},
-		WAL:      wal.NewMemWAL(t),
+		WAL:      wal,
 		Verifier: &testVerifier{},
 		Storage:  storage,
 		Comm:     noopComm(nodes),
@@ -431,6 +438,8 @@ func TestWalWritesFinalizationCert(t *testing.T) {
 		injectTestVote(t, e, secondBlock, nodes[i], conf.Signer)
 	}
 
+	wal.assertWALSize(4)
+
 	records, err = e.WAL.ReadAll()
 	require.NoError(t, err)
 	require.Len(t, records, 4)
diff --git a/sched.go b/sched.go
new file mode 100644
index 0000000..153812c
--- /dev/null
+++ b/sched.go
@@ -0,0 +1,143 @@
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package simplex
+
+import "sync"
+
+type scheduler struct {
+	lock    sync.Mutex
+	signal  sync.Cond
+	pending dependencies[Digest, task]
+	ready   []task
+	close   bool
+}
+
+func NewScheduler() *scheduler {
+	var as scheduler
+	as.pending = NewDependencies[Digest, task]()
+	as.signal = sync.Cond{L: &as.lock}
+
+	go as.run()
+
+	return &as
+}
+
+func (as *scheduler) Size() int {
+	as.lock.Lock()
+	defer as.lock.Unlock()
+	return as.pending.Size()
+}
+
+func (as *scheduler) Close() {
+	as.lock.Lock()
+	defer as.lock.Unlock()
+
+	as.close = true
+
+	as.signal.Signal()
+}
+
+func (as *scheduler) run() {
+	as.lock.Lock()
+	defer as.lock.Unlock()
+
+	for !as.close {
+		as.maybeExecuteTask()
+		if as.close {
+			return
+		}
+		as.signal.Wait()
+	}
+}
+
+func (as *scheduler) maybeExecuteTask() {
+	for len(as.ready) > 0 {
+		if as.close {
+			return
+		}
+		var task task
+		task, as.ready = as.ready[0], as.ready[1:]
+		as.lock.Unlock()
+		task.f()
+		as.lock.Lock()
+	}
+}
+
+func (as *scheduler) Schedule(id Digest, f func(), prev Digest, ready bool) {
+	as.lock.Lock()
+	defer as.lock.Unlock()
+
+	if as.close {
+		return
+	}
+
+	task := task{
+		f:      as.dispatchTaskAndScheduleDependingTasks(id, f),
+		parent: prev,
+		digest: id,
+	}
+
+	if ready {
+		as.ready = append(as.ready, task)
+	} else {
+		as.pending.Insert(task)
+	}
+
+	as.signal.Signal()
+}
+
+func (as *scheduler) dispatchTaskAndScheduleDependingTasks(id Digest, task func()) func() {
+	return func() {
+		task()
+		as.lock.Lock()
+		defer as.lock.Unlock()
+		newlyReadyTasks := as.pending.Remove(id)
+		as.ready = append(as.ready, newlyReadyTasks...)
+		as.signal.Signal()
+	}
+}
+
+type task struct {
+	f      func()
+	digest Digest
+	parent Digest
+}
+
+func (t task) dependsOn() Digest {
+	return t.parent
+}
+
+func (t task) id() Digest {
+	return t.digest
+}
+
+type dependent[C comparable] interface {
+	dependsOn() C
+	id() C
+}
+
+type dependencies[C comparable, D dependent[C]] struct {
+	dependsOn map[C][]D // values depend on key.
+}
+
+func NewDependencies[C comparable, D dependent[C]]() dependencies[C, D] {
+	return dependencies[C, D]{
+		dependsOn: make(map[C][]D),
+	}
+}
+
+func (t *dependencies[C, D]) Size() int {
+	return len(t.dependsOn)
+}
+
+func (t *dependencies[C, D]) Insert(v D) {
+	dependency := v.dependsOn()
+	t.dependsOn[dependency] = append(t.dependsOn[dependency], v)
+}
+
+func (t *dependencies[C, D]) Remove(id C) []D {
+	dependents := t.dependsOn[id]
+	delete(t.dependsOn, id)
+	return dependents
+}
diff --git a/sched_test.go b/sched_test.go
new file mode 100644
index 0000000..c9c6bf3
--- /dev/null
+++ b/sched_test.go
@@ -0,0 +1,126 @@
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package simplex
+
+import (
+	"crypto/rand"
+	"sync"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+type testDependsOn []int
+
+func (m testDependsOn) dependsOn() int {
+	return m[1]
+}
+
+func (m testDependsOn) id() int {
+	return m[0]
+}
+
+func TestDependencyTree(t *testing.T) {
+	dt := NewDependencies[int, testDependsOn]()
+
+	for i := 0; i < 5; i++ {
+		// [0] (i+1) depends on [1] (i)
+		dt.Insert([]int{i + 1, i})
+	}
+
+	require.Equal(t, 5, dt.Size())
+
+	for i := 0; i < 5; i++ {
+		j := dt.Remove(i)
+		require.Len(t, j, 1)
+		require.Equal(t, i+1, j[0].id())
+	}
+
+}
+
+func TestAsyncScheduler(t *testing.T) {
+	t.Run("Executes asynchronously", func(t *testing.T) {
+		as := NewScheduler()
+		defer as.Close()
+
+		ticks := make(chan struct{})
+
+		var wg sync.WaitGroup
+		wg.Add(1)
+
+		dig1 := makeDigest(t)
+		dig2 := makeDigest(t)
+
+		as.Schedule(dig2, func() {
+			defer wg.Done()
+			<-ticks
+		}, dig1, true)
+
+		ticks <- struct{}{}
+		wg.Wait()
+	})
+
+	t.Run("Does not execute when closed", func(t *testing.T) {
+		as := NewScheduler()
+		ticks := make(chan struct{}, 1)
+
+		as.Close()
+
+		dig1 := makeDigest(t)
+		dig2 := makeDigest(t)
+
+		as.Schedule(dig2, func() {
+			close(ticks)
+		}, dig1, true)
+
+		ticks <- struct{}{}
+	})
+
+	t.Run("Executes several pending tasks concurrently", func(t *testing.T) {
+		as := NewScheduler()
+		defer as.Close()
+
+		n := 9000
+
+		var lock sync.Mutex
+		finished := make(map[Digest]struct{})
+
+		var wg sync.WaitGroup
+		wg.Add(n)
+
+		var prevTask Digest
+
+		for i := 0; i < n; i++ {
+			taskID := makeDigest(t)
+			scheduleTask(&lock, finished, prevTask, taskID, &wg, as, i)
+			// Next iteration's previous task ID is current task ID
+			prevTask = taskID
+		}
+
+		wg.Wait()
+	})
+}
+
+func scheduleTask(lock *sync.Mutex, finished map[Digest]struct{}, dependency Digest, id Digest, wg *sync.WaitGroup, as *scheduler, i int) {
+	lock.Lock()
+	defer lock.Unlock()
+
+	_, hasFinished := finished[dependency]
+
+	task := func() {
+		lock.Lock()
+		defer lock.Unlock()
+		finished[id] = struct{}{}
+		wg.Done()
+	}
+
+	as.Schedule(id, task, dependency, i == 0 || hasFinished)
+}
+
+func makeDigest(t *testing.T) Digest {
+	var dig Digest
+	_, err := rand.Read(dig[:])
+	require.NoError(t, err)
+	return dig
+}
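
The scheduler introduced in sched.go executes tasks on a single goroutine and releases a pending task only after the task it depends on has run. The following test-style sketch is not part of the patch; it is illustrative only and assumes it lives in the simplex package next to sched_test.go, reusing its makeDigest helper and the package's Digest type, with a hypothetical test name.

package simplex

import (
	"sync"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestSchedulerRunsDependentAfterParent is an illustrative sketch: the child task is
// scheduled first with ready=false, so it stays pending until the parent task (whose
// ID the child depends on) has executed and released it.
func TestSchedulerRunsDependentAfterParent(t *testing.T) {
	as := NewScheduler()
	defer as.Close()

	parentID := makeDigest(t)
	childID := makeDigest(t)

	var (
		mu    sync.Mutex
		order []string
	)
	done := make(chan struct{})

	// The child depends on parentID and is not ready: it goes into the pending set.
	as.Schedule(childID, func() {
		mu.Lock()
		order = append(order, "child")
		mu.Unlock()
		close(done)
	}, parentID, false)

	// The parent has no unmet dependency (ready=true), so the scheduler goroutine runs it;
	// completing it moves the child from pending to ready.
	as.Schedule(parentID, func() {
		mu.Lock()
		order = append(order, "parent")
		mu.Unlock()
	}, Digest{}, true)

	<-done
	mu.Lock()
	defer mu.Unlock()
	require.Equal(t, []string{"parent", "child"}, order)
}

This is the ordering the Epoch relies on when it schedules a block's verification under the block's own digest with its predecessor's digest as the dependency.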
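
The dependencies container at the bottom of sched.go keys every pending item by the ID it waits on, so completing one item hands back all of its waiters in a single Remove call, and Size reports the number of distinct unmet dependencies rather than the number of waiters. A small sketch of that behaviour, again hypothetical and not part of the patch, reusing the testDependsOn helper and testify's require from sched_test.go:

// TestDependenciesReleaseAllWaiters is an illustrative sketch: two entries wait on the
// same dependency, and removing that dependency releases both of them at once.
func TestDependenciesReleaseAllWaiters(t *testing.T) {
	deps := NewDependencies[int, testDependsOn]()

	// Entry format is []int{id, dependsOn}: items 2 and 3 both depend on item 1.
	deps.Insert(testDependsOn{2, 1})
	deps.Insert(testDependsOn{3, 1})
	require.Equal(t, 1, deps.Size()) // Size counts distinct dependencies, not waiters.

	// Completing item 1 releases both waiters and forgets the key.
	released := deps.Remove(1)
	require.Len(t, released, 2)
	require.Equal(t, 2, released[0].id())
	require.Equal(t, 3, released[1].id())
	require.Zero(t, deps.Size())
}

That Size semantics is worth keeping in mind when reading the maxPendingBlocks check in handleBlockMessage, which uses e.sched.Size() as an approximation of the number of blocks still awaiting verification.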