From 64e52cf9388e179fe12a2892b8284be3e9ce7647 Mon Sep 17 00:00:00 2001
From: Jerry
Date: Thu, 29 Aug 2024 08:56:35 -0700
Subject: [PATCH 1/7] zkevm_getProof (#1014)

* feat: adding an implementation for eth_getProof

This is just a proof of concept. It might not even make sense and probably
doesn't belong in this particular file, but I wanted to see if we could
expose an implementation of
[eip-1186](https://eips.ethereum.org/EIPS/eip-1186) for the SMT + poseidon.

* SMT Proof + verification

* Refactor and tests

* Use finish stage to get latest block

The latest block should be validated with a state root hash check before
becoming the "latest" block.

---------

Co-authored-by: John Hilliard
Co-authored-by: Valentin Staykov <79150443+V-Staykov@users.noreply.github.com>
---
 cmd/rpcdaemon/commands/zkevm_api.go  | 191 ++++++++++++++++++++++
 core/types/accounts/account_proof.go |  19 +++
 smt/pkg/smt/proof.go                 | 207 +++++++++++++++++++++++
 smt/pkg/smt/proof_test.go            | 236 +++++++++++++++++++++++++++
 turbo/rpchelper/rpc_block.go         |   2 +-
 5 files changed, 654 insertions(+), 1 deletion(-)
 create mode 100644 smt/pkg/smt/proof.go
 create mode 100644 smt/pkg/smt/proof_test.go

diff --git a/cmd/rpcdaemon/commands/zkevm_api.go b/cmd/rpcdaemon/commands/zkevm_api.go
index e38d2fe63f7..c1a38ded839 100644
--- a/cmd/rpcdaemon/commands/zkevm_api.go
+++ b/cmd/rpcdaemon/commands/zkevm_api.go
@@ -11,25 +11,34 @@ import (
 	libcommon "github.com/gateway-fm/cdk-erigon-lib/common"
 	"github.com/gateway-fm/cdk-erigon-lib/common/hexutility"
 	"github.com/gateway-fm/cdk-erigon-lib/kv"
+	"github.com/gateway-fm/cdk-erigon-lib/kv/memdb"
 	jsoniter "github.com/json-iterator/go"

 	"github.com/holiman/uint256"
 	"github.com/ledgerwatch/erigon/common/hexutil"
 	"github.com/ledgerwatch/erigon/core"
 	"github.com/ledgerwatch/erigon/core/rawdb"
+	"github.com/ledgerwatch/erigon/core/state"
 	eritypes "github.com/ledgerwatch/erigon/core/types"
+	"github.com/ledgerwatch/erigon/core/types/accounts"
 	"github.com/ledgerwatch/erigon/eth/ethconfig"
+	"github.com/ledgerwatch/erigon/eth/stagedsync"
 	"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
 	"github.com/ledgerwatch/erigon/eth/tracers"
 	"github.com/ledgerwatch/erigon/rpc"
+	smtDb "github.com/ledgerwatch/erigon/smt/pkg/db"
+	smt "github.com/ledgerwatch/erigon/smt/pkg/smt"
+	smtUtils "github.com/ledgerwatch/erigon/smt/pkg/utils"
 	"github.com/ledgerwatch/erigon/turbo/rpchelper"
 	"github.com/ledgerwatch/erigon/zk/hermez_db"
 	"github.com/ledgerwatch/erigon/zk/legacy_executor_verifier"
 	types "github.com/ledgerwatch/erigon/zk/rpcdaemon"
 	"github.com/ledgerwatch/erigon/zk/sequencer"
+	zkStages "github.com/ledgerwatch/erigon/zk/stages"
 	"github.com/ledgerwatch/erigon/zk/syncer"
 	zktx "github.com/ledgerwatch/erigon/zk/tx"
 	"github.com/ledgerwatch/erigon/zk/utils"
+	zkUtils "github.com/ledgerwatch/erigon/zk/utils"
 	"github.com/ledgerwatch/erigon/zk/witness"
 	"github.com/ledgerwatch/erigon/zkevm/hex"
 	"github.com/ledgerwatch/erigon/zkevm/jsonrpc/client"
@@ -1444,3 +1453,185 @@ func populateBatchDataSlimDetails(batches []*types.BatchDataSlim) (json.RawMessage, error) {

 	return json.Marshal(jBatches)
 }
+
+// GetProof returns an EIP-1186-style proof, adapted to the SMT, for the given
+// account and storage keys at the requested block.
+func (zkapi *ZkEvmAPIImpl) GetProof(ctx context.Context, address common.Address, storageKeys []common.Hash, blockNrOrHash rpc.BlockNumberOrHash) (*accounts.SMTAccProofResult, error) {
+    api := zkapi.ethApi
+
+    tx, err := api.db.BeginRo(ctx)
+    if err != nil {
+        return nil, err
+    }
+    defer tx.Rollback()
+    if api.historyV3(tx) {
+        return nil, fmt.Errorf("not supported by Erigon3")
+    }
+
+    blockNr, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters)
+    if err != nil {
+        return nil, err
+    }
+
+    latestBlock, err := rpchelper.GetLatestBlockNumber(tx)
+    if err != nil {
+        return nil, err
+    }
+
+    if latestBlock < blockNr {
+        // shouldn't happen, but check anyway
+        return nil, fmt.Errorf("block number is in the future latest=%d requested=%d", latestBlock, blockNr)
+    }
+
+    batch := memdb.NewMemoryBatch(tx, api.dirs.Tmp)
+    defer batch.Rollback()
+    if err = zkUtils.PopulateMemoryMutationTables(batch); err != nil {
+        return nil, err
+    }
+
+    if blockNr < latestBlock {
+        if latestBlock-blockNr > maxGetProofRewindBlockCount {
+            return nil, fmt.Errorf("requested block is too old, block must be within %d blocks of the head block number (currently %d)", maxGetProofRewindBlockCount, latestBlock)
+        }
+        unwindState := &stagedsync.UnwindState{UnwindPoint: blockNr}
+        stageState := &stagedsync.StageState{BlockNumber: latestBlock}
+
+        interHashStageCfg := zkStages.StageZkInterHashesCfg(nil, true, true, false, api.dirs.Tmp, api._blockReader, nil, api.historyV3(tx), api._agg, nil)
+
+        if err = zkStages.UnwindZkIntermediateHashesStage(unwindState, stageState, batch, interHashStageCfg, ctx, true); err != nil {
+            return nil, fmt.Errorf("unwind intermediate hashes: %w", err)
+        }
+
+        tx = batch
+    }
+
+    reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, api.historyV3(tx), "")
+    if err != nil {
+        return nil, err
+    }
+
+    header, err := api._blockReader.HeaderByNumber(ctx, tx, blockNr)
+    if err != nil {
+        return nil, err
+    }
+
+    tds := state.NewTrieDbState(header.Root, tx, blockNr, nil)
+    tds.SetResolveReads(true)
+    tds.StartNewBuffer()
+    tds.SetStateReader(reader)
+
+    ibs := state.New(tds)
+
+    ibs.GetBalance(address)
+
+    for _, key := range storageKeys {
+        value := new(uint256.Int)
+        ibs.GetState(address, &key, value)
+    }
+
+    rl, err := tds.ResolveSMTRetainList()
+    if err != nil {
+        return nil, err
+    }
+
+    smtTrie := smt.NewRoSMT(smtDb.NewRoEriDb(tx))
+
+    proofs, err := smt.BuildProofs(smtTrie, rl, ctx)
+    if err != nil {
+        return nil, err
+    }
+
+    stateRootNode := smtUtils.ScalarToRoot(new(big.Int).SetBytes(header.Root.Bytes()))
+
+    balanceKey, err := smtUtils.KeyEthAddrBalance(address.String())
+    if err != nil {
+        return nil, err
+    }
+
+    nonceKey, err := smtUtils.KeyEthAddrNonce(address.String())
+    if err != nil {
+        return nil, err
+    }
+
+    codeHashKey, err := smtUtils.KeyContractCode(address.String())
+    if err != nil {
+        return nil, err
+    }
+
+    codeLengthKey, err := smtUtils.KeyContractLength(address.String())
+    if err != nil {
+        return nil, err
+    }
+
+    balanceProofs := smt.FilterProofs(proofs, balanceKey)
+    balanceBytes, err := smt.VerifyAndGetVal(stateRootNode, balanceProofs, balanceKey)
+    if err != nil {
+        return nil, fmt.Errorf("balance proof verification failed: %w", err)
+    }
+
+    balance := new(big.Int).SetBytes(balanceBytes)
+
+    nonceProofs := smt.FilterProofs(proofs, nonceKey)
+    nonceBytes, err := smt.VerifyAndGetVal(stateRootNode, nonceProofs, nonceKey)
+    if err != nil {
+        return nil, fmt.Errorf("nonce proof verification failed: %w", err)
+    }
+    nonce := new(big.Int).SetBytes(nonceBytes).Uint64()
+
+    codeHashProofs := smt.FilterProofs(proofs, codeHashKey)
+    codeHashBytes, err := smt.VerifyAndGetVal(stateRootNode, codeHashProofs, codeHashKey)
+    if err != nil {
+        return nil, fmt.Errorf("code hash proof verification failed: %w", err)
+    }
+    codeHash := codeHashBytes
+
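+    // The code length below, and each requested storage slot afterwards, follows
+    // the same filter-then-verify pattern used for balance, nonce and code hash:
+    // select the proof elements whose path prefixes the key, then re-hash them
+    // against the state root to recover the value.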
codeLengthProofs := smt.FilterProofs(proofs, codeLengthKey) + codeLengthBytes, err := smt.VerifyAndGetVal(stateRootNode, codeLengthProofs, codeLengthKey) + if err != nil { + return nil, fmt.Errorf("code length proof verification failed: %w", err) + } + codeLength := new(big.Int).SetBytes(codeLengthBytes).Uint64() + + accProof := &accounts.SMTAccProofResult{ + Address: address, + Balance: (*hexutil.Big)(balance), + CodeHash: libcommon.BytesToHash(codeHash), + CodeLength: hexutil.Uint64(codeLength), + Nonce: hexutil.Uint64(nonce), + BalanceProof: balanceProofs, + NonceProof: nonceProofs, + CodeHashProof: codeHashProofs, + CodeLengthProof: codeLengthProofs, + StorageProof: make([]accounts.SMTStorageProofResult, 0), + } + + addressArrayBig := smtUtils.ScalarToArrayBig(smtUtils.ConvertHexToBigInt(address.String())) + for _, k := range storageKeys { + storageKey, err := smtUtils.KeyContractStorage(addressArrayBig, k.String()) + if err != nil { + return nil, err + } + storageProofs := smt.FilterProofs(proofs, storageKey) + + valueBytes, err := smt.VerifyAndGetVal(stateRootNode, storageProofs, storageKey) + if err != nil { + return nil, fmt.Errorf("storage proof verification failed: %w", err) + } + + value := new(big.Int).SetBytes(valueBytes) + + accProof.StorageProof = append(accProof.StorageProof, accounts.SMTStorageProofResult{ + Key: k, + Value: (*hexutil.Big)(value), + Proof: storageProofs, + }) + } + + return accProof, nil +} diff --git a/core/types/accounts/account_proof.go b/core/types/accounts/account_proof.go index 4041353ae46..5061f4be438 100644 --- a/core/types/accounts/account_proof.go +++ b/core/types/accounts/account_proof.go @@ -22,3 +22,22 @@ type StorProofResult struct { Value *hexutil.Big `json:"value"` Proof []hexutility.Bytes `json:"proof"` } + +type SMTAccProofResult struct { + Address libcommon.Address `json:"address"` + Balance *hexutil.Big `json:"balance"` + CodeHash libcommon.Hash `json:"codeHash"` + CodeLength hexutil.Uint64 `json:"codeLength"` + Nonce hexutil.Uint64 `json:"nonce"` + BalanceProof []hexutility.Bytes `json:"balanceProof"` + NonceProof []hexutility.Bytes `json:"nonceProof"` + CodeHashProof []hexutility.Bytes `json:"codeHashProof"` + CodeLengthProof []hexutility.Bytes `json:"codeLengthProof"` + StorageProof []SMTStorageProofResult `json:"storageProof"` +} + +type SMTStorageProofResult struct { + Key libcommon.Hash `json:"key"` + Value *hexutil.Big `json:"value"` + Proof []hexutility.Bytes `json:"proof"` +} diff --git a/smt/pkg/smt/proof.go b/smt/pkg/smt/proof.go new file mode 100644 index 00000000000..2e225be666b --- /dev/null +++ b/smt/pkg/smt/proof.go @@ -0,0 +1,207 @@ +package smt + +import ( + "bytes" + "context" + "fmt" + "math/big" + + "github.com/gateway-fm/cdk-erigon-lib/common/hexutility" + "github.com/ledgerwatch/erigon/smt/pkg/utils" + "github.com/ledgerwatch/erigon/turbo/trie" +) + +type SMTProofElement struct { + Path []byte + Proof []byte +} + +// FilterProofs filters the proofs to only include the ones that match the given key +func FilterProofs(proofs []*SMTProofElement, key utils.NodeKey) []hexutility.Bytes { + filteredProofs := make([]hexutility.Bytes, 0) + keyPath := key.GetPath() + + keyPathInBytes := make([]byte, len(keyPath)) + for i, v := range keyPath { + keyPathInBytes[i] = byte(v) + } + + for _, proof := range proofs { + if bytes.HasPrefix(keyPathInBytes, proof.Path) { + proofClone := make([]byte, len(proof.Proof)) + copy(proofClone, proof.Proof) + filteredProofs = append(filteredProofs, proofClone) + } + } + + return filteredProofs 
+}
+
+// BuildProofs builds proofs for multiple accounts and storage slots by traversing the SMT once.
+// It efficiently generates proofs for all requested keys in a single pass.
+//
+// s: The read-only SMT to traverse
+// rd: The retain decider that determines which nodes to include in the proof
+// ctx: Context for cancellation
+//
+// Returns a slice of SMTProofElement containing the proof for each retained node,
+// or an error if the traversal fails.
+func BuildProofs(s *RoSMT, rd trie.RetainDecider, ctx context.Context) ([]*SMTProofElement, error) {
+    proofs := make([]*SMTProofElement, 0)
+
+    root, err := s.DbRo.GetLastRoot()
+    if err != nil {
+        return nil, err
+    }
+
+    action := func(prefix []byte, k utils.NodeKey, v utils.NodeValue12) (bool, error) {
+        retain := rd.Retain(prefix)
+
+        if !retain {
+            return false, nil
+        }
+
+        nodeBytes := make([]byte, 64)
+        utils.ArrayToScalar(v.Get0to4()[:]).FillBytes(nodeBytes[:32])
+        utils.ArrayToScalar(v.Get4to8()[:]).FillBytes(nodeBytes[32:])
+
+        if v.IsFinalNode() {
+            nodeBytes = append(nodeBytes, 1)
+        }
+
+        proofs = append(proofs, &SMTProofElement{
+            Path:  prefix,
+            Proof: nodeBytes,
+        })
+
+        if v.IsFinalNode() {
+            valHash := v.Get4to8()
+            v, err := s.DbRo.Get(*valHash)
+            if err != nil {
+                return false, err
+            }
+
+            vInBytes := utils.ArrayBigToScalar(utils.BigIntArrayFromNodeValue8(v.GetNodeValue8())).Bytes()
+
+            proofs = append(proofs, &SMTProofElement{
+                Path:  prefix,
+                Proof: vInBytes,
+            })
+
+            return false, nil
+        }
+
+        return true, nil
+    }
+
+    err = s.Traverse(ctx, root, action)
+    if err != nil {
+        return nil, err
+    }
+
+    return proofs, nil
+}
+
+// VerifyAndGetVal verifies a proof against a given state root and key, and returns the associated value if valid.
+//
+// Parameters:
+// - stateRoot: The root node key to verify the proof against.
+// - proof: A slice of byte slices representing the proof elements.
+// - key: The node key for which the proof is being verified.
+//
+// Returns:
+// - []byte: The value associated with the key. If the key does not exist in the proof, the value returned will be nil.
+// - error: An error if the proof is invalid or verification fails.
+//
+// This function walks through the provided proof, verifying each step against the expected
+// state root. It handles both branch and leaf nodes in the Sparse Merkle Tree. If the proof
+// is valid and the value exists, it returns the value associated with the given key. If the proof
+// is valid and the value does not exist, the value returned will be nil. If the proof is invalid
+// at any point, an error is returned explaining where the verification failed.
+//
+// The function expects the proof to be in a specific format, with each element being either
+// 64 bytes (for branch nodes) or 65 bytes (for leaf nodes, with the last byte indicating finality).
+// It uses the utils package for various operations like hashing and key manipulation.
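+//
+// As an illustration (with hypothetical contents), a proof whose leaf sits at
+// depth two would be laid out as:
+//
+//	proof[0] = leftHash || rightHash              (64 bytes, branch node)
+//	proof[1] = leftHash || rightHash              (64 bytes, branch node)
+//	proof[2] = remainingKey || valueHash || 0x01  (65 bytes, leaf node)
+//	proof[3] = raw value preimage                 (hashed and compared last)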
+func VerifyAndGetVal(stateRoot utils.NodeKey, proof []hexutility.Bytes, key utils.NodeKey) ([]byte, error) { + if len(proof) == 0 { + return nil, fmt.Errorf("proof is empty") + } + + path := key.GetPath() + + curRoot := stateRoot + + foundValue := false + + for i := 0; i < len(proof); i++ { + isFinalNode := len(proof[i]) == 65 + + capacity := utils.BranchCapacity + + if isFinalNode { + capacity = utils.LeafCapacity + } + + leftChild := utils.ScalarToArray(big.NewInt(0).SetBytes(proof[i][:32])) + rightChild := utils.ScalarToArray(big.NewInt(0).SetBytes(proof[i][32:64])) + + leftChildNode := [4]uint64{leftChild[0], leftChild[1], leftChild[2], leftChild[3]} + rightChildNode := [4]uint64{rightChild[0], rightChild[1], rightChild[2], rightChild[3]} + + h, err := utils.Hash(utils.ConcatArrays4(leftChildNode, rightChildNode), capacity) + + if err != nil { + return nil, err + } + + if curRoot != h { + return nil, fmt.Errorf("root mismatch at level %d, expected %d, got %d", i, curRoot, h) + } + + if !isFinalNode { + if path[i] == 0 { + curRoot = leftChildNode + } else { + curRoot = rightChildNode + } + + // If the current root is zero, non-existence has been proven and we can return nil from here + if curRoot.IsZero() { + return nil, nil + } + } else { + joinedKey := utils.JoinKey(path[:i], leftChildNode) + if joinedKey.IsEqualTo(key) { + foundValue = true + curRoot = rightChildNode + break + } else { + // If the joined key is not equal to the input key, the proof is sufficient to verify the non-existence of the value, so we return nil from here + return nil, nil + } + } + } + + // If we've made it through the loop without finding the value, the proof is insufficient to verify the non-existence of the value + if !foundValue { + return nil, fmt.Errorf("proof is insufficient to verify the non-existence of the value") + } + + v := new(big.Int).SetBytes(proof[len(proof)-1]) + x := utils.ScalarToArrayBig(v) + nodeValue, err := utils.NodeValue8FromBigIntArray(x) + if err != nil { + return nil, err + } + + h, err := utils.Hash(nodeValue.ToUintArray(), utils.BranchCapacity) + + if err != nil { + return nil, err + } + + if h != curRoot { + return nil, fmt.Errorf("root mismatch at level %d, expected %d, got %d", len(proof)-1, curRoot, h) + } + + return proof[len(proof)-1], nil +} diff --git a/smt/pkg/smt/proof_test.go b/smt/pkg/smt/proof_test.go new file mode 100644 index 00000000000..f6d92be48a1 --- /dev/null +++ b/smt/pkg/smt/proof_test.go @@ -0,0 +1,236 @@ +package smt_test + +import ( + "bytes" + "context" + "fmt" + "reflect" + "strings" + "testing" + + libcommon "github.com/gateway-fm/cdk-erigon-lib/common" + "github.com/gateway-fm/cdk-erigon-lib/common/hexutility" + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon/smt/pkg/smt" + "github.com/ledgerwatch/erigon/smt/pkg/utils" + "github.com/ledgerwatch/erigon/turbo/trie" +) + +func TestFilterProofs(t *testing.T) { + tests := []struct { + name string + proofs []*smt.SMTProofElement + key utils.NodeKey + expected []hexutility.Bytes + }{ + { + name: "Matching proofs", + proofs: []*smt.SMTProofElement{ + {Path: []byte{0, 1}, Proof: []byte{1, 2, 3}}, + {Path: []byte{0, 1, 1}, Proof: []byte{4, 5, 6}}, + {Path: []byte{1, 1}, Proof: []byte{7, 8, 9}}, + }, + key: utils.NodeKey{0, 1, 1, 1}, + expected: []hexutility.Bytes{{1, 2, 3}, {4, 5, 6}}, + }, + { + name: "No matching proofs", + proofs: []*smt.SMTProofElement{ + {Path: []byte{1, 1}, Proof: []byte{1, 2, 3}}, + {Path: []byte{1, 0}, Proof: []byte{4, 5, 6}}, + }, + key: utils.NodeKey{0, 1, 1, 1}, + 
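+			// neither stored proof path is a prefix of this key's path,
+			// so an empty (non-nil) slice is expected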
+			expected: []hexutility.Bytes{},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := smt.FilterProofs(tt.proofs, tt.key)
+			if !reflect.DeepEqual(result, tt.expected) {
+				t.Errorf("FilterProofs() = %v, want %v", result, tt.expected)
+			}
+		})
+	}
+}
+
+func TestVerifyAndGetVal(t *testing.T) {
+	smtTrie, rl := prepareSMT(t)
+
+	proofs, err := smt.BuildProofs(smtTrie.RoSMT, rl, context.Background())
+	if err != nil {
+		t.Fatalf("BuildProofs() error = %v", err)
+	}
+
+	contractAddress := libcommon.HexToAddress("0x71dd1027069078091B3ca48093B00E4735B20624")
+	a := utils.ConvertHexToBigInt(contractAddress.String())
+	address := utils.ScalarToArrayBig(a)
+
+	smtRoot, err := smtTrie.RoSMT.DbRo.GetLastRoot()
+	if err != nil {
+		t.Fatalf("GetLastRoot() error = %v", err)
+	}
+	root := utils.ScalarToRoot(smtRoot)
+
+	t.Run("Value exists and proof is correct", func(t *testing.T) {
+		storageKey, err := utils.KeyContractStorage(address, libcommon.HexToHash("0x5").String())
+		if err != nil {
+			t.Fatalf("KeyContractStorage() error = %v", err)
+		}
+
+		storageProof := smt.FilterProofs(proofs, storageKey)
+
+		val, err := smt.VerifyAndGetVal(root, storageProof, storageKey)
+		if err != nil {
+			t.Fatalf("VerifyAndGetVal() error = %v", err)
+		}
+
+		expected := uint256.NewInt(0xdeadbeef).Bytes()
+
+		if !bytes.Equal(val, expected) {
+			t.Errorf("VerifyAndGetVal() = %v, want %v", val, expected)
+		}
+	})
+
+	t.Run("Value doesn't exist and non-existent proof is correct", func(t *testing.T) {
+		nonExistentRl := trie.NewRetainList(0)
+		nonExistentKeys := []utils.NodeKey{}
+
+		// Fuzz with 1000 non-existent keys
+		for i := 0; i < 1000; i++ {
+			nonExistentKey, err := utils.KeyContractStorage(
+				address,
+				libcommon.HexToHash(fmt.Sprintf("0xdeadbeefabcd1234%d", i)).String(),
+			)
+			if err != nil {
+				t.Fatalf("KeyContractStorage() error = %v", err)
+			}
+
+			nonExistentKeys = append(nonExistentKeys, nonExistentKey)
+
+			nonExistentKeyPath := nonExistentKey.GetPath()
+
+			keyBytes := make([]byte, 0, len(nonExistentKeyPath))
+
+			for _, v := range nonExistentKeyPath {
+				keyBytes = append(keyBytes, byte(v))
+			}
+
+			nonExistentRl.AddHex(keyBytes)
+		}
+
+		nonExistentProofs, err := smt.BuildProofs(smtTrie.RoSMT, nonExistentRl, context.Background())
+		if err != nil {
+			t.Fatalf("BuildProofs() error = %v", err)
+		}
+
+		for _, key := range nonExistentKeys {
+			nonExistentProof := smt.FilterProofs(nonExistentProofs, key)
+			val, err := smt.VerifyAndGetVal(root, nonExistentProof, key)
+			if err != nil {
+				t.Fatalf("VerifyAndGetVal() error = %v", err)
+			}
+
+			if len(val) != 0 {
+				t.Errorf("VerifyAndGetVal() = %v, want empty value", val)
+			}
+		}
+	})
+
+	t.Run("Value doesn't exist but non-existent proof is insufficient", func(t *testing.T) {
+		nonExistentRl := trie.NewRetainList(0)
+		nonExistentKey, _ := utils.KeyContractStorage(address, libcommon.HexToHash("0x999").String())
+		nonExistentKeyPath := nonExistentKey.GetPath()
+		keyBytes := make([]byte, 0, len(nonExistentKeyPath))
+
+		for _, v := range nonExistentKeyPath {
+			keyBytes = append(keyBytes, byte(v))
+		}
+
+		nonExistentRl.AddHex(keyBytes)
+
+		nonExistentProofs, err := smt.BuildProofs(smtTrie.RoSMT, nonExistentRl, context.Background())
+		if err != nil {
+			t.Fatalf("BuildProofs() error = %v", err)
+		}
+
+		nonExistentProof := smt.FilterProofs(nonExistentProofs, nonExistentKey)
+
+		// Verify the non-existent proof works
+		_, err = smt.VerifyAndGetVal(root, nonExistentProof, nonExistentKey)
+		if err != nil {
+			t.Fatalf("VerifyAndGetVal() error = %v", err)
+		}
+
+		// Only pass the first trie node in the proof
+		_, err = smt.VerifyAndGetVal(root, nonExistentProof[:1], nonExistentKey)
+		if err == nil {
+			t.Errorf("VerifyAndGetVal() expected error, got nil")
+		}
+	})
+
+	t.Run("Value exists but proof is incorrect (first value corrupted)", func(t *testing.T) {
+		storageKey, _ := utils.KeyContractStorage(address, libcommon.HexToHash("0x5").String())
+		storageProof := smt.FilterProofs(proofs, storageKey)
+
+		// Corrupt the proof by changing a byte
+		if len(storageProof) > 0 && len(storageProof[0]) > 0 {
+			storageProof[0][0] ^= 0xFF // Flip all bits in the first byte
+		}
+
+		_, err := smt.VerifyAndGetVal(root, storageProof, storageKey)
+		if err == nil || !strings.Contains(err.Error(), "root mismatch at level 0") {
+			t.Errorf("VerifyAndGetVal() expected error containing 'root mismatch at level 0', got %v", err)
+		}
+	})
+
+	t.Run("Value exists but proof is incorrect (last value corrupted)", func(t *testing.T) {
+		storageKey, _ := utils.KeyContractStorage(address, libcommon.HexToHash("0x5").String())
+		storageProof := smt.FilterProofs(proofs, storageKey)
+
+		// Corrupt the proof by changing the last byte of the last proof element
+		if len(storageProof) > 0 {
+			lastProof := storageProof[len(storageProof)-1]
+			if len(lastProof) > 0 {
+				lastProof[len(lastProof)-1] ^= 0xFF // Flip all bits in the last byte
+			}
+		}
+
+		_, err := smt.VerifyAndGetVal(root, storageProof, storageKey)
+		if err == nil || !strings.Contains(err.Error(), fmt.Sprintf("root mismatch at level %d", len(storageProof)-1)) {
+			t.Errorf("VerifyAndGetVal() expected error containing 'root mismatch at level %d', got %v", len(storageProof)-1, err)
+		}
+	})
+
+	t.Run("Value exists but proof is insufficient", func(t *testing.T) {
+		storageKey, _ := utils.KeyContractStorage(address, libcommon.HexToHash("0x5").String())
+		storageProof := smt.FilterProofs(proofs, storageKey)
+
+		// Modify the proof to claim the value doesn't exist
+		if len(storageProof) > 0 {
+			storageProof = storageProof[:len(storageProof)-2]
+		}
+
+		val, err := smt.VerifyAndGetVal(root, storageProof, storageKey)
+		if err == nil || !strings.Contains(err.Error(), "insufficient") {
+			t.Errorf("VerifyAndGetVal() expected error containing 'insufficient', got %v", err)
+		}
+
+		if len(val) != 0 {
+			t.Errorf("VerifyAndGetVal() = %v, want empty value", val)
+		}
+	})
+}
diff --git a/turbo/rpchelper/rpc_block.go b/turbo/rpchelper/rpc_block.go
index f0fa1b04340..90d7e0c40ba 100644
--- a/turbo/rpchelper/rpc_block.go
+++ b/turbo/rpchelper/rpc_block.go
@@ -26,7 +26,7 @@ func GetLatestBlockNumber(tx kv.Tx) (uint64, error) {
 		}
 	}

-	blockNum, err := stages.GetStageProgress(tx, stages.Execution)
+	blockNum, err := stages.GetStageProgress(tx, stages.Finish)
 	if err != nil {
 		return 0, fmt.Errorf("getting latest block number: %w", err)
 	}

From 31dc6f6992058d9e6833af978860bd838b235823 Mon Sep 17 00:00:00 2001
From: Jerry
Date: Thu, 29 Aug 2024 09:54:51 -0700
Subject: [PATCH 2/7] Delete rolled back L1 sequence on sequence rollback
 event (#1022)

* Delete rolled back L1 sequence info on sequence rollback event

* Address CR comments
---
 eth/backend.go               |  1 +
 zk/contracts/l1_contracts.go |  1 +
 zk/hermez_db/db.go           | 21 +++++++++++++++++++++
 zk/stages/stage_l1syncer.go  | 11 +++++++++++
 4 files changed, 34 insertions(+)

diff --git a/eth/backend.go b/eth/backend.go
index aad639d4c4c..6dbfd0c6506 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -799,6 +799,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 	seqAndVerifTopics := [][]libcommon.Hash{{
 		contracts.SequencedBatchTopicPreEtrog,
 		contracts.SequencedBatchTopicEtrog,
+		contracts.RollbackBatchesTopic,
 		contracts.VerificationTopicPreEtrog,
 		contracts.VerificationTopicEtrog,
 		contracts.VerificationValidiumTopicEtrog,
diff --git a/zk/contracts/l1_contracts.go b/zk/contracts/l1_contracts.go
index 7c4bb7f3d80..7ba052d7872 100644
--- a/zk/contracts/l1_contracts.go
+++ b/zk/contracts/l1_contracts.go
@@ -16,4 +16,5 @@ var (
 	AddNewRollupTypeTopic = common.HexToHash("0xa2970448b3bd66ba7e524e7b2a5b9cf94fa29e32488fb942afdfe70dd4b77b52")
 	CreateNewRollupTopic  = common.HexToHash("0x194c983456df6701c6a50830b90fe80e72b823411d0d524970c9590dc277a641")
 	UpdateRollupTopic     = common.HexToHash("0xf585e04c05d396901170247783d3e5f0ee9c1df23072985b50af089f5e48b19d")
+	RollbackBatchesTopic  = common.HexToHash("0x1125aaf62d132d8e2d02005114f8fc360ff204c3105e4f1a700a1340dc55d5b1")
 )
diff --git a/zk/hermez_db/db.go b/zk/hermez_db/db.go
index 24de3570578..b3e6319a9d4 100644
--- a/zk/hermez_db/db.go
+++ b/zk/hermez_db/db.go
@@ -528,6 +528,27 @@ func (db *HermezDb) WriteSequence(l1BlockNo, batchNo uint64, l1TxHash, stateRoot
 	return db.tx.Put(L1SEQUENCES, ConcatKey(l1BlockNo, batchNo), val)
 }

+// RollbackSequences deletes all sequences whose batch number is greater than the given batch number
+func (db *HermezDb) RollbackSequences(batchNo uint64) error {
+	for {
+		latestSequence, err := db.GetLatestSequence()
+		if err != nil {
+			return err
+		}
+
+		if latestSequence == nil || latestSequence.BatchNo <= batchNo {
+			break
+		}
+
+		err = db.tx.Delete(L1SEQUENCES, ConcatKey(latestSequence.L1BlockNo, latestSequence.BatchNo))
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
 func (db *HermezDb) TruncateSequences(l2BlockNo uint64) error {
 	batchNo, err := db.GetBatchNoByL2Block(l2BlockNo)
 	if err != nil {
diff --git a/zk/stages/stage_l1syncer.go b/zk/stages/stage_l1syncer.go
index 513351e997a..8362247b8a4 100644
--- a/zk/stages/stage_l1syncer.go
+++ b/zk/stages/stage_l1syncer.go
@@ -149,6 +149,13 @@ Loop:
 				highestWrittenL1BlockNo = info.L1BlockNo
 			}
 			newSequencesCount++
+		case logRollbackBatches:
+			if err := hermezDb.RollbackSequences(info.BatchNo); err != nil {
+				return fmt.Errorf("failed to write rollback sequence, %w", err)
+			}
+			if info.L1BlockNo > highestWrittenL1BlockNo {
+				highestWrittenL1BlockNo = info.L1BlockNo
+			}
 		case logVerify:
 			if info.BatchNo > highestVerification.BatchNo {
 				highestVerification = info
@@ -222,6 +229,7 @@ var (
 	logSequence         BatchLogType = 1
 	logVerify           BatchLogType = 2
 	logL1InfoTreeUpdate BatchLogType = 4
+	logRollbackBatches  BatchLogType = 5
 	logIncompatible     BatchLogType = 100
 )
@@ -265,6 +273,9 @@ func parseLogType(l1RollupId uint64, log *ethTypes.Log) (l1BatchInfo types.L1Bat
 		}
 	case contracts.UpdateL1InfoTreeTopic:
 		batchLogType = logL1InfoTreeUpdate
+	case contracts.RollbackBatchesTopic:
+		batchLogType = logRollbackBatches
+		batchNum = new(big.Int).SetBytes(log.Topics[1].Bytes()).Uint64()
 	default:
 		batchLogType = logUnknown
 		batchNum = 0

From df10cf880a94a7a0d7d936f46cd6c45878672291 Mon Sep 17 00:00:00 2001
From: hexoscott <70711990+hexoscott@users.noreply.github.com>
Date: Thu, 29 Aug 2024 19:40:42 +0100
Subject: [PATCH 3/7] use correct l1 info index for counters and send full
 batch to executor (#1069)

---
 core/vm/zk_batch_counters.go            |  2 +-
 zk/legacy_executor_verifier/executor.go |  3 ++-
 zk/stages/stage_sequence_execute.go     | 23 +++++++++++------------
 3 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/core/vm/zk_batch_counters.go
b/core/vm/zk_batch_counters.go index 3c868137208..16dc10bb74f 100644 --- a/core/vm/zk_batch_counters.go +++ b/core/vm/zk_batch_counters.go @@ -230,7 +230,7 @@ func (bcc *BatchCounterCollector) CombineCollectors(verifyMerkleProof bool) (Cou return combined, nil } -// CombineCollectors takes the batch level data from all transactions and combines these counters with each transactions' +// CombineCollectorsNoChanges takes the batch level data from all transactions and combines these counters with each transactions' // rlp level counters and execution level counters // this one returns the counters as they are so far, without adding processBatchLevelData, processChangeL2Block and decodeChangeL2BlockTx // used to save batch counter progress without adding the said counters twice diff --git a/zk/legacy_executor_verifier/executor.go b/zk/legacy_executor_verifier/executor.go index c86d8be8cc7..b4f2aa21f37 100644 --- a/zk/legacy_executor_verifier/executor.go +++ b/zk/legacy_executor_verifier/executor.go @@ -234,7 +234,8 @@ func (e *Executor) Verify(p *Payload, request *VerifierRequest, oldStateRoot com "grpcUrl", e.grpcUrl, "batch", request.BatchNumber, "blocks-count", len(resp.BlockResponses), - "counters", counters, + "our-counters", request.Counters, + "exec-counters", counters, "exec-root", common.BytesToHash(resp.NewStateRoot), "our-root", request.StateRoot, "exec-old-root", common.BytesToHash(resp.OldStateRoot), diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index 2f3ab896024..c4ee896bfbf 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -157,11 +157,6 @@ func SpawnSequencingStage( } } - l1InfoIndex, err := sdb.hermezDb.GetBlockL1InfoTreeIndex(blockNumber - 1) - if err != nil { - return err - } - header, parentBlock, err := prepareHeader(sdb.tx, blockNumber-1, batchState.blockState.getDeltaTimestamp(), batchState.getBlockHeaderForcedTimestamp(), batchState.forkId, batchState.getCoinbase(&cfg)) if err != nil { return err @@ -175,18 +170,18 @@ func SpawnSequencingStage( // timer: evm + smt t := utils.StartTimer("stage_sequence_execute", "evm", "smt") - overflowOnNewBlock, err := batchCounters.StartNewBlock(l1InfoIndex != 0) + infoTreeIndexProgress, l1TreeUpdate, l1TreeUpdateIndex, l1BlockHash, ger, shouldWriteGerToContract, err := prepareL1AndInfoTreeRelatedStuff(sdb, batchState, header.Time) if err != nil { return err } - if !batchState.isAnyRecovery() && overflowOnNewBlock { - break - } - infoTreeIndexProgress, l1TreeUpdate, l1TreeUpdateIndex, l1BlockHash, ger, shouldWriteGerToContract, err := prepareL1AndInfoTreeRelatedStuff(sdb, batchState, header.Time) + overflowOnNewBlock, err := batchCounters.StartNewBlock(l1TreeUpdateIndex != 0) if err != nil { return err } + if !batchState.isAnyRecovery() && overflowOnNewBlock { + break + } ibs := state.New(sdb.stateReader) getHashFn := core.GetHashFn(header, func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(sdb.tx, hash, number) }) @@ -250,7 +245,7 @@ func SpawnSequencingStage( // The copying of this structure is intentional backupDataSizeChecker := *blockDataSizeChecker - receipt, execResult, anyOverflow, err := attemptAddTransaction(cfg, sdb, ibs, batchCounters, &blockContext, header, transaction, effectiveGas, batchState.isL1Recovery(), batchState.forkId, l1InfoIndex, &backupDataSizeChecker) + receipt, execResult, anyOverflow, err := attemptAddTransaction(cfg, sdb, ibs, batchCounters, &blockContext, header, transaction, effectiveGas, 
batchState.isL1Recovery(), batchState.forkId, l1TreeUpdateIndex, &backupDataSizeChecker)
 		if err != nil {
 			if batchState.isLimboRecovery() {
 				panic("limbo transaction has already been executed once so they must not fail while re-executing")
 			}
@@ -359,7 +354,11 @@ func SpawnSequencingStage(
 	// do not use remote executor in l1recovery mode
 	// if we need remote executor in l1 recovery then we must allow commit/start DB transactions
 	useExecutorForVerification := !batchState.isL1Recovery() && batchState.hasExecutorForThisBatch
-	cfg.legacyVerifier.StartAsyncVerification(batchState.forkId, batchState.batchNumber, block.Root(), batchCounters.CombineCollectorsNoChanges().UsedAsMap(), batchState.builtBlocks, useExecutorForVerification, batchContext.cfg.zk.SequencerBatchVerificationTimeout)
+	counters, err := batchCounters.CombineCollectors(l1TreeUpdateIndex != 0)
+	if err != nil {
+		return err
+	}
+	cfg.legacyVerifier.StartAsyncVerification(batchState.forkId, batchState.batchNumber, block.Root(), counters.UsedAsMap(), batchState.builtBlocks, useExecutorForVerification, batchContext.cfg.zk.SequencerBatchVerificationTimeout)

 	// check for new responses from the verifier
 	needsUnwind, err := updateStreamAndCheckRollback(batchContext, batchState, streamWriter, u)

From 512790f194d65ec27dc1f67da09aec50689795af Mon Sep 17 00:00:00 2001
From: Jerry
Date: Thu, 29 Aug 2024 13:07:56 -0700
Subject: [PATCH 4/7] Update kurtosis version (#1074)

---
 .github/workflows/ci_zkevm.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/ci_zkevm.yml b/.github/workflows/ci_zkevm.yml
index 4d10aedee8a..2f4e5b11991 100644
--- a/.github/workflows/ci_zkevm.yml
+++ b/.github/workflows/ci_zkevm.yml
@@ -72,7 +72,7 @@ jobs:
         uses: actions/checkout@v4
         with:
           repository: 0xPolygon/kurtosis-cdk
-          ref: v0.2.4
+          ref: v0.2.7
           path: kurtosis-cdk

       - name: Install Kurtosis CDK tools
@@ -111,7 +111,7 @@ jobs:
       - name: Monitor verified batches
        working-directory: ./kurtosis-cdk
        shell: bash
-        run: timeout 900s .github/actions/monitor-cdk-verified-batches/batch_verification_monitor.sh 20 900 cdk-erigon-node-001
+        run: timeout 900s .github/scripts/monitor-verified-batches.sh --rpc-url $(kurtosis port print cdk-v1 cdk-erigon-node-001 http-rpc) --target 20 --timeout 900

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

From 867e99c9249df165cba279abfc53b7c6053d12ee Mon Sep 17 00:00:00 2001
From: hexoscott <70711990+hexoscott@users.noreply.github.com>
Date: Fri, 30 Aug 2024 10:20:04 +0100
Subject: [PATCH 5/7] do not unwind/prune l1 sync data (#1072)

Since we now unwind on sequencer restart, this process could take a long
time on every restart. As we only sync finalised data from the L1, that
data will not change anyway, so there is no point unwinding it.
---
 zk/stages/stage_l1syncer.go | 73 ++-----------------------------------
 1 file changed, 3 insertions(+), 70 deletions(-)

diff --git a/zk/stages/stage_l1syncer.go b/zk/stages/stage_l1syncer.go
index 8362247b8a4..d7edde1d5ac 100644
--- a/zk/stages/stage_l1syncer.go
+++ b/zk/stages/stage_l1syncer.go
@@ -291,80 +291,13 @@ func parseLogType(l1RollupId uint64, log *ethTypes.Log) (l1BatchInfo types.L1Bat
 }

 func UnwindL1SyncerStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg L1SyncerCfg, ctx context.Context) (err error) {
-	useExternalTx := tx != nil
-	if !useExternalTx {
-		tx, err = cfg.db.BeginRw(ctx)
-		if err != nil {
-			return err
-		}
-		defer tx.Rollback()
-	}
-	log.Debug("l1 sync: unwinding")
-
-	/*
-		1. unwind sequences table
-		2. unwind verifications table
-		3. update l1verifications batchno and l1syncer stage progress
-	*/
-
-	err = tx.ClearBucket(hermez_db.L1SEQUENCES)
-	if err != nil {
-		return err
-	}
-	err = tx.ClearBucket(hermez_db.L1VERIFICATIONS)
-	if err != nil {
-		return err
-	}
-
-	// the below are very inefficient due to key layout
-	//hermezDb := hermez_db.NewHermezDb(tx)
-	//err = hermezDb.TruncateSequences(u.UnwindPoint)
-	//if err != nil {
-	//	return err
-	//}
-	//
-	//err = hermezDb.TruncateVerifications(u.UnwindPoint)
-	//if err != nil {
-	//	return err
-	//}
-	// get the now latest l1 verification
-	//v, err := hermezDb.GetLatestVerification()
-	//if err != nil {
-	//	return err
-	//}
-
-	if err := stages.SaveStageProgress(tx, stages.L1VerificationsBatchNo, 0); err != nil {
-		return fmt.Errorf("failed to save stage progress, %w", err)
-	}
-	if err := stages.SaveStageProgress(tx, stages.L1Syncer, 0); err != nil {
-		return fmt.Errorf("failed to save stage progress, %w", err)
-	}
-
-	if !useExternalTx {
-		if err := tx.Commit(); err != nil {
-			return err
-		}
-	}
+	// we want to keep L1 data during an unwind; as we only sync finalised data,
+	// there should be no need to unwind here
 	return nil
 }

 func PruneL1SyncerStage(s *stagedsync.PruneState, tx kv.RwTx, cfg L1SyncerCfg, ctx context.Context) (err error) {
-	useExternalTx := tx != nil
-	if !useExternalTx {
-		tx, err = cfg.db.BeginRw(ctx)
-		if err != nil {
-			return err
-		}
-		defer tx.Rollback()
-	}
-
-	// TODO: implement prune L1 Verifications stage! (if required)
-
-	if !useExternalTx {
-		if err := tx.Commit(); err != nil {
-			return err
-		}
-	}
+	// no need to prune this data
 	return nil
 }

From 77d07b8903c1aa3f86ba6be0955685aa5ea52837 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= <93934272+Stefan-Ethernal@users.noreply.github.com>
Date: Fri, 30 Aug 2024 12:28:43 +0200
Subject: [PATCH 6/7] Include additional information to output batches MDBX
 browser tool (#1047)

* Populate additional details in the batch (WIP)
* Use the const for the fork id identifier
* GetBatchDataByNumbers add error handling and remove transactions collection
* Populate additional fields for the batches
* Implement output-batch-affiliation command
* Fix tests
* Fix DbDataRetriever UT
---
 cmd/rpcdaemon/commands/zkevm_api.go                  |  62 +---
 core/state/intra_block_state_zkevm.go                |   6 +
 zk/debug_tools/mdbx-data-browser/README.md           |  21 ++
 zk/debug_tools/mdbx-data-browser/cli.go              |  47 ++-
 zk/debug_tools/mdbx-data-browser/dbdata_retriever.go | 278 +++++++++++++-----
 .../dbdata_retriever_test.go                         | 103 ++++++-
 zk/debug_tools/mdbx-data-browser/main.go             |   1 +
 zk/utils/utils.go                                    |  52 ++++
 8 files changed, 436 insertions(+), 134 deletions(-)

diff --git a/cmd/rpcdaemon/commands/zkevm_api.go b/cmd/rpcdaemon/commands/zkevm_api.go
index c1a38ded839..e9788ca89e9 100644
--- a/cmd/rpcdaemon/commands/zkevm_api.go
+++ b/cmd/rpcdaemon/commands/zkevm_api.go
@@ -30,6 +30,7 @@ import (
 	smt "github.com/ledgerwatch/erigon/smt/pkg/smt"
 	smtUtils "github.com/ledgerwatch/erigon/smt/pkg/utils"
 	"github.com/ledgerwatch/erigon/turbo/rpchelper"
+	"github.com/ledgerwatch/erigon/zk/constants"
 	"github.com/ledgerwatch/erigon/zk/hermez_db"
 	"github.com/ledgerwatch/erigon/zk/legacy_executor_verifier"
 	types "github.com/ledgerwatch/erigon/zk/rpcdaemon"
@@ -299,6 +300,9 @@ func (api *ZkEvmAPIImpl) GetBatchDataByNumbers(ctx context.Context, batchNumbers
 	if syncing != nil && syncing != false {
 		bn := syncing.(map[string]interface{})["currentBlock"]
 		highestBatchNo, err = hermezDb.GetBatchNoByL2Block(uint64(bn.(hexutil.Uint64)))
+		if err != nil {
+			return nil, err
+		}
 	}

 	bds := make([]*types.BatchDataSlim, 0, len(batchNumbers.Numbers))
@@ -342,7 +346,6 @@ func (api *ZkEvmAPIImpl) GetBatchDataByNumbers(ctx context.Context, batchNumbers
 		// collect blocks in batch
 		var batchBlocks []*eritypes.Block
-		var batchTxs []eritypes.Transaction
 		// handle genesis - not in the hermez tables so requires special treatment
 		if batchNumber == 0 {
 			blk, err := api.ethApi.BaseAPI.blockByNumberWithSenders(tx, 0)
@@ -358,9 +361,6 @@ func (api *ZkEvmAPIImpl) GetBatchDataByNumbers(ctx context.Context, batchNumbers
 				return nil, err
 			}
 			batchBlocks = append(batchBlocks, blk)
-			for _, btx := range blk.Transactions() {
-				batchTxs = append(batchTxs, btx)
-			}
 		}

 		// batch l2 data - must build on the fly
@@ -369,7 +369,7 @@ func (api *ZkEvmAPIImpl) GetBatchDataByNumbers(ctx context.Context, batchNumbers
 			return nil, err
 		}

-		batchL2Data, err := generateBatchData(tx, hermezDb, batchBlocks, forkId)
+		batchL2Data, err := utils.GenerateBatchData(tx, hermezDb, batchBlocks, forkId)
 		if err != nil {
 			return nil, err
 		}
@@ -381,54 +381,6 @@ func (api *ZkEvmAPIImpl) GetBatchDataByNumbers(ctx context.Context, batchNumbers
 	return populateBatchDataSlimDetails(bds)
 }

-func generateBatchData(
-	tx kv.Tx,
-	hermezDb *hermez_db.HermezDbReader,
-	batchBlocks []*eritypes.Block,
-	forkId uint64,
-) (batchL2Data []byte, err error) {
-
-	lastBlockNoInPreviousBatch := uint64(0)
-	if batchBlocks[0].NumberU64() != 0 {
-		lastBlockNoInPreviousBatch = batchBlocks[0].NumberU64() - 1
-	}
-
-	lastBlockInPreviousBatch, err := rawdb.ReadBlockByNumber(tx, lastBlockNoInPreviousBatch)
-	if err != nil {
-		return nil, err
-	}
-
-	batchL2Data = []byte{}
-	for i := 0; i < len(batchBlocks); i++ {
-		var dTs uint32
-		if i == 0 {
-			dTs = uint32(batchBlocks[i].Time() - lastBlockInPreviousBatch.Time())
-		} else {
-			dTs = uint32(batchBlocks[i].Time() - batchBlocks[i-1].Time())
-		}
-		iti, err := hermezDb.GetBlockL1InfoTreeIndex(batchBlocks[i].NumberU64())
-		if err != nil {
-			return nil, err
-		}
-		egTx := make(map[common.Hash]uint8)
-		for _, txn := range batchBlocks[i].Transactions() {
-			eg, err := hermezDb.GetEffectiveGasPricePercentage(txn.Hash())
-			if err != nil {
-				return nil, err
-			}
-			egTx[txn.Hash()] = eg
-		}
-
-		bl2d, err := zktx.GenerateBlockBatchL2Data(uint16(forkId), dTs, uint32(iti), batchBlocks[i].Transactions(), egTx)
-		if err != nil {
-			return nil, err
-		}
-		batchL2Data = append(batchL2Data, bl2d...)
-	}
-
-	return batchL2Data, err
-}
-
 // GetBatchByNumber returns a batch from the current canonical chain. If number is nil, the
 // latest known batch is returned.
 func (api *ZkEvmAPIImpl) GetBatchByNumber(ctx context.Context, batchNumber rpc.BlockNumber, fullTx *bool) (json.RawMessage, error) {
@@ -670,7 +622,7 @@ func (api *ZkEvmAPIImpl) GetBatchByNumber(ctx context.Context, batchNumber rpc.B
 		return nil, err
 	}

-	batchL2Data, err := generateBatchData(tx, hermezDb, batchBlocks, forkId)
+	batchL2Data, err := utils.GenerateBatchData(tx, hermezDb, batchBlocks, forkId)
 	if err != nil {
 		return nil, err
 	}
@@ -686,7 +638,7 @@ func (api *ZkEvmAPIImpl) GetBatchByNumber(ctx context.Context, batchNumber rpc.B
 	// forkid exit roots logic
 	// if forkid < 12 then we should only set the exit roots if they have changed, otherwise 0x00..00
 	// if forkid >= 12 then we should always set the exit roots
-	if forkId < 12 {
+	if forkId < uint64(constants.ForkID12Banana) {
 		// get the previous batches exit roots
 		prevBatchNo := batchNo - 1
 		prevBatchHighestBlock, err := hermezDb.GetHighestBlockInBatch(prevBatchNo)
diff --git a/core/state/intra_block_state_zkevm.go b/core/state/intra_block_state_zkevm.go
index c39287f3ff4..f903600d1f6 100644
--- a/core/state/intra_block_state_zkevm.go
+++ b/core/state/intra_block_state_zkevm.go
@@ -34,13 +34,19 @@ type ReadOnlyHermezDb interface {
 	GetIntermediateTxStateRoot(blockNum uint64, txhash libcommon.Hash) (libcommon.Hash, error)
 	GetReusedL1InfoTreeIndex(blockNum uint64) (bool, error)
 	GetSequenceByBatchNo(batchNo uint64) (*zktypes.L1BatchInfo, error)
+	GetSequenceByBatchNoOrHighest(batchNo uint64) (*zktypes.L1BatchInfo, error)
 	GetHighestBlockInBatch(batchNo uint64) (uint64, error)
 	GetLowestBlockInBatch(batchNo uint64) (uint64, bool, error)
 	GetL2BlockNosByBatch(batchNo uint64) ([]uint64, error)
 	GetBatchGlobalExitRoot(batchNum uint64) (*dstypes.GerUpdate, error)
 	GetVerificationByBatchNo(batchNo uint64) (*zktypes.L1BatchInfo, error)
+	GetVerificationByBatchNoOrHighest(batchNo uint64) (*zktypes.L1BatchInfo, error)
 	GetL1BatchData(batchNumber uint64) ([]byte, error)
 	GetL1InfoTreeUpdateByGer(ger libcommon.Hash) (*zktypes.L1InfoTreeUpdate, error)
+	GetBlockL1InfoTreeIndex(blockNumber uint64) (uint64, error)
+	GetBlockInfoRoot(blockNumber uint64) (libcommon.Hash, error)
+	GetLastBlockGlobalExitRoot(l2BlockNo uint64) (libcommon.Hash, uint64, error)
+	GetForkId(batchNo uint64) (uint64, error)
 }

 func (sdb *IntraBlockState) GetTxCount() (uint64, error) {
diff --git a/zk/debug_tools/mdbx-data-browser/README.md b/zk/debug_tools/mdbx-data-browser/README.md
index 8880ba09383..f8c06d7cd43 100644
--- a/zk/debug_tools/mdbx-data-browser/README.md
+++ b/zk/debug_tools/mdbx-data-browser/README.md
@@ -60,6 +60,21 @@ In case `file-output` flag is provided, results are printed to a JSON file (othe
 **Note:** In case `output-blocks` is run with the `verbose` flag provided, it is necessary to provide the proper chain id to the `params/chainspecs/mainnet.json`. This is the case because CDK Erigon (for now) uses hardcoded data to recover transaction senders, and chain id information is read from the mentioned file.

+#### `output-batch-affiliation`
+It is used to output batch numbers along with the blocks that belong to each batch.
+
+- **Name**: `output-batch-affiliation`
+- **Usage**: Outputs batch affiliation for provided block numbers.
+- **Action**: `dumpBatchAffiliation`
+- **Flags**:
+  - `data-dir`: Specifies the data directory to use.
+  - `bn`: Block numbers.
+    - **Name**: `bn`
+    - **Usage**: Block numbers.
+    - **Destination**: `batchOrBlockNumbers`
+  - `verbose`: See [verbose](#verbose) flag.
+  - `file-output`: See [file-output](#file-output) flag.
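+
+The JSON shape mirrors the tool's `BatchAffiliationInfo` struct; an
+illustrative result for hypothetical blocks 100-102 spread across two batches:
+
+```json
+[
+  { "batch": 1, "blocks": [100, 101] },
+  { "batch": 2, "blocks": [102] }
+]
+```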
+
 ### Example Usage

 **Pre-requisite:** Navigate to the `zk/debug_tools/mdbx-data-browser` folder and run `go build -o mdbx-data-browser`

@@ -75,3 +90,9 @@ In case `file-output` flag is provided, results are printed to a JSON file (othe
 ```sh
 ./mdbx-data-browser output-blocks --datadir chaindata/ --bn 100,101,102 [--verbose] [--file-output]
 ```
+
+#### `output-batch-affiliation` Command
+
+```sh
+./mdbx-data-browser output-batch-affiliation --datadir chaindata/ --bn 100,101,102 [--verbose] [--file-output]
+```
diff --git a/zk/debug_tools/mdbx-data-browser/cli.go b/zk/debug_tools/mdbx-data-browser/cli.go
index f29abcd1ea7..ff25b4a78cb 100644
--- a/zk/debug_tools/mdbx-data-browser/cli.go
+++ b/zk/debug_tools/mdbx-data-browser/cli.go
@@ -67,6 +67,21 @@ var (
 		},
 	}

+	getBatchAffiliationCmd = &cli.Command{
+		Action: dumpBatchAffiliation,
+		Name:   "output-batch-affiliation",
+		Usage:  "Outputs batch affiliation for provided block numbers",
+		Flags: []cli.Flag{
+			&utils.DataDirFlag,
+			&cli.Uint64SliceFlag{
+				Name:        "bn",
+				Usage:       "Block numbers",
+				Destination: batchOrBlockNumbers,
+			},
+			fileOutputFlag,
+		},
+	}
+
 	// parameters
 	chainDataDir        string
 	batchOrBlockNumbers *cli.Uint64Slice = cli.NewUint64Slice()
@@ -148,6 +163,36 @@ func dumpBlocksByNumbers(cliCtx *cli.Context) error {
 	return nil
 }

+// dumpBatchAffiliation retrieves batch numbers by given block numbers and dumps them either on standard output or to a file
+func dumpBatchAffiliation(cliCtx *cli.Context) error {
+	if !cliCtx.IsSet(utils.DataDirFlag.Name) {
+		return errors.New("chain data directory is not provided")
+	}
+
+	chainDataDir = cliCtx.String(utils.DataDirFlag.Name)
+
+	tx, cleanup, err := createDbTx(chainDataDir, cliCtx.Context)
+	if err != nil {
+		return fmt.Errorf("failed to create read-only db transaction: %w", err)
+	}
+	defer cleanup()
+
+	r := NewDbDataRetriever(tx)
+	batchInfo, err := r.GetBatchAffiliation(batchOrBlockNumbers.Value())
+	if err != nil {
+		return err
+	}
+	jsonBatchAffiliation, err := json.MarshalIndent(batchInfo, "", "  ")
+	if err != nil {
+		return fmt.Errorf("failed to serialize batch affiliation info into the JSON format: %w", err)
+	}
+
+	if err := outputResults(string(jsonBatchAffiliation)); err != nil {
+		return fmt.Errorf("failed to output results: %w", err)
+	}
+	return nil
+}
+
 // createDbTx creates a read-only database transaction that allows querying the database.
func createDbTx(chainDataDir string, ctx context.Context) (kv.Tx, func(), error) { db := mdbx.MustOpen(chainDataDir) @@ -183,7 +228,7 @@ func outputResults(results string) error { return err } - fmt.Printf("results are written to the '%s'", path) + fmt.Printf("results are written to the '%s'\n", path) return nil } diff --git a/zk/debug_tools/mdbx-data-browser/dbdata_retriever.go b/zk/debug_tools/mdbx-data-browser/dbdata_retriever.go index 481ad62c183..41fed2fc9d3 100644 --- a/zk/debug_tools/mdbx-data-browser/dbdata_retriever.go +++ b/zk/debug_tools/mdbx-data-browser/dbdata_retriever.go @@ -3,6 +3,7 @@ package main import ( "errors" "fmt" + "sort" "github.com/gateway-fm/cdk-erigon-lib/kv" @@ -11,6 +12,8 @@ import ( coreTypes "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/zk/hermez_db" rpcTypes "github.com/ledgerwatch/erigon/zk/rpcdaemon" + zktx "github.com/ledgerwatch/erigon/zk/tx" + "github.com/ledgerwatch/erigon/zk/utils" ) type DbDataRetriever struct { @@ -28,76 +31,115 @@ func NewDbDataRetriever(tx kv.Tx) *DbDataRetriever { // GetBatchByNumber reads batch by number from the database func (d *DbDataRetriever) GetBatchByNumber(batchNum uint64, verboseOutput bool) (*rpcTypes.Batch, error) { - // Get highest block in batch - latestBlockInBatch, err := d.getHighestBlockInBatch(batchNum) + highestBlock, err := rawdb.ReadLastBlockSynced(d.tx) if err != nil { return nil, err } - if latestBlockInBatch == nil { - return nil, errors.New("failed to retrieve the latest block in batch") + highestBatchNo, err := d.dbReader.GetBatchNoByL2Block(highestBlock.NumberU64()) + if err != nil { + return nil, err } - // Initialize batch - batch := &rpcTypes.Batch{ - Number: rpcTypes.ArgUint64(batchNum), - Coinbase: latestBlockInBatch.Coinbase(), - StateRoot: latestBlockInBatch.Root(), - Timestamp: rpcTypes.ArgUint64(latestBlockInBatch.Time()), + // Short circuit in case the given batch number is not present in the db + if batchNum > highestBatchNo { + return nil, fmt.Errorf("batch %d does not exist (the highest persisted batch is %d)", batchNum, highestBatchNo) } - // Collect blocks in batch - if err := d.collectBlocksInBatch(batch, batchNum, verboseOutput); err != nil { + // Get highest block in batch + latestBlockInBatch, err := d.getHighestBlockInBatch(batchNum) + if err != nil { return nil, err } - // Get global exit root - ger, err := d.dbReader.GetBatchGlobalExitRoot(batchNum) + if latestBlockInBatch == nil { + return nil, errors.New("failed to retrieve the latest block in batch") + } + + // Get global exit root of the batch + batchGER, _, err := d.dbReader.GetLastBlockGlobalExitRoot(latestBlockInBatch.NumberU64()) if err != nil { return nil, err } - if ger != nil { - batch.GlobalExitRoot = ger.GlobalExitRoot + + // Initialize batch + batch := &rpcTypes.Batch{ + Number: rpcTypes.ArgUint64(batchNum), + Coinbase: latestBlockInBatch.Coinbase(), + StateRoot: latestBlockInBatch.Root(), + Timestamp: rpcTypes.ArgUint64(latestBlockInBatch.Time()), + GlobalExitRoot: batchGER, } // Get sequence - seq, err := d.dbReader.GetSequenceByBatchNo(batchNum) + seq, err := d.dbReader.GetSequenceByBatchNoOrHighest(batchNum) if err != nil { return nil, err } if seq != nil { batch.SendSequencesTxHash = &seq.L1TxHash } - batch.Closed = (seq != nil || batchNum <= 1) - // Get verification - ver, err := d.dbReader.GetVerificationByBatchNo(batchNum) + // sequenced, genesis or injected batch: + // - batches 0 and 1 will always be closed and + // - if next batch has blocks, the given batch is closed + _, 
lowestBlockInNextBatchExists, err := d.dbReader.GetLowestBlockInBatch(batchNum + 1)
 	if err != nil {
 		return nil, err
 	}
-	if ver != nil {
-		batch.VerifyBatchTxHash = &ver.L1TxHash
-	}
+	batch.Closed = (seq != nil || batchNum <= 1 || lowestBlockInNextBatchExists)

-	// Get batch L2 data
-	batchL2Data, err := d.dbReader.GetL1BatchData(batchNum)
+	// Get verification
+	verification, err := d.dbReader.GetVerificationByBatchNoOrHighest(batchNum)
 	if err != nil {
 		return nil, err
 	}
-	batch.BatchL2Data = batchL2Data
+	if verification != nil {
+		batch.VerifyBatchTxHash = &verification.L1TxHash
+	}

-	// Set L1 info tree if needed
-	if batch.GlobalExitRoot != rpcTypes.ZeroHash {
-		l1InfoTree, err := d.dbReader.GetL1InfoTreeUpdateByGer(batch.GlobalExitRoot)
+	// Set L1 info tree (Mainnet Exit Root and Rollup Exit Root) if Global Exit Root exists
+	if batchGER != rpcTypes.ZeroHash {
+		l1InfoTreeUpdate, err := d.dbReader.GetL1InfoTreeUpdateByGer(batchGER)
 		if err != nil {
 			return nil, err
 		}
-		if l1InfoTree != nil {
-			batch.MainnetExitRoot = l1InfoTree.MainnetExitRoot
-			batch.RollupExitRoot = l1InfoTree.RollupExitRoot
+		if l1InfoTreeUpdate != nil {
+			batch.MainnetExitRoot = l1InfoTreeUpdate.MainnetExitRoot
+			batch.RollupExitRoot = l1InfoTreeUpdate.RollupExitRoot
 		}
 	}

+	// Get Local Exit Root
+	localExitRoot, err := utils.GetBatchLocalExitRootFromSCStorageForLatestBlock(batchNum, d.dbReader, d.tx)
+	if err != nil {
+		return nil, err
+	}
+	batch.LocalExitRoot = localExitRoot
+
+	// Generate batch L2 data on the fly
+	forkId, err := d.dbReader.GetForkId(batchNum)
+	if err != nil {
+		return nil, err
+	}
+
+	// Collect blocks in batch
+	batchBlocks, err := d.getBatchBlocks(batchNum)
+	if err != nil {
+		return nil, err
+	}
+
+	batchL2Data, err := utils.GenerateBatchData(d.tx, d.dbReader, batchBlocks, forkId)
+	if err != nil {
+		return nil, err
+	}
+	batch.BatchL2Data = batchL2Data
+
+	// Populate blocks and transactions to the batch
+	if err := d.populateBlocksAndTransactions(batch, batchBlocks, verboseOutput); err != nil {
+		return nil, err
+	}
+
 	return batch, nil
 }
@@ -108,7 +150,7 @@ func (d *DbDataRetriever) getHighestBlockInBatch(batchNum uint64) (*coreTypes.Bl
 		return nil, err
 	}

-	if !found {
+	if !found && batchNum != 0 {
 		return nil, nil
 	}
@@ -129,53 +171,109 @@ func (d *DbDataRetriever) getHighestBlockInBatch(batchNum uint64) (*coreTypes.Bl
 	return latestBlockInBatch, nil
 }

-// collectBlocksInBatch retrieve blocks from the batch
-func (d *DbDataRetriever) collectBlocksInBatch(batch *rpcTypes.Batch, batchNum uint64, verboseOutput bool) error {
-	// Get block numbers in the batch
-	blocksInBatch, err := d.dbReader.GetL2BlockNosByBatch(batchNum)
-	if err != nil {
-		return err
-	}
-
-	// Handle genesis block separately
-	if batchNum == 0 {
-		if err := d.addBlockToBatch(batch, 0, verboseOutput); err != nil {
-			return err
+// populateBlocksAndTransactions populates blocks and transactions into the given batch.
+// In case verboseOutput is set to true, entire blocks and transactions are populated; otherwise only hashes.
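+// In verbose mode each transaction is additionally enriched with its receipt
+// and its computed L2 transaction hash (via zktx.ComputeL2TxHash).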
+func (d *DbDataRetriever) populateBlocksAndTransactions(batch *rpcTypes.Batch, blocks []*coreTypes.Block, verboseOutput bool) error { + batch.Blocks = make([]interface{}, 0, len(blocks)) + if !verboseOutput { + for _, block := range blocks { + batch.Blocks = append(batch.Blocks, block.Hash()) + for _, tx := range block.Transactions() { + batch.Transactions = append(batch.Transactions, tx.Hash()) + } } - } - - // Collect blocks and their transactions - for _, blockNum := range blocksInBatch { - if err := d.addBlockToBatch(batch, blockNum, verboseOutput); err != nil { - return err + } else { + for _, block := range blocks { + blockInfoRoot, err := d.dbReader.GetBlockInfoRoot(block.NumberU64()) + if err != nil { + return err + } + + blockGER, err := d.dbReader.GetBlockGlobalExitRoot(block.NumberU64()) + if err != nil { + return err + } + + rpcBlock, err := d.convertToRPCBlock(block, verboseOutput, verboseOutput) + if err != nil { + return err + } + + batchBlockExtra := &rpcTypes.BlockWithInfoRootAndGer{ + Block: rpcBlock, + BlockInfoRoot: blockInfoRoot, + GlobalExitRoot: blockGER, + } + + batch.Blocks = append(batch.Blocks, batchBlockExtra) + + for _, tx := range block.Transactions() { + receipt, _, _, _, err := rawdb.ReadReceipt(d.tx, tx.Hash()) + if err != nil { + return err + } + + rpcTx, err := rpcTypes.NewTransaction(tx, receipt, verboseOutput) + if err != nil { + return err + } + + l2TxHash, err := zktx.ComputeL2TxHash( + tx.GetChainID().ToBig(), + tx.GetValue(), + tx.GetPrice(), + tx.GetNonce(), + tx.GetGas(), + tx.GetTo(), + &rpcTx.From, + tx.GetData(), + ) + if err != nil { + return err + } + + if rpcTx.Receipt != nil { + rpcTx.Receipt.TransactionL2Hash = l2TxHash + } + rpcTx.L2Hash = l2TxHash + + batch.Transactions = append(batch.Transactions, rpcTx) + } } } - return nil } -// addBlockToBatch adds a block and its transactions to the batch -func (d *DbDataRetriever) addBlockToBatch(batch *rpcTypes.Batch, blockNum uint64, verboseOutput bool) error { - block, err := rawdb.ReadBlockByNumber(d.tx, blockNum) +// getBatchBlocks retrieve blocks from the provided batch number +func (d *DbDataRetriever) getBatchBlocks(batchNum uint64) ([]*coreTypes.Block, error) { + // Get block numbers in the batch + blockNums, err := d.dbReader.GetL2BlockNosByBatch(batchNum) if err != nil { - return err + return nil, err } - if verboseOutput { - batch.Blocks = append(batch.Blocks, block) - } else { - batch.Blocks = append(batch.Blocks, block.Hash()) + blocks := make([]*coreTypes.Block, 0, len(blockNums)) + + // Handle genesis block separately + if batchNum == 0 { + block, err := rawdb.ReadBlockByNumber(d.tx, 0) + if err != nil { + return nil, err + } + + blocks = append(blocks, block) } - for _, tx := range block.Transactions() { - if verboseOutput { - batch.Transactions = append(batch.Transactions, tx) - } else { - batch.Transactions = append(batch.Transactions, tx.Hash()) + for _, blockNum := range blockNums { + block, err := rawdb.ReadBlockByNumber(d.tx, blockNum) + if err != nil { + return nil, err } + + blocks = append(blocks, block) } - return nil + return blocks, nil } // GetBlockByNumber reads block based on its block number from the database @@ -190,11 +288,49 @@ func (d *DbDataRetriever) GetBlockByNumber(blockNum uint64, includeTxs, includeR return nil, fmt.Errorf("block %d not found", blockNum) } - receipts := rawdb.ReadReceipts(d.tx, block, block.Body().SendersFromTxs()) - rpcBlock, err := rpcTypes.NewBlock(block, receipts.ToSlice(), includeTxs, includeReceipts) - if err != nil { - return nil, 
@@ -190,11 +288,49 @@ func (d *DbDataRetriever) GetBlockByNumber(blockNum uint64, includeTxs, includeR
 		return nil, fmt.Errorf("block %d not found", blockNum)
 	}
 
-	receipts := rawdb.ReadReceipts(d.tx, block, block.Body().SendersFromTxs())
-	rpcBlock, err := rpcTypes.NewBlock(block, receipts.ToSlice(), includeTxs, includeReceipts)
-	if err != nil {
-		return nil, err
+	return d.convertToRPCBlock(block, includeTxs, includeReceipts)
+}
+
+// GetBatchAffiliation retrieves the batch affiliation for the provided block numbers
+func (d *DbDataRetriever) GetBatchAffiliation(blocks []uint64) ([]*BatchAffiliationInfo, error) {
+	batchInfoMap := make(map[uint64]*BatchAffiliationInfo)
+	for _, blockNum := range blocks {
+		batchNum, err := d.dbReader.GetBatchNoByL2Block(blockNum)
+		if err != nil {
+			return nil, err
+		}
+
+		if blockNum > 0 && batchNum == 0 {
+			return nil, fmt.Errorf("batch is not found for block num %d", blockNum)
+		}
+
+		batchInfo, exists := batchInfoMap[batchNum]
+		if !exists {
+			batchInfo = &BatchAffiliationInfo{Number: batchNum}
+			batchInfoMap[batchNum] = batchInfo
+		}
+		batchInfo.Blocks = append(batchInfo.Blocks, blockNum)
 	}
-	return rpcBlock, nil
+
+	res := make([]*BatchAffiliationInfo, 0, len(batchInfoMap))
+	for _, bi := range batchInfoMap {
+		res = append(res, bi)
+	}
+
+	sort.Slice(res, func(i, j int) bool {
+		return res[i].Number < res[j].Number
+	})
+
+	return res, nil
+}
+
+// convertToRPCBlock converts the given coreTypes.Block into its rpcTypes.Block representation
+func (d *DbDataRetriever) convertToRPCBlock(block *coreTypes.Block, includeTxs, includeReceipts bool) (*rpcTypes.Block, error) {
+	receipts := rawdb.ReadReceipts(d.tx, block, block.Body().SendersFromTxs())
+	return rpcTypes.NewBlock(block, receipts.ToSlice(), includeTxs, includeReceipts)
+}
+
+type BatchAffiliationInfo struct {
+	Number uint64   `json:"batch"`
+	Blocks []uint64 `json:"blocks"`
+}
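The grouping GetBatchAffiliation performs is a plain map-then-sort; a self-contained sketch of the same logic follows, with the database lookup swapped for an in-memory map (groupByBatch and batchOf are illustrative names, not part of this patch):

package main

import (
	"fmt"
	"sort"
)

type batchAffiliation struct {
	Number uint64
	Blocks []uint64
}

// groupByBatch mirrors the shape of GetBatchAffiliation: bucket each block
// under its batch number, then emit the buckets sorted by batch number so
// the output is deterministic regardless of map iteration order.
func groupByBatch(blocks []uint64, batchOf map[uint64]uint64) []*batchAffiliation {
	byBatch := make(map[uint64]*batchAffiliation)
	for _, b := range blocks {
		n := batchOf[b]
		info, ok := byBatch[n]
		if !ok {
			info = &batchAffiliation{Number: n}
			byBatch[n] = info
		}
		info.Blocks = append(info.Blocks, b)
	}

	res := make([]*batchAffiliation, 0, len(byBatch))
	for _, info := range byBatch {
		res = append(res, info)
	}
	sort.Slice(res, func(i, j int) bool { return res[i].Number < res[j].Number })
	return res
}

func main() {
	for _, info := range groupByBatch([]uint64{1, 2, 6}, map[uint64]uint64{1: 1, 2: 1, 6: 3}) {
		fmt.Println(info.Number, info.Blocks) // 1 [1 2], then 3 [6]
	}
}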
diff --git a/zk/debug_tools/mdbx-data-browser/dbdata_retriever_test.go b/zk/debug_tools/mdbx-data-browser/dbdata_retriever_test.go
index f6a4c58f23e..705d469800f 100644
--- a/zk/debug_tools/mdbx-data-browser/dbdata_retriever_test.go
+++ b/zk/debug_tools/mdbx-data-browser/dbdata_retriever_test.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"math/big"
 	"testing"
+	"time"
 
 	libcommon "github.com/gateway-fm/cdk-erigon-lib/common"
 	"github.com/gateway-fm/cdk-erigon-lib/kv/memdb"
@@ -12,7 +13,9 @@ import (
 	"github.com/ledgerwatch/erigon/common/u256"
 	"github.com/ledgerwatch/erigon/core/rawdb"
 	"github.com/ledgerwatch/erigon/core/types"
+	"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
 	"github.com/ledgerwatch/erigon/zk/hermez_db"
+	rpcTypes "github.com/ledgerwatch/erigon/zk/rpcdaemon"
 )
 
 func TestDbDataRetrieverGetBatchByNumber(t *testing.T) {
@@ -24,6 +27,11 @@ func TestDbDataRetrieverGetBatchByNumber(t *testing.T) {
 	_, dbTx := memdb.NewTestTx(t)
 	require.NoError(t, hermez_db.CreateHermezBuckets(dbTx))
 	db := hermez_db.NewHermezDb(dbTx)
+
+	block := createBlock(t, 0, nil)
+	require.NoError(t, rawdb.WriteCanonicalHash(dbTx, block.Hash(), block.NumberU64()))
+	require.NoError(t, rawdb.WriteBlock(dbTx, block))
+
 	expectedBlockHashes := make(map[uint64]libcommon.Hash, blocksInBatch)
 	for blockNum := uint64(1); blockNum <= blocksInBatch; blockNum++ {
 		require.NoError(t, db.WriteBlockBatch(blockNum, batchNum))
@@ -34,6 +42,9 @@ func TestDbDataRetrieverGetBatchByNumber(t *testing.T) {
 		expectedBlockHashes[blockNum] = block.Hash()
 	}
 
+	err := stages.SaveStageProgress(dbTx, stages.Execution, blocksInBatch)
+	require.NoError(t, err)
+
 	dbReader := NewDbDataRetriever(dbTx)
 	batch, err := dbReader.GetBatchByNumber(batchNum, true)
 	require.NoError(t, err)
@@ -41,28 +52,28 @@
 	require.Equal(t, batchNum, uint64(batch.Number))
 	require.Len(t, expectedBlockHashes, int(blocksInBatch))
 	for _, blockGeneric := range batch.Blocks {
-		block, ok := blockGeneric.(*types.Block)
+		block, ok := blockGeneric.(*rpcTypes.BlockWithInfoRootAndGer)
 		require.True(t, ok)
 
-		expectedHash, exists := expectedBlockHashes[block.NumberU64()]
+		expectedHash, exists := expectedBlockHashes[uint64(block.Number)]
 		require.True(t, exists)
-		require.Equal(t, expectedHash, block.Hash())
+		require.Equal(t, expectedHash, block.Hash)
 	}
 }
 
 func TestDbDataRetrieverGetBlockByNumber(t *testing.T) {
 	t.Run("querying an existing block", func(t *testing.T) {
 		// arrange
-		_, dbTx := memdb.NewTestTx(t)
+		_, tx := memdb.NewTestTx(t)
 
 		tx1 := types.NewTransaction(1, libcommon.HexToAddress("0x1050"), u256.Num1, 1, u256.Num1, nil)
 		tx2 := types.NewTransaction(2, libcommon.HexToAddress("0x100"), u256.Num27, 2, u256.Num2, nil)
 		block := createBlock(t, 5, types.Transactions{tx1, tx2})
 
-		require.NoError(t, rawdb.WriteCanonicalHash(dbTx, block.Hash(), block.NumberU64()))
-		require.NoError(t, rawdb.WriteBlock(dbTx, block))
+		require.NoError(t, rawdb.WriteCanonicalHash(tx, block.Hash(), block.NumberU64()))
+		require.NoError(t, rawdb.WriteBlock(tx, block))
 
 		// act and assert
-		dbReader := NewDbDataRetriever(dbTx)
+		dbReader := NewDbDataRetriever(tx)
 		result, err := dbReader.GetBlockByNumber(block.NumberU64(), true, true)
 		require.NoError(t, err)
 		require.Equal(t, block.Hash(), result.Hash)
@@ -80,6 +91,83 @@ func TestDbDataRetrieverGetBlockByNumber(t *testing.T) {
 	})
 }
 
+func TestDbDataRetrieverGetBatchAffiliation(t *testing.T) {
+	testCases := []struct {
+		name           string
+		blocksInBatch  int
+		batchesCount   int
+		blockNums      []uint64
+		expectedErrMsg string
+		expectedResult []*BatchAffiliationInfo
+	}{
+		{
+			name:          "Basic case with three blocks and two requested",
+			blocksInBatch: 3,
+			batchesCount:  2,
+			blockNums:     []uint64{1, 3},
+			expectedResult: []*BatchAffiliationInfo{
+				{Number: 1, Blocks: []uint64{1, 3}},
+			},
+		},
+		{
+			name:          "All blocks in batch requested",
+			blocksInBatch: 3,
+			batchesCount:  2,
+			blockNums:     []uint64{4, 5, 6},
+			expectedResult: []*BatchAffiliationInfo{
+				{Number: 2, Blocks: []uint64{4, 5, 6}},
+			},
+		},
+		{
+			name:          "Request multiple batches",
+			blocksInBatch: 2,
+			batchesCount:  3,
+			blockNums:     []uint64{1, 2, 6},
+			expectedResult: []*BatchAffiliationInfo{
+				{Number: 1, Blocks: []uint64{1, 2}},
+				{Number: 3, Blocks: []uint64{6}},
+			},
+		},
+		{
+			name:           "Request non-existent block",
+			blocksInBatch:  2,
+			batchesCount:   2,
+			blockNums:      []uint64{5},
+			expectedErrMsg: "batch is not found for block num 5",
+			expectedResult: nil,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			_, dbTx := memdb.NewTestTx(t)
+			require.NoError(t, hermez_db.CreateHermezBuckets(dbTx))
+			db := hermez_db.NewHermezDb(dbTx)
+
+			// Write the blocks according to the test case
+			blockNum := uint64(1)
+			for batchNum := uint64(1); batchNum <= uint64(tc.batchesCount); batchNum++ {
+				for i := 0; i < tc.blocksInBatch; i++ {
+					require.NoError(t, db.WriteBlockBatch(blockNum, batchNum))
+					blockNum++
+				}
+			}
+
+			dbReader := NewDbDataRetriever(dbTx)
+			batchAffiliation, err := dbReader.GetBatchAffiliation(tc.blockNums)
+
+			// Check if an error was expected
+			if tc.expectedErrMsg != "" {
+				require.ErrorContains(t, err, tc.expectedErrMsg)
+			} else {
+				require.NoError(t, err)
+			}
+
+			require.Equal(t, tc.expectedResult, batchAffiliation)
+		})
+	}
+}
+
 // createBlock is a helper function that creates a block with the provided number and transactions
 func createBlock(t *testing.T, number uint64, txs types.Transactions) *types.Block {
 	t.Helper()
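The test setup above lays blocks out contiguously, so block n falls into batch (n-1)/blocksInBatch + 1; a quick sketch of that arithmetic, useful for sanity-checking the expectations (batchFor is a hypothetical helper, not part of the patch):

package main

import "fmt"

// batchFor mirrors the contiguous layout used in the test setup: batches are
// filled in order, blocksInBatch blocks each, starting from block 1.
func batchFor(blockNum, blocksInBatch uint64) uint64 {
	return (blockNum-1)/blocksInBatch + 1
}

func main() {
	// matches the "Request multiple batches" case: blocks 1 and 2 in batch 1, block 6 in batch 3
	fmt.Println(batchFor(1, 2), batchFor(2, 2), batchFor(6, 2)) // 1 1 3
	// matches the "Request non-existent block" case: block 5 would need a third batch
	fmt.Println(batchFor(5, 2)) // 3
}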
@@ -91,6 +179,7 @@ func createBlock(t *testing.T, number uint64, txs types.Transactio
 		UncleHash:   types.EmptyUncleHash,
 		TxHash:      types.EmptyRootHash,
 		ReceiptHash: types.EmptyRootHash,
+		Time:        uint64(time.Now().Unix()),
 	})
 
 	if txs.Len() > 0 {
diff --git a/zk/debug_tools/mdbx-data-browser/main.go b/zk/debug_tools/mdbx-data-browser/main.go
index 833e801f1a0..b0741f9a3bc 100644
--- a/zk/debug_tools/mdbx-data-browser/main.go
+++ b/zk/debug_tools/mdbx-data-browser/main.go
@@ -16,6 +16,7 @@ func main() {
 	app.Commands = []*cli.Command{
 		getBatchByNumberCmd,
 		getBlockByNumberCmd,
+		getBatchAffiliationCmd,
 	}
 
 	logging.SetupLogger("mdbx data browser")
diff --git a/zk/utils/utils.go b/zk/utils/utils.go
index 6059a0780f2..4513ecdebbc 100644
--- a/zk/utils/utils.go
+++ b/zk/utils/utils.go
@@ -3,14 +3,18 @@ package utils
 import (
 	"fmt"
 
+	"github.com/gateway-fm/cdk-erigon-lib/common"
 	libcommon "github.com/gateway-fm/cdk-erigon-lib/common"
 	"github.com/gateway-fm/cdk-erigon-lib/kv"
 	"github.com/ledgerwatch/erigon/chain"
+	"github.com/ledgerwatch/erigon/core/rawdb"
 	"github.com/ledgerwatch/erigon/core/state"
 	"github.com/ledgerwatch/erigon/core/systemcontracts"
+	eritypes "github.com/ledgerwatch/erigon/core/types"
 	"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
 	"github.com/ledgerwatch/erigon/zk/constants"
 	"github.com/ledgerwatch/erigon/zk/hermez_db"
+	zktx "github.com/ledgerwatch/erigon/zk/tx"
 	"github.com/ledgerwatch/log/v3"
 )
 
@@ -158,3 +162,54 @@ func GetBatchLocalExitRootFromSCStorageByBlock(blockNumber uint64, db DbReader,
 
 	return libcommon.Hash{}, nil
 }
+
+// GenerateBatchData builds the batch L2 data blob for the given blocks, encoding each
+// block's delta timestamp, L1 info tree index, transactions, and per-transaction
+// effective gas price percentages.
+func GenerateBatchData(
+	tx kv.Tx,
+	hermezDb state.ReadOnlyHermezDb,
+	batchBlocks []*eritypes.Block,
+	forkId uint64,
+) (batchL2Data []byte, err error) {
+	lastBlockNoInPreviousBatch := uint64(0)
+	firstBlockInBatch := batchBlocks[0]
+	if firstBlockInBatch.NumberU64() != 0 {
+		lastBlockNoInPreviousBatch = firstBlockInBatch.NumberU64() - 1
+	}
+
+	lastBlockInPreviousBatch, err := rawdb.ReadBlockByNumber(tx, lastBlockNoInPreviousBatch)
+	if err != nil {
+		return nil, err
+	}
+
+	batchL2Data = []byte{}
+	for i := 0; i < len(batchBlocks); i++ {
+		var dTs uint32
+		if i == 0 {
+			dTs = uint32(batchBlocks[i].Time() - lastBlockInPreviousBatch.Time())
+		} else {
+			dTs = uint32(batchBlocks[i].Time() - batchBlocks[i-1].Time())
+		}
+		iti, err := hermezDb.GetBlockL1InfoTreeIndex(batchBlocks[i].NumberU64())
+		if err != nil {
+			return nil, err
+		}
+		egTx := make(map[common.Hash]uint8)
+		for _, txn := range batchBlocks[i].Transactions() {
+			eg, err := hermezDb.GetEffectiveGasPricePercentage(txn.Hash())
+			if err != nil {
+				return nil, err
+			}
+			egTx[txn.Hash()] = eg
+		}
+
+		bl2d, err := zktx.GenerateBlockBatchL2Data(uint16(forkId), dTs, uint32(iti), batchBlocks[i].Transactions(), egTx)
+		if err != nil {
+			return nil, err
+		}
+		batchL2Data = append(batchL2Data, bl2d...)
+	}
+
+	return batchL2Data, nil
+}
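A note on the dTs computation in GenerateBatchData: the first block of a batch is timed against the last block of the previous batch, and every later block against its immediate predecessor. A minimal sketch of just that rule follows (deltaTimestamps is illustrative, not the production code path):

package main

import "fmt"

// deltaTimestamps mirrors the dTs rule in GenerateBatchData: entry 0 is
// measured against the previous batch's last block, the rest against the
// preceding block in the same batch. Inputs are plain unix timestamps.
func deltaTimestamps(prevBatchLastTime uint64, blockTimes []uint64) []uint32 {
	out := make([]uint32, len(blockTimes))
	for i, ts := range blockTimes {
		if i == 0 {
			out[i] = uint32(ts - prevBatchLastTime)
		} else {
			out[i] = uint32(ts - blockTimes[i-1])
		}
	}
	return out
}

func main() {
	fmt.Println(deltaTimestamps(100, []uint64{106, 108, 112})) // [6 2 4]
}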
From 344e4bc410b1581bfdd63974ae93b2a34cb753ce Mon Sep 17 00:00:00 2001
From: hexoscott <70711990+hexoscott@users.noreply.github.com>
Date: Fri, 30 Aug 2024 11:33:48 +0100
Subject: [PATCH 7/7] do not use fork choice for finalised block in zk (#1075)

---
 turbo/rpchelper/rpc_block.go | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/turbo/rpchelper/rpc_block.go b/turbo/rpchelper/rpc_block.go
index 90d7e0c40ba..24e900250b2 100644
--- a/turbo/rpchelper/rpc_block.go
+++ b/turbo/rpchelper/rpc_block.go
@@ -35,14 +35,6 @@ func GetLatestBlockNumber(tx kv.Tx) (uint64, error) {
 }
 
 func GetFinalizedBlockNumber(tx kv.Tx) (uint64, error) {
-	forkchoiceFinalizedHash := rawdb.ReadForkchoiceFinalized(tx)
-	if forkchoiceFinalizedHash != (libcommon.Hash{}) {
-		forkchoiceFinalizedNum := rawdb.ReadHeaderNumber(tx, forkchoiceFinalizedHash)
-		if forkchoiceFinalizedNum != nil {
-			return *forkchoiceFinalizedNum, nil
-		}
-	}
-
 	// get highest verified batch
 	highestVerifiedBatchNo, err := stages.GetStageProgress(tx, stages.L1VerificationsBatchNo)
 	if err != nil {
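After this patch, GetFinalizedBlockNumber no longer consults the fork-choice state and derives finality purely from L1 verification progress. The remainder of the hunk is truncated above, so the following is only a sketch of the resulting shape, with the batch-to-block mapping left abstract (finalizedFromVerifiedBatch and both callbacks are assumptions, not the actual code):

package main

import "fmt"

// finalizedFromVerifiedBatch sketches the post-patch flow: read the highest
// L1-verified batch (here injected as a callback standing in for the
// stage-progress read), then map it to its highest block number.
func finalizedFromVerifiedBatch(
	verifiedBatch func() (uint64, error),
	highestBlockInBatch func(uint64) (uint64, error),
) (uint64, error) {
	batchNo, err := verifiedBatch()
	if err != nil {
		return 0, err
	}
	return highestBlockInBatch(batchNo)
}

func main() {
	n, _ := finalizedFromVerifiedBatch(
		func() (uint64, error) { return 7, nil },
		func(batch uint64) (uint64, error) { return batch * 10, nil },
	)
	fmt.Println(n) // 70
}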