NubitDA integration on XLayer Node #246

Open · wants to merge 16 commits into base: niven/nubit
20 changes: 20 additions & 0 deletions cmd/run_xlayer.go
@@ -9,6 +9,7 @@ import (
"github.com/0xPolygonHermez/zkevm-node/config/apollo"
"github.com/0xPolygonHermez/zkevm-node/dataavailability"
"github.com/0xPolygonHermez/zkevm-node/dataavailability/datacommittee"
"github.com/0xPolygonHermez/zkevm-node/dataavailability/nubit"
"github.com/0xPolygonHermez/zkevm-node/etherman"
"github.com/0xPolygonHermez/zkevm-node/ethtxmanager"
"github.com/0xPolygonHermez/zkevm-node/event"
@@ -123,6 +124,25 @@ func newDataAvailability(c config.Config, st *state.State, etherman *etherman.Cl
if err != nil {
return nil, err
}
case string(dataavailability.DataAvailabilityNubitDA):
var (
pk *ecdsa.PrivateKey
err error
)
if isSequenceSender {
_, pk, err = etherman.LoadAuthFromKeyStoreXLayer(c.SequenceSender.DAPermitApiPrivateKey.Path, c.SequenceSender.DAPermitApiPrivateKey.Password)
if err != nil {
return nil, err
}
log.Infof("from pk %s", crypto.PubkeyToAddress(pk.PublicKey))
}
// daBackend, err = nubit.NewNubitDABackend(&c.DataAvailability, pk)
daBackend, err = nubit.NewGeneralDA(&c.DataAvailability)
if err != nil {
return nil, err
}
log.Info("generalDA is constructed successfully")

default:
return nil, fmt.Errorf("unexpected / unsupported DA protocol: %s", daProtocolName)
}
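Editor's note: the value assigned to `daBackend` must satisfy the `dataavailability` package's backend interface. The sketch below is reconstructed from the upstream zkevm-node codebase for context and is not part of this diff; treat the exact method set and signatures as assumptions.

    // Sketch (assumed, not from this PR): the contract a DA backend such as the
    // one returned by nubit.NewGeneralDA is expected to fulfill.
    type DABackender interface {
        // Init is called once when the node starts.
        Init() error
        // PostSequence submits the batches' data to the DA layer and returns the
        // data availability message that is later verified on L1.
        PostSequence(ctx context.Context, batchesData [][]byte) ([]byte, error)
        // GetSequence retrieves the batches' data for the given hashes, using the
        // data availability message produced at sequencing time.
        GetSequence(ctx context.Context, batchHashes []common.Hash, dataAvailabilityMessage []byte) ([][]byte, error)
    }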
3 changes: 3 additions & 0 deletions config/config.go
@@ -7,6 +7,7 @@ import (

"github.com/0xPolygonHermez/zkevm-node/aggregator"
"github.com/0xPolygonHermez/zkevm-node/config/types"
"github.com/0xPolygonHermez/zkevm-node/dataavailability/nubit"
"github.com/0xPolygonHermez/zkevm-node/db"
"github.com/0xPolygonHermez/zkevm-node/etherman"
"github.com/0xPolygonHermez/zkevm-node/ethtxmanager"
@@ -102,6 +103,8 @@ type Config struct {
SequenceSender sequencesender.Config
// Configuration of the aggregator service
Aggregator aggregator.Config
// Configuration of the NubitDA data availability service
DataAvailability nubit.Config
// Configuration of the genesis of the network. This is used to know the initial state of the network
NetworkConfig NetworkConfig
// Configuration of the gas price suggester service
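Editor's note on how the new `DataAvailability` field is populated: zkevm-node loads its TOML configuration with Viper and unmarshals it through the `mapstructure` tags, so the `[DataAvailability]` section added to config/default.go below binds to `nubit.Config`. A minimal, self-contained sketch of that binding (illustrative names; only standard Viper behavior is assumed):

    package main

    import (
        "bytes"
        "fmt"

        "github.com/spf13/viper"
    )

    // daConfig mirrors nubit.Config from this PR.
    type daConfig struct {
        NubitRpcURL string `mapstructure:"NubitRpcURL"`
    }

    func main() {
        v := viper.New()
        v.SetConfigType("toml")
        src := "[DataAvailability]\nNubitRpcURL = \"http://127.0.0.1:9876\"\n"
        if err := v.ReadConfig(bytes.NewBufferString(src)); err != nil {
            panic(err)
        }

        var cfg struct {
            DataAvailability daConfig
        }
        if err := v.Unmarshal(&cfg); err != nil {
            panic(err)
        }
        fmt.Println(cfg.DataAvailability.NubitRpcURL) // http://127.0.0.1:9876
    }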
3 changes: 3 additions & 0 deletions config/default.go
@@ -245,6 +245,9 @@ AggLayerTxTimeout = "5m"
AggLayerURL = ""
SequencerPrivateKey = {}

[DataAvailability]
NubitRpcURL = "http://127.0.0.1:9876"

[L2GasPriceSuggester]
Type = "follower"
UpdatePeriod = "10s"
2 changes: 2 additions & 0 deletions dataavailability/config.go
@@ -6,4 +6,6 @@ type DABackendType string
const (
// DataAvailabilityCommittee is the DAC protocol backend
DataAvailabilityCommittee DABackendType = "DataAvailabilityCommittee"
// DataAvailabilityNubitDA is the NubitDA protocol backend
DataAvailabilityNubitDA DABackendType = "Nubit"
)
33 changes: 33 additions & 0 deletions dataavailability/nubit/abi.go
@@ -0,0 +1,33 @@
package nubit

const blobDataABI = `[
{
"type": "function",
"name": "BlobData",
"inputs": [
{
"name": "blobData",
"type": "tuple",
"internalType": "struct NubitDAVerifier.BlobData",
"components": [
{
"name": "nubitHeight",
"type": "bytes",
"internalType": "bytes"
},
{
"name": "commitment",
"type": "bytes",
"internalType": "bytes"
},
{
"name": "sharesProof",
"type": "bytes",
"internalType": "bytes"
}
]
}
],
"stateMutability": "pure"
}
]`
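Editor's note: `BlobData` is never called on-chain; declaring the struct as the sole input of a `pure` function is a common trick so that go-ethereum's `abi.JSON` yields a ready-made encoder/decoder for the struct, which is exactly how blob.go below uses it.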
113 changes: 113 additions & 0 deletions dataavailability/nubit/blob.go
@@ -0,0 +1,113 @@
package nubit

import (
"bytes"
"errors"
"fmt"
"reflect"

"github.com/ethereum/go-ethereum/accounts/abi"
)

// ErrConvertFromABIInterface is used when there is a decoding error
var ErrConvertFromABIInterface = errors.New("conversion from abi interface error")

// BlobData is the NubitDA blob data
type BlobData struct {
NubitHeight []byte `abi:"nubitHeight"`
Commitment []byte `abi:"commitment"`
SharesProof []byte `abi:"sharesProof"`
}

// TryEncodeToDataAvailabilityMessage is a fallible encoding method to encode
// Nubit blob data into data availability message represented as byte array.
func TryEncodeToDataAvailabilityMessage(blobData BlobData) ([]byte, error) {
parsedABI, err := abi.JSON(bytes.NewReader([]byte(blobDataABI)))
if err != nil {
return nil, err
}

// Encode the data
method, exist := parsedABI.Methods["BlobData"]
if !exist {
return nil, fmt.Errorf("abi error, BlobData method not found")
}

encoded, err := method.Inputs.Pack(blobData)
if err != nil {
return nil, err
}

return encoded, nil
}

// TryDecodeFromDataAvailabilityMessage is a fallible decoding method to
// decode data availability message into Nubit blob data.
func TryDecodeFromDataAvailabilityMessage(msg []byte) (BlobData, error) {
// Parse the ABI
parsedABI, err := abi.JSON(bytes.NewReader([]byte(blobDataABI)))
if err != nil {
return BlobData{}, err
}

// Decode the data
method, exist := parsedABI.Methods["BlobData"]
if !exist {
return BlobData{}, fmt.Errorf("abi error, BlobData method not found")
}

unpackedMap := make(map[string]interface{})
err = method.Inputs.UnpackIntoMap(unpackedMap, msg)
if err != nil {
return BlobData{}, err
}
unpacked, ok := unpackedMap["blobData"]
if !ok {
return BlobData{}, fmt.Errorf("abi error, failed to unpack to BlobData")
}

val := reflect.ValueOf(unpacked)
typ := reflect.TypeOf(unpacked)

blobData := BlobData{}

for i := 0; i < typ.NumField(); i++ {
field := typ.Field(i)
value := val.Field(i)

switch field.Name {
case "NubitHeight":
blobData.NubitHeight, err = convertHeight(value)
if err != nil {
return BlobData{}, ErrConvertFromABIInterface
}
case "Commitment":
blobData.Commitment, err = convertCommitment(value)
if err != nil {
return BlobData{}, ErrConvertFromABIInterface
}
case "SharesProof":
blobData.SharesProof, err = convertSharesProof(value)
if err != nil {
return BlobData{}, ErrConvertFromABIInterface
}
default:
return BlobData{}, ErrConvertFromABIInterface
}
}

return blobData, nil
}

// -------- Helper fallible conversion methods (comma-ok so a bad ABI type errors instead of panicking) --------
func convertHeight(val reflect.Value) ([]byte, error) {
height, ok := val.Interface().([]byte)
if !ok {
return nil, ErrConvertFromABIInterface
}
return height, nil
}

func convertCommitment(val reflect.Value) ([]byte, error) {
commitment, ok := val.Interface().([]byte)
if !ok {
return nil, ErrConvertFromABIInterface
}
return commitment, nil
}

func convertSharesProof(val reflect.Value) ([]byte, error) {
proof, ok := val.Interface().([]byte)
if !ok {
return nil, ErrConvertFromABIInterface
}
return proof, nil
}
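An aside on the reflection walk above (editor's addition): for tuple arguments, go-ethereum's abi package unpacks into an anonymous struct whose fields mirror the tuple components, so the concrete type cannot be named at compile time. A flatter alternative is `abi.ConvertType`, the helper that generated contract bindings use; a sketch, assuming the same `parsedABI` and `method` as in TryDecodeFromDataAvailabilityMessage:

    // Sketch (not part of the PR): decode without per-field reflection.
    out, err := method.Inputs.Unpack(msg)
    if err != nil {
        return BlobData{}, err
    }
    // ConvertType copies the anonymous tuple struct into BlobData by matching
    // field names / abi tags.
    return *abi.ConvertType(out[0], new(BlobData)).(*BlobData), nil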
48 changes: 48 additions & 0 deletions dataavailability/nubit/blob_test.go
@@ -0,0 +1,48 @@
package nubit

import (
"testing"

"github.com/stretchr/testify/assert"
)

func TestEncodeBlobData(t *testing.T) {
data := BlobData{
NubitHeight: []byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04},
Commitment: []byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
},
SharesProof: []byte{},
}
msg, err := TryEncodeToDataAvailabilityMessage(data)
assert.NoError(t, err)
assert.NotNil(t, msg)
assert.NotEmpty(t, msg)
}

func TestEncodeDecodeBlobData(t *testing.T) {
data := BlobData{
NubitHeight: []byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04},
Commitment: []byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
},
SharesProof: []byte{},
}
msg, err := TryEncodeToDataAvailabilityMessage(data)
assert.NoError(t, err)
assert.NotNil(t, msg)
assert.NotEmpty(t, msg)

// Check round-trip decoding
decodedData, err := TryDecodeFromDataAvailabilityMessage(msg)
assert.NoError(t, err)
assert.Equal(t, data.NubitHeight, decodedData.NubitHeight)
assert.Equal(t, data.Commitment, decodedData.Commitment)
assert.Equal(t, data.SharesProof, decodedData.SharesProof)
}
21 changes: 21 additions & 0 deletions dataavailability/nubit/config.go
@@ -0,0 +1,21 @@
package nubit

import (
"time"
)

// NubitNamespaceBytesLength is the length of the fixed-size namespace bytes array.
const NubitNamespaceBytesLength = 58

// NubitMinCommitTime is the minimum commit time interval between blob submissions to NubitDA.
const NubitMinCommitTime time.Duration = 12 * time.Second

// Config is the NubitDA backend configurations
type Config struct {
NubitRpcURL string `mapstructure:"NubitRpcURL"`
// NubitValidatorURL string `mapstructure:"NubitValidatorURL"`
// NubitAuthKey string `mapstructure:"NubitAuthKey"`
// NubitNamespace string `mapstructure:"NubitNamespace"`
// NubitGetProofMaxRetry uint64 `mapstructure:"NubitGetProofMaxRetry"`
// NubitGetProofWaitPeriod types.Duration `mapstructure:"NubitGetProofWaitPeriod"`
}
75 changes: 75 additions & 0 deletions dataavailability/nubit/encoding.go
@@ -0,0 +1,75 @@
package nubit

import (
"encoding/binary"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)

// EncodeSequence is a helper function that encodes the batches of a sequence and their metadata into a
// 1D byte array. The encoding scheme is lossless.
//
// The first 8 bytes of the blob store the number of batches (n) in the sequence. They are followed by n
// metadata slots of 40 bytes each: the first 8 bytes of a slot store the batch data length, and the next
// 32 bytes store the Keccak256 hash of the batch data.
//
// The metadata is followed by the batch data itself, with each batch's length given by its metadata slot.
func EncodeSequence(batchesData [][]byte) []byte {
sequence := []byte{}
metadata := []byte{}
n := uint64(len(batchesData))
bn := make([]byte, 8) //nolint:gomnd
binary.BigEndian.PutUint64(bn, n)
metadata = append(metadata, bn...)

for _, seq := range batchesData {
// Add batch data to byte array
sequence = append(sequence, seq...)

// Add batch metadata to byte array
// Batch metadata contains the byte array length and the Keccak256 hash of the
// batch data
n := uint64(len(seq))
bn := make([]byte, 8) //nolint:gomnd
binary.BigEndian.PutUint64(bn, n)
hash := crypto.Keccak256Hash(seq)
metadata = append(metadata, bn...)
metadata = append(metadata, hash.Bytes()...)
}
sequence = append(metadata, sequence...)

return sequence
}

// DecodeSequence is a helper function that decodes the 1D byte array back into the batches' data and
// their hashes. The decoding scheme is lossless and mirrors the encoding scheme in EncodeSequence.
func DecodeSequence(blobData []byte) ([][]byte, []common.Hash) {
bn := blobData[:8]
n := binary.BigEndian.Uint64(bn)
// Each batch metadata slot contains the batch data byte array length (8 bytes) and the
// batch data hash (32 bytes)
metadata := blobData[8 : 40*n+8]
sequence := blobData[40*n+8:]

batchesData := [][]byte{}
batchesHash := []common.Hash{}
idx := uint64(0)
for i := uint64(0); i < n; i++ {
// Get batch metadata
bn := metadata[40*i : 40*i+8]
n := binary.BigEndian.Uint64(bn)

hash := common.BytesToHash(metadata[40*i+8 : 40*(i+1)])
batchesHash = append(batchesHash, hash)

// Get batch data
batchesData = append(batchesData, sequence[idx:idx+n])
idx += n
}

return batchesData, batchesHash
}
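To make the layout concrete, a small round-trip example (editor's sketch, assuming the import path used by this PR): two batches of 9 and 16 bytes yield a blob of 8 (batch count) + 2×40 (metadata) + 25 (payload) = 113 bytes.

    package main

    import (
        "fmt"

        "github.com/0xPolygonHermez/zkevm-node/dataavailability/nubit"
    )

    func main() {
        batches := [][]byte{
            []byte("batch-one"),        // 9 bytes
            []byte("batch-two-longer"), // 16 bytes
        }

        blob := nubit.EncodeSequence(batches)
        fmt.Println(len(blob)) // 113 = 8 + 2*40 + 9 + 16

        data, hashes := nubit.DecodeSequence(blob)
        fmt.Println(string(data[0])) // batch-one
        fmt.Println(string(data[1])) // batch-two-longer
        fmt.Println(len(hashes))     // 2 (Keccak256 hash of each batch)
    }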