Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

introduce ForkedBlobSidecar for EIP-7688 Electra period before PeerDAS #6451

Draft
wants to merge 24 commits into
base: unstable
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
d028bae
introduce `ForkedBlobSidecar` for EIP-7688 Electra period before PeerDAS
etan-status Jul 25, 2024
5fe9e93
fix `ncli_db`
etan-status Jul 25, 2024
a5d2cfe
`sszdump` support for `ForkyBlobSidecar`
etan-status Jul 25, 2024
a0cd636
add guard to DB initialization
etan-status Jul 25, 2024
5e582cf
guard deneb specific ref in workaround
etan-status Jul 25, 2024
88bdc6a
`Opt[Forky]` does not work as it does in `ref` and `seq` / `openArray`
etan-status Jul 25, 2024
5df441d
`ForkyBlobSidecar` in gossip validation
etan-status Jul 25, 2024
7d24e43
`valueOr` needs explicit fork for default value as well
etan-status Jul 25, 2024
365b567
pre-Deneb case for VC block publishing
etan-status Jul 25, 2024
eff1acd
pre-Deneb case for VC block publishing (ff)
etan-status Jul 25, 2024
9c9f2df
`Opt[seq[Forky]]`
etan-status Jul 25, 2024
c2d5a55
cleanup pre-Deneb fallback
etan-status Jul 25, 2024
f9624b5
`eth2_processor` fix
etan-status Jul 25, 2024
97117eb
add Nim bug reference
etan-status Jul 25, 2024
2183c3c
explicitly mention Deneb in SSZ test
etan-status Jul 25, 2024
c8cc23b
extra defense
etan-status Jul 26, 2024
7a650fb
Merge branch 'stable' into dev/etan/df-forkedblobs
etan-status Aug 6, 2024
18af8dc
Merge branch 'stable' into dev/etan/df-forkedblobs
etan-status Aug 30, 2024
d976a93
Merge branch 'stable' into dev/etan/df-forkedblobs
etan-status Oct 4, 2024
a631188
Merge branch 'unstable' into dev/etan/df-forkedblobs
etan-status Oct 9, 2024
13df492
Merge branch 'stable' into dev/etan/df-forkedblobs
etan-status Dec 3, 2024
d9c6f08
Merge branch 'stable' into dev/etan/df-forkedblobs
etan-status Jan 6, 2025
40b205e
Merge branch 'unstable' into dev/etan/df-forkedblobs
etan-status Jan 6, 2025
76bdf3d
Lint
etan-status Jan 6, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion AllTests-mainnet.md
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
+ sanity check Fulu blocks [Preset: mainnet] OK
+ sanity check Fulu states [Preset: mainnet] OK
+ sanity check Fulu states, reusing buffers [Preset: mainnet] OK
+ sanity check blobs [Preset: mainnet] OK
+ sanity check blobs (Deneb) [Preset: mainnet] OK
+ sanity check data columns [Preset: mainnet] OK
+ sanity check genesis roundtrip [Preset: mainnet] OK
+ sanity check phase 0 blocks [Preset: mainnet] OK
Expand Down
45 changes: 28 additions & 17 deletions beacon_chain/beacon_chain_db.nim
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Copyright (c) 2018-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
Expand Down Expand Up @@ -113,7 +113,7 @@ type
keyValues: KvStoreRef # Random stuff using DbKeyKind - suitable for small values mainly!
blocks: array[ConsensusFork, KvStoreRef] # BlockRoot -> TrustedSignedBeaconBlock

blobs: KvStoreRef # (BlockRoot -> BlobSidecar)
blobs: array[BlobFork, KvStoreRef] # (BlockRoot -> BlobSidecar)

columns: KvStoreRef # (BlockRoot -> DataColumnSidecar)

Expand Down Expand Up @@ -587,7 +587,9 @@ proc new*(T: type BeaconChainDB,
sealedPeriods: "lc_sealed_periods")).expectDb()
static: doAssert LightClientDataFork.high == LightClientDataFork.Electra

var blobs = kvStore db.openKvStore("deneb_blobs").expectDb()
var blobs: array[BlobFork, KvStoreRef]
blobs[BlobFork.Deneb] = kvStore db.openKvStore("deneb_blobs").expectDb()
static: doAssert BlobFork.high == BlobFork.Deneb

var columns: KvStoreRef
if cfg.FULU_FORK_EPOCH != FAR_FUTURE_EPOCH:
Expand Down Expand Up @@ -798,8 +800,9 @@ proc close*(db: BeaconChainDB) =
# Close things roughly in reverse order
if not isNil(db.columns):
discard db.columns.close()
if not isNil(db.blobs):
discard db.blobs.close()
for blobFork in BlobFork:
if not isNil(db.blobs[blobFork]):
discard db.blobs[blobFork].close()
db.lcData.close()
db.finalizedBlocks.close()
discard db.summaries.close()
Expand Down Expand Up @@ -845,16 +848,20 @@ proc putBlock*(
db.blocks[type(value).kind].putSZSSZ(value.root.data, value)
db.putBeaconBlockSummary(value.root, value.message.toBeaconBlockSummary())

proc putBlobSidecar*(
db: BeaconChainDB,
value: BlobSidecar) =
proc putBlobSidecar*[T: ForkyBlobSidecar](
db: BeaconChainDB, value: T) =
let block_root = hash_tree_root(value.signed_block_header.message)
db.blobs.putSZSSZ(blobkey(block_root, value.index), value)
db.blobs[T.kind].putSZSSZ(blobkey(block_root, value.index), value)

proc delBlobSidecar*(
db: BeaconChainDB,
root: Eth2Digest, index: BlobIndex): bool =
db.blobs.del(blobkey(root, index)).expectDb()
var res = false
for blobFork in BlobFork:
if db.blobs[blobFork] == nil: continue
if db.blobs[blobFork].del(blobkey(root, index)).expectDb():
res = true
res

proc putDataColumnSidecar*(
db: BeaconChainDB,
Expand Down Expand Up @@ -1120,18 +1127,22 @@ proc getBlockSSZ*(
withConsensusFork(fork):
getBlockSSZ(db, key, data, consensusFork.TrustedSignedBeaconBlock)

proc getBlobSidecarSZ*(db: BeaconChainDB, root: Eth2Digest, index: BlobIndex,
data: var seq[byte]): bool =
proc getBlobSidecarSZ*[T: ForkyBlobSidecar](
db: BeaconChainDB, root: Eth2Digest, index: BlobIndex,
data: var seq[byte]): bool =
if db.blobs[T.kind] == nil: return false
let dataPtr = addr data # Short-lived
func decode(data: openArray[byte]) =
assign(dataPtr[], data)
db.blobs.get(blobkey(root, index), decode).expectDb()
db.blobs[T.kind].get(blobkey(root, index), decode).expectDb()

proc getBlobSidecar*(db: BeaconChainDB, root: Eth2Digest, index: BlobIndex,
value: var BlobSidecar): bool =
db.blobs.getSZSSZ(blobkey(root, index), value) == GetResult.found
proc getBlobSidecar*[T: ForkyBlobSidecar](
db: BeaconChainDB, root: Eth2Digest, index: BlobIndex,
value: var T): bool =
if db.blobs[T.kind] == nil: return false
db.blobs[T.kind].getSZSSZ(blobkey(root, index), value) == GetResult.found

proc getDataColumnSidecarSZ*(db: BeaconChainDB, root: Eth2Digest,
proc getDataColumnSidecarSZ*(db: BeaconChainDB, root: Eth2Digest,
index: ColumnIndex, data: var seq[byte]): bool =
let dataPtr = addr data # Short-lived
func decode(data: openArray[byte]) =
Expand Down
37 changes: 25 additions & 12 deletions beacon_chain/beacon_chain_file.nim
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Copyright (c) 2018-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
Expand Down Expand Up @@ -267,6 +267,13 @@ proc getBlockConsensusFork(header: ChainFileHeader): ConsensusFork =
else:
raiseAssert("Should not happen")

proc getBlobConsensusFork(header: ChainFileHeader): ConsensusFork =
let hkind = unmaskKind(header.kind)
if int(hkind) in BlobForkCodeRange:
cast[ConsensusFork](ConsensusFork.Deneb.uint64 + hkind)
else:
raiseAssert("Should not happen")

template isBlock(h: ChainFileHeader | ChainFileFooter): bool =
let hkind = unmaskKind(h.kind)
int(hkind) in BlockForkCodeRange
Expand All @@ -291,7 +298,7 @@ proc setTail*(chandle: var ChainFileHandle, bdata: BlockData) =
chandle.data.tail = Opt.some(bdata)

proc store*(chandle: ChainFileHandle, signedBlock: ForkedSignedBeaconBlock,
blobs: Opt[BlobSidecars]): Result[void, string] =
blobs: Opt[ForkedBlobSidecars]): Result[void, string] =
let origOffset =
updateFilePos(chandle.handle, 0'i64, SeekPosition.SeekEnd).valueOr:
return err(ioErrorMsg(error))
Expand Down Expand Up @@ -321,10 +328,11 @@ proc store*(chandle: ChainFileHandle, signedBlock: ForkedSignedBeaconBlock,
kind =
getBlobChunkKind(signedBlock.kind, (index + 1) == len(blobSidecars))
(data, plainSize) =
block:
let res = SSZ.encode(blob[])
withForkyBlob(blob):
let res = SSZ.encode(forkyBlob[])
(snappy.encode(res), len(res))
slot = blob[].signed_block_header.message.slot
slot = withForkyBlob(blob):
forkyBlob[].signed_block_header.message.slot
buffer = Chunk.init(kind, uint64(slot), uint32(plainSize), data)

setFilePos(chandle.handle, 0'i64, SeekPosition.SeekEnd).isOkOr:
Expand Down Expand Up @@ -537,21 +545,26 @@ proc decodeBlock(
proc decodeBlob(
header: ChainFileHeader,
data: openArray[byte]
): Result[BlobSidecar, string] =
): Result[ForkedBlobSidecar, string] =
if header.plainSize > uint32(MaxChunkSize):
return err("Size of blob is enormously big")

let
fork = header.getBlobConsensusFork()
blobFork = blobForkAtConsensusFork(fork).valueOr:
return err("Fork does not support blobs")
decompressed = snappy.decode(data, uint32(header.plainSize))
blob =
try:
SSZ.decode(decompressed, BlobSidecar)
withBlobFork(blobFork):
ForkedBlobSidecar.init(newClone(
SSZ.decode(decompressed, blobFork.BlobSidecar)))
except SerializationError:
return err("Incorrect blob format")
ok(blob)

proc getChainFileTail*(handle: IoHandle): Result[Opt[BlockData], string] =
var sidecars: BlobSidecars
var sidecars: ForkedBlobSidecars
while true:
let chunk =
block:
Expand All @@ -565,7 +578,7 @@ proc getChainFileTail*(handle: IoHandle): Result[Opt[BlockData], string] =
res.get()
if chunk.header.isBlob():
let blob = ? decodeBlob(chunk.header, chunk.data)
sidecars.add(newClone blob)
sidecars.add(blob)
else:
let blck = ? decodeBlock(chunk.header, chunk.data)
return
Expand Down Expand Up @@ -594,7 +607,7 @@ proc getChainFileHead*(handle: IoHandle): Result[Opt[BlockData], string] =
? decodeBlock(chunk.header, chunk.data)
blob =
block:
var sidecars: BlobSidecars
var sidecars: ForkedBlobSidecars
block mainLoop:
while true:
offset = getFilePos(handle).valueOr:
Expand All @@ -609,14 +622,14 @@ proc getChainFileHead*(handle: IoHandle): Result[Opt[BlockData], string] =
res.get()
if chunk.header.isBlob():
let blob = ? decodeBlob(chunk.header, chunk.data)
sidecars.add(newClone blob)
sidecars.add(blob)
else:
break mainLoop

if len(sidecars) > 0:
Opt.some(sidecars)
else:
Opt.none(BlobSidecars)
Opt.none(ForkedBlobSidecars)

if not(endOfFile):
setFilePos(handle, offset, SeekPosition.SeekBegin).isOkOr:
Expand Down
72 changes: 45 additions & 27 deletions beacon_chain/consensus_object_pools/blob_quarantine.nim
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Copyright (c) 2018-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
Expand All @@ -21,8 +21,8 @@ const

type
BlobQuarantine* = object
blobs*:
OrderedTable[(Eth2Digest, BlobIndex, KzgCommitment), ref BlobSidecar]
blobs*: OrderedTable[
(Eth2Digest, BlobIndex, KzgCommitment), ForkedBlobSidecar]
onBlobSidecarCallback*: OnBlobSidecarCallback

BlobFetchRecord* = object
Expand All @@ -38,7 +38,7 @@ func shortLog*(x: seq[BlobIndex]): string =
func shortLog*(x: seq[BlobFetchRecord]): string =
"[" & x.mapIt(shortLog(it.block_root) & shortLog(it.indices)).join(", ") & "]"

func put*(quarantine: var BlobQuarantine, blobSidecar: ref BlobSidecar) =
func put*(quarantine: var BlobQuarantine, blobSidecar: ForkedBlobSidecar) =
if quarantine.blobs.lenu64 >= MaxBlobs:
# FIFO if full. For example, sync manager and request manager can race to
# put blobs in at the same time, so one gets blob insert -> block resolve
Expand All @@ -53,48 +53,66 @@ func put*(quarantine: var BlobQuarantine, blobSidecar: ref BlobSidecar) =
oldest_blob_key = k
break
quarantine.blobs.del oldest_blob_key
let block_root = hash_tree_root(blobSidecar.signed_block_header.message)
discard quarantine.blobs.hasKeyOrPut(
(block_root, blobSidecar.index, blobSidecar.kzg_commitment), blobSidecar)
withForkyBlob(blobSidecar):
let block_root = hash_tree_root(forkyBlob[].signed_block_header.message)
discard quarantine.blobs.hasKeyOrPut(
(block_root, forkyBlob[].index, forkyBlob[].kzg_commitment), blobSidecar)

func put*(quarantine: var BlobQuarantine, blobSidecar: ref ForkyBlobSidecar) =
quarantine.put(ForkedBlobSidecar.init(blobSidecar))

func hasBlob*(
quarantine: BlobQuarantine,
slot: Slot,
proposer_index: uint64,
index: BlobIndex): bool =
for blob_sidecar in quarantine.blobs.values:
template block_header: untyped = blob_sidecar.signed_block_header.message
if block_header.slot == slot and
block_header.proposer_index == proposer_index and
blob_sidecar.index == index:
return true
for blobSidecar in quarantine.blobs.values:
withForkyBlob(blobSidecar):
template block_header: untyped = forkyBlob[].signed_block_header.message
if block_header.slot == slot and
block_header.proposer_index == proposer_index and
forkyBlob[].index == index:
return true
false

func popBlobs*(
quarantine: var BlobQuarantine, digest: Eth2Digest,
blck: deneb.SignedBeaconBlock | electra.SignedBeaconBlock |
fulu.SignedBeaconBlock):
seq[ref BlobSidecar] =
var r: seq[ref BlobSidecar] = @[]
blck:
deneb.SignedBeaconBlock |
electra.SignedBeaconBlock |
fulu.SignedBeaconBlock): auto =
const blobFork = blobForkAtConsensusFork(typeof(blck).kind).expect("Blobs OK")
type ResultType = blobFork.BlobSidecars
var r: ResultType = @[]
for idx, kzg_commitment in blck.message.body.blob_kzg_commitments:
var b: ref BlobSidecar
var b: ForkedBlobSidecar
if quarantine.blobs.pop((digest, BlobIndex idx, kzg_commitment), b):
r.add(b)
# It was already verified that the blob is linked to `blck`.
# Therefore, we can assume that `BlobFork` is correct.
doAssert b.kind == blobFork,
"Must verify blob inclusion proof before `BlobQuarantine.put`"
r.add(b.forky(blobFork))
r

func hasBlobs*(quarantine: BlobQuarantine,
blck: deneb.SignedBeaconBlock | electra.SignedBeaconBlock |
fulu.SignedBeaconBlock): bool =
# Having a fulu SignedBeaconBlock is incorrect atm, but
# shall be fixed once data columns are rebased to fulu
func hasBlobs*(
quarantine: BlobQuarantine,
blck:
deneb.SignedBeaconBlock |
electra.SignedBeaconBlock |
fulu.SignedBeaconBlock): bool =
# Having a fulu SignedBeaconBlock is incorrect atm, but
# shall be fixed once data columns are rebased to fulu
for idx, kzg_commitment in blck.message.body.blob_kzg_commitments:
if (blck.root, BlobIndex idx, kzg_commitment) notin quarantine.blobs:
return false
true

func blobFetchRecord*(quarantine: BlobQuarantine,
blck: deneb.SignedBeaconBlock | electra.SignedBeaconBlock |
fulu.SignedBeaconBlock): BlobFetchRecord =
func blobFetchRecord*(
quarantine: BlobQuarantine,
blck:
deneb.SignedBeaconBlock |
electra.SignedBeaconBlock |
fulu.SignedBeaconBlock): BlobFetchRecord =
var indices: seq[BlobIndex]
for i in 0..<len(blck.message.body.blob_kzg_commitments):
let idx = BlobIndex(i)
Expand Down
5 changes: 3 additions & 2 deletions beacon_chain/consensus_object_pools/block_clearance.nim
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Copyright (c) 2018-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
Expand Down Expand Up @@ -535,7 +535,8 @@ proc addBackfillBlockData*(

if bdata.blob.isSome():
for blob in bdata.blob.get():
dag.db.putBlobSidecar(blob[])
withForkyBlob(blob):
dag.db.putBlobSidecar(forkyBlob[])

type Trusted = typeof forkyBlck.asTrusted()

Expand Down
4 changes: 2 additions & 2 deletions beacon_chain/consensus_object_pools/block_pools_types.nim
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Copyright (c) 2018-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
Expand Down Expand Up @@ -287,7 +287,7 @@ type

BlockData* = object
blck*: ForkedSignedBeaconBlock
blob*: Opt[BlobSidecars]
blob*: Opt[ForkedBlobSidecars]

OnBlockAdded*[T: ForkyTrustedSignedBeaconBlock] = proc(
blckRef: BlockRef, blck: T, epochRef: EpochRef,
Expand Down
Loading
Loading