From 2ab8c3b8c7ad947e1df934163cf7a1fd1af87731 Mon Sep 17 00:00:00 2001
From: Manu NALEPA
Date: Wed, 23 Oct 2024 12:56:13 +0200
Subject: [PATCH] PeerDAS: Batch columns verifications (#14559)

* `ColumnAlignsWithBlock`: Split lines.

* Data columns verifications: Batch.

* Completely remove `DataColumnBatchVerifier`. Only `DataColumnsVerifier` (with `s`)
  on columns remains. It is the responsibility of the function that receives the data
  column (whether by gossip, by range request, or by root request) to verify the data
  column against the corresponding checks.

* Address Nishant's review comment.
---
 beacon-chain/core/peerdas/helpers.go          |   53 +-
 beacon-chain/core/peerdas/helpers_test.go     |    2 +-
 beacon-chain/das/availability_columns.go      |   49 +-
 beacon-chain/sync/BUILD.bazel                 |    1 +
 beacon-chain/sync/data_columns_sampling.go    |   76 +-
 .../sync/data_columns_sampling_test.go        |   24 +-
 .../sync/initial-sync/blocks_fetcher.go       |  119 +-
 .../sync/initial-sync/blocks_fetcher_test.go  |   10 +-
 .../sync/initial-sync/blocks_queue.go         |    2 +-
 beacon-chain/sync/initial-sync/round_robin.go |    8 +-
 beacon-chain/sync/initial-sync/service.go     |   51 +-
 .../sync/initial-sync/service_test.go         |    4 +-
 .../sync/rpc_beacon_blocks_by_root.go         |   19 +-
 beacon-chain/sync/rpc_send_request.go         |    4 +-
 beacon-chain/sync/service.go                  |   10 +-
 beacon-chain/sync/validate_data_column.go     |   38 +-
 beacon-chain/sync/verify/BUILD.bazel          |    1 +
 beacon-chain/sync/verify/blob.go              |   74 +-
 beacon-chain/verification/batch.go            |   85 +-
 beacon-chain/verification/blob_test.go        |    6 +-
 beacon-chain/verification/data_column.go      |  393 ++++--
 beacon-chain/verification/data_column_test.go | 1197 +++++++++++------
 beacon-chain/verification/initializer.go      |   14 +-
 beacon-chain/verification/interface.go        |   12 +-
 24 files changed, 1359 insertions(+), 893 deletions(-)

diff --git a/beacon-chain/core/peerdas/helpers.go b/beacon-chain/core/peerdas/helpers.go
index 3dddd20b92fb..ad6500e0bec3 100644
--- a/beacon-chain/core/peerdas/helpers.go
+++ b/beacon-chain/core/peerdas/helpers.go
@@ -405,34 +405,53 @@ func DataColumnSidecarsForReconstruct(
 	return sidecars, nil
 }
 
-// VerifyDataColumnSidecarKZGProofs verifies the provided KZG Proofs for the particular
-// data column.
-func VerifyDataColumnSidecarKZGProofs(sc blocks.RODataColumn) (bool, error) {
+// VerifyDataColumnsSidecarKZGProofs verifies the provided KZG proofs of data columns.
+func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) (bool, error) {
+	// Retrieve the number of columns.
 	numberOfColumns := params.BeaconConfig().NumberOfColumns
 
-	if sc.ColumnIndex >= numberOfColumns {
-		return false, errIndexTooLarge
+	// Compute the total cell count.
+	count := 0
+	for _, sidecar := range sidecars {
+		count += len(sidecar.DataColumn)
 	}
 
-	if len(sc.DataColumn) != len(sc.KzgCommitments) || len(sc.KzgCommitments) != len(sc.KzgProof) {
-		return false, errMismatchLength
-	}
-
-	count := len(sc.DataColumn)
-
 	commitments := make([]kzg.Bytes48, 0, count)
 	indices := make([]uint64, 0, count)
 	cells := make([]kzg.Cell, 0, count)
 	proofs := make([]kzg.Bytes48, 0, count)
 
-	for i := range sc.DataColumn {
-		commitments = append(commitments, kzg.Bytes48(sc.KzgCommitments[i]))
-		indices = append(indices, sc.ColumnIndex)
-		cells = append(cells, kzg.Cell(sc.DataColumn[i]))
-		proofs = append(proofs, kzg.Bytes48(sc.KzgProof[i]))
+	for _, sidecar := range sidecars {
+		// Check if the column index is not too large.
+		if sidecar.ColumnIndex >= numberOfColumns {
+			return false, errIndexTooLarge
+		}
+
+		// Check if the KZG commitments size and data column size match.
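+		// (Each sidecar carries one cell per blob, one commitment per blob, and one
+		// proof per cell, so all three lengths must agree.)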
+		if len(sidecar.DataColumn) != len(sidecar.KzgCommitments) {
+			return false, errMismatchLength
+		}
+
+		// Check if the KZG proofs size and data column size match.
+		if len(sidecar.DataColumn) != len(sidecar.KzgProof) {
+			return false, errMismatchLength
+		}
+
+		for i := range sidecar.DataColumn {
+			commitments = append(commitments, kzg.Bytes48(sidecar.KzgCommitments[i]))
+			indices = append(indices, sidecar.ColumnIndex)
+			cells = append(cells, kzg.Cell(sidecar.DataColumn[i]))
+			proofs = append(proofs, kzg.Bytes48(sidecar.KzgProof[i]))
+		}
+	}
+
+	// Verify the whole batch at once.
+	verified, err := kzg.VerifyCellKZGProofBatch(commitments, indices, cells, proofs)
+	if err != nil {
+		return false, errors.Wrap(err, "verify cell KZG proof batch")
 	}
 
-	return kzg.VerifyCellKZGProofBatch(commitments, indices, cells, proofs)
+	return verified, nil
 }
 
 // CustodySubnetCount returns the number of subnets the node should participate in for custody.
diff --git a/beacon-chain/core/peerdas/helpers_test.go b/beacon-chain/core/peerdas/helpers_test.go
index 1e86f0de2228..389680b97889 100644
--- a/beacon-chain/core/peerdas/helpers_test.go
+++ b/beacon-chain/core/peerdas/helpers_test.go
@@ -93,7 +93,7 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
 	for i, sidecar := range sCars {
 		roCol, err := blocks.NewRODataColumn(sidecar)
 		require.NoError(t, err)
-		verified, err := peerdas.VerifyDataColumnSidecarKZGProofs(roCol)
+		verified, err := peerdas.VerifyDataColumnsSidecarKZGProofs([]blocks.RODataColumn{roCol})
 		require.NoError(t, err)
 		require.Equal(t, true, verified, fmt.Sprintf("sidecar %d failed", i))
 	}
diff --git a/beacon-chain/das/availability_columns.go b/beacon-chain/das/availability_columns.go
index 8383873d4f36..ea963d5a482d 100644
--- a/beacon-chain/das/availability_columns.go
+++ b/beacon-chain/das/availability_columns.go
@@ -8,7 +8,6 @@ import (
 	errors "github.com/pkg/errors"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
-	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
 	"github.com/prysmaticlabs/prysm/v5/config/params"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -21,22 +20,14 @@ import (
 // This implementation will hold any blobs passed to Persist until the IsDataAvailable is called for their
 // block, at which time they will undergo full verification and be saved to the disk.
 type LazilyPersistentStoreColumn struct {
-	store    *filesystem.BlobStorage
-	cache    *cache
-	verifier ColumnBatchVerifier
-	nodeID   enode.ID
+	store *filesystem.BlobStorage
+	cache *cache
 }
 
-type ColumnBatchVerifier interface {
-	VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, sc []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error)
-}
-
-func NewLazilyPersistentStoreColumn(store *filesystem.BlobStorage, verifier ColumnBatchVerifier, id enode.ID) *LazilyPersistentStoreColumn {
+func NewLazilyPersistentStoreColumn(store *filesystem.BlobStorage) *LazilyPersistentStoreColumn {
 	return &LazilyPersistentStoreColumn{
-		store:    store,
-		cache:    newCache(),
-		verifier: verifier,
-		nodeID:   id,
+		store: store,
+		cache: newCache(),
 	}
 }
 
@@ -120,33 +111,23 @@ func (s *LazilyPersistentStoreColumn) IsDataAvailable(
 	// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather // ignore their response and decrease their peer score. - sidecars, err := entry.filterColumns(blockRoot, blockCommitments) + roDataColumns, err := entry.filterColumns(blockRoot, blockCommitments) if err != nil { return errors.Wrap(err, "incomplete BlobSidecar batch") } - // Do thorough verifications of each RODataColumns for the block. - // Same as above, we don't save DataColumnsSidecars if there are any problems with the batch. - vscs, err := s.verifier.VerifiedRODataColumns(ctx, block, sidecars) - if err != nil { - var me verification.VerificationMultiError - ok := errors.As(err, &me) - if ok { - fails := me.Failures() - lf := make(log.Fields, len(fails)) - for i := range fails { - lf[fmt.Sprintf("fail_%d", i)] = fails[i].Error() - } - log.WithFields(lf). - Debug("invalid ColumnSidecars received") - } - return errors.Wrapf(err, "invalid ColumnSidecars received for block %#x", blockRoot) + // Create verified RO data columns from RO data columns. + verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumns)) + + for _, roDataColumn := range roDataColumns { + verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn) + verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn) } // Ensure that each column sidecar is written to disk. - for i := range vscs { - if err := s.store.SaveDataColumn(vscs[i]); err != nil { - return errors.Wrapf(err, "save data columns for index `%d` for block `%#x`", vscs[i].ColumnIndex, blockRoot) + for _, verifiedRODataColumn := range verifiedRODataColumns { + if err := s.store.SaveDataColumn(verifiedRODataColumn); err != nil { + return errors.Wrapf(err, "save data columns for index `%d` for block `%#x`", verifiedRODataColumn.ColumnIndex, blockRoot) } } diff --git a/beacon-chain/sync/BUILD.bazel b/beacon-chain/sync/BUILD.bazel index b939a761a2e3..6378d7e1e125 100644 --- a/beacon-chain/sync/BUILD.bazel +++ b/beacon-chain/sync/BUILD.bazel @@ -211,6 +211,7 @@ go_test( "//beacon-chain/core/altair:go_default_library", "//beacon-chain/core/feed:go_default_library", "//beacon-chain/core/feed/operation:go_default_library", + "//beacon-chain/core/feed/state:go_default_library", "//beacon-chain/core/helpers:go_default_library", "//beacon-chain/core/peerdas:go_default_library", "//beacon-chain/core/signing:go_default_library", diff --git a/beacon-chain/sync/data_columns_sampling.go b/beacon-chain/sync/data_columns_sampling.go index 169725e5916b..ffcc264ac21f 100644 --- a/beacon-chain/sync/data_columns_sampling.go +++ b/beacon-chain/sync/data_columns_sampling.go @@ -10,6 +10,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/pkg/errors" + "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/verify" "github.com/prysmaticlabs/prysm/v5/beacon-chain/verification" "github.com/sirupsen/logrus" @@ -60,7 +61,7 @@ type dataColumnSampler1D struct { // peerFromColumn maps a column to the peer responsible for custody. peerFromColumn map[uint64]map[peer.ID]bool // columnVerifier verifies a column according to the specified requirements. - columnVerifier verification.NewColumnVerifier + columnVerifier verification.NewDataColumnsVerifier } // newDataColumnSampler1D creates a new 1D data column sampler. 
@@ -69,7 +70,7 @@ func newDataColumnSampler1D( clock *startup.Clock, ctxMap ContextByteVersions, stateNotifier statefeed.Notifier, - colVerifier verification.NewColumnVerifier, + colVerifier verification.NewDataColumnsVerifier, ) *dataColumnSampler1D { numColumns := params.BeaconConfig().NumberOfColumns peerFromColumn := make(map[uint64]map[peer.ID]bool, numColumns) @@ -265,7 +266,7 @@ func (d *dataColumnSampler1D) handleStateNotification(ctx context.Context, event samplesCount := min(params.BeaconConfig().SamplesPerSlot, uint64(len(d.nonCustodyColumns))-params.BeaconConfig().NumberOfColumns/2) // TODO: Use the first output of `incrementalDAS` as input of the fork choice rule. - _, _, err = d.incrementalDAS(ctx, data.BlockRoot, randomizedColumns, samplesCount) + _, _, err = d.incrementalDAS(ctx, data, randomizedColumns, samplesCount) if err != nil { log.WithError(err).Error("Failed to run incremental DAS") } @@ -276,13 +277,14 @@ func (d *dataColumnSampler1D) handleStateNotification(ctx context.Context, event // According to https://github.com/ethereum/consensus-specs/issues/3825, we're going to select query samples exclusively from the non custody columns. func (d *dataColumnSampler1D) incrementalDAS( ctx context.Context, - root [fieldparams.RootLength]byte, + blockProcessedData *statefeed.BlockProcessedData, columns []uint64, sampleCount uint64, ) (bool, []roundSummary, error) { allowedFailures := uint64(0) firstColumnToSample, extendedSampleCount := uint64(0), peerdas.ExtendedSampleCount(sampleCount, allowedFailures) roundSummaries := make([]roundSummary, 0, 1) // We optimistically allocate only one round summary. + blockRoot := blockProcessedData.BlockRoot start := time.Now() @@ -290,7 +292,7 @@ func (d *dataColumnSampler1D) incrementalDAS( if extendedSampleCount > uint64(len(columns)) { // We already tried to sample all possible columns, this is the unhappy path. log.WithFields(logrus.Fields{ - "root": fmt.Sprintf("%#x", root), + "root": fmt.Sprintf("%#x", blockRoot), "round": round - 1, }).Warning("Some columns are still missing after trying to sample all possible columns") return false, roundSummaries, nil @@ -301,13 +303,13 @@ func (d *dataColumnSampler1D) incrementalDAS( columnsToSampleCount := extendedSampleCount - firstColumnToSample log.WithFields(logrus.Fields{ - "root": fmt.Sprintf("%#x", root), + "root": fmt.Sprintf("%#x", blockRoot), "columns": columnsToSample, "round": round, }).Debug("Start data columns sampling") // Sample data columns from peers in parallel. - retrievedSamples := d.sampleDataColumns(ctx, root, columnsToSample) + retrievedSamples := d.sampleDataColumns(ctx, blockProcessedData, columnsToSample) missingSamples := make(map[uint64]bool) for _, column := range columnsToSample { @@ -325,7 +327,7 @@ func (d *dataColumnSampler1D) incrementalDAS( if retrievedSampleCount == columnsToSampleCount { // All columns were correctly sampled, this is the happy path. 
log.WithFields(logrus.Fields{ - "root": fmt.Sprintf("%#x", root), + "root": fmt.Sprintf("%#x", blockRoot), "neededRounds": round, "duration": time.Since(start), }).Debug("All columns were successfully sampled") @@ -344,7 +346,7 @@ func (d *dataColumnSampler1D) incrementalDAS( extendedSampleCount = peerdas.ExtendedSampleCount(sampleCount, allowedFailures) log.WithFields(logrus.Fields{ - "root": fmt.Sprintf("%#x", root), + "root": fmt.Sprintf("%#x", blockRoot), "round": round, "missingColumnsCount": allowedFailures, "currentSampleIndex": oldExtendedSampleCount, @@ -355,7 +357,7 @@ func (d *dataColumnSampler1D) incrementalDAS( func (d *dataColumnSampler1D) sampleDataColumns( ctx context.Context, - root [fieldparams.RootLength]byte, + blockProcessedData *statefeed.BlockProcessedData, columns []uint64, ) map[uint64]bool { // distribute samples to peer @@ -365,10 +367,12 @@ func (d *dataColumnSampler1D) sampleDataColumns( mu sync.Mutex wg sync.WaitGroup ) + res := make(map[uint64]bool) + sampleFromPeer := func(pid peer.ID, cols map[uint64]bool) { defer wg.Done() - retrieved := d.sampleDataColumnsFromPeer(ctx, pid, root, cols) + retrieved := d.sampleDataColumnsFromPeer(ctx, pid, blockProcessedData, cols) mu.Lock() for col := range retrieved { @@ -414,7 +418,7 @@ func (d *dataColumnSampler1D) distributeSamplesToPeer( func (d *dataColumnSampler1D) sampleDataColumnsFromPeer( ctx context.Context, pid peer.ID, - root [fieldparams.RootLength]byte, + blockProcessedData *statefeed.BlockProcessedData, requestedColumns map[uint64]bool, ) map[uint64]bool { retrievedColumns := make(map[uint64]bool) @@ -422,7 +426,7 @@ func (d *dataColumnSampler1D) sampleDataColumnsFromPeer( req := make(types.DataColumnSidecarsByRootReq, 0) for col := range requestedColumns { req = append(req, ð.DataColumnIdentifier{ - BlockRoot: root[:], + BlockRoot: blockProcessedData.BlockRoot[:], ColumnIndex: col, }) } @@ -434,8 +438,9 @@ func (d *dataColumnSampler1D) sampleDataColumnsFromPeer( return nil } + // TODO: Once peer sampling is used, we should verify all sampled data columns in a single batch instead of looping over columns. for _, roDataColumn := range roDataColumns { - if verifyColumn(roDataColumn, root, pid, requestedColumns, d.columnVerifier) { + if verifyColumn(roDataColumn, blockProcessedData, pid, requestedColumns, d.columnVerifier) { retrievedColumns[roDataColumn.ColumnIndex] = true } } @@ -443,13 +448,13 @@ func (d *dataColumnSampler1D) sampleDataColumnsFromPeer( if len(retrievedColumns) == len(requestedColumns) { log.WithFields(logrus.Fields{ "peerID": pid, - "root": fmt.Sprintf("%#x", root), + "root": fmt.Sprintf("%#x", blockProcessedData.BlockRoot), "requestedColumns": sortedSliceFromMap(requestedColumns), }).Debug("Sampled columns from peer successfully") } else { log.WithFields(logrus.Fields{ "peerID": pid, - "root": fmt.Sprintf("%#x", root), + "root": fmt.Sprintf("%#x", blockProcessedData.BlockRoot), "requestedColumns": sortedSliceFromMap(requestedColumns), "retrievedColumns": sortedSliceFromMap(retrievedColumns), }).Debug("Sampled columns from peer with some errors") @@ -506,20 +511,22 @@ func selectRandomPeer(peers map[peer.ID]bool) peer.ID { // the KZG inclusion and the KZG proof. 
func verifyColumn( roDataColumn blocks.RODataColumn, - root [32]byte, + blockProcessedData *statefeed.BlockProcessedData, pid peer.ID, requestedColumns map[uint64]bool, - columnVerifier verification.NewColumnVerifier, + dataColumnsVerifier verification.NewDataColumnsVerifier, ) bool { retrievedColumn := roDataColumn.ColumnIndex // Filter out columns with incorrect root. - actualRoot := roDataColumn.BlockRoot() - if actualRoot != root { + columnRoot := roDataColumn.BlockRoot() + blockRoot := blockProcessedData.BlockRoot + + if columnRoot != blockRoot { log.WithFields(logrus.Fields{ "peerID": pid, - "requestedRoot": fmt.Sprintf("%#x", root), - "actualRoot": fmt.Sprintf("%#x", actualRoot), + "requestedRoot": fmt.Sprintf("%#x", blockRoot), + "columnRoot": fmt.Sprintf("%#x", columnRoot), }).Debug("Retrieved root does not match requested root") return false @@ -538,25 +545,18 @@ func verifyColumn( return false } - vf := columnVerifier(roDataColumn, verification.SamplingColumnSidecarRequirements) - // Filter out columns which did not pass the KZG inclusion proof verification. - if err := vf.SidecarInclusionProven(); err != nil { - log.WithFields(logrus.Fields{ - "peerID": pid, - "root": fmt.Sprintf("%#x", root), - "index": retrievedColumn, - }).WithError(err).Debug("Failed to verify KZG inclusion proof for retrieved column") - return false + roBlock := blockProcessedData.SignedBlock.Block() + + wrappedBlockDataColumns := []verify.WrappedBlockDataColumn{ + { + ROBlock: roBlock, + RODataColumn: roDataColumn, + }, } - // Filter out columns which did not pass the KZG proof verification. - if err := vf.SidecarKzgProofVerified(); err != nil { - log.WithFields(logrus.Fields{ - "peerID": pid, - "root": fmt.Sprintf("%#x", root), - "index": retrievedColumn, - }).WithError(err).Debug("Failed to verify KZG proof for retrieved column") + if err := verify.DataColumnsAlignWithBlock(wrappedBlockDataColumns, dataColumnsVerifier); err != nil { return false } + return true } diff --git a/beacon-chain/sync/data_columns_sampling_test.go b/beacon-chain/sync/data_columns_sampling_test.go index 281b46b56743..2622ca8354cc 100644 --- a/beacon-chain/sync/data_columns_sampling_test.go +++ b/beacon-chain/sync/data_columns_sampling_test.go @@ -16,6 +16,7 @@ import ( "github.com/libp2p/go-libp2p/core/network" kzg "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg" mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing" + statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state" "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas" "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p" "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers" @@ -127,7 +128,7 @@ type dataSamplerTest struct { peers []*p2ptest.TestP2P ctxMap map[[4]byte]int chainSvc *mock.ChainService - blockRoot [32]byte + blockProcessedData *statefeed.BlockProcessedData blobs []kzg.Blob kzgCommitments [][]byte kzgProofs [][]byte @@ -141,12 +142,16 @@ func setupDefaultDataColumnSamplerTest(t *testing.T) (*dataSamplerTest, *dataCol ) test, sampler := setupDataColumnSamplerTest(t, blobCount) + // Custody columns: [6, 38, 70, 102] p1 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, custodyRequirement, map[uint64]bool{}, 1) + // Custody columns: [3, 35, 67, 99] p2 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, custodyRequirement, map[uint64]bool{}, 2) + // Custody columns: [12, 44, 76, 108] p3 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, 
test.dataColumnSidecars, custodyRequirement, map[uint64]bool{}, 3) + test.peers = []*p2ptest.TestP2P{p1, p2, p3} return test, sampler @@ -182,6 +187,11 @@ func setupDataColumnSamplerTest(t *testing.T, blobCount uint64) (*dataSamplerTes blockRoot, err := dataColumnSidecars[0].GetSignedBlockHeader().Header.HashTreeRoot() require.NoError(t, err) + blockProcessedData := &statefeed.BlockProcessedData{ + BlockRoot: blockRoot, + SignedBlock: sBlock, + } + p2pSvc := p2ptest.NewTestP2P(t) chainSvc, clock := defaultMockChain(t) @@ -191,7 +201,7 @@ func setupDataColumnSamplerTest(t *testing.T, blobCount uint64) (*dataSamplerTes peers: []*p2ptest.TestP2P{}, ctxMap: map[[4]byte]int{{245, 165, 253, 66}: version.Deneb}, chainSvc: chainSvc, - blockRoot: blockRoot, + blockProcessedData: blockProcessedData, blobs: blobs, kzgCommitments: kzgCommitments, kzgProofs: kzgProofs, @@ -202,7 +212,7 @@ func setupDataColumnSamplerTest(t *testing.T, blobCount uint64) (*dataSamplerTes iniWaiter := verification.NewInitializerWaiter(clockSync, nil, nil) ini, err := iniWaiter.WaitForInitializer(context.Background()) require.NoError(t, err) - sampler := newDataColumnSampler1D(p2pSvc, clock, test.ctxMap, nil, newColumnVerifierFromInitializer(ini)) + sampler := newDataColumnSampler1D(p2pSvc, clock, test.ctxMap, nil, newDataColumnsVerifierFromInitializer(ini)) return test, sampler } @@ -396,7 +406,7 @@ func TestDataColumnSampler1D_SampleDataColumns(t *testing.T) { // Sample all columns. sampleColumns := []uint64{6, 3, 12, 38, 35, 44, 70, 67, 76, 102, 99, 108} - retrieved := sampler.sampleDataColumns(test.ctx, test.blockRoot, sampleColumns) + retrieved := sampler.sampleDataColumns(test.ctx, test.blockProcessedData, sampleColumns) require.Equal(t, 12, len(retrieved)) for _, column := range sampleColumns { require.Equal(t, true, retrieved[column]) @@ -404,7 +414,7 @@ func TestDataColumnSampler1D_SampleDataColumns(t *testing.T) { // Sample a subset of columns. sampleColumns = []uint64{6, 3, 12, 38, 35, 44} - retrieved = sampler.sampleDataColumns(test.ctx, test.blockRoot, sampleColumns) + retrieved = sampler.sampleDataColumns(test.ctx, test.blockProcessedData, sampleColumns) require.Equal(t, 6, len(retrieved)) for _, column := range sampleColumns { require.Equal(t, true, retrieved[column]) @@ -412,7 +422,7 @@ func TestDataColumnSampler1D_SampleDataColumns(t *testing.T) { // Sample a subset of columns with missing columns. 
 	sampleColumns = []uint64{6, 3, 12, 127}
-	retrieved = sampler.sampleDataColumns(test.ctx, test.blockRoot, sampleColumns)
+	retrieved = sampler.sampleDataColumns(test.ctx, test.blockProcessedData, sampleColumns)
 	require.Equal(t, 3, len(retrieved))
 	require.DeepEqual(t, map[uint64]bool{6: true, 3: true, 12: true}, retrieved)
 }
@@ -489,7 +499,7 @@ func TestDataColumnSampler1D_IncrementalDAS(t *testing.T) {
 
 			sampler.refreshPeerInfo()
 
-			success, summaries, err := sampler.incrementalDAS(test.ctx, test.blockRoot, tc.possibleColumnsToRequest, tc.samplesCount)
+			success, summaries, err := sampler.incrementalDAS(test.ctx, test.blockProcessedData, tc.possibleColumnsToRequest, tc.samplesCount)
 			require.NoError(t, err)
 			require.Equal(t, tc.expectedSuccess, success)
 			require.DeepEqual(t, tc.expectedRoundSummaries, summaries)
diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher.go b/beacon-chain/sync/initial-sync/blocks_fetcher.go
index deb9998ec88f..32bf00f76142 100644
--- a/beacon-chain/sync/initial-sync/blocks_fetcher.go
+++ b/beacon-chain/sync/initial-sync/blocks_fetcher.go
@@ -82,7 +82,7 @@ type blocksFetcherConfig struct {
 	mode syncMode
 	bs   filesystem.BlobStorageSummarizer
 	bv   verification.NewBlobVerifier
-	cv   verification.NewColumnVerifier
+	cv   verification.NewDataColumnsVerifier
 }
 
 // blocksFetcher is a service to fetch chain data from peers.
@@ -100,7 +100,7 @@ type blocksFetcher struct {
 	db              db.ReadOnlyDatabase
 	bs              filesystem.BlobStorageSummarizer
 	bv              verification.NewBlobVerifier
-	cv              verification.NewColumnVerifier
+	cv              verification.NewDataColumnsVerifier
 	blocksPerPeriod uint64
 	rateLimiter     *leakybucket.Collector
 	peerLocks       map[peer.ID]*peerLock
@@ -1155,67 +1155,91 @@ func (f *blocksFetcher) waitForPeersForDataColumns(
 	return dataColumnsByAdmissiblePeer, nil
 }
 
-// processDataColumn mutates `bwbs` argument by adding the data column,
+// processDataColumns mutates the `bwbs` argument by adding the data column,
 // and mutates `missingColumnsByRoot` by removing the data column if the
 // data column passes all the check.
-func processDataColumn(
+func (f *blocksFetcher) processDataColumns(
 	wrappedBwbsMissingColumns *bwbsMissingColumns,
-	columnVerifier verification.NewColumnVerifier,
-	blocksByRoot map[[fieldparams.RootLength]byte]blocks.ROBlock,
+	blockByRoot map[[fieldparams.RootLength]byte]blocks.ROBlock,
 	indicesByRoot map[[fieldparams.RootLength]byte][]int,
-	dataColumn blocks.RODataColumn,
+	dataColumns []blocks.RODataColumn,
 ) bool {
-	// Extract the block root from the data column.
-	blockRoot := dataColumn.BlockRoot()
-
-	// Find the position of the block in `bwbs` that corresponds to this block root.
-	indices, ok := indicesByRoot[blockRoot]
-	if !ok {
-		// The peer returned a data column that we did not expect.
-		// This is among others possible when the peer is not on the same fork.
-		return false
-	}
+	// Filter out data columns:
+	// - that are not expected, and
+	// - that correspond to blocks before Deneb.
+
+	// Unexpected data columns are possible when, among other reasons,
+	// the peer is not on the same fork, due to the nature of
+	// data columns by range requests.
+	wrappedBlockDataColumns := make([]verify.WrappedBlockDataColumn, 0, len(dataColumns))
+	for _, dataColumn := range dataColumns {
+		// Extract the block root from the data column.
+		blockRoot := dataColumn.BlockRoot()
+
+		// Skip if the block root is not expected.
+		// This is possible when the peer is not on the same fork.
+		_, ok := indicesByRoot[blockRoot]
+		if !ok {
+			continue
+		}
 
-	// Extract the block from the block root.
- block, ok := blocksByRoot[blockRoot] - if !ok { - // This should never happen. - log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Error("Fetch data columns from peers - block not found") - return false - } + // Retrieve the block from the block root. + block, ok := blockByRoot[blockRoot] + if !ok { + // This should never happen. + log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Error("Fetch data columns from peers - block not found for root") + return false + } + + // Skip if the block is before Deneb. + if block.Version() < version.Deneb { + continue + } - // Verify the data column. - if err := verify.ColumnAlignsWithBlock(dataColumn, block, columnVerifier); err != nil { - log.WithError(err).WithFields(logrus.Fields{ - "root": fmt.Sprintf("%#x", blockRoot), - "slot": block.Block().Slot(), - "column": dataColumn.ColumnIndex, - }).Warning("Fetch data columns from peers - fetched data column does not align with block") + wrappedBlockDataColumn := verify.WrappedBlockDataColumn{ + ROBlock: block.Block(), + RODataColumn: dataColumn, + } + + wrappedBlockDataColumns = append(wrappedBlockDataColumns, wrappedBlockDataColumn) + } + // Verify the data columns. + if err := verify.DataColumnsAlignWithBlock(wrappedBlockDataColumns, f.cv); err != nil { // TODO: Should we downscore the peer for that? return false } - // Populate the corresponding items in `bwbs`. - func() { - mu := &wrappedBwbsMissingColumns.mu + wrappedBwbsMissingColumns.mu.Lock() + defer wrappedBwbsMissingColumns.mu.Unlock() + + bwbs := wrappedBwbsMissingColumns.bwbs + missingColumnsByRoot := wrappedBwbsMissingColumns.missingColumnsByRoot - mu.Lock() - defer mu.Unlock() + for _, wrappedBlockDataColumn := range wrappedBlockDataColumns { + dataColumn := wrappedBlockDataColumn.RODataColumn - bwbs := wrappedBwbsMissingColumns.bwbs - missingColumnsByRoot := wrappedBwbsMissingColumns.missingColumnsByRoot + // Extract the block root from the data column. + blockRoot := dataColumn.BlockRoot() + + // Extract the indices in bwb corresponding to the block root. + indices, ok := indicesByRoot[blockRoot] + if !ok { + // This should never happen. + log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Error("Fetch data columns from peers - indices not found for root") + return false + } + // Populate the corresponding items in `bwbs`. for _, index := range indices { bwbs[index].Columns = append(bwbs[index].Columns, dataColumn) } - // Remove the column from the missing columns. delete(missingColumnsByRoot[blockRoot], dataColumn.ColumnIndex) if len(missingColumnsByRoot[blockRoot]) == 0 { delete(missingColumnsByRoot, blockRoot) } - }() + } return true } @@ -1288,7 +1312,7 @@ func (f *blocksFetcher) fetchDataColumnFromPeer( } // Send the request to the peer. 
- roDataColumns, err := prysmsync.SendDataColumnsByRangeRequest(ctx, f.clock, f.p2p, peer, f.ctxMap, request) + roDataColumns, err := prysmsync.SendDataColumnSidecarsByRangeRequest(ctx, f.clock, f.p2p, peer, f.ctxMap, request) if err != nil { log.WithError(err).Warning("Fetch data columns from peers - could not send data columns by range request") return @@ -1299,17 +1323,8 @@ func (f *blocksFetcher) fetchDataColumnFromPeer( return } - globalSuccess := false - - for _, dataColumn := range roDataColumns { - success := processDataColumn(wrappedBwbsMissingColumns, f.cv, blocksByRoot, indicesByRoot, dataColumn) - if success { - globalSuccess = true - } - } - - if !globalSuccess { - log.Debug("Fetch data columns from peers - no valid data column returned") + if !f.processDataColumns(wrappedBwbsMissingColumns, blocksByRoot, indicesByRoot, roDataColumns) { + log.Warning("Fetch data columns from peers - at least one data column is invalid") return } diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher_test.go b/beacon-chain/sync/initial-sync/blocks_fetcher_test.go index 610b765dd17e..524bc235a3a6 100644 --- a/beacon-chain/sync/initial-sync/blocks_fetcher_test.go +++ b/beacon-chain/sync/initial-sync/blocks_fetcher_test.go @@ -414,6 +414,7 @@ func TestBlocksFetcher_scheduleRequest(t *testing.T) { fetcher.scheduleRequest(context.Background(), 1, blockBatchLimit)) }) } + func TestBlocksFetcher_handleRequest(t *testing.T) { blockBatchLimit := flags.Get().BlockBatchLimit chainConfig := struct { @@ -1988,14 +1989,9 @@ func TestFetchDataColumnsFromPeers(t *testing.T) { {slot: 38, columnIndex: 6, alterate: true}, {slot: 38, columnIndex: 70}, }, - }, - (ðpb.DataColumnSidecarsByRangeRequest{ - StartSlot: 38, - Count: 1, - Columns: []uint64{6}, - }).String(): { { {slot: 38, columnIndex: 6}, + {slot: 38, columnIndex: 70}, }, }, }, @@ -2243,7 +2239,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) { ctxMap: map[[4]byte]int{{245, 165, 253, 66}: version.Deneb}, p2p: p2pSvc, bs: blobStorageSummarizer, - cv: newColumnVerifierFromInitializer(ini), + cv: newDataColumnsVerifierFromInitializer(ini), }) // Fetch the data columns from the peers. 
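Aside for reviewers: with this change, a single misaligned column now causes the whole
response from a peer to be discarded (see `processDataColumns` above), instead of dropping
only the offending column. A minimal sketch of the batched KZG check this enables
(illustrative only, not part of the patch; `verifyPeerResponse` is a hypothetical helper,
assuming the `peerdas` and `blocks` packages plus `github.com/pkg/errors` are imported):

	// verifyPeerResponse batch-verifies all columns fetched from one peer in a
	// single pairing check, replacing the old per-column
	// VerifyDataColumnSidecarKZGProofs loop.
	func verifyPeerResponse(sidecars []blocks.RODataColumn) error {
		verified, err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
		if err != nil {
			return errors.Wrap(err, "verify data columns sidecar KZG proofs")
		}
		if !verified {
			// Treat the entire response as invalid.
			return errors.New("invalid KZG proofs in data column batch")
		}
		return nil
	}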
diff --git a/beacon-chain/sync/initial-sync/blocks_queue.go b/beacon-chain/sync/initial-sync/blocks_queue.go index 7db0b400ca8b..c4f781dfc99c 100644 --- a/beacon-chain/sync/initial-sync/blocks_queue.go +++ b/beacon-chain/sync/initial-sync/blocks_queue.go @@ -73,7 +73,7 @@ type blocksQueueConfig struct { mode syncMode bs filesystem.BlobStorageSummarizer bv verification.NewBlobVerifier - cv verification.NewColumnVerifier + cv verification.NewDataColumnsVerifier } // blocksQueue is a priority queue that serves as a intermediary between block fetchers (producers) diff --git a/beacon-chain/sync/initial-sync/round_robin.go b/beacon-chain/sync/initial-sync/round_robin.go index 1b81db9a3e12..0f07ac199b7f 100644 --- a/beacon-chain/sync/initial-sync/round_robin.go +++ b/beacon-chain/sync/initial-sync/round_robin.go @@ -89,7 +89,7 @@ func (s *Service) startBlocksQueue(ctx context.Context, highestSlot primitives.S mode: mode, bs: summarizer, bv: s.newBlobVerifier, - cv: s.newColumnVerifier, + cv: s.newDataColumnsVerifier, } queue := newBlocksQueue(ctx, cfg) if err := queue.start(); err != nil { @@ -176,8 +176,7 @@ func (s *Service) processFetchedDataRegSync( return } if coreTime.PeerDASIsActive(startSlot) { - bv := verification.NewDataColumnBatchVerifier(s.newColumnVerifier, verification.InitsyncColumnSidecarRequirements) - avs := das.NewLazilyPersistentStoreColumn(s.cfg.BlobStorage, bv, s.cfg.P2P.NodeID()) + avs := das.NewLazilyPersistentStoreColumn(s.cfg.BlobStorage) batchFields := logrus.Fields{ "firstSlot": data.bwb[0].Block.Block().Slot(), "firstUnprocessed": bwb[0].Block.Block().Slot(), @@ -367,8 +366,7 @@ func (s *Service) processBatchedBlocks(ctx context.Context, genesis time.Time, } var aStore das.AvailabilityStore if coreTime.PeerDASIsActive(first.Block().Slot()) { - bv := verification.NewDataColumnBatchVerifier(s.newColumnVerifier, verification.InitsyncColumnSidecarRequirements) - avs := das.NewLazilyPersistentStoreColumn(s.cfg.BlobStorage, bv, s.cfg.P2P.NodeID()) + avs := das.NewLazilyPersistentStoreColumn(s.cfg.BlobStorage) s.logBatchSyncStatus(genesis, first, len(bwb)) for _, bb := range bwb { if len(bb.Columns) == 0 { diff --git a/beacon-chain/sync/initial-sync/service.go b/beacon-chain/sync/initial-sync/service.go index 37a2caccca70..2760964f3274 100644 --- a/beacon-chain/sync/initial-sync/service.go +++ b/beacon-chain/sync/initial-sync/service.go @@ -27,6 +27,7 @@ import ( p2ptypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types" "github.com/prysmaticlabs/prysm/v5/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync" + "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/verify" "github.com/prysmaticlabs/prysm/v5/beacon-chain/verification" "github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags" "github.com/prysmaticlabs/prysm/v5/config/params" @@ -61,18 +62,18 @@ type Config struct { // Service service. 
type Service struct { - cfg *Config - ctx context.Context - cancel context.CancelFunc - synced *abool.AtomicBool - chainStarted *abool.AtomicBool - counter *ratecounter.RateCounter - genesisChan chan time.Time - clock *startup.Clock - verifierWaiter *verification.InitializerWaiter - newBlobVerifier verification.NewBlobVerifier - newColumnVerifier verification.NewColumnVerifier - ctxMap sync.ContextByteVersions + cfg *Config + ctx context.Context + cancel context.CancelFunc + synced *abool.AtomicBool + chainStarted *abool.AtomicBool + counter *ratecounter.RateCounter + genesisChan chan time.Time + clock *startup.Clock + verifierWaiter *verification.InitializerWaiter + newBlobVerifier verification.NewBlobVerifier + newDataColumnsVerifier verification.NewDataColumnsVerifier + ctxMap sync.ContextByteVersions } // Option is a functional option for the initial-sync Service. @@ -153,7 +154,7 @@ func (s *Service) Start() { return } s.newBlobVerifier = newBlobVerifierFromInitializer(v) - s.newColumnVerifier = newColumnVerifierFromInitializer(v) + s.newDataColumnsVerifier = newDataColumnsVerifierFromInitializer(v) gt := clock.GenesisTime() if gt.IsZero() { @@ -460,8 +461,22 @@ func (s *Service) fetchOriginColumns(pids []peer.ID) error { if len(sidecars) != len(req) { continue } - bv := verification.NewDataColumnBatchVerifier(s.newColumnVerifier, verification.InitsyncColumnSidecarRequirements) - avs := das.NewLazilyPersistentStoreColumn(s.cfg.BlobStorage, bv, s.cfg.P2P.NodeID()) + + wrappedBlockDataColumns := make([]verify.WrappedBlockDataColumn, 0, len(sidecars)) + for _, sidecar := range sidecars { + wrappedBlockDataColumn := verify.WrappedBlockDataColumn{ + ROBlock: blk.Block(), + RODataColumn: sidecar, + } + + wrappedBlockDataColumns = append(wrappedBlockDataColumns, wrappedBlockDataColumn) + } + + if err := verify.DataColumnsAlignWithBlock(wrappedBlockDataColumns, s.newDataColumnsVerifier); err != nil { + return errors.Wrap(err, "data columns align with block") + } + + avs := das.NewLazilyPersistentStoreColumn(s.cfg.BlobStorage) current := s.clock.CurrentSlot() if err := avs.PersistColumns(current, sidecars...); err != nil { return err @@ -491,8 +506,8 @@ func newBlobVerifierFromInitializer(ini *verification.Initializer) verification. 
} } -func newColumnVerifierFromInitializer(ini *verification.Initializer) verification.NewColumnVerifier { - return func(d blocks.RODataColumn, reqs []verification.Requirement) verification.DataColumnVerifier { - return ini.NewColumnVerifier(d, reqs) +func newDataColumnsVerifierFromInitializer(ini *verification.Initializer) verification.NewDataColumnsVerifier { + return func(roDataColumns []blocks.RODataColumn, reqs []verification.Requirement) verification.DataColumnsVerifier { + return ini.NewDataColumnsVerifier(roDataColumns, reqs) } } diff --git a/beacon-chain/sync/initial-sync/service_test.go b/beacon-chain/sync/initial-sync/service_test.go index cccb99bc080b..192f09fd064e 100644 --- a/beacon-chain/sync/initial-sync/service_test.go +++ b/beacon-chain/sync/initial-sync/service_test.go @@ -495,8 +495,8 @@ func TestOriginOutsideRetention(t *testing.T) { bdb := dbtest.SetupDB(t) genesis := time.Unix(0, 0) secsPerEpoch := params.BeaconConfig().SecondsPerSlot * uint64(params.BeaconConfig().SlotsPerEpoch) - retentionSeconds := time.Second * time.Duration(uint64(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest+1)*secsPerEpoch) - outsideRetention := genesis.Add(retentionSeconds) + retentionDuration := time.Second * time.Duration(uint64(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest+1)*secsPerEpoch) + outsideRetention := genesis.Add(retentionDuration) now := func() time.Time { return outsideRetention } diff --git a/beacon-chain/sync/rpc_beacon_blocks_by_root.go b/beacon-chain/sync/rpc_beacon_blocks_by_root.go index e4b8635cf5d9..0d6df49ce2b3 100644 --- a/beacon-chain/sync/rpc_beacon_blocks_by_root.go +++ b/beacon-chain/sync/rpc_beacon_blocks_by_root.go @@ -56,7 +56,7 @@ func (s *Service) sendBeaconBlocksRequest( defer s.pendingQueueLock.Unlock() if err := s.insertBlockToPendingQueue(blk.Block().Slot(), blk, blkRoot); err != nil { - return err + return errors.Wrapf(err, "insert block to pending queue for block with root %x", blkRoot) } return nil @@ -232,15 +232,26 @@ func (s *Service) requestAndSaveDataColumnSidecars( return err } - RoBlock, err := blocks.NewROBlock(block) + roBlock, err := blocks.NewROBlock(block) if err != nil { return err } + wrappedBlockDataColumns := make([]verify.WrappedBlockDataColumn, 0, len(sidecars)) for _, sidecar := range sidecars { - if err := verify.ColumnAlignsWithBlock(sidecar, RoBlock, s.newColumnVerifier); err != nil { - return err + wrappedBlockDataColumn := verify.WrappedBlockDataColumn{ + ROBlock: roBlock.Block(), + RODataColumn: sidecar, } + + wrappedBlockDataColumns = append(wrappedBlockDataColumns, wrappedBlockDataColumn) + } + + if err := verify.DataColumnsAlignWithBlock(wrappedBlockDataColumns, s.newColumnsVerifier); err != nil { + return errors.Wrap(err, "data columns align with block") + } + + for _, sidecar := range sidecars { log.WithFields(logging.DataColumnFields(sidecar)).Debug("Received data column sidecar RPC") } diff --git a/beacon-chain/sync/rpc_send_request.go b/beacon-chain/sync/rpc_send_request.go index 0306092f099c..2cfa6b06f030 100644 --- a/beacon-chain/sync/rpc_send_request.go +++ b/beacon-chain/sync/rpc_send_request.go @@ -331,7 +331,9 @@ func dataColumnIndexValidatorFromRangeReq(req *pb.DataColumnSidecarsByRangeReque } } -func SendDataColumnsByRangeRequest( +// SendDataColumnSidecarsByRangeRequest sends a request for data column sidecars by range +// and returns the fetched data column sidecars. 
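+// Sidecars received from the peer are expected to match the requested range and columns;
+// see dataColumnIndexValidatorFromRangeReq above.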
+func SendDataColumnSidecarsByRangeRequest( ctx context.Context, tor blockchain.TemporalOracle, p2pApi p2p.P2P, diff --git a/beacon-chain/sync/service.go b/beacon-chain/sync/service.go index 41316a7142f8..1db6d1693a6c 100644 --- a/beacon-chain/sync/service.go +++ b/beacon-chain/sync/service.go @@ -163,7 +163,7 @@ type Service struct { initialSyncComplete chan struct{} verifierWaiter *verification.InitializerWaiter newBlobVerifier verification.NewBlobVerifier - newColumnVerifier verification.NewColumnVerifier + newColumnsVerifier verification.NewDataColumnsVerifier availableBlocker coverage.AvailableBlocker dataColumsnReconstructionLock sync.Mutex receivedDataColumnsFromRoot *gcache.Cache @@ -234,9 +234,9 @@ func newBlobVerifierFromInitializer(ini *verification.Initializer) verification. } } -func newColumnVerifierFromInitializer(ini *verification.Initializer) verification.NewColumnVerifier { - return func(d blocks.RODataColumn, reqs []verification.Requirement) verification.DataColumnVerifier { - return ini.NewColumnVerifier(d, reqs) +func newDataColumnsVerifierFromInitializer(ini *verification.Initializer) verification.NewDataColumnsVerifier { + return func(roDataColumns []blocks.RODataColumn, reqs []verification.Requirement) verification.DataColumnsVerifier { + return ini.NewDataColumnsVerifier(roDataColumns, reqs) } } @@ -248,7 +248,7 @@ func (s *Service) Start() { return } s.newBlobVerifier = newBlobVerifierFromInitializer(v) - s.newColumnVerifier = newColumnVerifierFromInitializer(v) + s.newColumnsVerifier = newDataColumnsVerifierFromInitializer(v) go s.verifierRoutine() go s.startTasksPostInitialSync() diff --git a/beacon-chain/sync/validate_data_column.go b/beacon-chain/sync/validate_data_column.go index 340aea4b4587..2a83e8d62a02 100644 --- a/beacon-chain/sync/validate_data_column.go +++ b/beacon-chain/sync/validate_data_column.go @@ -56,33 +56,35 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs return pubsub.ValidationReject, errWrongMessage } - ds, err := blocks.NewRODataColumn(dspb) + roDataColumn, err := blocks.NewRODataColumn(dspb) if err != nil { return pubsub.ValidationReject, errors.Wrap(err, "roDataColumn conversion failure") } // Voluntary ignore messages (for debugging purposes). dataColumnsIgnoreSlotMultiple := features.Get().DataColumnsIgnoreSlotMultiple - blockSlot := uint64(ds.SignedBlockHeader.Header.Slot) + blockSlot := uint64(roDataColumn.SignedBlockHeader.Header.Slot) if dataColumnsIgnoreSlotMultiple != 0 && blockSlot%dataColumnsIgnoreSlotMultiple == 0 { log.WithFields(logrus.Fields{ "slot": blockSlot, - "columnIndex": ds.ColumnIndex, - "blockRoot": fmt.Sprintf("%#x", ds.BlockRoot()), + "columnIndex": roDataColumn.ColumnIndex, + "blockRoot": fmt.Sprintf("%#x", roDataColumn.BlockRoot()), }).Warning("Voluntary ignore data column sidecar gossip") return pubsub.ValidationIgnore, err } - verifier := s.newColumnVerifier(ds, verification.GossipColumnSidecarRequirements) + roDataColumns := []blocks.RODataColumn{roDataColumn} - if err := verifier.DataColumnIndexInBounds(); err != nil { + verifier := s.newColumnsVerifier(roDataColumns, verification.GossipColumnSidecarRequirements) + + if err := verifier.DataColumnsIndexInBounds(); err != nil { return pubsub.ValidationReject, err } // [REJECT] The sidecar is for the correct subnet -- i.e. compute_subnet_for_data_column_sidecar(sidecar.index) == subnet_id. 
-	want := fmt.Sprintf("data_column_sidecar_%d", computeSubnetForColumnSidecar(ds.ColumnIndex))
+	want := fmt.Sprintf("data_column_sidecar_%d", computeSubnetForColumnSidecar(roDataColumn.ColumnIndex))
 	if !strings.Contains(*msg.Topic, want) {
 		log.Debug("Column Sidecar index does not match topic")
 		return pubsub.ValidationReject, fmt.Errorf("wrong topic name: %s", *msg.Topic)
@@ -93,7 +95,7 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
 	}
 
 	// [IGNORE] The sidecar is the first sidecar for the tuple (block_header.slot, block_header.proposer_index, sidecar.index) with valid header signature, sidecar inclusion proof, and kzg proof.
-	if s.hasSeenDataColumnIndex(ds.Slot(), ds.ProposerIndex(), ds.DataColumnSidecar.ColumnIndex) {
+	if s.hasSeenDataColumnIndex(roDataColumn.Slot(), roDataColumn.ProposerIndex(), roDataColumn.DataColumnSidecar.ColumnIndex) {
 		return pubsub.ValidationIgnore, nil
 	}
 
@@ -104,11 +106,11 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
 	// If we haven't seen the parent, request it asynchronously.
 	go func() {
 		customCtx := context.Background()
-		parentRoot := ds.ParentRoot()
+		parentRoot := roDataColumn.ParentRoot()
 		roots := [][fieldparams.RootLength]byte{parentRoot}
 		randGenerator := rand.NewGenerator()
 		if err := s.sendBatchRootRequest(customCtx, roots, randGenerator); err != nil {
-			log.WithError(err).WithFields(logging.DataColumnFields(ds)).Debug("Failed to send batch root request")
+			log.WithError(err).WithFields(logging.DataColumnFields(roDataColumn)).Debug("Failed to send batch root request")
 		}
 	}()
 
@@ -141,17 +143,25 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
 	}
 
 	// Get the time at slot start.
-	startTime, err := slots.ToTime(uint64(s.cfg.chain.GenesisTime().Unix()), ds.SignedBlockHeader.Header.Slot)
+	startTime, err := slots.ToTime(uint64(s.cfg.chain.GenesisTime().Unix()), roDataColumn.SignedBlockHeader.Header.Slot)
 	if err != nil {
 		return pubsub.ValidationIgnore, err
 	}
 
-	verifiedRODataColumn, err := verifier.VerifiedRODataColumn()
+	verifiedRODataColumns, err := verifier.VerifiedRODataColumns()
 	if err != nil {
 		return pubsub.ValidationReject, err
 	}
 
-	msg.ValidatorData = verifiedRODataColumn
+	verifiedRODataColumnsCount := len(verifiedRODataColumns)
+
+	if verifiedRODataColumnsCount != 1 {
+		// This should never happen.
+		log.WithField("verifiedRODataColumnsCount", verifiedRODataColumnsCount).Error("Verified data columns count is not 1")
+		return pubsub.ValidationIgnore, errors.New("wrong number of verified data columns")
+	}
+
+	msg.ValidatorData = verifiedRODataColumns[0]
 
 	sinceSlotStartTime := receivedTime.Sub(startTime)
 	validationTime := s.cfg.clock.Now().Sub(receivedTime)
@@ -161,7 +171,7 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
 	pidString := pid.String()
 
 	log.
-		WithFields(logging.DataColumnFields(ds)).
+		WithFields(logging.DataColumnFields(roDataColumn)).
 		WithFields(logrus.Fields{
 			"sinceSlotStartTime": sinceSlotStartTime,
 			"validationTime":     validationTime,
diff --git a/beacon-chain/sync/verify/BUILD.bazel b/beacon-chain/sync/verify/BUILD.bazel
index 16f4c62af5f3..f88832b67032 100644
--- a/beacon-chain/sync/verify/BUILD.bazel
+++ b/beacon-chain/sync/verify/BUILD.bazel
@@ -9,6 +9,7 @@ go_library(
         "//beacon-chain/verification:go_default_library",
         "//config/fieldparams:go_default_library",
         "//consensus-types/blocks:go_default_library",
+        "//consensus-types/interfaces:go_default_library",
         "//encoding/bytesutil:go_default_library",
         "//runtime/version:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
diff --git a/beacon-chain/sync/verify/blob.go b/beacon-chain/sync/verify/blob.go
index 8fdd089205af..c2c4f0c06c25 100644
--- a/beacon-chain/sync/verify/blob.go
+++ b/beacon-chain/sync/verify/blob.go
@@ -7,6 +7,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
 	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
+	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
 	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
 	"github.com/prysmaticlabs/prysm/v5/runtime/version"
 )
@@ -52,39 +53,66 @@ func BlobAlignsWithBlock(blob blocks.ROBlob, block blocks.ROBlock) error {
 	return nil
 }
 
-func ColumnAlignsWithBlock(col blocks.RODataColumn, block blocks.ROBlock, colVerifier verification.NewColumnVerifier) error {
-	// Exit early if the block is not at least a Deneb block.
-	if block.Version() < version.Deneb {
-		return nil
-	}
-
-	// Check if the block root in the column sidecar matches the block root.
-	if col.BlockRoot() != block.Root() {
-		return ErrColumnBlockMisaligned
-	}
+type WrappedBlockDataColumn struct {
+	ROBlock      interfaces.ReadOnlyBeaconBlock
+	RODataColumn blocks.RODataColumn
+}
 
-	// Verify commitment byte values match
-	commitments, err := block.Block().Body().BlobKzgCommitments()
-	if err != nil {
-		return errors.Wrap(err, "blob KZG commitments")
+func DataColumnsAlignWithBlock(
+	wrappedBlockDataColumns []WrappedBlockDataColumn,
+	dataColumnsVerifier verification.NewDataColumnsVerifier,
+) error {
+	for _, wrappedBlockDataColumn := range wrappedBlockDataColumns {
+		dataColumn := wrappedBlockDataColumn.RODataColumn
+		block := wrappedBlockDataColumn.ROBlock
+
+		// Extract the block root from the data column.
+		blockRoot := dataColumn.BlockRoot()
+
+		// Retrieve the KZG commitments from the block.
+		blockKZGCommitments, err := block.Body().BlobKzgCommitments()
+		if err != nil {
+			return errors.Wrap(err, "blob KZG commitments")
+		}
+
+		// Retrieve the KZG commitments from the data column.
+		dataColumnKZGCommitments := dataColumn.KzgCommitments
+
+		// Verify the commitments in the block match the commitments in the data column.
+		if !reflect.DeepEqual(blockKZGCommitments, dataColumnKZGCommitments) {
+			// Retrieve the data column's slot.
+			dataColumnSlot := dataColumn.Slot()
+
+			return errors.Wrapf(
+				ErrMismatchedColumnCommitments,
+				"data column commitments `%#v` != block commitments `%#v` for block root %#x at slot %d",
+				dataColumnKZGCommitments,
+				blockKZGCommitments,
+				blockRoot,
+				dataColumnSlot,
+			)
+		}
 	}
 
-	if !reflect.DeepEqual(commitments, col.KzgCommitments) {
-		return errors.Wrapf(ErrMismatchedColumnCommitments, "commitment %#v != block commitment %#v for block root %#x at slot %d ", col.KzgCommitments, commitments, block.Root(), col.Slot())
+	dataColumns := make([]blocks.RODataColumn, 0, len(wrappedBlockDataColumns))
+	for _, wrappedBlockDataColumn := range wrappedBlockDataColumns {
+		dataColumn := wrappedBlockDataColumn.RODataColumn
+		dataColumns = append(dataColumns, dataColumn)
 	}
 
-	vf := colVerifier(col, verification.InitsyncColumnSidecarRequirements)
-	if err := vf.DataColumnIndexInBounds(); err != nil {
-		return errors.Wrap(err, "data column index out of bounds")
+	// Verify that the data column indices are in bounds.
+	verifier := dataColumnsVerifier(dataColumns, verification.InitsyncColumnSidecarRequirements)
+	if err := verifier.DataColumnsIndexInBounds(); err != nil {
+		return errors.Wrap(err, "data column index in bounds")
 	}
 
-	// Filter out columns which did not pass the KZG inclusion proof verification.
-	if err := vf.SidecarInclusionProven(); err != nil {
+	// Verify the KZG inclusion proofs.
+	if err := verifier.SidecarInclusionProven(); err != nil {
 		return errors.Wrap(err, "inclusion proof verification")
 	}
 
-	// Filter out columns which did not pass the KZG proof verification.
-	if err := vf.SidecarKzgProofVerified(); err != nil {
+	// Verify the KZG proofs.
+	if err := verifier.SidecarKzgProofVerified(); err != nil {
 		return errors.Wrap(err, "KZG proof verification")
 	}
 
diff --git a/beacon-chain/verification/batch.go b/beacon-chain/verification/batch.go
index 9a7bcca64d46..22c2c7cdc769 100644
--- a/beacon-chain/verification/batch.go
+++ b/beacon-chain/verification/batch.go
@@ -5,7 +5,6 @@ import (
 
 	"github.com/pkg/errors"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
-	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
 	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
 )
@@ -43,7 +42,7 @@ type BlobBatchVerifier struct {
 }
 
 // VerifiedROBlobs satisfies the das.BlobBatchVerifier interface, used by das.AvailabilityStore.
-func (batch *BlobBatchVerifier) VerifiedROBlobs(ctx context.Context, blk blocks.ROBlock, scs []blocks.ROBlob) ([]blocks.VerifiedROBlob, error) {
+func (batch *BlobBatchVerifier) VerifiedROBlobs(_ context.Context, blk blocks.ROBlock, scs []blocks.ROBlob) ([]blocks.VerifiedROBlob, error) {
 	if len(scs) == 0 {
 		return nil, nil
 	}
@@ -93,85 +92,3 @@ func (batch *BlobBatchVerifier) verifyOneBlob(sc blocks.ROBlob) (blocks.Verified
 
 	return bv.VerifiedROBlob()
 }
-
-// NewDataColumnBatchVerifier initializes a data column batch verifier. It requires the caller to correctly specify
-// verification Requirements and to also pass in a NewColumnVerifier, which is a callback function that
-// returns a new ColumnVerifier for handling a single column in the batch.
-func NewDataColumnBatchVerifier(newVerifier NewColumnVerifier, reqs []Requirement) *DataColumnBatchVerifier { - return &DataColumnBatchVerifier{ - verifyKzg: peerdas.VerifyDataColumnSidecarKZGProofs, - newVerifier: newVerifier, - reqs: reqs, - } -} - -// DataColumnBatchVerifier solves problems that come from verifying batches of data columns from RPC. -// First: we only update forkchoice after the entire batch has completed, so the n+1 elements in the batch -// won't be in forkchoice yet. -// Second: it is more efficient to batch some verifications, like kzg commitment verification. Batch adds a -// method to ColumnVerifier to verify the kzg commitments of all data column sidecars for a block together, then using the cached -// result of the batch verification when verifying the individual columns. -type DataColumnBatchVerifier struct { - verifyKzg rodataColumnCommitmentVerifier - newVerifier NewColumnVerifier - reqs []Requirement -} - -// VerifiedRODataColumns satisfies the das.ColumnBatchVerifier interface, used by das.AvailabilityStore. -func (batch *DataColumnBatchVerifier) VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, scs []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error) { - if len(scs) == 0 { - return nil, nil - } - blkSig := blk.Signature() - // We assume the proposer is validated wrt the block in batch block processing before performing the DA check. - // So at this stage we just need to make sure the value being signed and signature bytes match the block. - for i := range scs { - blobSig := bytesutil.ToBytes96(scs[i].SignedBlockHeader.Signature) - if blkSig != blobSig { - return nil, ErrBatchSignatureMismatch - } - // Extra defensive check to make sure the roots match. This should be unnecessary in practice since the root from - // the block should be used as the lookup key into the cache of sidecars. - if blk.Root() != scs[i].BlockRoot() { - return nil, ErrBatchBlockRootMismatch - } - } - // Verify commitments for all columns at once. verifyOneColumn assumes it is only called once this check succeeds. - for i := range scs { - verified, err := batch.verifyKzg(scs[i]) - if err != nil { - return nil, err - } - if !verified { - return nil, ErrSidecarKzgProofInvalid - } - } - - vs := make([]blocks.VerifiedRODataColumn, len(scs)) - for i := range scs { - vb, err := batch.verifyOneColumn(scs[i]) - if err != nil { - return nil, err - } - vs[i] = vb - } - return vs, nil -} - -func (batch *DataColumnBatchVerifier) verifyOneColumn(sc blocks.RODataColumn) (blocks.VerifiedRODataColumn, error) { - vb := blocks.VerifiedRODataColumn{} - bv := batch.newVerifier(sc, batch.reqs) - // We can satisfy the following 2 requirements immediately because VerifiedROColumns always verifies commitments - // and block signature for all columns in the batch before calling verifyOneColumn. 
- bv.SatisfyRequirement(RequireSidecarKzgProofVerified) - bv.SatisfyRequirement(RequireValidProposerSignature) - - if err := bv.DataColumnIndexInBounds(); err != nil { - return vb, err - } - if err := bv.SidecarInclusionProven(); err != nil { - return vb, err - } - - return bv.VerifiedRODataColumn() -} diff --git a/beacon-chain/verification/blob_test.go b/beacon-chain/verification/blob_test.go index 71f9d5408e26..0974af62cf77 100644 --- a/beacon-chain/verification/blob_test.go +++ b/beacon-chain/verification/blob_test.go @@ -475,7 +475,7 @@ func TestSidecarProposerExpected(t *testing.T) { t.Run("not cached, proposer matches", func(t *testing.T) { pc := &mockProposerCache{ ProposerCB: pcReturnsNotFound(), - ComputeProposerCB: func(ctx context.Context, root [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error) { + ComputeProposerCB: func(_ context.Context, root [32]byte, slot primitives.Slot, _ state.BeaconState) (primitives.ValidatorIndex, error) { require.Equal(t, b.ParentRoot(), root) require.Equal(t, b.Slot(), slot) return b.ProposerIndex(), nil @@ -490,7 +490,7 @@ func TestSidecarProposerExpected(t *testing.T) { t.Run("not cached, proposer does not match", func(t *testing.T) { pc := &mockProposerCache{ ProposerCB: pcReturnsNotFound(), - ComputeProposerCB: func(ctx context.Context, root [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error) { + ComputeProposerCB: func(_ context.Context, root [32]byte, slot primitives.Slot, _ state.BeaconState) (primitives.ValidatorIndex, error) { require.Equal(t, b.ParentRoot(), root) require.Equal(t, b.Slot(), slot) return b.ProposerIndex() + 1, nil @@ -505,7 +505,7 @@ func TestSidecarProposerExpected(t *testing.T) { t.Run("not cached, ComputeProposer fails", func(t *testing.T) { pc := &mockProposerCache{ ProposerCB: pcReturnsNotFound(), - ComputeProposerCB: func(ctx context.Context, root [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error) { + ComputeProposerCB: func(_ context.Context, root [32]byte, slot primitives.Slot, _ state.BeaconState) (primitives.ValidatorIndex, error) { require.Equal(t, b.ParentRoot(), root) require.Equal(t, b.Slot(), slot) return 0, errors.New("ComputeProposer failed") diff --git a/beacon-chain/verification/data_column.go b/beacon-chain/verification/data_column.go index a3a0a701fdee..b15fce29bb32 100644 --- a/beacon-chain/verification/data_column.go +++ b/beacon-chain/verification/data_column.go @@ -5,7 +5,6 @@ import ( "github.com/pkg/errors" forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types" - "github.com/prysmaticlabs/prysm/v5/beacon-chain/state" fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams" "github.com/prysmaticlabs/prysm/v5/config/params" "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks" @@ -69,26 +68,32 @@ var ( ErrColumnIndexInvalid = errors.New("incorrect column sidecar index") ) -type RODataColumnVerifier struct { +type RODataColumnsVerifier struct { *sharedResources - results *results - dataColumn blocks.RODataColumn - parent state.BeaconState - verifyDataColumnCommitment rodataColumnCommitmentVerifier + results *results + dataColumns []blocks.RODataColumn + verifyDataColumnsCommitment rodataColumnsCommitmentVerifier } -type rodataColumnCommitmentVerifier func(blocks.RODataColumn) (bool, error) +type rodataColumnsCommitmentVerifier func([]blocks.RODataColumn) (bool, error) -var _ DataColumnVerifier = &RODataColumnVerifier{} +var _ DataColumnsVerifier 
= &RODataColumnsVerifier{}
 
-// VerifiedRODataColumn "upgrades" the wrapped ROBlob to a VerifiedROBlob.
+// VerifiedRODataColumns "upgrades" the wrapped RODataColumns to VerifiedRODataColumns.
 // If any of the verifications ran against the blob failed, or some required verifications
 // were not run, an error will be returned.
-func (dv *RODataColumnVerifier) VerifiedRODataColumn() (blocks.VerifiedRODataColumn, error) {
+func (dv *RODataColumnsVerifier) VerifiedRODataColumns() ([]blocks.VerifiedRODataColumn, error) {
 	if dv.results.allSatisfied() {
-		return blocks.NewVerifiedRODataColumn(dv.dataColumn), nil
+		verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(dv.dataColumns))
+		for _, dataColumn := range dv.dataColumns {
+			verifiedRODataColumn := blocks.NewVerifiedRODataColumn(dataColumn)
+			verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
+		}
+
+		return verifiedRODataColumns, nil
 	}
-	return blocks.VerifiedRODataColumn{}, dv.results.errors(ErrColumnInvalid)
+
+	return nil, dv.results.errors(ErrColumnInvalid)
 }
 
 // SatisfyRequirement allows the caller to assert that a requirement has been satisfied.
@@ -97,11 +102,11 @@ func (dv *RODataColumnVerifier) VerifiedRODataColumn() (blocks.VerifiedRODataCol
 // forkchoice, like descends from finalized or parent seen, would necessarily fail. Allowing the caller to
 // assert the requirement has been satisfied ensures we have an easy way to audit which piece of code is satisfying
 // a requirement outside of this package.
-func (dv *RODataColumnVerifier) SatisfyRequirement(req Requirement) {
+func (dv *RODataColumnsVerifier) SatisfyRequirement(req Requirement) {
 	dv.recordResult(req, nil)
 }
 
-func (dv *RODataColumnVerifier) recordResult(req Requirement, err *error) {
+func (dv *RODataColumnsVerifier) recordResult(req Requirement, err *error) {
 	if err == nil || *err == nil {
 		dv.results.record(req, nil)
 		return
@@ -109,162 +114,281 @@ func (dv *RODataColumnVerifier) recordResult(req Requirement, err *error) {
 	dv.results.record(req, *err)
 }
 
-// DataColumnIndexInBounds represents the follow spec verification:
+// DataColumnsIndexInBounds represents the following spec verification:
 // [REJECT] The sidecar's index is consistent with NUMBER_OF_COLUMNS -- i.e. data_column_sidecar.index < NUMBER_OF_COLUMNS.
-func (dv *RODataColumnVerifier) DataColumnIndexInBounds() (err error) {
+func (dv *RODataColumnsVerifier) DataColumnsIndexInBounds() (err error) {
 	defer dv.recordResult(RequireDataColumnIndexInBounds, &err)
-	if dv.dataColumn.ColumnIndex >= fieldparams.NumberOfColumns {
-		log.WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("Sidecar index >= NUMBER_OF_COLUMNS")
-		return columnErrBuilder(ErrColumnIndexInvalid)
+
+	for _, dataColumn := range dv.dataColumns {
+		if dataColumn.ColumnIndex >= fieldparams.NumberOfColumns {
+			fields := logging.DataColumnFields(dataColumn)
+			log.WithFields(fields).Debug("Sidecar index >= NUMBER_OF_COLUMNS")
+			return columnErrBuilder(ErrColumnIndexInvalid)
+		}
 	}
+
 	return nil
 }
 
 // NotFromFutureSlot represents the spec verification:
 // [IGNORE] The sidecar is not from a future slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance)
 // -- i.e. validate that block_header.slot <= current_slot
-func (dv *RODataColumnVerifier) NotFromFutureSlot() (err error) {
+func (dv *RODataColumnsVerifier) NotFromFutureSlot() (err error) {
 	defer dv.recordResult(RequireNotFromFutureSlot, &err)
-	if dv.clock.CurrentSlot() == dv.dataColumn.Slot() {
-		return nil
-	}
-	// earliestStart represents the time the slot starts, lowered by MAXIMUM_GOSSIP_CLOCK_DISPARITY.
-	// We lower the time by MAXIMUM_GOSSIP_CLOCK_DISPARITY in case system time is running slightly behind real time.
-	earliestStart := dv.clock.SlotStart(dv.dataColumn.Slot()).Add(-1 * params.BeaconConfig().MaximumGossipClockDisparityDuration())
-	// If the system time is still before earliestStart, we consider the column from a future slot and return an error.
-	if dv.clock.Now().Before(earliestStart) {
-		log.WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("Sidecar slot is too far in the future")
-		return columnErrBuilder(ErrFromFutureSlot)
+
+	// Retrieve the current slot.
+	currentSlot := dv.clock.CurrentSlot()
+
+	// Get the current time.
+	now := dv.clock.Now()
+
+	// Retrieve the maximum gossip clock disparity.
+	maximumGossipClockDisparity := params.BeaconConfig().MaximumGossipClockDisparityDuration()
+
+	for _, dataColumn := range dv.dataColumns {
+		// Extract the data column slot.
+		dataColumnSlot := dataColumn.Slot()
+
+		// Skip if the data column slot is the same as the current slot.
+		if currentSlot == dataColumnSlot {
+			continue
+		}
+
+		// earliestStart represents the time the slot starts, lowered by MAXIMUM_GOSSIP_CLOCK_DISPARITY.
+		// We lower the time by MAXIMUM_GOSSIP_CLOCK_DISPARITY in case system time is running slightly behind real time.
+		earliestStart := dv.clock.SlotStart(dataColumnSlot).Add(-maximumGossipClockDisparity)
+
+		// If the system time is still before earliestStart, we consider the column from a future slot and return an error.
+		if now.Before(earliestStart) {
+			fields := logging.DataColumnFields(dataColumn)
+			log.WithFields(fields).Debug("Sidecar slot is too far in the future")
+
+			return columnErrBuilder(ErrFromFutureSlot)
+		}
 	}
+
 	return nil
 }
 
 // SlotAboveFinalized represents the spec verification:
 // [IGNORE] The sidecar is from a slot greater than the latest finalized slot
 // -- i.e. validate that block_header.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)
-func (dv *RODataColumnVerifier) SlotAboveFinalized() (err error) {
+func (dv *RODataColumnsVerifier) SlotAboveFinalized() (err error) {
 	defer dv.recordResult(RequireSlotAboveFinalized, &err)
-	fcp := dv.fc.FinalizedCheckpoint()
-	fSlot, err := slots.EpochStart(fcp.Epoch)
+
+	// Retrieve the finalized checkpoint.
+	finalizedCheckpoint := dv.fc.FinalizedCheckpoint()
+
+	// Compute the first slot of the finalized checkpoint epoch.
+	startSlot, err := slots.EpochStart(finalizedCheckpoint.Epoch)
 	if err != nil {
-		return errors.Wrapf(columnErrBuilder(ErrSlotNotAfterFinalized), "error computing epoch start slot for finalized checkpoint (%d) %s", fcp.Epoch, err.Error())
+		return errors.Wrapf(
+			columnErrBuilder(ErrSlotNotAfterFinalized),
+			"error computing epoch start slot for finalized checkpoint (%d) %s",
+			finalizedCheckpoint.Epoch,
+			err.Error(),
+		)
 	}
-	if dv.dataColumn.Slot() <= fSlot {
-		log.WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("Sidecar slot is not after finalized checkpoint")
-		return columnErrBuilder(ErrSlotNotAfterFinalized)
+
+	for _, dataColumn := range dv.dataColumns {
+		// Extract the data column slot.
+		dataColumnSlot := dataColumn.Slot()
+
+		// Check if the data column slot is after the first slot of the epoch corresponding to the finalized checkpoint.
+		if dataColumnSlot <= startSlot {
+			fields := logging.DataColumnFields(dataColumn)
+			log.WithFields(fields).Debug("Sidecar slot is not after finalized checkpoint")
+
+			return columnErrBuilder(ErrSlotNotAfterFinalized)
+		}
 	}
+
 	return nil
 }
 
 // ValidProposerSignature represents the spec verification:
 // [REJECT] The proposer signature of sidecar.signed_block_header, is valid with respect to the block_header.proposer_index pubkey.
-func (dv *RODataColumnVerifier) ValidProposerSignature(ctx context.Context) (err error) {
+func (dv *RODataColumnsVerifier) ValidProposerSignature(ctx context.Context) (err error) {
 	defer dv.recordResult(RequireValidProposerSignature, &err)
-	sd := columnToSignatureData(dv.dataColumn)
-	// First check if there is a cached verification that can be reused.
-	seen, err := dv.sc.SignatureVerified(sd)
-	if seen {
-		columnVerificationProposerSignatureCache.WithLabelValues("hit-valid").Inc()
+
+	for _, dataColumn := range dv.dataColumns {
+		// Extract the signature data from the data column.
+		signatureData := columnToSignatureData(dataColumn)
+
+		// Get logging fields.
+		fields := logging.DataColumnFields(dataColumn)
+		log := log.WithFields(fields)
+
+		// First check if there is a cached verification that can be reused.
+		seen, err := dv.sc.SignatureVerified(signatureData)
 		if err != nil {
-			log.WithFields(logging.DataColumnFields(dv.dataColumn)).WithError(err).Debug("Reusing failed proposer signature validation from cache")
+			log.WithError(err).Debug("Reusing failed proposer signature validation from cache")
+			columnVerificationProposerSignatureCache.WithLabelValues("hit-invalid").Inc()
 			return columnErrBuilder(ErrInvalidProposerSignature)
 		}
-		return nil
-	}
-	columnVerificationProposerSignatureCache.WithLabelValues("miss").Inc()
-	// Retrieve the parent state to fallback to full verification.
-	parent, err := dv.parentState(ctx)
-	if err != nil {
-		log.WithFields(logging.DataColumnFields(dv.dataColumn)).WithError(err).Debug("Could not replay parent state for column signature verification")
-		return columnErrBuilder(ErrInvalidProposerSignature)
-	}
-	// Full verification, which will subsequently be cached for anything sharing the signature cache.
-	if err = dv.sc.VerifySignature(sd, parent); err != nil {
-		log.WithFields(logging.DataColumnFields(dv.dataColumn)).WithError(err).Debug("Signature verification failed")
-		return columnErrBuilder(ErrInvalidProposerSignature)
+
+		// If yes, we can skip the full verification.
+		if seen {
+			columnVerificationProposerSignatureCache.WithLabelValues("hit-valid").Inc()
+			continue
+		}
+
+		columnVerificationProposerSignatureCache.WithLabelValues("miss").Inc()
+
+		// Retrieve the root of the parent block corresponding to the data column.
+		parentRoot := dataColumn.ParentRoot()
+
+		// Retrieve the parent state to fall back to full verification.
+		parentState, err := dv.sr.StateByRoot(ctx, parentRoot)
+		if err != nil {
+			log.WithError(err).Debug("Could not replay parent state for column signature verification")
+			return columnErrBuilder(ErrInvalidProposerSignature)
+		}
+
+		// Full verification, which will subsequently be cached for anything sharing the signature cache.
+ if err = dv.sc.VerifySignature(signatureData, parentState); err != nil { + log.WithError(err).Debug("Signature verification failed") + return columnErrBuilder(ErrInvalidProposerSignature) + } } + return nil } // SidecarParentSeen represents the spec verification: // [IGNORE] The sidecar's block's parent (defined by block_header.parent_root) has been seen // (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). -func (dv *RODataColumnVerifier) SidecarParentSeen(parentSeen func([32]byte) bool) (err error) { +func (dv *RODataColumnsVerifier) SidecarParentSeen(parentSeen func([32]byte) bool) (err error) { defer dv.recordResult(RequireSidecarParentSeen, &err) - if parentSeen != nil && parentSeen(dv.dataColumn.ParentRoot()) { - return nil - } - if dv.fc.HasNode(dv.dataColumn.ParentRoot()) { - return nil + + for _, dataColumn := range dv.dataColumns { + // Extract the root of the parent block corresponding to the data column. + parentRoot := dataColumn.ParentRoot() + + // Skip if the parent root has been seen. + if parentSeen != nil && parentSeen(parentRoot) { + continue + } + + if !dv.fc.HasNode(parentRoot) { + fields := logging.DataColumnFields(dataColumn) + log.WithFields(fields).Debug("Parent root has not been seen") + return columnErrBuilder(ErrSidecarParentNotSeen) + } } - log.WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("Parent root has not been seen") - return columnErrBuilder(ErrSidecarParentNotSeen) + + return nil } // SidecarParentValid represents the spec verification: // [REJECT] The sidecar's block's parent (defined by block_header.parent_root) passes validation. -func (dv *RODataColumnVerifier) SidecarParentValid(badParent func([32]byte) bool) (err error) { +func (dv *RODataColumnsVerifier) SidecarParentValid(badParent func([32]byte) bool) (err error) { defer dv.recordResult(RequireSidecarParentValid, &err) - if badParent != nil && badParent(dv.dataColumn.ParentRoot()) { - log.WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("Parent root is invalid") - return columnErrBuilder(ErrSidecarParentInvalid) + + for _, dataColumn := range dv.dataColumns { + // Extract the root of the parent block corresponding to the data column. + parentRoot := dataColumn.ParentRoot() + + if badParent != nil && badParent(parentRoot) { + fields := logging.DataColumnFields(dataColumn) + log.WithFields(fields).Debug("Parent root is invalid") + return columnErrBuilder(ErrSidecarParentInvalid) + } } + return nil } // SidecarParentSlotLower represents the spec verification: // [REJECT] The sidecar is from a higher slot than the sidecar's block's parent (defined by block_header.parent_root). -func (dv *RODataColumnVerifier) SidecarParentSlotLower() (err error) { +func (dv *RODataColumnsVerifier) SidecarParentSlotLower() (err error) { defer dv.recordResult(RequireSidecarParentSlotLower, &err) - parentSlot, err := dv.fc.Slot(dv.dataColumn.ParentRoot()) - if err != nil { - return errors.Wrap(columnErrBuilder(ErrSlotNotAfterParent), "parent root not in forkchoice") - } - if parentSlot >= dv.dataColumn.Slot() { - return ErrSlotNotAfterParent + + for _, dataColumn := range dv.dataColumns { + // Extract the root of the parent block corresponding to the data column. + parentRoot := dataColumn.ParentRoot() + + // Compute the slot of the parent block. 
+ parentSlot, err := dv.fc.Slot(parentRoot) + if err != nil { + return errors.Wrap(columnErrBuilder(ErrSlotNotAfterParent), "parent root not in forkchoice") + } + + // Extract the slot of the data column. + dataColumnSlot := dataColumn.Slot() + + // Check if the data column slot is after the parent slot. + if parentSlot >= dataColumnSlot { + fields := logging.DataColumnFields(dataColumn) + log.WithFields(fields).Debug("Sidecar slot is not after parent slot") + return ErrSlotNotAfterParent + } } + return nil } // SidecarDescendsFromFinalized represents the spec verification: // [REJECT] The current finalized_checkpoint is an ancestor of the sidecar's block // -- i.e. get_checkpoint_block(store, block_header.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root. -func (dv *RODataColumnVerifier) SidecarDescendsFromFinalized() (err error) { +func (dv *RODataColumnsVerifier) SidecarDescendsFromFinalized() (err error) { defer dv.recordResult(RequireSidecarDescendsFromFinalized, &err) - if !dv.fc.HasNode(dv.dataColumn.ParentRoot()) { - log.WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("Parent root not in forkchoice") - return columnErrBuilder(ErrSidecarNotFinalizedDescendent) + + for _, dataColumn := range dv.dataColumns { + // Extract the root of the parent block corresponding to the data column. + parentRoot := dataColumn.ParentRoot() + + if !dv.fc.HasNode(parentRoot) { + fields := logging.DataColumnFields(dataColumn) + log.WithFields(fields).Debug("Parent root not in forkchoice") + return columnErrBuilder(ErrSidecarNotFinalizedDescendent) + } } + return nil } // SidecarInclusionProven represents the spec verification: // [REJECT] The sidecar's kzg_commitments field inclusion proof is valid as verified by verify_data_column_sidecar_inclusion_proof(sidecar). -func (dv *RODataColumnVerifier) SidecarInclusionProven() (err error) { +func (dv *RODataColumnsVerifier) SidecarInclusionProven() (err error) { defer dv.recordResult(RequireSidecarInclusionProven, &err) - if err = blocks.VerifyKZGInclusionProofColumn(dv.dataColumn); err != nil { - log.WithError(err).WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("Sidecar inclusion proof verification failed") - return columnErrBuilder(ErrSidecarInclusionProofInvalid) + + for _, dataColumn := range dv.dataColumns { + if err = blocks.VerifyKZGInclusionProofColumn(dataColumn); err != nil { + fields := logging.DataColumnFields(dataColumn) + log.WithError(err).WithFields(fields).Debug("Sidecar inclusion proof verification failed") + return columnErrBuilder(ErrSidecarInclusionProofInvalid) + } } + return nil } // SidecarKzgProofVerified represents the spec verification: // [REJECT] The sidecar's column data is valid as verified by verify_data_column_sidecar_kzg_proofs(sidecar). 
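
The KZG requirement below is where batching pays off most: verifyDataColumnsCommitment receives the whole slice of sidecars, so a single batched proof check replaces one check per sidecar. As a minimal sketch of how a caller might drive the batched verifier end to end — assuming an Initializer wired with the usual shared resources; the wrapper function name and its error handling are illustrative assumptions, not part of this patch, while the methods and requirement set are the ones defined in this file:

	// verifyColumnsBatchSketch is a hypothetical caller, shown for illustration only.
	func verifyColumnsBatchSketch(ctx context.Context, ini *verification.Initializer, columns []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error) {
		// One verifier instance covers every sidecar in the batch.
		verifier := ini.NewDataColumnsVerifier(columns, verification.GossipColumnSidecarRequirements)

		// Each requirement runs once over the whole batch instead of once per sidecar.
		if err := verifier.DataColumnsIndexInBounds(); err != nil {
			return nil, err
		}
		if err := verifier.SidecarInclusionProven(); err != nil {
			return nil, err
		}
		if err := verifier.ValidProposerSignature(ctx); err != nil {
			return nil, err
		}
		if err := verifier.SidecarKzgProofVerified(); err != nil {
			return nil, err
		}

		// The upgrade only succeeds once every requirement in the set has been satisfied.
		return verifier.VerifiedRODataColumns()
	}
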
-func (dv *RODataColumnVerifier) SidecarKzgProofVerified() (err error) {
+func (dv *RODataColumnsVerifier) SidecarKzgProofVerified() (err error) {
 	defer dv.recordResult(RequireSidecarKzgProofVerified, &err)
-	ok, err := dv.verifyDataColumnCommitment(dv.dataColumn)
+
+	ok, err := dv.verifyDataColumnsCommitment(dv.dataColumns)
 	if err != nil {
-		log.WithError(err).WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("KZG commitment proof verification failed")
+		for _, dataColumn := range dv.dataColumns {
+			fields := logging.DataColumnFields(dataColumn)
+			log.WithError(err).WithFields(fields).Debug("Error verifying KZG commitment proof in the batch containing this sidecar")
+		}
 		return columnErrBuilder(ErrSidecarKzgProofInvalid)
 	}
-	if !ok {
-		log.WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("KZG commitment proof verification failed")
-		return columnErrBuilder(ErrSidecarKzgProofInvalid)
+
+	if ok {
+		return nil
 	}
-	return nil
+
+	for _, dataColumn := range dv.dataColumns {
+		fields := logging.DataColumnFields(dataColumn)
+		log.WithFields(fields).Debug("KZG commitment proof verification failed in the batch containing this sidecar")
+	}
+
+	return columnErrBuilder(ErrSidecarKzgProofInvalid)
 }
 
 // SidecarProposerExpected represents the spec verification:
@@ -272,49 +396,66 @@ func (dv *RODataColumnVerifier) SidecarKzgProofVerified() (err error) {
 // in the context of the current shuffling (defined by block_header.parent_root/block_header.slot).
 // If the proposer_index cannot immediately be verified against the expected shuffling, the sidecar MAY be queued
 // for later processing while proposers for the block's branch are calculated -- in such a case do not REJECT, instead IGNORE this message.
-func (dv *RODataColumnVerifier) SidecarProposerExpected(ctx context.Context) (err error) {
+func (dv *RODataColumnsVerifier) SidecarProposerExpected(ctx context.Context) (err error) {
 	defer dv.recordResult(RequireSidecarProposerExpected, &err)
-	e := slots.ToEpoch(dv.dataColumn.Slot())
-	if e > 0 {
-		e = e - 1
-	}
-	r, err := dv.fc.TargetRootForEpoch(dv.dataColumn.ParentRoot(), e)
-	if err != nil {
-		return columnErrBuilder(ErrSidecarUnexpectedProposer)
-	}
-	c := &forkchoicetypes.Checkpoint{Root: r, Epoch: e}
-	idx, cached := dv.pc.Proposer(c, dv.dataColumn.Slot())
-	if !cached {
-		pst, err := dv.parentState(ctx)
+
+	for _, dataColumn := range dv.dataColumns {
+		// Extract the slot of the data column.
+		dataColumnSlot := dataColumn.Slot()
+
+		// Compute the epoch of the data column slot, then step back one epoch for the target root lookup.
+		dataColumnEpoch := slots.ToEpoch(dataColumnSlot)
+		if dataColumnEpoch > 0 {
+			dataColumnEpoch = dataColumnEpoch - 1
+		}
+
+		// Extract the root of the parent block corresponding to the data column.
+		parentRoot := dataColumn.ParentRoot()
+
+		// Compute the target root for the epoch.
+		targetRoot, err := dv.fc.TargetRootForEpoch(parentRoot, dataColumnEpoch)
 		if err != nil {
-			log.WithError(err).WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("State replay to parent_root failed")
 			return columnErrBuilder(ErrSidecarUnexpectedProposer)
 		}
-		idx, err = dv.pc.ComputeProposer(ctx, dv.dataColumn.ParentRoot(), dv.dataColumn.Slot(), pst)
-		if err != nil {
-			log.WithError(err).WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("Error computing proposer index from parent state")
+
+		// Create a checkpoint for the target root.
+		checkpoint := &forkchoicetypes.Checkpoint{Root: targetRoot, Epoch: dataColumnEpoch}
+
+		// Try to retrieve the proposer index for this data column from the cache.
+		idx, cached := dv.pc.Proposer(checkpoint, dataColumnSlot)
+
+		if !cached {
+			// Retrieve the root of the parent block corresponding to the data column.
+			parentRoot := dataColumn.ParentRoot()
+
+			// Retrieve the parent state to fall back to full verification.
+			parentState, err := dv.sr.StateByRoot(ctx, parentRoot)
+			if err != nil {
+				fields := logging.DataColumnFields(dataColumn)
+				log.WithError(err).WithFields(fields).Debug("State replay to parent_root failed")
+				return columnErrBuilder(ErrSidecarUnexpectedProposer)
+			}
+
+			idx, err = dv.pc.ComputeProposer(ctx, parentRoot, dataColumnSlot, parentState)
+			if err != nil {
+				fields := logging.DataColumnFields(dataColumn)
+				log.WithError(err).WithFields(fields).Debug("Error computing proposer index from parent state")
+				return columnErrBuilder(ErrSidecarUnexpectedProposer)
+			}
+		}
+
+		if idx != dataColumn.ProposerIndex() {
+			fields := logging.DataColumnFields(dataColumn)
+			log.WithError(columnErrBuilder(ErrSidecarUnexpectedProposer)).
+				WithFields(fields).
+				WithField("expectedProposer", idx).
+				Debug("Unexpected column proposer")
 			return columnErrBuilder(ErrSidecarUnexpectedProposer)
 		}
 	}
-	if idx != dv.dataColumn.ProposerIndex() {
-		log.WithError(columnErrBuilder(ErrSidecarUnexpectedProposer)).
-			WithFields(logging.DataColumnFields(dv.dataColumn)).WithField("expectedProposer", idx).
-			Debug("unexpected column proposer")
-		return columnErrBuilder(ErrSidecarUnexpectedProposer)
-	}
-	return nil
-}
 
-func (dv *RODataColumnVerifier) parentState(ctx context.Context) (state.BeaconState, error) {
-	if dv.parent != nil {
-		return dv.parent, nil
-	}
-	st, err := dv.sr.StateByRoot(ctx, dv.dataColumn.ParentRoot())
-	if err != nil {
-		return nil, err
-	}
-	dv.parent = st
-	return dv.parent, nil
+	return nil
 }
 
 func columnToSignatureData(d blocks.RODataColumn) SignatureData {
diff --git a/beacon-chain/verification/data_column_test.go b/beacon-chain/verification/data_column_test.go
index 4433d3f8830c..3c5ec6a4fde2 100644
--- a/beacon-chain/verification/data_column_test.go
+++ b/beacon-chain/verification/data_column_test.go
@@ -20,557 +20,878 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/time/slots"
 )
 
-func TestColumnIndexInBounds(t *testing.T) {
-	ini := &Initializer{}
-	_, cols := util.GenerateTestDenebBlockWithColumns(t, [32]byte{}, 0, 1)
-	b := cols[0]
-	// set Index to a value that is out of bounds
-	v := ini.NewColumnVerifier(b, GossipColumnSidecarRequirements)
-	require.NoError(t, v.DataColumnIndexInBounds())
-	require.Equal(t, true, v.results.executed(RequireDataColumnIndexInBounds))
-	require.NoError(t, v.results.result(RequireDataColumnIndexInBounds))
+func TestDataColumnsIndexInBounds(t *testing.T) {
+	testCases := []struct {
+		name         string
+		columnsIndex uint64
+		isError      bool
+	}{
+		{
+			name:         "column index in bounds",
+			columnsIndex: 0,
+			isError:      false,
+		},
+		{
+			name:         "column index out of bounds",
+			columnsIndex: fieldparams.NumberOfColumns,
+			isError:      true,
+		},
+	}
 
-	b.ColumnIndex = fieldparams.NumberOfColumns
-	v = ini.NewColumnVerifier(b, GossipColumnSidecarRequirements)
-	require.ErrorIs(t, v.DataColumnIndexInBounds(), ErrColumnIndexInvalid)
-	require.Equal(t, true, v.results.executed(RequireDataColumnIndexInBounds))
-	require.NotNil(t, v.results.result(RequireDataColumnIndexInBounds))
-}
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			const (
+				columnSlot = 0
+				blobCount  = 1
+			)
+
+			parentRoot := [32]byte{}
+			initializer := Initializer{}
+
+			_, columns := util.GenerateTestDenebBlockWithColumns(t, 
parentRoot, columnSlot, blobCount) + for _, column := range columns { + column.ColumnIndex = tc.columnsIndex + } + + verifier := initializer.NewDataColumnsVerifier(columns, GossipColumnSidecarRequirements) + + err := verifier.DataColumnsIndexInBounds() + require.Equal(t, true, verifier.results.executed(RequireDataColumnIndexInBounds)) -func TestColumnSlotNotTooEarly(t *testing.T) { - now := time.Now() - // make genesis 1 slot in the past - genesis := now.Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second) - - _, columns := util.GenerateTestDenebBlockWithColumns(t, [32]byte{}, 0, 1) - c := columns[0] - // slot 1 should be 12 seconds after genesis - c.SignedBlockHeader.Header.Slot = 1 - - // This clock will give a current slot of 1 on the nose - happyClock := startup.NewClock(genesis, [32]byte{}, startup.WithNower(func() time.Time { return now })) - ini := Initializer{shared: &sharedResources{clock: happyClock}} - v := ini.NewColumnVerifier(c, GossipColumnSidecarRequirements) - require.NoError(t, v.NotFromFutureSlot()) - require.Equal(t, true, v.results.executed(RequireNotFromFutureSlot)) - require.NoError(t, v.results.result(RequireNotFromFutureSlot)) - - // Since we have an early return for slots that are directly equal, give a time that is less than max disparity - // but still in the previous slot. - closeClock := startup.NewClock(genesis, [32]byte{}, startup.WithNower(func() time.Time { return now.Add(-1 * params.BeaconConfig().MaximumGossipClockDisparityDuration() / 2) })) - ini = Initializer{shared: &sharedResources{clock: closeClock}} - v = ini.NewColumnVerifier(c, GossipColumnSidecarRequirements) - require.NoError(t, v.NotFromFutureSlot()) - - // This clock will give a current slot of 0, with now coming more than max clock disparity before slot 1 - disparate := now.Add(-2 * params.BeaconConfig().MaximumGossipClockDisparityDuration()) - dispClock := startup.NewClock(genesis, [32]byte{}, startup.WithNower(func() time.Time { return disparate })) - // Set up initializer to use the clock that will set now to a little to far before slot 1 - ini = Initializer{shared: &sharedResources{clock: dispClock}} - v = ini.NewColumnVerifier(c, GossipColumnSidecarRequirements) - require.ErrorIs(t, v.NotFromFutureSlot(), ErrFromFutureSlot) - require.Equal(t, true, v.results.executed(RequireNotFromFutureSlot)) - require.NotNil(t, v.results.result(RequireNotFromFutureSlot)) + if tc.isError { + require.ErrorIs(t, err, ErrColumnIndexInvalid) + require.NotNil(t, verifier.results.result(RequireDataColumnIndexInBounds)) + return + } + + require.NoError(t, err) + require.NoError(t, verifier.results.result(RequireDataColumnIndexInBounds)) + }) + } } -func TestColumnSlotAboveFinalized(t *testing.T) { - ini := &Initializer{shared: &sharedResources{}} - cases := []struct { - name string - slot primitives.Slot - finalizedSlot primitives.Slot - err error +func TestNotFromFutureSlot(t *testing.T) { + maximumGossipClockDisparity := params.BeaconConfig().MaximumGossipClockDisparityDuration() + + testCases := []struct { + name string + currentSlot, columnSlot primitives.Slot + timeBeforeCurrentSlot time.Duration + isError bool }{ { - name: "finalized epoch < column epoch", - slot: 32, + name: "column slot == current slot", + currentSlot: 42, + columnSlot: 42, + timeBeforeCurrentSlot: 0, + isError: false, }, { - name: "finalized slot < column slot (same epoch)", - slot: 31, + name: "within maximum gossip clock disparity", + currentSlot: 42, + columnSlot: 42, + timeBeforeCurrentSlot: 
maximumGossipClockDisparity / 2, + isError: false, }, { - name: "finalized epoch > column epoch", + name: "outside maximum gossip clock disparity", + currentSlot: 42, + columnSlot: 42, + timeBeforeCurrentSlot: maximumGossipClockDisparity * 2, + isError: true, + }, + { + name: "too far in the future", + currentSlot: 10, + columnSlot: 42, + timeBeforeCurrentSlot: 0, + isError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + const blobCount = 1 + + now := time.Now() + secondsPerSlot := time.Duration(params.BeaconConfig().SecondsPerSlot) + genesis := now.Add(-time.Duration(tc.currentSlot) * secondsPerSlot * time.Second) + + clock := startup.NewClock( + genesis, + [fieldparams.RootLength]byte{}, + startup.WithNower(func() time.Time { + return now.Add(-tc.timeBeforeCurrentSlot) + }), + ) + + parentRoot := [fieldparams.RootLength]byte{} + initializer := Initializer{shared: &sharedResources{clock: clock}} + + _, columns := util.GenerateTestDenebBlockWithColumns(t, parentRoot, tc.columnSlot, blobCount) + verifier := initializer.NewDataColumnsVerifier(columns, GossipColumnSidecarRequirements) + + err := verifier.NotFromFutureSlot() + require.Equal(t, true, verifier.results.executed(RequireNotFromFutureSlot)) + + if tc.isError { + require.ErrorIs(t, err, ErrFromFutureSlot) + require.NotNil(t, verifier.results.result(RequireNotFromFutureSlot)) + return + } + + require.NoError(t, err) + require.NoError(t, verifier.results.result(RequireNotFromFutureSlot)) + }) + } +} + +func TestColumnSlotAboveFinalized(t *testing.T) { + testCases := []struct { + name string + finalizedSlot, columnSlot primitives.Slot + isErr bool + }{ + { + name: "finalized epoch < column epoch", + finalizedSlot: 10, + columnSlot: 96, + isErr: false, + }, + { + name: "finalized slot < column slot (same epoch)", finalizedSlot: 32, - err: ErrSlotNotAfterFinalized, + columnSlot: 33, + isErr: false, }, { name: "finalized slot == column slot", - slot: 35, - finalizedSlot: 35, + finalizedSlot: 64, + columnSlot: 64, + isErr: true, + }, + { + name: "finalized epoch > column epoch", + finalizedSlot: 32, + columnSlot: 31, + isErr: true, }, } - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - finalizedCB := func() *forkchoicetypes.Checkpoint { + for _, tc := range testCases { + const blobCount = 1 + + t.Run(tc.name, func(t *testing.T) { + finalizedCheckpoint := func() *forkchoicetypes.Checkpoint { return &forkchoicetypes.Checkpoint{ - Epoch: slots.ToEpoch(c.finalizedSlot), - Root: [32]byte{}, + Epoch: slots.ToEpoch(tc.finalizedSlot), + Root: [fieldparams.RootLength]byte{}, } } - ini.shared.fc = &mockForkchoicer{FinalizedCheckpointCB: finalizedCB} - _, columns := util.GenerateTestDenebBlockWithColumns(t, [32]byte{}, 0, 1) - col := columns[0] - col.SignedBlockHeader.Header.Slot = c.slot - v := ini.NewColumnVerifier(col, GossipColumnSidecarRequirements) + + parentRoot := [fieldparams.RootLength]byte{} + initializer := &Initializer{shared: &sharedResources{ + fc: &mockForkchoicer{FinalizedCheckpointCB: finalizedCheckpoint}, + }} + + _, columns := util.GenerateTestDenebBlockWithColumns(t, parentRoot, tc.columnSlot, blobCount) + + v := initializer.NewDataColumnsVerifier(columns, GossipColumnSidecarRequirements) + err := v.SlotAboveFinalized() require.Equal(t, true, v.results.executed(RequireSlotAboveFinalized)) - if c.err == nil { - require.NoError(t, err) - require.NoError(t, v.results.result(RequireSlotAboveFinalized)) - } else { - require.ErrorIs(t, err, c.err) + + if tc.isErr { + 
require.ErrorIs(t, err, ErrSlotNotAfterFinalized)
 				require.NotNil(t, v.results.result(RequireSlotAboveFinalized))
+				return
 			}
+
+			require.NoError(t, err)
+			require.NoError(t, v.results.result(RequireSlotAboveFinalized))
 		})
 	}
 }
 
-func TestDataColumnValidProposerSignature_Cached(t *testing.T) {
-	ctx := context.Background()
-	_, columns := util.GenerateTestDenebBlockWithColumns(t, [32]byte{}, 0, 1)
-	col := columns[0]
-	expectedSd := columnToSignatureData(col)
-	sc := &mockSignatureCache{
-		svcb: func(sig SignatureData) (bool, error) {
-			if sig != expectedSd {
-				t.Error("Did not see expected SignatureData")
-			}
-			return true, nil
+func TestValidProposerSignature(t *testing.T) {
+	const (
+		columnSlot = 0
+		blobCount  = 1
+	)
+
+	parentRoot := [fieldparams.RootLength]byte{}
+	validator := &ethpb.Validator{}
+
+	_, columns := util.GenerateTestDenebBlockWithColumns(t, parentRoot, columnSlot, blobCount)
+	firstColumn := columns[0]
+
+	// The signature data does not depend on the data column itself, so we can use the first one.
+	expectedSignatureData := columnToSignatureData(firstColumn)
+
+	testCases := []struct {
+		isError         bool
+		vscbShouldError bool
+		svcbReturn      bool
+		stateByRooter   StateByRooter
+		vscbError       error
+		svcbError       error
+		name            string
+	}{
+		{
+			name:            "cache hit - success",
+			svcbReturn:      true,
+			svcbError:       nil,
+			vscbShouldError: true,
+			vscbError:       nil,
+			stateByRooter:   &mockStateByRooter{sbr: sbrErrorIfCalled(t)},
+			isError:         false,
 		},
-		vscb: func(sig SignatureData, v ValidatorAtIndexer) (err error) {
-			t.Error("VerifySignature should not be called if the result is cached")
-			return nil
+		{
+			name:            "cache hit - error",
+			svcbReturn:      true,
+			svcbError:       errors.New("derp"),
+			vscbShouldError: true,
+			vscbError:       nil,
+			stateByRooter:   &mockStateByRooter{sbr: sbrErrorIfCalled(t)},
+			isError:         true,
+		},
+		{
+			name:            "cache miss - success",
+			svcbReturn:      false,
+			svcbError:       nil,
+			vscbShouldError: false,
+			vscbError:       nil,
+			stateByRooter:   sbrForValOverride(firstColumn.ProposerIndex(), validator),
+			isError:         false,
+		},
+		{
+			name:            "cache miss - state not found",
+			svcbReturn:      false,
+			svcbError:       nil,
+			vscbShouldError: false,
+			vscbError:       nil,
+			stateByRooter:   sbrNotFound(t, expectedSignatureData.Parent),
+			isError:         true,
+		},
+		{
+			name:            "cache miss - signature failure",
+			svcbReturn:      false,
+			svcbError:       nil,
+			vscbShouldError: false,
+			vscbError:       errors.New("signature, not so good!"),
+			stateByRooter:   sbrForValOverride(firstColumn.ProposerIndex(), validator),
+			isError:         true,
 		},
 	}
-	ini := Initializer{shared: &sharedResources{sc: sc, sr: &mockStateByRooter{sbr: sbrErrorIfCalled(t)}}}
-	v := ini.NewColumnVerifier(col, GossipColumnSidecarRequirements)
-	require.NoError(t, v.ValidProposerSignature(ctx))
-	require.Equal(t, true, v.results.executed(RequireValidProposerSignature))
-	require.NoError(t, v.results.result(RequireValidProposerSignature))
-
-	// simulate an error in the cache - indicating the previous verification failed
-	sc.svcb = func(sig SignatureData) (bool, error) {
-		if sig != expectedSd {
-			t.Error("Did not see expected SignatureData")
-		}
-		return true, errors.New("derp")
-	}
-	ini = Initializer{shared: &sharedResources{sc: sc, sr: &mockStateByRooter{sbr: sbrErrorIfCalled(t)}}}
-	v = ini.NewColumnVerifier(col, GossipColumnSidecarRequirements)
-	require.ErrorIs(t, v.ValidProposerSignature(ctx), ErrInvalidProposerSignature)
-	require.Equal(t, true, v.results.executed(RequireValidProposerSignature))
-	require.NotNil(t, v.results.result(RequireValidProposerSignature))
-}
 
-func TestColumnValidProposerSignature_CacheMiss(t *testing.T) {
-	ctx := context.Background()
-	_, columns := util.GenerateTestDenebBlockWithColumns(t, [32]byte{}, 0, 1)
-	col := columns[0]
-	expectedSd := columnToSignatureData(col)
-	sc := &mockSignatureCache{
-		svcb: func(sig SignatureData) (bool, error) {
-			return false, nil
-		},
-		vscb: func(sig SignatureData, v ValidatorAtIndexer) (err error) {
-			if expectedSd != sig {
-				t.Error("unexpected signature data")
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			signatureCache := &mockSignatureCache{
+				svcb: func(signatureData SignatureData) (bool, error) {
+					if signatureData != expectedSignatureData {
+						t.Error("Did not see expected SignatureData")
+					}
+					return tc.svcbReturn, tc.svcbError
+				},
+				vscb: func(signatureData SignatureData, _ ValidatorAtIndexer) (err error) {
+					if tc.vscbShouldError {
+						t.Error("VerifySignature should not be called if the result is cached")
+						return nil
+					}
+
+					if expectedSignatureData != signatureData {
+						t.Error("unexpected signature data")
+					}
+
+					return tc.vscbError
+				},
 			}
-			return nil
-		},
-	}
-	ini := Initializer{shared: &sharedResources{sc: sc, sr: sbrForValOverride(col.ProposerIndex(), &ethpb.Validator{})}}
-	v := ini.NewColumnVerifier(col, GossipColumnSidecarRequirements)
-	require.NoError(t, v.ValidProposerSignature(ctx))
-	require.Equal(t, true, v.results.executed(RequireValidProposerSignature))
-	require.NoError(t, v.results.result(RequireValidProposerSignature))
-
-	// simulate state not found
-	ini = Initializer{shared: &sharedResources{sc: sc, sr: sbrNotFound(t, expectedSd.Parent)}}
-	v = ini.NewColumnVerifier(col, GossipColumnSidecarRequirements)
-	require.ErrorIs(t, v.ValidProposerSignature(ctx), ErrInvalidProposerSignature)
-	require.Equal(t, true, v.results.executed(RequireValidProposerSignature))
-	require.NotNil(t, v.results.result(RequireValidProposerSignature))
-
-	// simulate successful state lookup, but sig failure
-	sbr := sbrForValOverride(col.ProposerIndex(), &ethpb.Validator{})
-	sc = &mockSignatureCache{
-		svcb: sc.svcb,
-		vscb: func(sig SignatureData, v ValidatorAtIndexer) (err error) {
-			if expectedSd != sig {
-				t.Error("unexpected signature data")
+
+			initializer := Initializer{
+				shared: &sharedResources{
+					sc: signatureCache,
+					sr: tc.stateByRooter,
+				},
 			}
-			return errors.New("signature, not so good!")
-		},
+
+			verifier := initializer.NewDataColumnsVerifier(columns, GossipColumnSidecarRequirements)
+			err := verifier.ValidProposerSignature(context.Background())
+			require.Equal(t, true, verifier.results.executed(RequireValidProposerSignature))
+
+			if tc.isError {
+				require.ErrorIs(t, err, ErrInvalidProposerSignature)
+				require.NotNil(t, verifier.results.result(RequireValidProposerSignature))
+				return
+			}
+
+			require.NoError(t, err)
+			require.NoError(t, verifier.results.result(RequireValidProposerSignature))
+		})
 	}
-	ini = Initializer{shared: &sharedResources{sc: sc, sr: sbr}}
-	v = ini.NewColumnVerifier(col, GossipColumnSidecarRequirements)
-
-	// make sure all the histories are clean before calling the method
-	// so we don't get polluted by previous usages
-	require.Equal(t, false, sbr.calledForRoot[expectedSd.Parent])
-	require.Equal(t, false, sc.svCalledForSig[expectedSd])
-	require.Equal(t, false, sc.vsCalledForSig[expectedSd])
-
-	// Here we're mainly checking that all the right interfaces get used in the unhappy path
-	require.ErrorIs(t, v.ValidProposerSignature(ctx), ErrInvalidProposerSignature)
-	require.Equal(t, true, sbr.calledForRoot[expectedSd.Parent])
-	
require.Equal(t, true, sc.svCalledForSig[expectedSd]) - require.Equal(t, true, sc.vsCalledForSig[expectedSd]) - require.Equal(t, true, v.results.executed(RequireValidProposerSignature)) - require.NotNil(t, v.results.result(RequireValidProposerSignature)) } -func TestColumnSidecarParentSeen(t *testing.T) { - _, columns := util.GenerateTestDenebBlockWithColumns(t, [32]byte{}, 0, 1) - col := columns[0] +func TestDataColumnsSidecarParentSeen(t *testing.T) { + const ( + columnSlot = 0 + blobCount = 1 + ) + + parentRoot := [fieldparams.RootLength]byte{} + + _, columns := util.GenerateTestDenebBlockWithColumns(t, parentRoot, columnSlot, blobCount) + firstColumn := columns[0] fcHas := &mockForkchoicer{ - HasNodeCB: func(parent [32]byte) bool { - if parent != col.ParentRoot() { + HasNodeCB: func(parent [fieldparams.RootLength]byte) bool { + if parent != firstColumn.ParentRoot() { t.Error("forkchoice.HasNode called with unexpected parent root") } + return true }, } + fcLacks := &mockForkchoicer{ - HasNodeCB: func(parent [32]byte) bool { - if parent != col.ParentRoot() { + HasNodeCB: func(parent [fieldparams.RootLength]byte) bool { + if parent != firstColumn.ParentRoot() { t.Error("forkchoice.HasNode called with unexpected parent root") } + return false }, } - t.Run("happy path", func(t *testing.T) { - ini := Initializer{shared: &sharedResources{fc: fcHas}} - v := ini.NewColumnVerifier(col, GossipColumnSidecarRequirements) - require.NoError(t, v.SidecarParentSeen(nil)) - require.Equal(t, true, v.results.executed(RequireSidecarParentSeen)) - require.NoError(t, v.results.result(RequireSidecarParentSeen)) - }) - t.Run("HasNode false, no badParent cb, expected error", func(t *testing.T) { - ini := Initializer{shared: &sharedResources{fc: fcLacks}} - v := ini.NewColumnVerifier(col, GossipColumnSidecarRequirements) - require.ErrorIs(t, v.SidecarParentSeen(nil), ErrSidecarParentNotSeen) - require.Equal(t, true, v.results.executed(RequireSidecarParentSeen)) - require.NotNil(t, v.results.result(RequireSidecarParentSeen)) - }) - - t.Run("HasNode false, badParent true", func(t *testing.T) { - ini := Initializer{shared: &sharedResources{fc: fcLacks}} - v := ini.NewColumnVerifier(col, GossipColumnSidecarRequirements) - require.NoError(t, v.SidecarParentSeen(badParentCb(t, col.ParentRoot(), true))) - require.Equal(t, true, v.results.executed(RequireSidecarParentSeen)) - require.NoError(t, v.results.result(RequireSidecarParentSeen)) - }) - t.Run("HasNode false, badParent false", func(t *testing.T) { - ini := Initializer{shared: &sharedResources{fc: fcLacks}} - v := ini.NewColumnVerifier(col, GossipColumnSidecarRequirements) - require.ErrorIs(t, v.SidecarParentSeen(badParentCb(t, col.ParentRoot(), false)), ErrSidecarParentNotSeen) - require.Equal(t, true, v.results.executed(RequireSidecarParentSeen)) - require.NotNil(t, v.results.result(RequireSidecarParentSeen)) - }) + testCases := []struct { + name string + forkChoicer Forkchoicer + parentSeen func([fieldparams.RootLength]byte) bool + isError bool + }{ + { + name: "happy path", + forkChoicer: fcHas, + parentSeen: nil, + isError: false, + }, + { + name: "HasNode false, no badParent cb, expected error", + forkChoicer: fcLacks, + parentSeen: nil, + isError: true, + }, + { + name: "HasNode false, badParent true", + forkChoicer: fcLacks, + parentSeen: badParentCb(t, firstColumn.ParentRoot(), true), + isError: false, + }, + { + name: "HasNode false, badParent false", + forkChoicer: fcLacks, + parentSeen: badParentCb(t, firstColumn.ParentRoot(), false), + isError: true, 
+ }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + initializer := Initializer{shared: &sharedResources{fc: tc.forkChoicer}} + verifier := initializer.NewDataColumnsVerifier(columns, GossipColumnSidecarRequirements) + err := verifier.SidecarParentSeen(tc.parentSeen) + require.Equal(t, true, verifier.results.executed(RequireSidecarParentSeen)) + + if tc.isError { + require.ErrorIs(t, err, ErrSidecarParentNotSeen) + require.NotNil(t, verifier.results.result(RequireSidecarParentSeen)) + return + } + + require.NoError(t, err) + require.NoError(t, verifier.results.result(RequireSidecarParentSeen)) + }) + } } -func TestColumnSidecarParentValid(t *testing.T) { - _, columns := util.GenerateTestDenebBlockWithColumns(t, [32]byte{}, 0, 1) - col := columns[0] - t.Run("parent valid", func(t *testing.T) { - ini := Initializer{shared: &sharedResources{}} - v := ini.NewColumnVerifier(col, GossipColumnSidecarRequirements) - require.NoError(t, v.SidecarParentValid(badParentCb(t, col.ParentRoot(), false))) - require.Equal(t, true, v.results.executed(RequireSidecarParentValid)) - require.NoError(t, v.results.result(RequireSidecarParentValid)) - }) - t.Run("parent not valid", func(t *testing.T) { - ini := Initializer{shared: &sharedResources{}} - v := ini.NewColumnVerifier(col, GossipColumnSidecarRequirements) - require.ErrorIs(t, v.SidecarParentValid(badParentCb(t, col.ParentRoot(), true)), ErrSidecarParentInvalid) - require.Equal(t, true, v.results.executed(RequireSidecarParentValid)) - require.NotNil(t, v.results.result(RequireSidecarParentValid)) - }) +func TestDataColumnsSidecarParentValid(t *testing.T) { + testCases := []struct { + name string + badParentCbReturn bool + isError bool + }{ + { + name: "parent valid", + badParentCbReturn: false, + isError: false, + }, + { + name: "parent not valid", + badParentCbReturn: true, + isError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + const ( + columnSlot = 0 + blobCount = 1 + ) + + parentRoot := [fieldparams.RootLength]byte{} + + _, columns := util.GenerateTestDenebBlockWithColumns(t, parentRoot, columnSlot, blobCount) + firstColumn := columns[0] + + initializer := Initializer{shared: &sharedResources{}} + verifier := initializer.NewDataColumnsVerifier(columns, GossipColumnSidecarRequirements) + err := verifier.SidecarParentValid(badParentCb(t, firstColumn.ParentRoot(), tc.badParentCbReturn)) + require.Equal(t, true, verifier.results.executed(RequireSidecarParentValid)) + + if tc.isError { + require.ErrorIs(t, err, ErrSidecarParentInvalid) + require.NotNil(t, verifier.results.result(RequireSidecarParentValid)) + return + } + + require.NoError(t, err) + require.NoError(t, verifier.results.result(RequireSidecarParentValid)) + }) + } } func TestColumnSidecarParentSlotLower(t *testing.T) { _, columns := util.GenerateTestDenebBlockWithColumns(t, [32]byte{}, 1, 1) - col := columns[0] + firstColumn := columns[0] + cases := []struct { - name string - fcSlot primitives.Slot - fcErr error - err error + name string + forkChoiceSlot primitives.Slot + forkChoiceError, err error }{ { - name: "not in fc", - fcErr: errors.New("not in forkchoice"), - err: ErrSlotNotAfterParent, + name: "Not in forkchoice", + forkChoiceError: errors.New("not in forkchoice"), + err: ErrSlotNotAfterParent, }, { - name: "in fc, slot lower", - fcSlot: col.Slot() - 1, + name: "In forkchoice, slot lower", + forkChoiceSlot: firstColumn.Slot() - 1, }, { - name: "in fc, slot equal", - fcSlot: col.Slot(), - err: 
ErrSlotNotAfterParent,
+			name:           "In forkchoice, slot equal",
+			forkChoiceSlot: firstColumn.Slot(),
+			err:            ErrSlotNotAfterParent,
 		},
 		{
-			name:   "in fc, slot higher",
-			fcSlot: col.Slot() + 1,
-			err:    ErrSlotNotAfterParent,
+			name:           "In forkchoice, slot higher",
+			forkChoiceSlot: firstColumn.Slot() + 1,
+			err:            ErrSlotNotAfterParent,
 		},
 	}
+
 	for _, c := range cases {
 		t.Run(c.name, func(t *testing.T) {
-			ini := Initializer{shared: &sharedResources{fc: &mockForkchoicer{SlotCB: func(r [32]byte) (primitives.Slot, error) {
-				if col.ParentRoot() != r {
-					t.Error("forkchoice.Slot called with unexpected parent root")
-				}
-				return c.fcSlot, c.fcErr
-			}}}}
-			v := ini.NewColumnVerifier(col, GossipColumnSidecarRequirements)
-			err := v.SidecarParentSlotLower()
-			require.Equal(t, true, v.results.executed(RequireSidecarParentSlotLower))
+			initializer := Initializer{
+				shared: &sharedResources{fc: &mockForkchoicer{
+					SlotCB: func(r [32]byte) (primitives.Slot, error) {
+						if firstColumn.ParentRoot() != r {
+							t.Error("forkchoice.Slot called with unexpected parent root")
+						}
+
+						return c.forkChoiceSlot, c.forkChoiceError
+					},
+				}},
+			}
+
+			verifier := initializer.NewDataColumnsVerifier(columns, GossipColumnSidecarRequirements)
+			err := verifier.SidecarParentSlotLower()
+			require.Equal(t, true, verifier.results.executed(RequireSidecarParentSlotLower))
+
 			if c.err == nil {
 				require.NoError(t, err)
-				require.NoError(t, v.results.result(RequireSidecarParentSlotLower))
-			} else {
-				require.ErrorIs(t, err, c.err)
-				require.NotNil(t, v.results.result(RequireSidecarParentSlotLower))
+				require.NoError(t, verifier.results.result(RequireSidecarParentSlotLower))
+				return
 			}
+
+			require.ErrorIs(t, err, c.err)
+			require.NotNil(t, verifier.results.result(RequireSidecarParentSlotLower))
 		})
 	}
 }
 
-func TestColumnSidecarDescendsFromFinalized(t *testing.T) {
-	_, columns := util.GenerateTestDenebBlockWithColumns(t, [32]byte{}, 0, 1)
-	col := columns[0]
-	t.Run("not canonical", func(t *testing.T) {
-		ini := Initializer{shared: &sharedResources{fc: &mockForkchoicer{HasNodeCB: func(r [32]byte) bool {
-			if col.ParentRoot() != r {
-				t.Error("forkchoice.Slot called with unexpected parent root")
+func TestDataColumnsSidecarDescendsFromFinalized(t *testing.T) {
+	testCases := []struct {
+		name            string
+		hasNodeCBReturn bool
+		isError         bool
+	}{
+		{
+			name:            "Not canonical",
+			hasNodeCBReturn: false,
+			isError:         true,
+		},
+		{
+			name:            "Canonical",
+			hasNodeCBReturn: true,
+			isError:         false,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			const (
+				columnSlot = 0
+				blobCount  = 1
+			)
+
+			parentRoot := [fieldparams.RootLength]byte{}
+
+			_, columns := util.GenerateTestDenebBlockWithColumns(t, parentRoot, columnSlot, blobCount)
+			firstColumn := columns[0]
+
+			initializer := Initializer{
+				shared: &sharedResources{
+					fc: &mockForkchoicer{
+						HasNodeCB: func(r [fieldparams.RootLength]byte) bool {
+							if firstColumn.ParentRoot() != r {
+								t.Error("forkchoice.HasNode called with unexpected parent root")
+							}
+
+							return tc.hasNodeCBReturn
+						},
+					},
+				},
 			}
-			return false
-		}}}}
-		v := ini.NewColumnVerifier(col, GossipColumnSidecarRequirements)
-		require.ErrorIs(t, v.SidecarDescendsFromFinalized(), ErrSidecarNotFinalizedDescendent)
-		require.Equal(t, true, v.results.executed(RequireSidecarDescendsFromFinalized))
-		require.NotNil(t, v.results.result(RequireSidecarDescendsFromFinalized))
-	})
-	t.Run("canonical", func(t *testing.T) {
-		ini := Initializer{shared: &sharedResources{fc: &mockForkchoicer{HasNodeCB: func(r [32]byte) bool {
-			if col.ParentRoot() != r {
-				t.Error("forkchoice.Slot called with unexpected parent root")
+
+			verifier := initializer.NewDataColumnsVerifier(columns, GossipColumnSidecarRequirements)
+			err := verifier.SidecarDescendsFromFinalized()
+			require.Equal(t, true, verifier.results.executed(RequireSidecarDescendsFromFinalized))
+
+			if tc.isError {
+				require.ErrorIs(t, err, ErrSidecarNotFinalizedDescendent)
+				require.NotNil(t, verifier.results.result(RequireSidecarDescendsFromFinalized))
+				return
 			}
-			return true
-		}}}}
-		v := ini.NewColumnVerifier(col, GossipColumnSidecarRequirements)
-		require.NoError(t, v.SidecarDescendsFromFinalized())
-		require.Equal(t, true, v.results.executed(RequireSidecarDescendsFromFinalized))
-		require.NoError(t, v.results.result(RequireSidecarDescendsFromFinalized))
-	})
-}
+
+			require.NoError(t, err)
+			require.NoError(t, verifier.results.result(RequireSidecarDescendsFromFinalized))
+		})
+	}
+}
 
-func TestColumnSidecarInclusionProven(t *testing.T) {
-	_, columns := util.GenerateTestDenebBlockWithColumns(t, [32]byte{}, 0, 1)
-	col := columns[0]
-
-	ini := Initializer{}
-	v := ini.NewColumnVerifier(col, GossipColumnSidecarRequirements)
-	require.NoError(t, v.SidecarInclusionProven())
-	require.Equal(t, true, v.results.executed(RequireSidecarInclusionProven))
-	require.NoError(t, v.results.result(RequireSidecarInclusionProven))
-
-	// Invert bits of the first byte of the body root to mess up the proof
-	byte0 := col.SignedBlockHeader.Header.BodyRoot[0]
-	col.SignedBlockHeader.Header.BodyRoot[0] = byte0 ^ 255
-	v = ini.NewColumnVerifier(col, GossipColumnSidecarRequirements)
-	require.ErrorIs(t, v.SidecarInclusionProven(), ErrSidecarInclusionProofInvalid)
-	require.Equal(t, true, v.results.executed(RequireSidecarInclusionProven))
-	require.NotNil(t, v.results.result(RequireSidecarInclusionProven))
-}
-
-func TestColumnSidecarInclusionProvenElectra(t *testing.T) {
-	_, columns := util.GenerateTestDenebBlockWithColumns(t, [32]byte{}, 0, 1)
-	col := columns[0]
-
-	ini := Initializer{}
-	v := ini.NewColumnVerifier(col, GossipColumnSidecarRequirements)
-	require.NoError(t, v.SidecarInclusionProven())
-	require.Equal(t, true, v.results.executed(RequireSidecarInclusionProven))
-	require.NoError(t, v.results.result(RequireSidecarInclusionProven))
-
-	// Invert bits of the first byte of the body root to mess up the proof
-	byte0 := col.SignedBlockHeader.Header.BodyRoot[0]
-	col.SignedBlockHeader.Header.BodyRoot[0] = byte0 ^ 255
-	v = ini.NewColumnVerifier(col, GossipColumnSidecarRequirements)
-	require.ErrorIs(t, v.SidecarInclusionProven(), ErrSidecarInclusionProofInvalid)
-	require.Equal(t, true, v.results.executed(RequireSidecarInclusionProven))
-	require.NotNil(t, v.results.result(RequireSidecarInclusionProven))
+func TestDataColumnsSidecarInclusionProven(t *testing.T) {
+	testCases := []struct {
+		name    string
+		alter   bool
+		isError bool
+	}{
+		{
+			name:    "Inclusion proven",
+			alter:   false,
+			isError: false,
+		},
+		{
+			name:    "Inclusion not proven",
+			alter:   true,
+			isError: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			const (
+				columnSlot = 0
+				blobCount  = 1
+			)
+
+			parentRoot := [fieldparams.RootLength]byte{}
+			_, columns := util.GenerateTestDenebBlockWithColumns(t, parentRoot, columnSlot, blobCount)
+			if tc.alter {
+				firstColumn := columns[0]
+				byte0 := firstColumn.SignedBlockHeader.Header.BodyRoot[0]
+				firstColumn.SignedBlockHeader.Header.BodyRoot[0] = byte0 ^ 255
+			}
+
+			initializer := Initializer{}
+			verifier := 
initializer.NewDataColumnsVerifier(columns, GossipColumnSidecarRequirements) + err := verifier.SidecarInclusionProven() + require.Equal(t, true, verifier.results.executed(RequireSidecarInclusionProven)) + + if tc.isError { + require.ErrorIs(t, err, ErrSidecarInclusionProofInvalid) + require.NotNil(t, verifier.results.result(RequireSidecarInclusionProven)) + return + } + + require.NoError(t, err) + require.NoError(t, verifier.results.result(RequireSidecarInclusionProven)) + }) + } } -func TestColumnSidecarKzgProofVerified(t *testing.T) { - _, columns := util.GenerateTestDenebBlockWithColumns(t, [32]byte{}, 0, 1) - col := columns[0] - passes := func(vb blocks.RODataColumn) (bool, error) { - require.Equal(t, true, reflect.DeepEqual(col.KzgCommitments, vb.KzgCommitments)) - return true, nil +func TestDataColumnsSidecarKzgProofVerified(t *testing.T) { + testCases := []struct { + isError bool + verifyDataColumnsCommitmentReturn bool + verifyDataColumnsCommitmentError error + name string + }{ + { + name: "KZG proof verified", + verifyDataColumnsCommitmentReturn: true, + verifyDataColumnsCommitmentError: nil, + isError: false, + }, + { + name: "KZG proof error", + verifyDataColumnsCommitmentReturn: false, + verifyDataColumnsCommitmentError: errors.New("KZG proof error"), + isError: true, + }, + { + name: "KZG proof not verified", + verifyDataColumnsCommitmentReturn: false, + verifyDataColumnsCommitmentError: nil, + isError: true, + }, } - v := &RODataColumnVerifier{verifyDataColumnCommitment: passes, results: newResults(), dataColumn: col} - require.NoError(t, v.SidecarKzgProofVerified()) - require.Equal(t, true, v.results.executed(RequireSidecarKzgProofVerified)) - require.NoError(t, v.results.result(RequireSidecarKzgProofVerified)) - - fails := func(vb blocks.RODataColumn) (bool, error) { - require.Equal(t, true, reflect.DeepEqual(col.KzgCommitments, vb.KzgCommitments)) - return false, errors.New("bad blob") + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + const ( + columnSlot = 0 + blobCount = 1 + ) + + parentRoot := [fieldparams.RootLength]byte{} + _, columns := util.GenerateTestDenebBlockWithColumns(t, parentRoot, columnSlot, blobCount) + firstColumn := columns[0] + + verifyDataColumnsCommitment := func(roDataColumns []blocks.RODataColumn) (bool, error) { + for _, roDataColumn := range roDataColumns { + require.Equal(t, true, reflect.DeepEqual(firstColumn.KzgCommitments, roDataColumn.KzgCommitments)) + } + + return tc.verifyDataColumnsCommitmentReturn, tc.verifyDataColumnsCommitmentError + } + + verifier := &RODataColumnsVerifier{ + results: newResults(), + dataColumns: columns, + verifyDataColumnsCommitment: verifyDataColumnsCommitment, + } + + err := verifier.SidecarKzgProofVerified() + require.Equal(t, true, verifier.results.executed(RequireSidecarKzgProofVerified)) + + if tc.isError { + require.ErrorIs(t, err, ErrSidecarKzgProofInvalid) + require.NotNil(t, verifier.results.result(RequireSidecarKzgProofVerified)) + return + } + + require.NoError(t, err) + require.NoError(t, verifier.results.result(RequireSidecarKzgProofVerified)) + }) } - v = &RODataColumnVerifier{results: newResults(), dataColumn: col, verifyDataColumnCommitment: fails} - require.ErrorIs(t, v.SidecarKzgProofVerified(), ErrSidecarKzgProofInvalid) - require.Equal(t, true, v.results.executed(RequireSidecarKzgProofVerified)) - require.NotNil(t, v.results.result(RequireSidecarKzgProofVerified)) } -func TestColumnSidecarProposerExpected(t *testing.T) { - ctx := context.Background() - _, columns := 
util.GenerateTestDenebBlockWithColumns(t, [32]byte{}, 1, 1) - col := columns[0] - t.Run("cached, matches", func(t *testing.T) { - ini := Initializer{shared: &sharedResources{pc: &mockProposerCache{ProposerCB: pcReturnsIdx(col.ProposerIndex())}, fc: &mockForkchoicer{TargetRootForEpochCB: fcReturnsTargetRoot([32]byte{})}}} - v := ini.NewColumnVerifier(col, GossipColumnSidecarRequirements) - require.NoError(t, v.SidecarProposerExpected(ctx)) - require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected)) - require.NoError(t, v.results.result(RequireSidecarProposerExpected)) - }) - t.Run("cached, does not match", func(t *testing.T) { - ini := Initializer{shared: &sharedResources{pc: &mockProposerCache{ProposerCB: pcReturnsIdx(col.ProposerIndex() + 1)}, fc: &mockForkchoicer{TargetRootForEpochCB: fcReturnsTargetRoot([32]byte{})}}} - v := ini.NewColumnVerifier(col, GossipColumnSidecarRequirements) - require.ErrorIs(t, v.SidecarProposerExpected(ctx), ErrSidecarUnexpectedProposer) - require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected)) - require.NotNil(t, v.results.result(RequireSidecarProposerExpected)) - }) - t.Run("not cached, state lookup failure", func(t *testing.T) { - ini := Initializer{shared: &sharedResources{sr: sbrNotFound(t, col.ParentRoot()), pc: &mockProposerCache{ProposerCB: pcReturnsNotFound()}, fc: &mockForkchoicer{TargetRootForEpochCB: fcReturnsTargetRoot([32]byte{})}}} - v := ini.NewColumnVerifier(col, GossipColumnSidecarRequirements) - require.ErrorIs(t, v.SidecarProposerExpected(ctx), ErrSidecarUnexpectedProposer) - require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected)) - require.NotNil(t, v.results.result(RequireSidecarProposerExpected)) - }) - - t.Run("not cached, proposer matches", func(t *testing.T) { - pc := &mockProposerCache{ - ProposerCB: pcReturnsNotFound(), - ComputeProposerCB: func(ctx context.Context, root [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error) { - require.Equal(t, col.ParentRoot(), root) - require.Equal(t, col.Slot(), slot) - return col.ProposerIndex(), nil +func TestDataColumnsSidecarProposerExpected(t *testing.T) { + const ( + columnSlot = 1 + blobCount = 1 + ) + + parentRoot := [fieldparams.RootLength]byte{} + _, columns := util.GenerateTestDenebBlockWithColumns(t, parentRoot, columnSlot, blobCount) + firstColumn := columns[0] + + _, newColumns := util.GenerateTestDenebBlockWithColumns(t, parentRoot, 2*params.BeaconConfig().SlotsPerEpoch, blobCount) + firstNewColumn := newColumns[0] + + validator := ðpb.Validator{} + + commonComputeProposerCB := func(_ context.Context, root [fieldparams.RootLength]byte, slot primitives.Slot, _ state.BeaconState) (primitives.ValidatorIndex, error) { + require.Equal(t, firstColumn.ParentRoot(), root) + require.Equal(t, firstColumn.Slot(), slot) + return firstColumn.ProposerIndex(), nil + } + + testCases := []struct { + name string + stateByRooter StateByRooter + proposerCache ProposerCache + columns []blocks.RODataColumn + isError bool + }{ + { + name: "Cached, matches", + stateByRooter: nil, + proposerCache: &mockProposerCache{ + ProposerCB: pcReturnsIdx(firstColumn.ProposerIndex()), + }, + columns: columns, + isError: false, + }, + { + name: "Cached, does not match", + stateByRooter: nil, + proposerCache: &mockProposerCache{ + ProposerCB: pcReturnsIdx(firstColumn.ProposerIndex() + 1), + }, + columns: columns, + isError: true, + }, + { + name: "Not cached, state lookup failure", + stateByRooter: sbrNotFound(t, 
firstColumn.ParentRoot()), + proposerCache: &mockProposerCache{ + ProposerCB: pcReturnsNotFound(), + }, + columns: columns, + isError: true, + }, + { + name: "Not cached, proposer matches", + stateByRooter: sbrForValOverride(firstColumn.ProposerIndex(), validator), + proposerCache: &mockProposerCache{ + ProposerCB: pcReturnsNotFound(), + ComputeProposerCB: commonComputeProposerCB, }, - } - ini := Initializer{shared: &sharedResources{sr: sbrForValOverride(col.ProposerIndex(), &ethpb.Validator{}), pc: pc, fc: &mockForkchoicer{TargetRootForEpochCB: fcReturnsTargetRoot([32]byte{})}}} - v := ini.NewColumnVerifier(col, GossipColumnSidecarRequirements) - require.NoError(t, v.SidecarProposerExpected(ctx)) - require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected)) - require.NoError(t, v.results.result(RequireSidecarProposerExpected)) - }) - - t.Run("not cached, proposer matches for next epoch", func(t *testing.T) { - _, newCols := util.GenerateTestDenebBlockWithColumns(t, [32]byte{}, 2*params.BeaconConfig().SlotsPerEpoch, 1) - - newCol := newCols[0] - pc := &mockProposerCache{ - ProposerCB: pcReturnsNotFound(), - ComputeProposerCB: func(ctx context.Context, root [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error) { - require.Equal(t, newCol.ParentRoot(), root) - require.Equal(t, newCol.Slot(), slot) - return col.ProposerIndex(), nil + columns: columns, + isError: false, + }, - } - ini := Initializer{shared: &sharedResources{sr: sbrForValOverride(newCol.ProposerIndex(), &ethpb.Validator{}), pc: pc, fc: &mockForkchoicer{TargetRootForEpochCB: fcReturnsTargetRoot([32]byte{})}}} - v := ini.NewColumnVerifier(newCol, GossipColumnSidecarRequirements) - require.NoError(t, v.SidecarProposerExpected(ctx)) - require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected)) - require.NoError(t, v.results.result(RequireSidecarProposerExpected)) - }) - t.Run("not cached, proposer does not match", func(t *testing.T) { - pc := &mockProposerCache{ - ProposerCB: pcReturnsNotFound(), - ComputeProposerCB: func(ctx context.Context, root [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error) { - require.Equal(t, col.ParentRoot(), root) - require.Equal(t, col.Slot(), slot) - return col.ProposerIndex() + 1, nil + { + name: "Not cached, proposer matches for next epoch", + stateByRooter: sbrForValOverride(firstNewColumn.ProposerIndex(), validator), + proposerCache: &mockProposerCache{ + ProposerCB: pcReturnsNotFound(), + ComputeProposerCB: func(_ context.Context, root [32]byte, slot primitives.Slot, _ state.BeaconState) (primitives.ValidatorIndex, error) { + require.Equal(t, firstNewColumn.ParentRoot(), root) + require.Equal(t, firstNewColumn.Slot(), slot) + return firstColumn.ProposerIndex(), nil + }, + }, + columns: newColumns, + isError: false, + }, + { + name: "Not cached, proposer does not match", + stateByRooter: sbrForValOverride(firstColumn.ProposerIndex(), validator), + proposerCache: &mockProposerCache{ + ProposerCB: pcReturnsNotFound(), + ComputeProposerCB: func(_ context.Context, root [32]byte, slot primitives.Slot, _ state.BeaconState) (primitives.ValidatorIndex, error) { + require.Equal(t, firstColumn.ParentRoot(), root) + require.Equal(t,
firstColumn.Slot(), slot) + return firstColumn.ProposerIndex() + 1, nil + }, }, - } - ini := Initializer{shared: &sharedResources{sr: sbrForValOverride(col.ProposerIndex(), &ethpb.Validator{}), pc: pc, fc: &mockForkchoicer{TargetRootForEpochCB: fcReturnsTargetRoot([32]byte{})}}} - v := ini.NewColumnVerifier(col, GossipColumnSidecarRequirements) - require.ErrorIs(t, v.SidecarProposerExpected(ctx), ErrSidecarUnexpectedProposer) - require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected)) - require.NotNil(t, v.results.result(RequireSidecarProposerExpected)) - }) - t.Run("not cached, ComputeProposer fails", func(t *testing.T) { - pc := &mockProposerCache{ - ProposerCB: pcReturnsNotFound(), - ComputeProposerCB: func(ctx context.Context, root [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error) { - require.Equal(t, col.ParentRoot(), root) - require.Equal(t, col.Slot(), slot) - return 0, errors.New("ComputeProposer failed") + columns: columns, + isError: true, + }, + { + name: "Not cached, ComputeProposer fails", + stateByRooter: sbrForValOverride(firstColumn.ProposerIndex(), validator), + proposerCache: &mockProposerCache{ + ProposerCB: pcReturnsNotFound(), + ComputeProposerCB: func(_ context.Context, root [32]byte, slot primitives.Slot, _ state.BeaconState) (primitives.ValidatorIndex, error) { + require.Equal(t, firstColumn.ParentRoot(), root) + require.Equal(t, firstColumn.Slot(), slot) + return 0, errors.New("ComputeProposer failed") + }, }, - } - ini := Initializer{shared: &sharedResources{sr: sbrForValOverride(col.ProposerIndex(), &ethpb.Validator{}), pc: pc, fc: &mockForkchoicer{TargetRootForEpochCB: fcReturnsTargetRoot([32]byte{})}}} - v := ini.NewColumnVerifier(col, GossipColumnSidecarRequirements) - require.ErrorIs(t, v.SidecarProposerExpected(ctx), ErrSidecarUnexpectedProposer) - require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected)) - require.NotNil(t, v.results.result(RequireSidecarProposerExpected)) - }) + columns: columns, + isError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + initializer := Initializer{ + shared: &sharedResources{ + sr: tc.stateByRooter, + pc: tc.proposerCache, + fc: &mockForkchoicer{ + TargetRootForEpochCB: fcReturnsTargetRoot([fieldparams.RootLength]byte{}), + }, + }, + } + + verifier := initializer.NewDataColumnsVerifier(tc.columns, GossipColumnSidecarRequirements) + err := verifier.SidecarProposerExpected(context.Background()) + + require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected)) + + if tc.isError { + require.ErrorIs(t, err, ErrSidecarUnexpectedProposer) + require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected)) + return + } + + require.NoError(t, err) + require.NoError(t, verifier.results.result(RequireSidecarProposerExpected)) + }) + } } func TestColumnRequirementSatisfaction(t *testing.T) { - _, columns := util.GenerateTestDenebBlockWithColumns(t, [32]byte{}, 1, 1) - col := columns[0] - ini := Initializer{} - v := ini.NewColumnVerifier(col, GossipColumnSidecarRequirements) + const ( + columnSlot = 1 + blobCount = 1 + ) + + parentRoot := [fieldparams.RootLength]byte{} - _, err := v.VerifiedRODataColumn() + _, columns := util.GenerateTestDenebBlockWithColumns(t, parentRoot, columnSlot, blobCount) + initializer := Initializer{} + verifier := initializer.NewDataColumnsVerifier(columns, GossipColumnSidecarRequirements) + + _, err := verifier.VerifiedRODataColumns() require.ErrorIs(t, err,
ErrColumnInvalid) + var me VerificationMultiError ok := errors.As(err, &me) require.Equal(t, true, ok) fails := me.Failures() - // we haven't performed any verification, so all the results should be this type + + // We haven't performed any verification, so all the results should be this type. for _, v := range fails { require.ErrorIs(t, v, ErrMissingVerification) } - // satisfy everything through the backdoor and ensure we get the verified ro blob at the end + // Satisfy everything through the backdoor and ensure we get the verified ro data columns at the end. for _, r := range GossipColumnSidecarRequirements { - v.results.record(r, nil) + verifier.results.record(r, nil) } - require.Equal(t, true, v.results.allSatisfied()) - _, err = v.VerifiedRODataColumn() - require.NoError(t, err) -} -func TestStateCaching(t *testing.T) { - _, columns := util.GenerateTestDenebBlockWithColumns(t, [32]byte{}, 1, 1) - col := columns[0] - ini := Initializer{shared: &sharedResources{sr: sbrForValOverride(col.ProposerIndex(), &ethpb.Validator{})}} - v := ini.NewColumnVerifier(col, GossipColumnSidecarRequirements) - _, err := v.parentState(context.Background()) - require.NoError(t, err) + require.Equal(t, true, verifier.results.allSatisfied()) + _, err = verifier.VerifiedRODataColumns() - // Utilize the cached state. - v.sr = nil - _, err = v.parentState(context.Background()) require.NoError(t, err) } func TestColumnSatisfyRequirement(t *testing.T) { - _, columns := util.GenerateTestDenebBlockWithColumns(t, [32]byte{}, 1, 1) - col := columns[0] - ini := Initializer{} - v := ini.NewColumnVerifier(col, GossipColumnSidecarRequirements) - require.Equal(t, false, v.results.executed(RequireDataColumnIndexInBounds)) + const ( + columnSlot = 1 + blobCount = 1 + ) + parentRoot := [fieldparams.RootLength]byte{} + + _, columns := util.GenerateTestDenebBlockWithColumns(t, parentRoot, columnSlot, blobCount) + initializer := Initializer{} + + v := initializer.NewDataColumnsVerifier(columns, GossipColumnSidecarRequirements) + require.Equal(t, false, v.results.executed(RequireDataColumnIndexInBounds)) v.SatisfyRequirement(RequireDataColumnIndexInBounds) require.Equal(t, true, v.results.executed(RequireDataColumnIndexInBounds)) } diff --git a/beacon-chain/verification/initializer.go b/beacon-chain/verification/initializer.go index 4e7112b2c90a..246719847340 100644 --- a/beacon-chain/verification/initializer.go +++ b/beacon-chain/verification/initializer.go @@ -58,13 +58,13 @@ func (ini *Initializer) NewBlobVerifier(b blocks.ROBlob, reqs []Requirement) *RO } } -// NewColumnVerifier creates a DataColumnVerifier for a single data column, with the given set of requirements. -func (ini *Initializer) NewColumnVerifier(d blocks.RODataColumn, reqs []Requirement) *RODataColumnVerifier { - return &RODataColumnVerifier{ - sharedResources: ini.shared, - dataColumn: d, - results: newResults(reqs...), - verifyDataColumnCommitment: peerdas.VerifyDataColumnSidecarKZGProofs, +// NewDataColumnsVerifier creates a RODataColumnsVerifier for a list of data columns, with the given set of requirements.
+func (ini *Initializer) NewDataColumnsVerifier(roDataColumns []blocks.RODataColumn, reqs []Requirement) *RODataColumnsVerifier { + return &RODataColumnsVerifier{ + sharedResources: ini.shared, + dataColumns: roDataColumns, + results: newResults(reqs...), + verifyDataColumnsCommitment: peerdas.VerifyDataColumnsSidecarKZGProofs, } } diff --git a/beacon-chain/verification/interface.go b/beacon-chain/verification/interface.go index 19a7607ce67f..53b19090b980 100644 --- a/beacon-chain/verification/interface.go +++ b/beacon-chain/verification/interface.go @@ -30,11 +30,11 @@ type BlobVerifier interface { // able to mock Initializer.NewBlobVerifier without complex setup. type NewBlobVerifier func(b blocks.ROBlob, reqs []Requirement) BlobVerifier -// DataColumnVerifier defines the methods implemented by the RODataColumnVerifier. +// DataColumnsVerifier defines the methods implemented by the RODataColumnsVerifier. // It serves a very similar purpose as the blob verifier interface for data columns. -type DataColumnVerifier interface { - VerifiedRODataColumn() (blocks.VerifiedRODataColumn, error) - DataColumnIndexInBounds() (err error) +type DataColumnsVerifier interface { + VerifiedRODataColumns() ([]blocks.VerifiedRODataColumn, error) + DataColumnsIndexInBounds() (err error) NotFromFutureSlot() (err error) SlotAboveFinalized() (err error) ValidProposerSignature(ctx context.Context) (err error) @@ -48,6 +48,6 @@ type DataColumnVerifier interface { SatisfyRequirement(Requirement) } -// NewColumnVerifier is a function signature that can be used to mock a setup where a +// NewDataColumnsVerifier is a function signature that can be used to mock a setup where a // column verifier can be easily initialized. -type NewColumnVerifier func(dc blocks.RODataColumn, reqs []Requirement) DataColumnVerifier +type NewDataColumnsVerifier func(dataColumns []blocks.RODataColumn, reqs []Requirement) DataColumnsVerifier
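Usage note (not part of the diff): the sketch below shows how a caller might drive the new batched verifier end to end. The verifyGossipColumns helper, its package, and the choice of which checks to run explicitly are illustrative assumptions; NewDataColumnsVerifier, DataColumnsIndexInBounds, SidecarKzgProofVerified, VerifiedRODataColumns, and GossipColumnSidecarRequirements are the API introduced or renamed by this patch.

package example

import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
)

// verifyGossipColumns runs each check once over the whole slice of columns
// instead of once per column, which is the point of batching the verifier.
func verifyGossipColumns(ini *verification.Initializer, columns []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error) {
	verifier := ini.NewDataColumnsVerifier(columns, verification.GossipColumnSidecarRequirements)

	// Structural check: every column index in the batch is within bounds.
	if err := verifier.DataColumnsIndexInBounds(); err != nil {
		return nil, err
	}

	// KZG check: a single VerifyCellKZGProofBatch call covers all columns.
	if err := verifier.SidecarKzgProofVerified(); err != nil {
		return nil, err
	}

	// The remaining gossip requirements (proposer signature, inclusion proof,
	// etc.) are elided here; VerifiedRODataColumns keeps returning an error
	// until every requirement in the set has been satisfied.
	return verifier.VerifiedRODataColumns()
}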