diff --git a/cmd/integration/commands/stages_zkevm.go b/cmd/integration/commands/stages_zkevm.go
index e67466647b3..f3882a293f7 100644
--- a/cmd/integration/commands/stages_zkevm.go
+++ b/cmd/integration/commands/stages_zkevm.go
@@ -140,7 +140,6 @@ func newSyncZk(ctx context.Context, db kv.RwDB) (consensus.Engine, *vm.Config, *
         nil,
         nil,
         nil,
-        nil,
     )
 } else {
     stages = stages2.NewDefaultZkStages(
@@ -157,6 +156,7 @@ func newSyncZk(ctx context.Context, db kv.RwDB) (consensus.Engine, *vm.Config, *
         nil,
         nil,
         nil,
+        nil,
         nil)
 }
diff --git a/eth/backend.go b/eth/backend.go
index b241f5fc14c..93e25a55832 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -1035,31 +1035,27 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
         log.Info("Starting sequencer in L1 recovery mode", "startBlock", cfg.L1SyncStartBlock)
     }
 
-    seqAndVerifTopics := [][]libcommon.Hash{{
-        contracts.SequencedBatchTopicPreEtrog,
-        contracts.SequencedBatchTopicEtrog,
-        contracts.RollbackBatchesTopic,
-        contracts.VerificationTopicPreEtrog,
-        contracts.VerificationTopicEtrog,
-        contracts.VerificationValidiumTopicEtrog,
-    }}
-
-    seqAndVerifL1Contracts := []libcommon.Address{cfg.AddressRollup, cfg.AddressAdmin, cfg.AddressZkevm}
-
-    var l1Topics [][]libcommon.Hash
-    var l1Contracts []libcommon.Address
-    if isSequencer {
-        l1Topics = [][]libcommon.Hash{{
+    combinedL1Topics := [][]libcommon.Hash{
+        {
+            contracts.SequencedBatchTopicPreEtrog,
+            contracts.SequencedBatchTopicEtrog,
+            contracts.RollbackBatchesTopic,
+            contracts.VerificationTopicPreEtrog,
+            contracts.VerificationTopicEtrog,
+            contracts.VerificationValidiumTopicEtrog,
             contracts.InitialSequenceBatchesTopic,
             contracts.AddNewRollupTypeTopic,
             contracts.AddNewRollupTypeTopicBanana,
             contracts.CreateNewRollupTopic,
             contracts.UpdateRollupTopic,
-        }}
-        l1Contracts = []libcommon.Address{cfg.AddressZkevm, cfg.AddressRollup}
-    } else {
-        l1Topics = seqAndVerifTopics
-        l1Contracts = seqAndVerifL1Contracts
+            contracts.SequenceBatchesTopic,
+        },
+    }
+
+    combinedL1Contracts := []libcommon.Address{
+        cfg.AddressRollup,
+        cfg.AddressAdmin,
+        cfg.AddressZkevm,
     }
 
     ethermanClients := make([]syncer.IEtherman, len(backend.etherManClients))
@@ -1067,21 +1063,11 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
         ethermanClients[i] = c.EthClient
     }
 
-    seqVerSyncer := syncer.NewL1Syncer(
-        ctx,
-        ethermanClients,
-        seqAndVerifL1Contracts,
-        seqAndVerifTopics,
-        cfg.L1BlockRange,
-        cfg.L1QueryDelay,
-        cfg.L1HighestBlockType,
-    )
-
     backend.l1Syncer = syncer.NewL1Syncer(
         ctx,
         ethermanClients,
-        l1Contracts,
-        l1Topics,
+        combinedL1Contracts,
+        combinedL1Topics,
         cfg.L1BlockRange,
         cfg.L1QueryDelay,
         cfg.L1HighestBlockType,
@@ -1153,18 +1139,6 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
     // we switch context from being an RPC node to a sequencer
     backend.txPool2.ForceUpdateLatestBlock(executionProgress)
 
-    l1BlockSyncer := syncer.NewL1Syncer(
-        ctx,
-        ethermanClients,
-        []libcommon.Address{cfg.AddressZkevm, cfg.AddressRollup},
-        [][]libcommon.Hash{{
-            contracts.SequenceBatchesTopic,
-        }},
-        cfg.L1BlockRange,
-        cfg.L1QueryDelay,
-        cfg.L1HighestBlockType,
-    )
-
     backend.syncStages = stages2.NewSequencerZkStages(
         backend.sentryCtx,
         backend.chainDB,
@@ -1178,8 +1152,6 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
         backend.engine,
         dataStreamServer,
         backend.l1Syncer,
-        seqVerSyncer,
-        l1BlockSyncer,
         backend.txPool2,
         backend.txPool2DB,
         verifier,
@@ -1217,6 +1189,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
         backend.forkValidator,
         backend.engine,
         backend.l1Syncer,
+        l1InfoTreeUpdater,
         streamClient,
         dataStreamServer,
         l1InfoTreeUpdater,
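With seqVerSyncer and l1BlockSyncer removed, both node modes construct a single syncer.NewL1Syncer over the merged contract and topic lists. The merge works because an eth_getLogs filter already OR's the addresses with each other and OR's topic values that share a position, so one query covers everything the separate syncers used to watch; the stage then classifies each returned log by topic, which is what the l1_log_parser changes later in this patch handle. A minimal, self-contained sketch of that filter semantics using go-ethereum's public ethereum.FilterQuery type — the addresses and topic hashes below are made-up placeholders, not the real contract values:

// Illustrative only: combined log filtering in one query.
package main

import (
    "fmt"
    "math/big"

    ethereum "github.com/ethereum/go-ethereum"
    "github.com/ethereum/go-ethereum/common"
)

func main() {
    // Hypothetical stand-ins for the contract addresses and event topics held in cfg/contracts.
    rollupAddr := common.HexToAddress("0x01")
    zkevmAddr := common.HexToAddress("0x02")
    sequencedBatchTopic := common.HexToHash("0xaa")
    verificationTopic := common.HexToHash("0xbb")

    q := ethereum.FilterQuery{
        FromBlock: big.NewInt(0),
        ToBlock:   big.NewInt(1000),
        // Addresses are OR'ed: a log from any of these emitters matches.
        Addresses: []common.Address{rollupAddr, zkevmAddr},
        // Values within the same topic position are OR'ed: topic0 may be any of these.
        Topics: [][]common.Hash{{sequencedBatchTopic, verificationTopic}},
    }
    fmt.Printf("one query covers %d addresses and %d topic0 values\n", len(q.Addresses), len(q.Topics[0]))
}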
diff --git a/turbo/jsonrpc/zkevm_api_test.go b/turbo/jsonrpc/zkevm_api_test.go
index c925b33f182..740f9718542 100644
--- a/turbo/jsonrpc/zkevm_api_test.go
+++ b/turbo/jsonrpc/zkevm_api_test.go
@@ -485,7 +485,7 @@ func TestGetBatchByNumber(t *testing.T) {
     assert.Equal(gers[len(gers)-1], batch.GlobalExitRoot)
     assert.Equal(mainnetExitRoots[len(mainnetExitRoots)-1], batch.MainnetExitRoot)
     assert.Equal(rollupExitRoots[len(rollupExitRoots)-1], batch.RollupExitRoot)
-    assert.Equal(common.HexToHash("0xebf7acfdbfb4ea6ef8775e7d2246d7d4f3d8c280fbe7649e1b90eb7490ea19e6"), batch.AccInputHash)
+    assert.Equal(common.HexToHash("0x4ada34202c254799e2abc9f832764692c2fc6f654fa98a1f9c19391deff623bd"), batch.AccInputHash)
     assert.Equal(common.HexToHash("0x22ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba97"), *batch.SendSequencesTxHash)
     assert.Equal(rpctypes.ArgUint64(1714427009), batch.Timestamp)
     assert.Equal(true, batch.Closed)
diff --git a/turbo/stages/zk_stages.go b/turbo/stages/zk_stages.go
index 3d60972ae0e..c00c4e77589 100644
--- a/turbo/stages/zk_stages.go
+++ b/turbo/stages/zk_stages.go
@@ -35,6 +35,7 @@ func NewDefaultZkStages(ctx context.Context,
     forkValidator *engine_helpers.ForkValidator,
     engine consensus.Engine,
     l1Syncer *syncer.L1Syncer,
+    updater *l1infotree.Updater,
     datastreamClient zkStages.DatastreamClient,
     dataStreamServer server.DataStreamServer,
     infoTreeUpdater *l1infotree.Updater,
@@ -51,7 +52,7 @@ func NewDefaultZkStages(ctx context.Context,
     runInTestMode := cfg.ImportMode
 
     return zkStages.DefaultZkStages(ctx,
-        zkStages.StageL1SyncerCfg(db, l1Syncer, cfg.Zk),
+        zkStages.StageL1SyncerCfg(db, l1Syncer, updater, cfg.Zk),
         zkStages.StageL1InfoTreeCfg(db, cfg.Zk, infoTreeUpdater),
         zkStages.StageBatchesCfg(db, datastreamClient, cfg.Zk, controlServer.ChainConfig, &cfg.Miner),
         zkStages.StageDataStreamCatchupCfg(dataStreamServer, db, cfg.Genesis.Config.ChainID.Uint64(), cfg.DatastreamVersion, cfg.HasExecutors()),
@@ -101,9 +102,7 @@ func NewSequencerZkStages(ctx context.Context,
     forkValidator *engine_helpers.ForkValidator,
     engine consensus.Engine,
     dataStreamServer server.DataStreamServer,
-    sequencerStageSyncer *syncer.L1Syncer,
     l1Syncer *syncer.L1Syncer,
-    l1BlockSyncer *syncer.L1Syncer,
     txPool *txpool.TxPool,
     txPoolDb kv.RwDB,
     verifier *legacy_executor_verifier.LegacyExecutorVerifier,
@@ -117,9 +116,7 @@ func NewSequencerZkStages(ctx context.Context,
     runInTestMode := cfg.ImportMode
 
     return zkStages.SequencerZkStages(ctx,
-        zkStages.StageL1SyncerCfg(db, l1Syncer, cfg.Zk),
-        zkStages.StageL1InfoTreeCfg(db, cfg.Zk, infoTreeUpdater),
-        zkStages.StageSequencerL1BlockSyncCfg(db, cfg.Zk, l1BlockSyncer),
+        zkStages.StageL1SyncerCfg(db, l1Syncer, infoTreeUpdater, cfg.Zk),
         zkStages.StageDataStreamCatchupCfg(dataStreamServer, db, cfg.Genesis.Config.ChainID.Uint64(), cfg.DatastreamVersion, cfg.HasExecutors()),
         zkStages.StageSequenceBlocksCfg(
             db,
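NewDefaultZkStages and NewSequencerZkStages now feed the same StageL1SyncerCfg, differing only in which *l1infotree.Updater variable they pass, so the RPC and sequencer pipelines share one combined L1 sync stage. A compact sketch of that shared config shape, using local stand-in types rather than the real zk/syncer, zk/l1infotree and ethconfig packages:

// Stand-in types only; the real code uses syncer.L1Syncer, l1infotree.Updater and ethconfig.Zk as above.
package main

import "fmt"

type (
    L1Syncer struct{}
    Updater  struct{}
    ZkCfg    struct{}
)

type L1SyncerCfg struct {
    syncer  *L1Syncer
    updater *Updater
    zkCfg   *ZkCfg
}

func StageL1SyncerCfg(syncer *L1Syncer, updater *Updater, zkCfg *ZkCfg) L1SyncerCfg {
    return L1SyncerCfg{syncer: syncer, updater: updater, zkCfg: zkCfg}
}

func main() {
    syncer, updater, zk := &L1Syncer{}, &Updater{}, &ZkCfg{}

    // RPC-node and sequencer builders now construct the identical stage config;
    // previously the sequencer wired three separate syncer-backed stages here.
    rpcCfg := StageL1SyncerCfg(syncer, updater, zk)
    seqCfg := StageL1SyncerCfg(syncer, updater, zk)
    fmt.Println(rpcCfg == seqCfg)
}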
diff --git a/zk/acc_input_hash/acc_input_hash.go b/zk/acc_input_hash/acc_input_hash.go
index a1c18eb3494..aa898f8d0f1 100644
--- a/zk/acc_input_hash/acc_input_hash.go
+++ b/zk/acc_input_hash/acc_input_hash.go
@@ -22,7 +22,7 @@ We read the inforoot from the s/c in state.
 Etrog (forkid 7):
 - limitTs the timestamp of the l1 block when the batch was sequenced.
 Elderberry (forkid 8):
-- limitTs is the timestamp of the last block in the sequenced batch.
+- limitTs is the timestamp of the last block in the sequence.
 */
 
 const SpecialZeroHash = "0x27AE5BA08D7291C96C8CBDDCC148BF48A6D68C7974B94356F53754EF6171D757"
@@ -110,6 +110,8 @@ func NewCalculatorWithBlockReader(ctx context.Context, tx kv.Tx, reader AccInput
     return calcConstructor(baseCalc), nil
 }
 
+// PRE-ETROG (forkid < 7) Calculator: UNSUPPORTED
+
 type PreFork7Calculator struct {
     *BaseCalc
 }
@@ -123,6 +125,8 @@ func (p PreFork7Calculator) Calculate(batchNum uint64) (common.Hash, error) {
     return common.Hash{}, nil
 }
 
+// ETROG (forkid 7) Calculator
+
 type Fork7Calculator struct {
     *BaseCalc
 }
@@ -274,6 +278,8 @@ func (f Fork7Calculator) localAccInputHashCalc(batchNum, startBatchNo uint64, pr
     return accInputHash, nil
 }
 
+// ELDERBERRY (forkid 8) Calculator
+
 type Fork8Calculator struct {
     *BaseCalc
 }
@@ -283,9 +289,143 @@ func NewFork8Calculator(bc *BaseCalc) AccInputHashCalculator {
 }
 
 func (f Fork8Calculator) Calculate(batchNum uint64) (common.Hash, error) {
-    return common.Hash{}, nil
+    accInputHash, returnedBatchNo, err := f.Reader.GetAccInputHashForBatchOrPrevious(batchNum)
+    if err != nil {
+        return common.Hash{}, err
+    }
+
+    // check the forkid of the returnedBatchNo
+    forkId, err := f.Reader.GetForkId(returnedBatchNo)
+    if err != nil {
+        return common.Hash{}, err
+    }
+
+    if forkId < uint64(chain.ForkID7Etrog) {
+        return common.Hash{}, fmt.Errorf("unsupported fork ID: %d", forkId)
+    }
+
+    // TODO: remove test spoofing! (1001 and 997 are l1 held batch accinputhashes - sequence ends)
+    if batchNum >= 7000 {
+        // let's just spoof it backwards:
+        accInputHash, returnedBatchNo, err = f.Reader.GetAccInputHashForBatchOrPrevious(7000)
+        if err != nil {
+            return common.Hash{}, err
+        }
+    }
+
+    // if we have it, return it
+    if returnedBatchNo == batchNum {
+        return accInputHash, nil
+    }
+
+    // otherwise calculate it
+    accInputHash, err = f.localAccInputHashCalc(batchNum, returnedBatchNo, accInputHash)
+    if err != nil {
+        return common.Hash{}, err
+    }
+
+    return accInputHash, nil
 }
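Calculate resolves the nearest batch at or below the requested one for which an L1-held acc input hash is stored, then only recomputes the gap. A small self-contained sketch of that lookup pattern; the map stands in for the reader backed by the acc-input-hash table, and GetAccInputHashForBatchOrPrevious is the real entry point:

package main

import "fmt"

// nearestKnown mirrors GetAccInputHashForBatchOrPrevious: return the stored
// hash at batchNum, or the closest one below it.
func nearestKnown(stored map[uint64]string, batchNum uint64) (string, uint64, bool) {
    for b := batchNum; ; b-- {
        if h, ok := stored[b]; ok {
            return h, b, true
        }
        if b == 0 {
            return "", 0, false
        }
    }
}

func main() {
    stored := map[uint64]string{4: "0xbbb", 7: "0x123456"} // e.g. hashes taken from L1 sequence events
    hash, from, ok := nearestKnown(stored, 9)
    fmt.Println(hash, from, ok) // 0x123456 7 true: batches 8..9 must be recomputed locally
}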
 func (f Fork8Calculator) localAccInputHashCalc(batchNum, startBatchNo uint64, prevAccInputHash common.Hash) (common.Hash, error) {
-    return common.Hash{}, nil
+    var accInputHash common.Hash
+
+    infoTreeIndexes, err := f.Reader.GetL1InfoTreeIndexToRoots()
+    if err != nil {
+        return common.Hash{}, fmt.Errorf("failed to get l1 info tree indexes: %w", err)
+    }
+    if len(infoTreeIndexes) == 0 {
+        return common.Hash{}, fmt.Errorf("no l1 info tree indexes found")
+    }
+
+    // go from injected batch with known batch 0 accinputhash of 0x0...0
+    if startBatchNo == 0 {
+        startBatchNo = 1
+    }
+
+    // Elderberry limitTS is the highest L2 block TS
+    // get the highest l2 block in the sequence (highest batch)
+    blockNos, err := f.Reader.GetL2BlockNosByBatch(batchNum)
+    if err != nil {
+        return common.Hash{}, fmt.Errorf("failed to get l2 block nos by batch for batch %d: %w", batchNum, err)
+    }
+    seqHighestBlockNo := blockNos[len(blockNos)-1]
+    seqHighestBlock, err := f.BlockReader.ReadBlockByNumber(seqHighestBlockNo)
+    if err != nil {
+        return common.Hash{}, fmt.Errorf("failed to get highest block for batch %d: %w", batchNum, err)
+    }
+    limitTs := seqHighestBlock.Time()
+
+    // TODO: handle the batch 1 case, where we should check the aggregator code: https://github.com/0xPolygon/cdk/blob/develop/aggregator/aggregator.go#L1167
+
+    for i := startBatchNo; i <= batchNum; i++ {
+        currentForkId, err := f.Reader.GetForkId(i)
+        if err != nil {
+            return common.Hash{}, fmt.Errorf("failed to get fork id for batch %d: %w", i, err)
+        }
+
+        batchBlockNos, err := f.Reader.GetL2BlockNosByBatch(i)
+        if err != nil {
+            return common.Hash{}, fmt.Errorf("failed to get batch blocks for batch %d: %w", i, err)
+        }
+        batchBlocks := []*eritypes.Block{}
+        var coinbase common.Address
+        for in, blockNo := range batchBlockNos {
+            block, err := f.BlockReader.ReadBlockByNumber(blockNo)
+            if err != nil {
+                return common.Hash{}, fmt.Errorf("failed to get block %d: %w", blockNo, err)
+            }
+            if in == 0 {
+                coinbase = block.Coinbase()
+            }
+            batchBlocks = append(batchBlocks, block)
+        }
+
+        lastBlockNoInPreviousBatch := uint64(0)
+        firstBlockInBatch := batchBlocks[0]
+        if firstBlockInBatch.NumberU64() != 0 {
+            lastBlockNoInPreviousBatch = firstBlockInBatch.NumberU64() - 1
+        }
+
+        lastBlockInPreviousBatch, err := f.BlockReader.ReadBlockByNumber(lastBlockNoInPreviousBatch)
+        if err != nil {
+            return common.Hash{}, err
+        }
+
+        batchL2Data, err := utils.GenerateBatchDataFromDb(f.Tx, f.Reader, batchBlocks, lastBlockInPreviousBatch, currentForkId)
+        if err != nil {
+            return common.Hash{}, fmt.Errorf("failed to generate batch data for batch %d: %w", i, err)
+        }
+
+        highestBlock := batchBlocks[len(batchBlocks)-1]
+
+        sr := state.NewPlainState(f.Tx, highestBlock.NumberU64(), systemcontracts.SystemContractCodeLookup["hermez"])
+        if err != nil {
+            return common.Hash{}, fmt.Errorf("failed to get psr: %w", err)
+        }
+        l1InfoRootBytes, err := sr.ReadAccountStorage(state.ADDRESS_SCALABLE_L2, 1, &state.BLOCK_INFO_ROOT_STORAGE_POS)
+        if err != nil {
+            return common.Hash{}, fmt.Errorf("failed to read l1 info root: %w", err)
+        }
+        sr.Close()
+        l1InfoRoot := common.BytesToHash(l1InfoRootBytes)
+
+        fmt.Println("[l1InfoRoot]", l1InfoRoot.Hex())
+        fmt.Println("[limitTs]", limitTs)
+
+        inputs := utils.AccHashInputs{
+            OldAccInputHash: prevAccInputHash,
+            Sequencer:       coinbase,
+            BatchData:       batchL2Data,
+            L1InfoRoot:      l1InfoRoot,
+            LimitTimestamp:  limitTs,
+            ForcedBlockHash: common.Hash{},
+        }
+        accInputHash, err = utils.CalculateAccInputHashByForkId(inputs)
+        if err != nil {
+            return common.Hash{}, fmt.Errorf("failed to calculate accInputHash for batch %d: %w", i, err)
+        }
+        prevAccInputHash = accInputHash
+    }
+    return accInputHash, nil
 }
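utils.AccHashInputs carries the fields that feed one link of the accumulator chain. As a point of reference, here is a self-contained sketch of the keccak chaining as it is described in the public zkEVM documentation for Etrog-style batches: oldAccInputHash, keccak(batchL2Data), l1InfoRoot, an 8-byte limitTimestamp, the sequencer address and the forcedBlockHash, hashed in that order. Treat the exact packing as an assumption here; utils.CalculateAccInputHashByForkId in this repo is the source of truth.

package main

import (
    "encoding/binary"
    "fmt"

    "golang.org/x/crypto/sha3"
)

func keccak(chunks ...[]byte) []byte {
    h := sha3.NewLegacyKeccak256()
    for _, c := range chunks {
        h.Write(c)
    }
    return h.Sum(nil)
}

// nextAccInputHash sketches one link of the accumulator. Field order and widths
// follow the publicly documented Etrog layout (assumed, not taken from this repo).
func nextAccInputHash(oldAcc [32]byte, batchL2Data []byte, l1InfoRoot [32]byte, limitTs uint64, sequencer [20]byte, forcedBlockHash [32]byte) [32]byte {
    ts := make([]byte, 8)
    binary.BigEndian.PutUint64(ts, limitTs)
    out := keccak(oldAcc[:], keccak(batchL2Data), l1InfoRoot[:], ts, sequencer[:], forcedBlockHash[:])
    var res [32]byte
    copy(res[:], out)
    return res
}

func main() {
    var zero [32]byte
    var seq [20]byte
    // Chaining: each batch's hash becomes the next batch's oldAccInputHash,
    // which is why the loop above carries prevAccInputHash forward.
    h := nextAccInputHash(zero, []byte{0x0b}, zero, 1714427009, seq, zero)
    fmt.Printf("%x\n", h)
}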
diff --git a/zk/acc_input_hash/acc_input_hash_test.go b/zk/acc_input_hash/acc_input_hash_test.go
index ba869351abc..1548768e76e 100644
--- a/zk/acc_input_hash/acc_input_hash_test.go
+++ b/zk/acc_input_hash/acc_input_hash_test.go
@@ -38,6 +38,11 @@ type MockAccInputHashReader struct {
     EffectiveGasPricePercentages map[common.Hash]uint8
 }
 
+// GetHighestL1BlockTimestamp implements AccInputHashReader.
+func (m *MockAccInputHashReader) GetHighestL1BlockTimestamp() (batchNo uint64, timestamp uint64, found bool, err error) {
+    return 0, 0, false, nil
+}
+
 func (m *MockAccInputHashReader) GetAccInputHashForBatchOrPrevious(batchNo uint64) (common.Hash, uint64, error) {
     for i := batchNo; i >= 0; i-- {
         if hash, ok := m.AccInputHashes[i]; ok {
@@ -125,7 +130,7 @@ func createMockBlock(blockNo uint64) *eritypes.Block {
     return eritypes.NewBlock(header, txs, nil, nil, nil)
 }
 
-func TestCalculateAccInputHash(t *testing.T) {
+func TestCalculateAccInputHashFork7(t *testing.T) {
     ctx := context.Background()
 
     testCases := map[string]struct {
@@ -136,7 +141,7 @@ func TestCalculateAccInputHash(t *testing.T) {
         expectedErrorMsg string
         setup            func(*testing.T) (*MockAccInputHashReader, *MockBlockReader)
     }{
-        "Valid Fork7, Existing Batch": {
+        "Valid Fork7 Existing Batch": {
             forkID:       7,
             batchNum:     4,
             expectedHash: common.HexToHash("0xbbb"),
@@ -156,7 +161,7 @@ func TestCalculateAccInputHash(t *testing.T) {
         "Valid Fork7 Missing Batch Calculate Hash": {
             forkID:       7,
             batchNum:     5,
-            expectedHash: common.HexToHash("0xb370e69e462a8a00469cb0ce188399a9754880dfd8ebd98717e24cbe1103efa6"),
+            expectedHash: common.HexToHash("0x68f8d34596fff903fd9bdd79d3165b3120890b11945f815d618cf5988ea016fd"),
             expectError:  false,
             setup: func(t *testing.T) (*MockAccInputHashReader, *MockBlockReader) {
                 reader := &MockAccInputHashReader{
@@ -202,10 +207,10 @@ func TestCalculateAccInputHash(t *testing.T) {
                 return reader, mockBlockReader
             },
         },
-        "Valid Fork7, No Previous Batch": {
+        "Valid Fork7 No Previous Batch": {
             forkID:       7,
             batchNum:     2,
-            expectedHash: common.HexToHash("0x0cd77f88e7eeeef006fa44caaf24baab7a1b46321e26a9fa28f943a293a8811e"),
+            expectedHash: common.HexToHash("0x85cb929ce799607e2b410964f39d5bd75c3a0552cf31ed92733e7e8526cb3ef5"),
             expectError:  false,
             setup: func(t *testing.T) (*MockAccInputHashReader, *MockBlockReader) {
                 reader := &MockAccInputHashReader{
@@ -311,3 +316,153 @@ func TestCalculateAccInputHash(t *testing.T) {
         })
     }
 }
+
+func TestCalculateAccInputHashFork8(t *testing.T) {
+    ctx := context.Background()
+
+    testCases := map[string]struct {
+        forkID           uint64
+        batchNum         uint64
+        expectedHash     common.Hash
+        expectError      bool
+        expectedErrorMsg string
+        setup            func(*testing.T) (*MockAccInputHashReader, *MockBlockReader)
+    }{
+        "Valid Fork8 Single Batch": {
+            forkID:       8,
+            batchNum:     6,
+            expectedHash: common.HexToHash("0xabc123"),
+            expectError:  false,
+            setup: func(t *testing.T) (*MockAccInputHashReader, *MockBlockReader) {
+                reader := &MockAccInputHashReader{
+                    AccInputHashes: map[uint64]common.Hash{
+                        6: common.HexToHash("0xabc123"),
+                    },
+                    ForkIds: map[uint64]uint64{
+                        6: 8,
+                    },
+                    L2BlockNosByBatch: map[uint64][]uint64{
+                        6: {60},
+                    },
+                }
+                blocks := map[uint64]*eritypes.Block{
+                    60: createMockBlock(60),
+                }
+                mockBlockReader := &MockBlockReader{
+                    blocks: blocks,
+                }
+                return reader, mockBlockReader
+            },
+        },
+        "Valid Fork8 Compute Missing Batch": {
+            forkID:       8,
+            batchNum:     8,
+            expectedHash: common.HexToHash("0x9778a8f619b80ee196d570427eee089679065c34e8bc815015e286113cd5bbb4"),
+            expectError:  false,
+            setup: func(t *testing.T) (*MockAccInputHashReader, *MockBlockReader) {
+                reader := &MockAccInputHashReader{
+                    AccInputHashes: map[uint64]common.Hash{
+                        7: common.HexToHash("0x123456"),
+                    },
+                    ForkIds: map[uint64]uint64{
+                        7: 8,
+                        8: 8,
+                    },
+                    L2BlockNosByBatch: map[uint64][]uint64{
+                        7: {69, 70},
+                        8: {80, 81},
+                    },
+                    BlockGlobalExitRoots: map[uint64]common.Hash{
+                        81: common.HexToHash("0x6789"),
+                    },
+                    L1InfoTreeIndexToRoots: map[uint64]common.Hash{
+                        0: common.HexToHash("0x9abc"),
+                    },
+                }
+                blocks := map[uint64]*eritypes.Block{
+                    68: createMockBlock(68),
+                    69: createMockBlock(69),
+                    70: createMockBlock(70),
+                    71: createMockBlock(71),
+                    72: createMockBlock(72),
+                    73: createMockBlock(73),
+                    74: createMockBlock(74),
+                    75: createMockBlock(75),
+                    76: createMockBlock(76),
+                    77: createMockBlock(77),
+                    78: createMockBlock(78),
+                    79: createMockBlock(79),
+                    80: createMockBlock(80),
+                    81: createMockBlock(81),
+                }
+                mockBlockReader := &MockBlockReader{
+                    blocks: blocks,
+                }
+                return reader, mockBlockReader
+            },
+        },
+        "Fork8 Block Reader Failure": {
+            forkID:           8,
+            batchNum:         9,
+            expectError:      true,
+            expectedErrorMsg: "fork ID not found for batch 0",
+            setup: func(t *testing.T) (*MockAccInputHashReader, *MockBlockReader) {
+                reader := &MockAccInputHashReader{
+                    ForkIds: map[uint64]uint64{
+                        9: 8,
+                    },
+                    L2BlockNosByBatch: map[uint64][]uint64{
+                        9: {90},
+                    },
+                }
+                return reader, &MockBlockReader{}
+            },
+        },
+        "Fork8 Unsupported Previous Fork ID": {
+            forkID:           8,
+            batchNum:         12,
+            expectError:      true,
+            expectedErrorMsg: "unsupported fork ID: 6",
+            setup: func(t *testing.T) (*MockAccInputHashReader, *MockBlockReader) {
+                reader := &MockAccInputHashReader{
+                    AccInputHashes: map[uint64]common.Hash{
+                        11: common.HexToHash("0xabcdef"),
+                    },
+                    ForkIds: map[uint64]uint64{
+                        11: 6,
+                        12: 8,
+                    },
+                }
+                return reader, nil
+            },
+        },
+    }
+
+    for name, tc := range testCases {
+        t.Run(name, func(t *testing.T) {
+            tx, cleanup := GetDbTx(ctx)
+            reader, mockBlockReader := tc.setup(t)
+            calculator, err := NewCalculatorWithBlockReader(ctx, tx, reader, mockBlockReader, tc.forkID)
+            if err != nil {
+                if tc.expectError {
+                    assert.EqualError(t, err, tc.expectedErrorMsg)
+                } else {
+                    require.NoError(t, err)
+                }
+                return
+            }
+
+            hash, err := calculator.Calculate(tc.batchNum)
+            if tc.expectError {
+                assert.Error(t, err)
+                assert.Contains(t, err.Error(), tc.expectedErrorMsg)
+                assert.Equal(t, common.Hash{}, hash)
+            } else {
+                t.Log(hash.Hex())
+                require.NoError(t, err)
+                assert.Equal(t, tc.expectedHash, hash)
+            }
+            cleanup()
+        })
+    }
+}
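For reference, the minimal happy path through the new calculator, written against the helpers defined in this test file (GetDbTx, MockAccInputHashReader, MockBlockReader, createMockBlock); this is a sketch, not part of the patch. When the requested batch already has an L1-held hash, Calculate returns it directly and never enters localAccInputHashCalc:

// Sketch only: a trimmed-down version of the "Valid Fork8 Single Batch" case.
func TestFork8KnownBatchSketch(t *testing.T) {
    ctx := context.Background()
    tx, cleanup := GetDbTx(ctx)
    defer cleanup()

    reader := &MockAccInputHashReader{
        AccInputHashes:    map[uint64]common.Hash{6: common.HexToHash("0xabc123")},
        ForkIds:           map[uint64]uint64{6: 8},
        L2BlockNosByBatch: map[uint64][]uint64{6: {60}},
    }
    blockReader := &MockBlockReader{blocks: map[uint64]*eritypes.Block{60: createMockBlock(60)}}

    calc, err := NewCalculatorWithBlockReader(ctx, tx, reader, blockReader, 8)
    require.NoError(t, err)

    hash, err := calc.Calculate(6)
    require.NoError(t, err)
    assert.Equal(t, common.HexToHash("0xabc123"), hash)
}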
diff --git a/zk/hermez_db/db.go b/zk/hermez_db/db.go
index 8cb4b14ff0d..3fe7c4fac25 100644
--- a/zk/hermez_db/db.go
+++ b/zk/hermez_db/db.go
@@ -1924,7 +1924,7 @@ func (db *HermezDbReader) GetBatchLimitsBetweenForkIds(forkId1, forkId2 uint64)
         forkId := BytesToUint64(v[:8])
         batchNo := BytesToUint64(v[8:])
 
-        if forkId == forkId1 {
+        if forkId > 0 && forkId == forkId1 {
             firstBatchNo = batchNo
         }
diff --git a/zk/l1_log_parser/l1_log_parser.go b/zk/l1_log_parser/l1_log_parser.go
index 78b73569ce1..7d85dec11d2 100644
--- a/zk/l1_log_parser/l1_log_parser.go
+++ b/zk/l1_log_parser/l1_log_parser.go
@@ -16,6 +16,7 @@ type IL1Syncer interface {
 }
 
 type IHermezDb interface {
+    GetForkFromRollupType(rollupType uint64) (uint64, error)
     WriteSequence(l1BlockNo uint64, batchNo uint64, l1TxHash common.Hash, stateRoot common.Hash, l1InfoRoot common.Hash) error
     WriteVerification(l1BlockNo uint64, batchNo uint64, l1TxHash common.Hash, stateRoot common.Hash) error
     WriteRollupType(rollupType uint64, forkId uint64) error
@@ -117,12 +118,17 @@ func (p *L1LogParser) parseLogType(l *ethTypes.Log) (parsedLog interface{}, logT
     case contracts.CreateNewRollupTopic:
         rollupId := new(big.Int).SetBytes(l.Topics[1].Bytes()).Uint64()
+        if rollupId != p.L1RollupId {
+            return nil, types.LogUnknown, nil
+        }
         return types.RollupUpdateInfo{
             NewRollup:  rollupId,
             RollupType: new(big.Int).SetBytes(l.Data[0:32]).Uint64(),
         }, types.LogRollupCreate, nil
 
     case contracts.AddNewRollupTypeTopic:
+        fallthrough
+    case contracts.AddNewRollupTypeTopicBanana:
         return types.RollupUpdateInfo{
             RollupType: new(big.Int).SetBytes(l.Topics[1].Bytes()).Uint64(),
@@ -130,10 +136,25 @@ func (p *L1LogParser) parseLogType(l *ethTypes.Log) (parsedLog interface{}, logT
         }, types.LogAddRollupType, nil
 
     case contracts.UpdateRollupTopic:
+        rollupId := new(big.Int).SetBytes(l.Topics[1].Bytes()).Uint64()
+        if rollupId != p.L1RollupId {
+            return nil, types.LogUnknown, nil
+        }
+
+        newRollupBytes := l.Data[0:32]
+        newRollup := new(big.Int).SetBytes(newRollupBytes).Uint64()
+
+        latestVerifiedBytes := l.Data[32:64]
+        latestVerified := new(big.Int).SetBytes(latestVerifiedBytes).Uint64()
+
         return types.RollupUpdateInfo{
-            NewRollup:      new(big.Int).SetBytes(l.Data[0:32]).Uint64(),
-            LatestVerified: new(big.Int).SetBytes(l.Data[32:64]).Uint64(),
-        }, types.LogL1InfoTreeUpdate, nil
+            NewRollup:      newRollup,
+            LatestVerified: latestVerified,
+        }, types.LogUpdateRollup, nil
+
+    case contracts.UpdateL1InfoTreeTopic:
+        // This case is handled by the l1infotree updater code
+        return nil, types.LogUnknown, nil
 
     case contracts.RollbackBatchesTopic:
         return types.BatchVerificationInfo{
@@ -145,8 +166,6 @@ func (p *L1LogParser) parseLogType(l *ethTypes.Log) (parsedLog interface{}, logT
             BaseBatchInfo: baseInfo,
         }, types.LogUnknown, nil
     }
-
-    return nil, types.LogUnknown, nil
 }
 
 func (p *L1LogParser) handleLog(
@@ -208,8 +227,24 @@ func (p *L1LogParser) handleLog(
         return syncMeta, p.HermezDb.RollbackSequences(info.BatchNo)
 
     case types.LogInjectedBatch:
-        info := l.(*types.L1InjectedBatch)
-        return syncMeta, p.HermezDb.WriteL1InjectedBatch(info)
+        info := l.(types.L1InjectedBatch)
+        return syncMeta, p.HermezDb.WriteL1InjectedBatch(&info)
+
+    case types.LogUpdateRollup:
+        info := l.(types.RollupUpdateInfo)
+        fork, err := p.HermezDb.GetForkFromRollupType(info.NewRollup)
+        if err != nil {
+            return syncMeta, err
+        }
+        if fork == 0 {
+            log.Warn("received UpdateRollupTopic for unknown rollup type", "rollup", info.NewRollup)
+            return syncMeta, nil
+        }
+
+        return syncMeta, p.HermezDb.WriteNewForkHistory(fork, info.LatestVerified)
+
+    case types.LogUnknown:
+        return syncMeta, nil
 
     default:
         log.Warn("Unknown log type", "logType", logType)
diff --git a/zk/l1_log_parser/l1_log_parser_test.go b/zk/l1_log_parser/l1_log_parser_test.go
index cea4ece1444..acde37d3360 100644
--- a/zk/l1_log_parser/l1_log_parser_test.go
+++ b/zk/l1_log_parser/l1_log_parser_test.go
@@ -250,7 +250,6 @@ func TestParseAndHandleLog(t *testing.T) {
 
     for name, tc := range testCases {
         t.Run(name, func(t *testing.T) {
-            // Setup mocks
             hermezDbMock := &MockHermezDb{}
             l1SyncerMock := &MockIL1Syncer{}
@@ -271,14 +270,12 @@ func TestParseAndHandleLog(t *testing.T) {
 
             resultSyncMeta, err := parser.ParseAndHandleLog(tc.log, syncMeta)
 
-            // Check error
             if tc.expectedError != nil {
                 require.ErrorIs(t, err, tc.expectedError)
             } else {
                 require.NoError(t, err)
             }
 
-            // Check syncMeta
             require.Equal(t, tc.expectedSyncMeta, resultSyncMeta)
         })
     }
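parseLogType now drops CreateNewRollupTopic and UpdateRollupTopic events whose rollup ID (topic 1) is not the configured L1RollupId, and routes matching updates through the new LogUpdateRollup type so handleLog can record fork history. A self-contained sketch of that gate with local stand-ins (the real code works on ethTypes.Log and types.RollupUpdateInfo):

package main

import (
    "fmt"
    "math/big"
)

type logType int

const (
    logUnknown logType = iota
    logUpdateRollup
)

// classifyUpdateRollup mirrors the gate added to the UpdateRollupTopic case:
// events for other rollup IDs are classified as unknown and simply skipped.
func classifyUpdateRollup(topicRollupID []byte, data []byte, ourRollupID uint64) (latestVerified uint64, lt logType) {
    if new(big.Int).SetBytes(topicRollupID).Uint64() != ourRollupID {
        return 0, logUnknown
    }
    // data[0:32] carries the new rollup type id, data[32:64] the latest verified batch.
    return new(big.Int).SetBytes(data[32:64]).Uint64(), logUpdateRollup
}

func main() {
    data := make([]byte, 64)
    data[63] = 42 // latestVerified = 42
    lv, lt := classifyUpdateRollup([]byte{0x01}, data, 1)
    fmt.Println(lv, lt == logUpdateRollup) // 42 true
}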
diff --git a/zk/stages/stage_l1_syncer.go b/zk/stages/stage_l1_syncer.go
index 628671adb0a..cacfa5429c4 100644
--- a/zk/stages/stage_l1_syncer.go
+++ b/zk/stages/stage_l1_syncer.go
@@ -21,6 +21,7 @@ import (
     "github.com/ledgerwatch/erigon/eth/stagedsync/stages"
     "github.com/ledgerwatch/erigon/zk/hermez_db"
     "github.com/ledgerwatch/erigon/zk/l1_log_parser"
+    "github.com/ledgerwatch/erigon/zk/l1infotree"
     "github.com/ledgerwatch/erigon/zk/sequencer"
     "github.com/ledgerwatch/erigon/zk/types"
 )
@@ -53,20 +54,27 @@ var (
 )
 
 type L1SyncerCfg struct {
-    db     kv.RwDB
-    syncer IL1Syncer
+    db      kv.RwDB
+    syncer  IL1Syncer
+    updater *l1infotree.Updater
 
     zkCfg *ethconfig.Zk
 }
 
-func StageL1SyncerCfg(db kv.RwDB, syncer IL1Syncer, zkCfg *ethconfig.Zk) L1SyncerCfg {
+func StageL1SyncerCfg(db kv.RwDB, syncer IL1Syncer, updater *l1infotree.Updater, zkCfg *ethconfig.Zk) L1SyncerCfg {
     return L1SyncerCfg{
-        db:     db,
-        syncer: syncer,
-        zkCfg:  zkCfg,
+        db:      db,
+        syncer:  syncer,
+        updater: updater,
+        zkCfg:   zkCfg,
     }
 }
 
+/*
+1. get the l1 info tree updates (commit tx in first cycle)
+2. start the syncer for all other contracts/topics
+3. parse logs and handle them
+*/
 func SpawnStageL1Syncer(
     s *stagedsync.StageState,
     u stagedsync.Unwinder,
@@ -83,10 +91,6 @@ func SpawnStageL1Syncer(
     logPrefix := s.LogPrefix()
     log.Info(fmt.Sprintf("[%s] Starting L1 sync stage", logPrefix))
-    // if sequencer.IsSequencer() {
-    //     log.Info(fmt.Sprintf("[%s] skipping -- sequencer", logPrefix))
-    //     return nil
-    // }
     defer log.Info(fmt.Sprintf("[%s] Finished L1 sync stage ", logPrefix))
 
     var internalTxOpened bool
@@ -101,7 +105,37 @@ func SpawnStageL1Syncer(
         defer tx.Rollback()
     }
 
-    // pass tx to the hermezdb
+    // l1 info tree first
+    if err := cfg.updater.WarmUp(tx); err != nil {
+        return fmt.Errorf("cfg.updater.WarmUp: %w", err)
+    }
+
+    allLogs, err := cfg.updater.CheckForInfoTreeUpdates(logPrefix, tx)
+    if err != nil {
+        return fmt.Errorf("CheckForInfoTreeUpdates: %w", err)
+    }
+
+    var latestIndex uint64
+    latestUpdate := cfg.updater.GetLatestUpdate()
+    if latestUpdate != nil {
+        latestIndex = latestUpdate.Index
+    }
+    log.Info(fmt.Sprintf("[%s] Info tree updates", logPrefix), "count", len(allLogs), "latestIndex", latestIndex)
+
+    // commit tx if it was opened
+    if internalTxOpened {
+        log.Debug("l1 sync: first cycle, committing tx")
+        if err := tx.Commit(); err != nil {
+            return fmt.Errorf("tx.Commit: %w", err)
+        }
+        tx, err = cfg.db.BeginRw(ctx)
+        if err != nil {
+            return fmt.Errorf("cfg.db.BeginRw: %w", err)
+        }
+        defer tx.Rollback()
+    }
+
+    // pass tx to the hermezdb
     hermezDb := hermez_db.NewHermezDb(tx)
 
     // get l1 block progress from this stage's progress
@@ -110,6 +144,14 @@ func SpawnStageL1Syncer(
         return fmt.Errorf("GetStageProgress, %w", err)
     }
 
+    l1BlockProgressSeqOldStage, err := stages.GetStageProgress(tx, stages.L1SequencerSync)
+    if err != nil {
+        return fmt.Errorf("GetStageProgress, %w", err)
+    }
+
+    // [seq] - use sequencer stage progress - take the lowest progress to use as the starting point
+    l1BlockProgress = min(l1BlockProgress, l1BlockProgressSeqOldStage)
+
     // start syncer if not started
     if !cfg.syncer.IsSyncStarted() {
         if l1BlockProgress == 0 {
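The stage now runs the info-tree update first and, when it opened the transaction itself, commits that work before starting the potentially long L1 log scan, then continues on a fresh RwTx. A generic sketch of that checkpoint pattern with stand-in interfaces (the real code uses erigon's kv.RwDB and kv.RwTx exactly as shown above):

// Package l1syncsketch illustrates the commit-and-reopen checkpoint used above;
// RwDB/RwTx are stand-ins for erigon's kv interfaces.
package l1syncsketch

import (
    "context"
    "fmt"
)

type RwTx interface {
    Commit() error
    Rollback()
}

type RwDB interface {
    BeginRw(ctx context.Context) (RwTx, error)
}

// checkpoint persists the work done so far only when this stage owns the tx
// (internalTxOpened in the real code) and hands back a fresh transaction for
// the remainder of the stage. When the caller owns the tx, it is left alone.
func checkpoint(ctx context.Context, db RwDB, tx RwTx, ownTx bool) (RwTx, error) {
    if !ownTx {
        return tx, nil
    }
    if err := tx.Commit(); err != nil {
        return nil, fmt.Errorf("tx.Commit: %w", err)
    }
    return db.BeginRw(ctx)
}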
@@ -166,16 +208,16 @@ Loop:
     }
 
     // // do this separately to allow upgrading nodes to back-fill the table
-    // err = getAccInputHashes(ctx, logPrefix, hermezDb, cfg.syncer, &cfg.zkCfg.AddressRollup, cfg.zkCfg.L1RollupId, highestVerification.BatchNo)
-    // if err != nil {
-    //     return fmt.Errorf("getAccInputHashes: %w", err)
-    // }
+    err = getAccInputHashes(ctx, logPrefix, hermezDb, cfg.syncer, &cfg.zkCfg.AddressRollup, cfg.zkCfg.L1RollupId, syncMeta.HighestVerification.BatchNo)
+    if err != nil {
+        return fmt.Errorf("getAccInputHashes: %w", err)
+    }
 
-    // // get l1 block timestamps
-    // err = getL1BlockTimestamps(ctx, logPrefix, hermezDb, cfg.syncer, &cfg.zkCfg.AddressRollup, cfg.zkCfg.L1RollupId, highestVerification.BatchNo)
-    // if err != nil {
-    //     return fmt.Errorf("getL1BlockTimestamps: %w", err)
-    // }
+    // get l1 block timestamps
+    err = getL1BlockTimestamps(ctx, logPrefix, hermezDb, cfg.syncer, &cfg.zkCfg.AddressRollup, cfg.zkCfg.L1RollupId, syncMeta.HighestVerification.BatchNo)
+    if err != nil {
+        return fmt.Errorf("getL1BlockTimestamps: %w", err)
+    }
 
     latestCheckedBlock := cfg.syncer.GetLastCheckedL1Block()
@@ -187,6 +229,10 @@ Loop:
         if err := stages.SaveStageProgress(tx, stages.L1Syncer, syncMeta.HighestWrittenL1BlockNo); err != nil {
             return fmt.Errorf("SaveStageProgress: %w", err)
         }
+        // [seq] - use sequencer stage progress
+        if err := stages.SaveStageProgress(tx, stages.L1SequencerSync, syncMeta.HighestWrittenL1BlockNo); err != nil {
+            return fmt.Errorf("SaveStageProgress: %w", err)
+        }
         if syncMeta.HighestVerification.BatchNo > 0 {
             log.Info(fmt.Sprintf("[%s]", logPrefix), "highestVerificationBatchNo", syncMeta.HighestVerification.BatchNo)
             if err := stages.SaveStageProgress(tx, stages.L1VerificationsBatchNo, syncMeta.HighestVerification.BatchNo); err != nil {
@@ -452,7 +498,9 @@ func getL1BlockTimestamps(ctx context.Context, logPrefix string, hermezDb *herme
         return fmt.Errorf("GetEtrog7FirstAndLastBatchNubmers: %w", err)
     }
 
-    if firstBatchNo == 0 || lastBatchNo == 0 {
+    // we can't detect the start of etrog, but we can detect the end
+
+    if lastBatchNo == 0 {
         log.Info(fmt.Sprintf("[%s] No etrog or elderberry batches found, skipping L1 block timestamp retrieval", logPrefix))
         return nil
     }
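Because the old sequencer-only sync stages are retired below, the combined stage resumes from the lower of its own progress key and the legacy L1SequencerSync key, and keeps writing both, so a node upgrading from the split layout re-scans any gap once rather than skipping L1 blocks. The merge in isolation (min here is the Go 1.21 builtin also used in the hunk above):

package main

import "fmt"

// resumePoint mirrors the merge above: start from the lower of the combined
// stage's saved block and the legacy L1SequencerSync stage's saved block.
func resumePoint(l1SyncerProgress, legacySeqProgress uint64) uint64 {
    return min(l1SyncerProgress, legacySeqProgress)
}

func main() {
    // e.g. the combined key has advanced further than the retired sequencer key:
    fmt.Println(resumePoint(19_000_000, 18_750_000)) // 18750000 - the gap is re-scanned once
}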
diff --git a/zk/stages/stages.go b/zk/stages/stages.go
index 51b7fc6d72a..74c9ef9d066 100644
--- a/zk/stages/stages.go
+++ b/zk/stages/stages.go
@@ -20,8 +20,6 @@ var (
 func SequencerZkStages(
     ctx context.Context,
     l1SyncerCfg L1SyncerCfg,
-    l1InfoTreeCfg L1InfoTreeCfg,
-    sequencerL1BlockSyncCfg SequencerL1BlockSyncCfg,
     dataStreamCatchupCfg DataStreamCatchupCfg,
     exec SequenceBlockCfg,
     hashState stages.HashStateCfg,
@@ -50,32 +48,6 @@ func SequencerZkStages(
                 return PruneL1SyncerStage(p, tx, l1SyncerCfg, ctx)
             },
         },
-        {
-            ID:          stages2.L1InfoTree,
-            Description: "L1 Info tree index updates sync",
-            Forward: func(firstCycle bool, badBlockUnwind bool, s *stages.StageState, u stages.Unwinder, txc wrap.TxContainer, logger log.Logger) error {
-                return SpawnL1InfoTreeStage(s, u, txc.Tx, l1InfoTreeCfg, ctx, logger)
-            },
-            Unwind: func(firstCycle bool, u *stages.UnwindState, s *stages.StageState, txc wrap.TxContainer, logger log.Logger) error {
-                return UnwindL1InfoTreeStage(u, txc.Tx, l1InfoTreeCfg, ctx)
-            },
-            Prune: func(firstCycle bool, p *stages.PruneState, tx kv.RwTx, logger log.Logger) error {
-                return PruneL1InfoTreeStage(p, tx, l1InfoTreeCfg, ctx)
-            },
-        },
-        {
-            ID:          stages2.L1BlockSync,
-            Description: "L1 Sequencer L1 Block Sync",
-            Forward: func(firstCycle bool, badBlockUnwind bool, s *stages.StageState, unwinder stages.Unwinder, txc wrap.TxContainer, logger log.Logger) error {
-                return SpawnSequencerL1BlockSyncStage(s, unwinder, ctx, txc.Tx, sequencerL1BlockSyncCfg, logger)
-            },
-            Unwind: func(firstCycle bool, u *stages.UnwindState, s *stages.StageState, txc wrap.TxContainer, logger log.Logger) error {
-                return UnwindSequencerL1BlockSyncStage(u, txc.Tx, sequencerL1BlockSyncCfg, ctx)
-            },
-            Prune: func(firstCycle bool, p *stages.PruneState, tx kv.RwTx, logger log.Logger) error {
-                return PruneSequencerL1BlockSyncStage(p, tx, sequencerL1BlockSyncCfg, ctx, logger)
-            },
-        },
         {
             ID:          stages2.Execution,
             Description: "Sequence transactions",
@@ -244,19 +216,6 @@ func DefaultZkStages(
                 return PruneL1SyncerStage(p, tx, l1SyncerCfg, ctx)
             },
         },
-        {
-            ID:          stages2.L1InfoTree,
-            Description: "L1 Info tree index updates sync",
-            Forward: func(firstCycle bool, badBlockUnwind bool, s *stages.StageState, u stages.Unwinder, txc wrap.TxContainer, logger log.Logger) error {
-                return SpawnL1InfoTreeStage(s, u, txc.Tx, l1InfoTreeCfg, ctx, logger)
-            },
-            Unwind: func(firstCycle bool, u *stages.UnwindState, s *stages.StageState, txc wrap.TxContainer, logger log.Logger) error {
-                return UnwindL1InfoTreeStage(u, txc.Tx, l1InfoTreeCfg, ctx)
-            },
-            Prune: func(firstCycle bool, p *stages.PruneState, tx kv.RwTx, logger log.Logger) error {
-                return PruneL1InfoTreeStage(p, tx, l1InfoTreeCfg, ctx)
-            },
-        },
         {
             ID:          stages2.Batches,
             Description: "Download batches",
diff --git a/zk/types/zk_types.go b/zk/types/zk_types.go
index 4540393321d..4090e7891de 100644
--- a/zk/types/zk_types.go
+++ b/zk/types/zk_types.go
@@ -66,6 +66,7 @@ var (
     LogInjectedBatch BatchLogType = 7
     LogAddRollupType BatchLogType = 8
     LogRollupCreate  BatchLogType = 9
+    LogUpdateRollup  BatchLogType = 10
     LogIncompatible  BatchLogType = 100
 )
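Each new BatchLogType value needs a matching arm in handleLog's switch; anything unmatched falls through to the default "Unknown log type" warning, as the earlier l1_log_parser hunk shows. A self-contained sketch of that pairing with local stand-ins for the repo's types:

package main

import "fmt"

type BatchLogType int

const (
    LogUnknown      BatchLogType = 0
    LogUpdateRollup BatchLogType = 10 // mirrors the value added in zk_types.go
)

// handle sketches the switch discipline: new log types get explicit arms,
// unknown ones are skipped, and anything else surfaces as a warning.
func handle(lt BatchLogType) string {
    switch lt {
    case LogUpdateRollup:
        return "write fork history for the latest verified batch"
    case LogUnknown:
        return "skip"
    default:
        return "warn: unknown log type"
    }
}

func main() {
    fmt.Println(handle(LogUpdateRollup))
    fmt.Println(handle(BatchLogType(99)))
}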