
Commit

wip
revitteth committed Dec 12, 2024
1 parent dd84692 commit 4aca241
Showing 12 changed files with 440 additions and 135 deletions.
2 changes: 1 addition & 1 deletion cmd/integration/commands/stages_zkevm.go
@@ -140,7 +140,6 @@ func newSyncZk(ctx context.Context, db kv.RwDB) (consensus.Engine, *vm.Config, *
nil,
nil,
nil,

[CI annotation] Check failure on line 142 in cmd/integration/commands/stages_zkevm.go (GitHub Actions: tests (ubuntu-22.04), tests (macos-14-xlarge)): too many arguments in call to stages2.NewSequencerZkStages
nil,
)
} else {
stages = stages2.NewDefaultZkStages(
@@ -157,6 +156,7 @@ func newSyncZk(ctx context.Context, db kv.RwDB) (consensus.Engine, *vm.Config, *
nil,
nil,
nil,
nil,
nil)
}

65 changes: 19 additions & 46 deletions eth/backend.go
@@ -1035,53 +1035,39 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
log.Info("Starting sequencer in L1 recovery mode", "startBlock", cfg.L1SyncStartBlock)
}

seqAndVerifTopics := [][]libcommon.Hash{{
contracts.SequencedBatchTopicPreEtrog,
contracts.SequencedBatchTopicEtrog,
contracts.RollbackBatchesTopic,
contracts.VerificationTopicPreEtrog,
contracts.VerificationTopicEtrog,
contracts.VerificationValidiumTopicEtrog,
}}

seqAndVerifL1Contracts := []libcommon.Address{cfg.AddressRollup, cfg.AddressAdmin, cfg.AddressZkevm}

var l1Topics [][]libcommon.Hash
var l1Contracts []libcommon.Address
if isSequencer {
l1Topics = [][]libcommon.Hash{{
combinedL1Topics := [][]libcommon.Hash{
{
contracts.SequencedBatchTopicPreEtrog,
contracts.SequencedBatchTopicEtrog,
contracts.RollbackBatchesTopic,
contracts.VerificationTopicPreEtrog,
contracts.VerificationTopicEtrog,
contracts.VerificationValidiumTopicEtrog,
contracts.InitialSequenceBatchesTopic,
contracts.AddNewRollupTypeTopic,
contracts.AddNewRollupTypeTopicBanana,
contracts.CreateNewRollupTopic,
contracts.UpdateRollupTopic,
}}
l1Contracts = []libcommon.Address{cfg.AddressZkevm, cfg.AddressRollup}
} else {
l1Topics = seqAndVerifTopics
l1Contracts = seqAndVerifL1Contracts
contracts.SequenceBatchesTopic,
},
}

combinedL1Contracts := []libcommon.Address{
cfg.AddressRollup,
cfg.AddressAdmin,
cfg.AddressZkevm,
}

ethermanClients := make([]syncer.IEtherman, len(backend.etherManClients))
for i, c := range backend.etherManClients {
ethermanClients[i] = c.EthClient
}

seqVerSyncer := syncer.NewL1Syncer(
ctx,
ethermanClients,
seqAndVerifL1Contracts,
seqAndVerifTopics,
cfg.L1BlockRange,
cfg.L1QueryDelay,
cfg.L1HighestBlockType,
)

backend.l1Syncer = syncer.NewL1Syncer(
ctx,
ethermanClients,
l1Contracts,
l1Topics,
combinedL1Contracts,
combinedL1Topics,
cfg.L1BlockRange,
cfg.L1QueryDelay,
cfg.L1HighestBlockType,
@@ -1153,18 +1139,6 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
// we switch context from being an RPC node to a sequencer
backend.txPool2.ForceUpdateLatestBlock(executionProgress)

l1BlockSyncer := syncer.NewL1Syncer(
ctx,
ethermanClients,
[]libcommon.Address{cfg.AddressZkevm, cfg.AddressRollup},
[][]libcommon.Hash{{
contracts.SequenceBatchesTopic,
}},
cfg.L1BlockRange,
cfg.L1QueryDelay,
cfg.L1HighestBlockType,
)

backend.syncStages = stages2.NewSequencerZkStages(
backend.sentryCtx,
backend.chainDB,
@@ -1178,8 +1152,6 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
backend.engine,
dataStreamServer,
backend.l1Syncer,
seqVerSyncer,
l1BlockSyncer,
backend.txPool2,
backend.txPool2DB,
verifier,
@@ -1217,6 +1189,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
backend.forkValidator,
backend.engine,
backend.l1Syncer,
l1InfoTreeUpdater,
streamClient,
dataStreamServer,
l1InfoTreeUpdater,
2 changes: 1 addition & 1 deletion turbo/jsonrpc/zkevm_api_test.go
@@ -485,7 +485,7 @@ func TestGetBatchByNumber(t *testing.T) {
assert.Equal(gers[len(gers)-1], batch.GlobalExitRoot)
assert.Equal(mainnetExitRoots[len(mainnetExitRoots)-1], batch.MainnetExitRoot)
assert.Equal(rollupExitRoots[len(rollupExitRoots)-1], batch.RollupExitRoot)
assert.Equal(common.HexToHash("0xebf7acfdbfb4ea6ef8775e7d2246d7d4f3d8c280fbe7649e1b90eb7490ea19e6"), batch.AccInputHash)
assert.Equal(common.HexToHash("0x4ada34202c254799e2abc9f832764692c2fc6f654fa98a1f9c19391deff623bd"), batch.AccInputHash)
assert.Equal(common.HexToHash("0x22ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba97"), *batch.SendSequencesTxHash)
assert.Equal(rpctypes.ArgUint64(1714427009), batch.Timestamp)
assert.Equal(true, batch.Closed)
9 changes: 3 additions & 6 deletions turbo/stages/zk_stages.go
@@ -35,6 +35,7 @@ func NewDefaultZkStages(ctx context.Context,
forkValidator *engine_helpers.ForkValidator,
engine consensus.Engine,
l1Syncer *syncer.L1Syncer,
updater *l1infotree.Updater,
datastreamClient zkStages.DatastreamClient,
dataStreamServer server.DataStreamServer,
infoTreeUpdater *l1infotree.Updater,
@@ -51,7 +52,7 @@
runInTestMode := cfg.ImportMode

return zkStages.DefaultZkStages(ctx,
zkStages.StageL1SyncerCfg(db, l1Syncer, cfg.Zk),
zkStages.StageL1SyncerCfg(db, l1Syncer, updater, cfg.Zk),
zkStages.StageL1InfoTreeCfg(db, cfg.Zk, infoTreeUpdater),
zkStages.StageBatchesCfg(db, datastreamClient, cfg.Zk, controlServer.ChainConfig, &cfg.Miner),
zkStages.StageDataStreamCatchupCfg(dataStreamServer, db, cfg.Genesis.Config.ChainID.Uint64(), cfg.DatastreamVersion, cfg.HasExecutors()),
@@ -101,9 +102,7 @@ func NewSequencerZkStages(ctx context.Context,
forkValidator *engine_helpers.ForkValidator,
engine consensus.Engine,
dataStreamServer server.DataStreamServer,
sequencerStageSyncer *syncer.L1Syncer,
l1Syncer *syncer.L1Syncer,
l1BlockSyncer *syncer.L1Syncer,
txPool *txpool.TxPool,
txPoolDb kv.RwDB,
verifier *legacy_executor_verifier.LegacyExecutorVerifier,
@@ -117,9 +116,7 @@
runInTestMode := cfg.ImportMode

return zkStages.SequencerZkStages(ctx,
zkStages.StageL1SyncerCfg(db, l1Syncer, cfg.Zk),
zkStages.StageL1InfoTreeCfg(db, cfg.Zk, infoTreeUpdater),
zkStages.StageSequencerL1BlockSyncCfg(db, cfg.Zk, l1BlockSyncer),
zkStages.StageL1SyncerCfg(db, l1Syncer, infoTreeUpdater, cfg.Zk),
zkStages.StageDataStreamCatchupCfg(dataStreamServer, db, cfg.Genesis.Config.ChainID.Uint64(), cfg.DatastreamVersion, cfg.HasExecutors()),
zkStages.StageSequenceBlocksCfg(
db,
146 changes: 143 additions & 3 deletions zk/acc_input_hash/acc_input_hash.go
@@ -22,7 +22,7 @@ We read the inforoot from the s/c in state.
Etrog (forkid 7):
- limitTs the timestamp of the l1 block when the batch was sequenced.
Elderberry (forkid 8):
- limitTs is the timestamp of the last block in the sequenced batch.
- limitTs is the timestamp of the last block in the sequence.
*/

const SpecialZeroHash = "0x27AE5BA08D7291C96C8CBDDCC148BF48A6D68C7974B94356F53754EF6171D757"
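
The comment in this hunk pins down how limitTs is chosen per fork: Etrog (fork 7) takes the timestamp of the L1 block in which the batch was sequenced, while Elderberry (fork 8) takes the timestamp of the last L2 block in the sequence. A minimal, self-contained Go sketch of that selection follows; pickLimitTs and its parameter names are hypothetical and are not part of this package, which instead resolves these values through its AccInputHashReader and BlockReader interfaces.

// Illustrative sketch only: fork-dependent selection of limitTs as described
// in the comment above. The helper and parameter names are hypothetical.
package main

import "fmt"

const (
	forkID7Etrog      uint64 = 7
	forkID8Elderberry uint64 = 8
)

// pickLimitTs returns the timestamp bound used when hashing a batch:
// fork 7 (Etrog) uses the L1 sequencing-block timestamp, fork 8+ (Elderberry)
// uses the timestamp of the last L2 block in the sequence.
func pickLimitTs(forkID, l1SequenceBlockTs, lastL2BlockTs uint64) (uint64, error) {
	switch {
	case forkID >= forkID8Elderberry:
		return lastL2BlockTs, nil
	case forkID == forkID7Etrog:
		return l1SequenceBlockTs, nil
	default:
		return 0, fmt.Errorf("unsupported fork id %d", forkID)
	}
}

func main() {
	ts, err := pickLimitTs(forkID8Elderberry, 1714427000, 1714427009)
	if err != nil {
		panic(err)
	}
	fmt.Println("limitTs:", ts) // prints 1714427009 for Elderberry
}
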
Expand Down Expand Up @@ -110,6 +110,8 @@ func NewCalculatorWithBlockReader(ctx context.Context, tx kv.Tx, reader AccInput
return calcConstructor(baseCalc), nil
}

// PRE-ETROG (forkid < 7) Calculator: UNSUPPORTED

type PreFork7Calculator struct {
*BaseCalc
}
@@ -123,6 +125,8 @@ func (p PreFork7Calculator) Calculate(batchNum uint64) (common.Hash, error) {
return common.Hash{}, nil
}

// ETROG (forkid 7) Calculator

type Fork7Calculator struct {
*BaseCalc
}
@@ -274,6 +278,8 @@ func (f Fork7Calculator) localAccInputHashCalc(batchNum, startBatchNo uint64, pr
return accInputHash, nil
}

// ELDERBERRY (forkid 8) Calculator

type Fork8Calculator struct {
*BaseCalc
}
@@ -283,9 +289,143 @@ func NewFork8Calculator(bc *BaseCalc) AccInputHashCalculator {
}

func (f Fork8Calculator) Calculate(batchNum uint64) (common.Hash, error) {
return common.Hash{}, nil
accInputHash, returnedBatchNo, err := f.Reader.GetAccInputHashForBatchOrPrevious(batchNum)
if err != nil {
return common.Hash{}, err
}

// check the forkid of the returnedBatchNo
forkId, err := f.Reader.GetForkId(returnedBatchNo)
if err != nil {
return common.Hash{}, err
}

if forkId < uint64(chain.ForkID7Etrog) {
return common.Hash{}, fmt.Errorf("unsupported fork ID: %d", forkId)
}

// TODO: remove test spoofing! (1001 and 997 are L1-held batch accInputHashes - sequence ends)
if batchNum >= 7000 {
// let's just spoof it backwards:
accInputHash, returnedBatchNo, err = f.Reader.GetAccInputHashForBatchOrPrevious(7000)
if err != nil {
return common.Hash{}, err
}
}

// if we have it, return it
if returnedBatchNo == batchNum {
return accInputHash, nil
}

// otherwise calculate it
accInputHash, err = f.localAccInputHashCalc(batchNum, returnedBatchNo, accInputHash)
if err != nil {
return common.Hash{}, err
}

return accInputHash, nil
}

func (f Fork8Calculator) localAccInputHashCalc(batchNum, startBatchNo uint64, prevAccInputHash common.Hash) (common.Hash, error) {
return common.Hash{}, nil
var accInputHash common.Hash

infoTreeIndexes, err := f.Reader.GetL1InfoTreeIndexToRoots()
if err != nil {
return common.Hash{}, fmt.Errorf("failed to get l1 info tree indexes: %w", err)
}
if len(infoTreeIndexes) == 0 {
return common.Hash{}, fmt.Errorf("no l1 info tree indexes found")
}

// go from injected batch with known batch 0 accinputhash of 0x0...0
if startBatchNo == 0 {
startBatchNo = 1
}

// Elderberry limitTS is the highest L2 block TS
// get the highest l2 block in the sequence (highest batch)
blockNos, err := f.Reader.GetL2BlockNosByBatch(batchNum)
if err != nil {
return common.Hash{}, fmt.Errorf("failed to get l2 block nos by batch for batch %d: %w", batchNum, err)
}
seqHighestBlockNo := blockNos[len(blockNos)-1]
seqHighestBlock, err := f.BlockReader.ReadBlockByNumber(seqHighestBlockNo)
if err != nil {
return common.Hash{}, fmt.Errorf("failed to get highest block for batch %d: %w", batchNum, err)
}
limitTs := seqHighestBlock.Time()

// TODO: handle the batch 1 case, where we should check the aggregator code: https://github.com/0xPolygon/cdk/blob/develop/aggregator/aggregator.go#L1167

for i := startBatchNo; i <= batchNum; i++ {
currentForkId, err := f.Reader.GetForkId(i)
if err != nil {
return common.Hash{}, fmt.Errorf("failed to get fork id for batch %d: %w", i, err)
}

batchBlockNos, err := f.Reader.GetL2BlockNosByBatch(i)
if err != nil {
return common.Hash{}, fmt.Errorf("failed to get batch blocks for batch %d: %w", i, err)
}
batchBlocks := []*eritypes.Block{}
var coinbase common.Address
for in, blockNo := range batchBlockNos {
block, err := f.BlockReader.ReadBlockByNumber(blockNo)
if err != nil {
return common.Hash{}, fmt.Errorf("failed to get block %d: %w", blockNo, err)
}
if in == 0 {
coinbase = block.Coinbase()
}
batchBlocks = append(batchBlocks, block)
}

lastBlockNoInPreviousBatch := uint64(0)
firstBlockInBatch := batchBlocks[0]
if firstBlockInBatch.NumberU64() != 0 {
lastBlockNoInPreviousBatch = firstBlockInBatch.NumberU64() - 1
}

lastBlockInPreviousBatch, err := f.BlockReader.ReadBlockByNumber(lastBlockNoInPreviousBatch)
if err != nil {
return common.Hash{}, err
}

batchL2Data, err := utils.GenerateBatchDataFromDb(f.Tx, f.Reader, batchBlocks, lastBlockInPreviousBatch, currentForkId)
if err != nil {
return common.Hash{}, fmt.Errorf("failed to generate batch data for batch %d: %w", i, err)
}

highestBlock := batchBlocks[len(batchBlocks)-1]

sr := state.NewPlainState(f.Tx, highestBlock.NumberU64(), systemcontracts.SystemContractCodeLookup["hermez"])
l1InfoRootBytes, err := sr.ReadAccountStorage(state.ADDRESS_SCALABLE_L2, 1, &state.BLOCK_INFO_ROOT_STORAGE_POS)
if err != nil {
return common.Hash{}, fmt.Errorf("failed to read l1 info root: %w", err)
}
sr.Close()
l1InfoRoot := common.BytesToHash(l1InfoRootBytes)

fmt.Println("[l1InfoRoot]", l1InfoRoot.Hex())
fmt.Println("[limitTs]", limitTs)

inputs := utils.AccHashInputs{
OldAccInputHash: prevAccInputHash,
Sequencer: coinbase,
BatchData: batchL2Data,
L1InfoRoot: l1InfoRoot,
LimitTimestamp: limitTs,
ForcedBlockHash: common.Hash{},
}
accInputHash, err = utils.CalculateAccInputHashByForkId(inputs)
if err != nil {
return common.Hash{}, fmt.Errorf("failed to calculate accInputHash for batch %d: %w", i, err)
}
prevAccInputHash = accInputHash
}
return accInputHash, nil
}
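
The Calculate and localAccInputHashCalc additions above walk forward from the last accInputHash known on L1 and fold each batch into a running accumulator. Below is a conceptual, runnable Go sketch of that chaining pattern; the plain Keccak-256 concatenation and the names accHashInputs and nextAccInputHash are assumptions made for illustration only, since the real fork-specific field packing is delegated to utils.CalculateAccInputHashByForkId in the diff.

// Conceptual sketch of the accumulator chaining in the loop above: each batch
// folds the previous accInputHash together with its own inputs into the next
// hash. The field encoding here (plain concatenation into Keccak-256) is an
// illustrative assumption, not the repo's fork-specific packing.
package main

import (
	"encoding/binary"
	"fmt"

	"golang.org/x/crypto/sha3"
)

type accHashInputs struct {
	oldAccInputHash [32]byte
	sequencer       [20]byte
	batchData       []byte
	l1InfoRoot      [32]byte
	limitTimestamp  uint64
	forcedBlockHash [32]byte
}

// nextAccInputHash chains one batch onto the running accumulator.
func nextAccInputHash(in accHashInputs) [32]byte {
	h := sha3.NewLegacyKeccak256()
	h.Write(in.oldAccInputHash[:])
	h.Write(in.batchData)
	h.Write(in.l1InfoRoot[:])
	ts := make([]byte, 8)
	binary.BigEndian.PutUint64(ts, in.limitTimestamp)
	h.Write(ts)
	h.Write(in.sequencer[:])
	h.Write(in.forcedBlockHash[:])
	var out [32]byte
	copy(out[:], h.Sum(nil))
	return out
}

func main() {
	var acc [32]byte // the chain starts from the zero hash before batch 1
	for batch := 1; batch <= 3; batch++ {
		acc = nextAccInputHash(accHashInputs{
			oldAccInputHash: acc,
			batchData:       []byte(fmt.Sprintf("batch-%d-l2-data", batch)),
			limitTimestamp:  1714427009,
		})
	}
	fmt.Printf("accInputHash after 3 batches: %x\n", acc)
}
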