From a831054809b395efa441066711edadef0cb5b7b2 Mon Sep 17 00:00:00 2001 From: Moreti Georgiev Date: Thu, 31 Oct 2024 13:27:38 +0200 Subject: [PATCH 01/88] feat: default zkevm.witness flag to false --- README.md | 2 +- cmd/utils/flags.go | 10 +++++----- zk/tests/nightly-l1-recovery/network5-config.yaml | 1 - zk/tests/nightly-l1-recovery/network5-sync-config.yaml | 1 - zk/tests/nightly-l1-recovery/network8-config.yaml | 1 - zk/tests/nightly-l1-recovery/network8-sync-config.yaml | 1 - zk/tests/unwinds/config/dynamic-integration8.yaml | 1 - 7 files changed, 6 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index a07076c4488..4e58a99cadb 100644 --- a/README.md +++ b/README.md @@ -198,7 +198,7 @@ For a full explanation of the config options, see below: Sequencer specific config: - `zkevm.executor-urls`: A csv list of the executor URLs. These will be used in a round robbin fashion by the sequencer - `zkevm.executor-strict`: Defaulted to true, but can be set to false when running the sequencer without verifications (use with extreme caution) -- `zkevm.witness-full`: Defaulted to true. Controls whether the full or partial witness is used with the executor. +- `zkevm.witness-full`: Defaulted to false. Controls whether the full or partial witness is used with the executor. - `zkevm.reject-smart-contract-deployments`: Defaulted to false. Controls whether smart contract deployments are rejected by the TxPool. Resource Utilisation config: diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 7db91e06bf3..8bd7e1f7cb7 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -696,7 +696,7 @@ var ( WitnessFullFlag = cli.BoolFlag{ Name: "zkevm.witness-full", Usage: "Enable/Diable witness full", - Value: true, + Value: false, } SyncLimit = cli.UintFlag{ Name: "zkevm.sync-limit", @@ -1433,6 +1433,7 @@ func setNodeUserIdent(ctx *cli.Context, cfg *nodecfg.Config) { cfg.UserIdent = identity } } + func setNodeUserIdentCobra(f *pflag.FlagSet, cfg *nodecfg.Config) { if identity := f.String(IdentityFlag.Name, IdentityFlag.Value, IdentityFlag.Usage); identity != nil && len(*identity) > 0 { cfg.UserIdent = *identity @@ -1741,7 +1742,7 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config, nodeName, datadir string, l if ctx.String(ChainFlag.Name) == networkname.DevChainName { // --dev mode can't use p2p networking. - //cfg.MaxPeers = 0 // It can have peers otherwise local sync is not possible + // cfg.MaxPeers = 0 // It can have peers otherwise local sync is not possible if !ctx.IsSet(ListenPortFlag.Name) { cfg.ListenAddr = ":0" } @@ -1762,7 +1763,7 @@ func SetNodeConfig(ctx *cli.Context, cfg *nodecfg.Config, logger log.Logger) { func SetNodeConfigCobra(cmd *cobra.Command, cfg *nodecfg.Config) { flags := cmd.Flags() - //SetP2PConfig(ctx, &cfg.P2P) + // SetP2PConfig(ctx, &cfg.P2P) setNodeUserIdentCobra(flags, cfg) setDataDirCobra(flags, cfg) } @@ -2138,7 +2139,7 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.C } cfg.Sync.UseSnapshots = ethconfig.UseSnapshotsByChainName(chain) - if ctx.IsSet(SnapshotFlag.Name) { //force override default by cli + if ctx.IsSet(SnapshotFlag.Name) { // force override default by cli cfg.Sync.UseSnapshots = ctx.Bool(SnapshotFlag.Name) } @@ -2169,7 +2170,6 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.C webseedsList = append(webseedsList, known...) 
} cfg.Downloader, err = downloadercfg2.New(cfg.Dirs, version, lvl, downloadRate, uploadRate, ctx.Int(TorrentPortFlag.Name), ctx.Int(TorrentConnsPerFileFlag.Name), ctx.Int(TorrentDownloadSlotsFlag.Name), libcommon.CliString2Array(ctx.String(TorrentStaticPeersFlag.Name)), webseedsList, chain, true) - if err != nil { panic(err) } diff --git a/zk/tests/nightly-l1-recovery/network5-config.yaml b/zk/tests/nightly-l1-recovery/network5-config.yaml index b274cc3b290..80d134d3742 100644 --- a/zk/tests/nightly-l1-recovery/network5-config.yaml +++ b/zk/tests/nightly-l1-recovery/network5-config.yaml @@ -23,7 +23,6 @@ zkevm.datastream-version: 2 zkevm.data-stream-host: "127.0.0.1" zkevm.executor-strict: false # zkevm.executor-urls: "zkevm2-stateless-executor:50071" -zkevm.witness-full: false zkevm.sequencer-block-seal-time: "5s" zkevm.sequencer-batch-seal-time: "15m" zkevm.allow-pre-eip155-transactions: true diff --git a/zk/tests/nightly-l1-recovery/network5-sync-config.yaml b/zk/tests/nightly-l1-recovery/network5-sync-config.yaml index 5a09921c6af..f99dfb183bd 100644 --- a/zk/tests/nightly-l1-recovery/network5-sync-config.yaml +++ b/zk/tests/nightly-l1-recovery/network5-sync-config.yaml @@ -19,7 +19,6 @@ zkevm.l1-query-delay: 6000 zkevm.l1-first-block: 6032365 zkevm.executor-strict: false # zkevm.executor-urls: "zkevm2-stateless-executor:50071" -zkevm.witness-full: false zkevm.sequencer-block-seal-time: "5s" zkevm.sequencer-batch-seal-time: "15m" zkevm.allow-pre-eip155-transactions: true diff --git a/zk/tests/nightly-l1-recovery/network8-config.yaml b/zk/tests/nightly-l1-recovery/network8-config.yaml index 4c414cdc9c0..8fa15132691 100644 --- a/zk/tests/nightly-l1-recovery/network8-config.yaml +++ b/zk/tests/nightly-l1-recovery/network8-config.yaml @@ -22,7 +22,6 @@ zkevm.datastream-version: 2 zkevm.data-stream-host: "127.0.0.1" # zkevm.sequencer-initial-fork-id: 9 zkevm.executor-strict: false -zkevm.witness-full: false zkevm.sequencer-block-seal-time: "5s" zkevm.sequencer-batch-seal-time: "15m" zkevm.allow-pre-eip155-transactions: true diff --git a/zk/tests/nightly-l1-recovery/network8-sync-config.yaml b/zk/tests/nightly-l1-recovery/network8-sync-config.yaml index f51c251ea81..745635e524a 100644 --- a/zk/tests/nightly-l1-recovery/network8-sync-config.yaml +++ b/zk/tests/nightly-l1-recovery/network8-sync-config.yaml @@ -22,7 +22,6 @@ zkevm.datastream-version: 2 #zkevm.data-stream-host: "127.0.0.1" # zkevm.sequencer-initial-fork-id: 9 zkevm.executor-strict: false -zkevm.witness-full: false zkevm.sequencer-block-seal-time: "5s" zkevm.sequencer-batch-seal-time: "15m" zkevm.allow-pre-eip155-transactions: true diff --git a/zk/tests/unwinds/config/dynamic-integration8.yaml b/zk/tests/unwinds/config/dynamic-integration8.yaml index 2590341925d..519f9882ace 100644 --- a/zk/tests/unwinds/config/dynamic-integration8.yaml +++ b/zk/tests/unwinds/config/dynamic-integration8.yaml @@ -21,7 +21,6 @@ zkevm.l1-first-block: 6411787 zkevm.datastream-version: 2 # zkevm.data-stream-host: "127.0.0.1" zkevm.executor-strict: false -zkevm.witness-full: false zkevm.sequencer-block-seal-time: "5s" zkevm.sequencer-batch-seal-time: "15m" zkevm.allow-pre-eip155-transactions: true From fda81661fd8b6a0e76a03cfd2ca127490bfe2b00 Mon Sep 17 00:00:00 2001 From: Valentin Staykov <79150443+V-Staykov@users.noreply.github.com> Date: Thu, 31 Oct 2024 14:59:36 +0200 Subject: [PATCH 02/88] fix: parse rpcBatchNumber correctly by string (#1395) --- turbo/jsonrpc/zkevm_api.go | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 
deletions(-) diff --git a/turbo/jsonrpc/zkevm_api.go b/turbo/jsonrpc/zkevm_api.go index 8e341114ddd..7523214d8b4 100644 --- a/turbo/jsonrpc/zkevm_api.go +++ b/turbo/jsonrpc/zkevm_api.go @@ -317,13 +317,11 @@ func (api *ZkEvmAPIImpl) GetBatchDataByNumbers(ctx context.Context, batchNumbers bds := make([]*types.BatchDataSlim, 0, len(batchNumbers.Numbers)) for _, batchRpcNumber := range batchNumbers.Numbers { - // looks weird but we're using the rpc.BlockNumber type to represent the batch number, LatestBlockNumber represents latest batch - if batchRpcNumber == rpc.LatestBlockNumber { - batchRpcNumber = rpc.BlockNumber(highestBatchNo) + batchNo, _, err := rpchelper.GetBatchNumber(batchRpcNumber, tx, nil) + if err != nil { + return nil, err } - batchNo := batchRpcNumber.Uint64() - bd := &types.BatchDataSlim{ Number: types.ArgUint64(batchNo), Empty: false, @@ -419,7 +417,7 @@ func (api *ZkEvmAPIImpl) getBatchBlocksWithSenders(ctx context.Context, tx kv.Tx // GetBatchByNumber returns a batch from the current canonical chain. If number is nil, the // latest known batch is returned. -func (api *ZkEvmAPIImpl) GetBatchByNumber(ctx context.Context, batchNumber rpc.BlockNumber, fullTx *bool) (json.RawMessage, error) { +func (api *ZkEvmAPIImpl) GetBatchByNumber(ctx context.Context, rpcBatchNumber rpc.BlockNumber, fullTx *bool) (json.RawMessage, error) { tx, err := api.db.BeginRo(ctx) if err != nil { return nil, err @@ -435,6 +433,11 @@ func (api *ZkEvmAPIImpl) GetBatchByNumber(ctx context.Context, batchNumber rpc.B return nil, err } + batchNo, _, err := rpchelper.GetBatchNumber(rpcBatchNumber, tx, nil) + if err != nil { + return nil, err + } + // check sync status of node syncStatus, err := api.ethApi.Syncing(ctx) if err != nil { @@ -447,17 +450,10 @@ func (api *ZkEvmAPIImpl) GetBatchByNumber(ctx context.Context, batchNumber rpc.B } } - if batchNumber.Uint64() > highestBatchNo { + if batchNo > highestBatchNo { return nil, nil } - // looks weird but we're using the rpc.BlockNumber type to represent the batch number, LatestBlockNumber represents latest batch - if batchNumber == rpc.LatestBlockNumber { - batchNumber = rpc.BlockNumber(highestBatchNo) - } - - batchNo := batchNumber.Uint64() - batch := &types.Batch{ Number: types.ArgUint64(batchNo), } @@ -1797,14 +1793,19 @@ func (api *ZkEvmAPIImpl) GetForkById(ctx context.Context, forkId hexutil.Uint64) } // GetForkIdByBatchNumber returns the fork ID given the provided batch number -func (api *ZkEvmAPIImpl) GetForkIdByBatchNumber(ctx context.Context, batchNumber rpc.BlockNumber) (hexutil.Uint64, error) { +func (api *ZkEvmAPIImpl) GetForkIdByBatchNumber(ctx context.Context, rpcBatchNumber rpc.BlockNumber) (hexutil.Uint64, error) { tx, err := api.db.BeginRo(ctx) if err != nil { return hexutil.Uint64(0), err } defer tx.Rollback() - currentForkId, err := getForkIdByBatchNo(tx, uint64(batchNumber)) + batchNumber, _, err := rpchelper.GetBatchNumber(rpcBatchNumber, tx, nil) + if err != nil { + return 0, err + } + + currentForkId, err := getForkIdByBatchNo(tx, batchNumber) if err != nil { return 0, err } From 7cfc631c1fcbe45681f5a90dcbc9522e5f24182b Mon Sep 17 00:00:00 2001 From: Scott Fairclough <70711990+hexoscott@users.noreply.github.com> Date: Mon, 4 Nov 2024 10:07:10 +0000 Subject: [PATCH 03/88] Regular info tree updates (#1399) * starting work on info tree updates during execution * info tree updater in sequencer loop * logging latest index for info tree updates --- cmd/utils/flags.go | 7 +- eth/backend.go | 7 +- eth/ethconfig/config_zkevm.go | 5 +- 
turbo/cli/default_flags.go | 1 + turbo/cli/flags_zkevm.go | 1 + turbo/stages/zk_stages.go | 10 +- zk/l1infotree/updater.go | 297 ++++++++++++++++++++++ zk/stages/stage_l1_info_tree.go | 210 +-------------- zk/stages/stage_l1_sequencer_sync.go | 41 --- zk/stages/stage_sequence_execute.go | 18 +- zk/stages/stage_sequence_execute_utils.go | 14 +- 11 files changed, 360 insertions(+), 251 deletions(-) create mode 100644 zk/l1infotree/updater.go diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 8bd7e1f7cb7..273a5a9b1d5 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -738,9 +738,14 @@ var ( Usage: "The file that contains the initial (injected) batch data.", Value: "", } + InfoTreeUpdateInterval = cli.DurationFlag{ + Name: "zkevm.info-tree-update-interval", + Usage: "The interval at which the sequencer checks the L1 for new GER information", + Value: 1 * time.Minute, + } ACLPrintHistory = cli.IntFlag{ Name: "acl.print-history", - Usage: "Number of entries to print from the ACL history on node startup", + Usage: "Number of entries to print from the ACL history on node start up", Value: 10, } DebugTimers = cli.BoolFlag{ diff --git a/eth/backend.go b/eth/backend.go index a07a2f89d5c..7299e75844f 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -139,6 +139,7 @@ import ( "github.com/ledgerwatch/erigon/zk/utils" "github.com/ledgerwatch/erigon/zk/witness" "github.com/ledgerwatch/erigon/zkevm/etherman" + "github.com/ledgerwatch/erigon/zk/l1infotree" ) // Config contains the configuration options of the ETH protocol. @@ -1097,6 +1098,8 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger cfg.L1HighestBlockType, ) + l1InfoTreeUpdater := l1infotree.NewUpdater(cfg.Zk, l1InfoTreeSyncer) + if isSequencer { // if we are sequencing transactions, we do the sequencing loop... 
witnessGenerator := witness.NewGenerator( @@ -1167,11 +1170,11 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger backend.dataStream, backend.l1Syncer, seqVerSyncer, - l1InfoTreeSyncer, l1BlockSyncer, backend.txPool2, backend.txPool2DB, verifier, + l1InfoTreeUpdater, ) backend.syncUnwindOrder = zkStages.ZkSequencerUnwindOrder @@ -1205,9 +1208,9 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger backend.forkValidator, backend.engine, backend.l1Syncer, - l1InfoTreeSyncer, streamClient, backend.dataStream, + l1InfoTreeUpdater, ) backend.syncUnwindOrder = zkStages.ZkUnwindOrder diff --git a/eth/ethconfig/config_zkevm.go b/eth/ethconfig/config_zkevm.go index c52e6f6021d..490aaa62613 100644 --- a/eth/ethconfig/config_zkevm.go +++ b/eth/ethconfig/config_zkevm.go @@ -87,8 +87,9 @@ type Zk struct { TxPoolRejectSmartContractDeployments bool - InitialBatchCfgFile string - ACLPrintHistory int + InitialBatchCfgFile string + ACLPrintHistory int + InfoTreeUpdateInterval time.Duration } var DefaultZkConfig = &Zk{} diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index 93d00265e43..b3cd3537cd6 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -284,4 +284,5 @@ var DefaultFlags = []cli.Flag{ &utils.InitialBatchCfgFile, &utils.ACLPrintHistory, + &utils.InfoTreeUpdateInterval, } diff --git a/turbo/cli/flags_zkevm.go b/turbo/cli/flags_zkevm.go index 4b9eeee85ba..2f459516a9e 100644 --- a/turbo/cli/flags_zkevm.go +++ b/turbo/cli/flags_zkevm.go @@ -186,6 +186,7 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { VirtualCountersSmtReduction: ctx.Float64(utils.VirtualCountersSmtReduction.Name), InitialBatchCfgFile: ctx.String(utils.InitialBatchCfgFile.Name), ACLPrintHistory: ctx.Int(utils.ACLPrintHistory.Name), + InfoTreeUpdateInterval: ctx.Duration(utils.InfoTreeUpdateInterval.Name), } utils2.EnableTimer(cfg.DebugTimers) diff --git a/turbo/stages/zk_stages.go b/turbo/stages/zk_stages.go index 05d45326c58..1a796e37250 100644 --- a/turbo/stages/zk_stages.go +++ b/turbo/stages/zk_stages.go @@ -20,6 +20,7 @@ import ( zkStages "github.com/ledgerwatch/erigon/zk/stages" "github.com/ledgerwatch/erigon/zk/syncer" "github.com/ledgerwatch/erigon/zk/txpool" + "github.com/ledgerwatch/erigon/zk/l1infotree" ) // NewDefaultZkStages creates stages for zk syncer (RPC mode) @@ -34,9 +35,9 @@ func NewDefaultZkStages(ctx context.Context, forkValidator *engine_helpers.ForkValidator, engine consensus.Engine, l1Syncer *syncer.L1Syncer, - l1InfoTreeSyncer *syncer.L1Syncer, datastreamClient zkStages.DatastreamClient, datastreamServer *datastreamer.StreamServer, + infoTreeUpdater *l1infotree.Updater, ) []*stagedsync.Stage { dirs := cfg.Dirs blockWriter := blockio.NewBlockWriter(cfg.HistoryV3) @@ -51,7 +52,7 @@ func NewDefaultZkStages(ctx context.Context, return zkStages.DefaultZkStages(ctx, zkStages.StageL1SyncerCfg(db, l1Syncer, cfg.Zk), - zkStages.StageL1InfoTreeCfg(db, cfg.Zk, l1InfoTreeSyncer), + zkStages.StageL1InfoTreeCfg(db, cfg.Zk, infoTreeUpdater), zkStages.StageBatchesCfg(db, datastreamClient, cfg.Zk, controlServer.ChainConfig, &cfg.Miner), zkStages.StageDataStreamCatchupCfg(datastreamServer, db, cfg.Genesis.Config.ChainID.Uint64(), cfg.DatastreamVersion, cfg.HasExecutors()), stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter), @@ -101,11 +102,11 @@ func NewSequencerZkStages(ctx context.Context, datastreamServer *datastreamer.StreamServer, sequencerStageSyncer 
*syncer.L1Syncer, l1Syncer *syncer.L1Syncer, - l1InfoTreeSyncer *syncer.L1Syncer, l1BlockSyncer *syncer.L1Syncer, txPool *txpool.TxPool, txPoolDb kv.RwDB, verifier *legacy_executor_verifier.LegacyExecutorVerifier, + infoTreeUpdater *l1infotree.Updater, ) []*stagedsync.Stage { dirs := cfg.Dirs blockReader := freezeblocks.NewBlockReader(snapshots, nil) @@ -117,7 +118,7 @@ func NewSequencerZkStages(ctx context.Context, return zkStages.SequencerZkStages(ctx, zkStages.StageL1SyncerCfg(db, l1Syncer, cfg.Zk), zkStages.StageL1SequencerSyncCfg(db, cfg.Zk, sequencerStageSyncer), - zkStages.StageL1InfoTreeCfg(db, cfg.Zk, l1InfoTreeSyncer), + zkStages.StageL1InfoTreeCfg(db, cfg.Zk, infoTreeUpdater), zkStages.StageSequencerL1BlockSyncCfg(db, cfg.Zk, l1BlockSyncer), zkStages.StageDataStreamCatchupCfg(datastreamServer, db, cfg.Genesis.Config.ChainID.Uint64(), cfg.DatastreamVersion, cfg.HasExecutors()), zkStages.StageSequenceBlocksCfg( @@ -144,6 +145,7 @@ func NewSequencerZkStages(ctx context.Context, txPoolDb, verifier, uint16(cfg.YieldSize), + infoTreeUpdater, ), stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3, agg), zkStages.StageZkInterHashesCfg(db, true, true, false, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg, cfg.Zk), diff --git a/zk/l1infotree/updater.go b/zk/l1infotree/updater.go new file mode 100644 index 00000000000..4f51c861312 --- /dev/null +++ b/zk/l1infotree/updater.go @@ -0,0 +1,297 @@ +package l1infotree + +import ( + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/zk/hermez_db" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + zkTypes "github.com/ledgerwatch/erigon/zk/types" + "github.com/ledgerwatch/erigon/core/types" + "time" + "github.com/ledgerwatch/erigon/zk/contracts" + "github.com/ledgerwatch/log/v3" + "fmt" + "sort" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/iden3/go-iden3-crypto/keccak256" + "errors" +) + +type Syncer interface { + IsSyncStarted() bool + RunQueryBlocks(lastCheckedBlock uint64) + GetLogsChan() chan []types.Log + GetProgressMessageChan() chan string + IsDownloading() bool + GetHeader(blockNumber uint64) (*types.Header, error) + L1QueryHeaders(logs []types.Log) (map[uint64]*types.Header, error) + StopQueryBlocks() + ConsumeQueryBlocks() + WaitQueryBlocksToFinish() +} + +type Updater struct { + cfg *ethconfig.Zk + syncer Syncer + progress uint64 + latestUpdate *zkTypes.L1InfoTreeUpdate +} + +func NewUpdater(cfg *ethconfig.Zk, syncer Syncer) *Updater { + return &Updater{ + cfg: cfg, + syncer: syncer, + } +} + +func (u *Updater) GetProgress() uint64 { + return u.progress +} + +func (u *Updater) GetLatestUpdate() *zkTypes.L1InfoTreeUpdate { + return u.latestUpdate +} + +func (u *Updater) WarmUp(tx kv.RwTx) (err error) { + defer func() { + if err != nil { + u.syncer.StopQueryBlocks() + u.syncer.ConsumeQueryBlocks() + u.syncer.WaitQueryBlocksToFinish() + } + }() + + hermezDb := hermez_db.NewHermezDb(tx) + + progress, err := stages.GetStageProgress(tx, stages.L1InfoTree) + if err != nil { + return err + } + if progress == 0 { + progress = u.cfg.L1FirstBlock - 1 + } + + u.progress = progress + + latestUpdate, _, err := hermezDb.GetLatestL1InfoTreeUpdate() + if err != nil { + return err + } + + u.latestUpdate = latestUpdate + + if !u.syncer.IsSyncStarted() { + u.syncer.RunQueryBlocks(u.progress) + } + + return nil +} + +func (u *Updater) CheckForInfoTreeUpdates(logPrefix string, tx kv.RwTx) (allLogs []types.Log, err error) { + defer func() { + if err != nil { 
+ u.syncer.StopQueryBlocks() + u.syncer.ConsumeQueryBlocks() + u.syncer.WaitQueryBlocksToFinish() + } + }() + + hermezDb := hermez_db.NewHermezDb(tx) + logChan := u.syncer.GetLogsChan() + progressChan := u.syncer.GetProgressMessageChan() + + // first get all the logs we need to process +LOOP: + for { + select { + case logs := <-logChan: + allLogs = append(allLogs, logs...) + case msg := <-progressChan: + log.Info(fmt.Sprintf("[%s] %s", logPrefix, msg)) + default: + if !u.syncer.IsDownloading() { + break LOOP + } + time.Sleep(10 * time.Millisecond) + } + } + + // sort the logs by block number - it is important that we process them in order to get the index correct + sort.Slice(allLogs, func(i, j int) bool { + l1 := allLogs[i] + l2 := allLogs[j] + // first sort by block number and if equal then by tx index + if l1.BlockNumber != l2.BlockNumber { + return l1.BlockNumber < l2.BlockNumber + } + if l1.TxIndex != l2.TxIndex { + return l1.TxIndex < l2.TxIndex + } + return l1.Index < l2.Index + }) + + // chunk the logs into batches, so we don't overload the RPC endpoints too much at once + chunks := chunkLogs(allLogs, 50) + + ticker := time.NewTicker(10 * time.Second) + defer ticker.Stop() + processed := 0 + + tree, err := initialiseL1InfoTree(hermezDb) + if err != nil { + return nil, err + } + + // process the logs in chunks + for _, chunk := range chunks { + select { + case <-ticker.C: + log.Info(fmt.Sprintf("[%s] Processed %d/%d logs, %d%% complete", logPrefix, processed, len(allLogs), processed*100/len(allLogs))) + default: + } + + headersMap, err := u.syncer.L1QueryHeaders(chunk) + if err != nil { + return nil, err + } + + for _, l := range chunk { + switch l.Topics[0] { + case contracts.UpdateL1InfoTreeTopic: + header := headersMap[l.BlockNumber] + if header == nil { + header, err = u.syncer.GetHeader(l.BlockNumber) + if err != nil { + return nil, err + } + } + + tmpUpdate, err := createL1InfoTreeUpdate(l, header) + if err != nil { + return nil, err + } + + leafHash := HashLeafData(tmpUpdate.GER, tmpUpdate.ParentHash, tmpUpdate.Timestamp) + if tree.LeafExists(leafHash) { + log.Warn("Skipping log as L1 Info Tree leaf already exists", "hash", leafHash) + continue + } + + if u.latestUpdate != nil { + tmpUpdate.Index = u.latestUpdate.Index + 1 + } // if latestUpdate is nil then Index = 0 which is the default value so no need to set it + u.latestUpdate = tmpUpdate + + newRoot, err := tree.AddLeaf(uint32(u.latestUpdate.Index), leafHash) + if err != nil { + return nil, err + } + log.Debug("New L1 Index", + "index", u.latestUpdate.Index, + "root", newRoot.String(), + "mainnet", u.latestUpdate.MainnetExitRoot.String(), + "rollup", u.latestUpdate.RollupExitRoot.String(), + "ger", u.latestUpdate.GER.String(), + "parent", u.latestUpdate.ParentHash.String(), + ) + + if err = handleL1InfoTreeUpdate(hermezDb, u.latestUpdate); err != nil { + return nil, err + } + if err = hermezDb.WriteL1InfoTreeLeaf(u.latestUpdate.Index, leafHash); err != nil { + return nil, err + } + if err = hermezDb.WriteL1InfoTreeRoot(common.BytesToHash(newRoot[:]), u.latestUpdate.Index); err != nil { + return nil, err + } + + processed++ + default: + log.Warn("received unexpected topic from l1 info tree stage", "topic", l.Topics[0]) + } + } + } + + // save the progress - we add one here so that we don't cause overlap on the next run. 
We don't want to duplicate an info tree update in the db + if len(allLogs) > 0 { + u.progress = allLogs[len(allLogs)-1].BlockNumber + 1 + } + if err = stages.SaveStageProgress(tx, stages.L1InfoTree, u.progress); err != nil { + return nil, err + } + + return allLogs, nil +} + +func chunkLogs(slice []types.Log, chunkSize int) [][]types.Log { + var chunks [][]types.Log + for i := 0; i < len(slice); i += chunkSize { + end := i + chunkSize + + // If end is greater than the length of the slice, reassign it to the length of the slice + if end > len(slice) { + end = len(slice) + } + + chunks = append(chunks, slice[i:end]) + } + return chunks +} + +func initialiseL1InfoTree(hermezDb *hermez_db.HermezDb) (*L1InfoTree, error) { + leaves, err := hermezDb.GetAllL1InfoTreeLeaves() + if err != nil { + return nil, err + } + + allLeaves := make([][32]byte, len(leaves)) + for i, l := range leaves { + allLeaves[i] = l + } + + tree, err := NewL1InfoTree(32, allLeaves) + if err != nil { + return nil, err + } + + return tree, nil +} + +func createL1InfoTreeUpdate(l types.Log, header *types.Header) (*zkTypes.L1InfoTreeUpdate, error) { + if len(l.Topics) != 3 { + return nil, errors.New("received log for info tree that did not have 3 topics") + } + + if l.BlockNumber != header.Number.Uint64() { + return nil, errors.New("received log for info tree that did not match the block number") + } + + mainnetExitRoot := l.Topics[1] + rollupExitRoot := l.Topics[2] + combined := append(mainnetExitRoot.Bytes(), rollupExitRoot.Bytes()...) + ger := keccak256.Hash(combined) + update := &zkTypes.L1InfoTreeUpdate{ + GER: common.BytesToHash(ger), + MainnetExitRoot: mainnetExitRoot, + RollupExitRoot: rollupExitRoot, + BlockNumber: l.BlockNumber, + Timestamp: header.Time, + ParentHash: header.ParentHash, + } + + return update, nil +} + +func handleL1InfoTreeUpdate( + hermezDb *hermez_db.HermezDb, + update *zkTypes.L1InfoTreeUpdate, +) error { + var err error + if err = hermezDb.WriteL1InfoTreeUpdate(update); err != nil { + return err + } + if err = hermezDb.WriteL1InfoTreeUpdateToGer(update); err != nil { + return err + } + return nil +} diff --git a/zk/stages/stage_l1_info_tree.go b/zk/stages/stage_l1_info_tree.go index c277f23ed40..19c2202ad35 100644 --- a/zk/stages/stage_l1_info_tree.go +++ b/zk/stages/stage_l1_info_tree.go @@ -3,32 +3,24 @@ package stages import ( "context" "fmt" - "sort" - "time" - - "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync" - "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/zk/contracts" - "github.com/ledgerwatch/erigon/zk/hermez_db" "github.com/ledgerwatch/erigon/zk/l1infotree" "github.com/ledgerwatch/log/v3" ) type L1InfoTreeCfg struct { - db kv.RwDB - zkCfg *ethconfig.Zk - syncer IL1Syncer + db kv.RwDB + zkCfg *ethconfig.Zk + updater *l1infotree.Updater } -func StageL1InfoTreeCfg(db kv.RwDB, zkCfg *ethconfig.Zk, sync IL1Syncer) L1InfoTreeCfg { +func StageL1InfoTreeCfg(db kv.RwDB, zkCfg *ethconfig.Zk, updater *l1infotree.Updater) L1InfoTreeCfg { return L1InfoTreeCfg{ - db: db, - zkCfg: zkCfg, - syncer: sync, + db: db, + zkCfg: zkCfg, + updater: updater, } } @@ -54,161 +46,21 @@ func SpawnL1InfoTreeStage( defer tx.Rollback() } - hermezDb := hermez_db.NewHermezDb(tx) - - progress, err := stages.GetStageProgress(tx, stages.L1InfoTree) - if err != nil { + if err := cfg.updater.WarmUp(tx); err != nil { 
return err } - if progress == 0 { - progress = cfg.zkCfg.L1FirstBlock - 1 - } - latestUpdate, _, err := hermezDb.GetLatestL1InfoTreeUpdate() + allLogs, err := cfg.updater.CheckForInfoTreeUpdates(logPrefix, tx) if err != nil { return err } - if !cfg.syncer.IsSyncStarted() { - cfg.syncer.RunQueryBlocks(progress) - defer func() { - if funcErr != nil { - cfg.syncer.StopQueryBlocks() - cfg.syncer.ConsumeQueryBlocks() - cfg.syncer.WaitQueryBlocksToFinish() - } - }() - } - - logChan := cfg.syncer.GetLogsChan() - progressChan := cfg.syncer.GetProgressMessageChan() - - // first get all the logs we need to process - var allLogs []types.Log -LOOP: - for { - select { - case logs := <-logChan: - allLogs = append(allLogs, logs...) - case msg := <-progressChan: - log.Info(fmt.Sprintf("[%s] %s", logPrefix, msg)) - default: - if !cfg.syncer.IsDownloading() { - break LOOP - } - time.Sleep(10 * time.Millisecond) - } - } - - // sort the logs by block number - it is important that we process them in order to get the index correct - sort.Slice(allLogs, func(i, j int) bool { - l1 := allLogs[i] - l2 := allLogs[j] - // first sort by block number and if equal then by tx index - if l1.BlockNumber != l2.BlockNumber { - return l1.BlockNumber < l2.BlockNumber - } - if l1.TxIndex != l2.TxIndex { - return l1.TxIndex < l2.TxIndex - } - return l1.Index < l2.Index - }) - - // chunk the logs into batches, so we don't overload the RPC endpoints too much at once - chunks := chunkLogs(allLogs, 50) - - ticker := time.NewTicker(10 * time.Second) - defer ticker.Stop() - processed := 0 - - tree, err := initialiseL1InfoTree(hermezDb) - if err != nil { - funcErr = err - return funcErr + var latestIndex uint64 + latestUpdate := cfg.updater.GetLatestUpdate() + if latestUpdate != nil { + latestIndex = latestUpdate.Index } - - // process the logs in chunks - for _, chunk := range chunks { - select { - case <-ticker.C: - log.Info(fmt.Sprintf("[%s] Processed %d/%d logs, %d%% complete", logPrefix, processed, len(allLogs), processed*100/len(allLogs))) - default: - } - - headersMap, err := cfg.syncer.L1QueryHeaders(chunk) - if err != nil { - funcErr = err - return funcErr - } - - for _, l := range chunk { - switch l.Topics[0] { - case contracts.UpdateL1InfoTreeTopic: - header := headersMap[l.BlockNumber] - if header == nil { - header, funcErr = cfg.syncer.GetHeader(l.BlockNumber) - if funcErr != nil { - return funcErr - } - } - - tmpUpdate, err := CreateL1InfoTreeUpdate(l, header) - if err != nil { - funcErr = err - return funcErr - } - - leafHash := l1infotree.HashLeafData(tmpUpdate.GER, tmpUpdate.ParentHash, tmpUpdate.Timestamp) - if tree.LeafExists(leafHash) { - log.Warn("Skipping log as L1 Info Tree leaf already exists", "hash", leafHash) - continue - } - - if latestUpdate != nil { - tmpUpdate.Index = latestUpdate.Index + 1 - } // if latestUpdate is nil then Index = 0 which is the default value so no need to set it - latestUpdate = tmpUpdate - - newRoot, err := tree.AddLeaf(uint32(latestUpdate.Index), leafHash) - if err != nil { - funcErr = err - return funcErr - } - log.Debug("New L1 Index", - "index", latestUpdate.Index, - "root", newRoot.String(), - "mainnet", latestUpdate.MainnetExitRoot.String(), - "rollup", latestUpdate.RollupExitRoot.String(), - "ger", latestUpdate.GER.String(), - "parent", latestUpdate.ParentHash.String(), - ) - - if funcErr = HandleL1InfoTreeUpdate(hermezDb, latestUpdate); funcErr != nil { - return funcErr - } - if funcErr = hermezDb.WriteL1InfoTreeLeaf(latestUpdate.Index, leafHash); funcErr != nil { - return 
funcErr - } - if funcErr = hermezDb.WriteL1InfoTreeRoot(common.BytesToHash(newRoot[:]), latestUpdate.Index); funcErr != nil { - return funcErr - } - - processed++ - default: - log.Warn("received unexpected topic from l1 info tree stage", "topic", l.Topics[0]) - } - } - } - - // save the progress - we add one here so that we don't cause overlap on the next run. We don't want to duplicate an info tree update in the db - if len(allLogs) > 0 { - progress = allLogs[len(allLogs)-1].BlockNumber + 1 - } - if funcErr = stages.SaveStageProgress(tx, stages.L1InfoTree, progress); funcErr != nil { - return funcErr - } - - log.Info(fmt.Sprintf("[%s] Info tree updates", logPrefix), "count", len(allLogs)) + log.Info(fmt.Sprintf("[%s] Info tree updates", logPrefix), "count", len(allLogs), "latestIndex", latestIndex) if freshTx { if funcErr = tx.Commit(); funcErr != nil { @@ -219,40 +71,6 @@ LOOP: return nil } -func chunkLogs(slice []types.Log, chunkSize int) [][]types.Log { - var chunks [][]types.Log - for i := 0; i < len(slice); i += chunkSize { - end := i + chunkSize - - // If end is greater than the length of the slice, reassign it to the length of the slice - if end > len(slice) { - end = len(slice) - } - - chunks = append(chunks, slice[i:end]) - } - return chunks -} - -func initialiseL1InfoTree(hermezDb *hermez_db.HermezDb) (*l1infotree.L1InfoTree, error) { - leaves, err := hermezDb.GetAllL1InfoTreeLeaves() - if err != nil { - return nil, err - } - - allLeaves := make([][32]byte, len(leaves)) - for i, l := range leaves { - allLeaves[i] = l - } - - tree, err := l1infotree.NewL1InfoTree(32, allLeaves) - if err != nil { - return nil, err - } - - return tree, nil -} - func UnwindL1InfoTreeStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg L1InfoTreeCfg, ctx context.Context) error { return nil } diff --git a/zk/stages/stage_l1_sequencer_sync.go b/zk/stages/stage_l1_sequencer_sync.go index b0b20de4295..ee2e12f83ca 100644 --- a/zk/stages/stage_l1_sequencer_sync.go +++ b/zk/stages/stage_l1_sequencer_sync.go @@ -2,12 +2,10 @@ package stages import ( "context" - "errors" "fmt" "math/big" "time" - "github.com/iden3/go-iden3-crypto/keccak256" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" ethTypes "github.com/ledgerwatch/erigon/core/types" @@ -200,45 +198,6 @@ Loop: return nil } -func CreateL1InfoTreeUpdate(l ethTypes.Log, header *ethTypes.Header) (*types.L1InfoTreeUpdate, error) { - if len(l.Topics) != 3 { - return nil, errors.New("received log for info tree that did not have 3 topics") - } - - if l.BlockNumber != header.Number.Uint64() { - return nil, errors.New("received log for info tree that did not match the block number") - } - - mainnetExitRoot := l.Topics[1] - rollupExitRoot := l.Topics[2] - combined := append(mainnetExitRoot.Bytes(), rollupExitRoot.Bytes()...) 
- ger := keccak256.Hash(combined) - update := &types.L1InfoTreeUpdate{ - GER: common.BytesToHash(ger), - MainnetExitRoot: mainnetExitRoot, - RollupExitRoot: rollupExitRoot, - BlockNumber: l.BlockNumber, - Timestamp: header.Time, - ParentHash: header.ParentHash, - } - - return update, nil -} - -func HandleL1InfoTreeUpdate( - hermezDb *hermez_db.HermezDb, - update *types.L1InfoTreeUpdate, -) error { - var err error - if err = hermezDb.WriteL1InfoTreeUpdate(update); err != nil { - return err - } - if err = hermezDb.WriteL1InfoTreeUpdateToGer(update); err != nil { - return err - } - return nil -} - const ( injectedBatchLogTransactionStartByte = 128 injectedBatchLastGerStartByte = 31 diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index 5ebc6a89382..82b397ee8df 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -81,6 +81,10 @@ func sequencingBatchStep( } defer sdb.tx.Rollback() + if err = cfg.infoTreeUpdater.WarmUp(sdb.tx); err != nil { + return err + } + executionAt, err := s.ExecutionAt(sdb.tx) if err != nil { return err @@ -196,10 +200,11 @@ func sequencingBatchStep( } } - batchTicker, logTicker, blockTicker := prepareTickers(batchContext.cfg) + batchTicker, logTicker, blockTicker, infoTreeTicker := prepareTickers(batchContext.cfg) defer batchTicker.Stop() defer logTicker.Stop() defer blockTicker.Stop() + defer infoTreeTicker.Stop() log.Info(fmt.Sprintf("[%s] Starting batch %d...", logPrefix, batchState.batchNumber)) @@ -302,6 +307,17 @@ func sequencingBatchStep( log.Debug(fmt.Sprintf("[%s] Batch timeout reached", logPrefix)) batchTimedOut = true } + case <-infoTreeTicker.C: + newLogs, err := cfg.infoTreeUpdater.CheckForInfoTreeUpdates(logPrefix, sdb.tx) + if err != nil { + return err + } + var latestIndex uint64 + latest := cfg.infoTreeUpdater.GetLatestUpdate() + if latest != nil { + latestIndex = latest.Index + } + log.Info(fmt.Sprintf("[%s] Info tree updates", logPrefix), "count", len(newLogs), "latestIndex", latestIndex) default: if batchState.isLimboRecovery() { batchState.blockState.transactionsForInclusion, err = getLimboTransaction(ctx, cfg, batchState.limboRecoveryData.limboTxHash) diff --git a/zk/stages/stage_sequence_execute_utils.go b/zk/stages/stage_sequence_execute_utils.go index c699d553483..52045d2e03d 100644 --- a/zk/stages/stage_sequence_execute_utils.go +++ b/zk/stages/stage_sequence_execute_utils.go @@ -39,6 +39,7 @@ import ( zktypes "github.com/ledgerwatch/erigon/zk/types" "github.com/ledgerwatch/erigon/zk/utils" "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon/zk/l1infotree" ) const ( @@ -84,6 +85,8 @@ type SequenceBlockCfg struct { legacyVerifier *verifier.LegacyExecutorVerifier yieldSize uint16 + + infoTreeUpdater *l1infotree.Updater } func StageSequenceBlocksCfg( @@ -112,6 +115,7 @@ func StageSequenceBlocksCfg( txPoolDb kv.RwDB, legacyVerifier *verifier.LegacyExecutorVerifier, yieldSize uint16, + infoTreeUpdater *l1infotree.Updater, ) SequenceBlockCfg { return SequenceBlockCfg{ @@ -139,6 +143,7 @@ func StageSequenceBlocksCfg( txPoolDb: txPoolDb, legacyVerifier: legacyVerifier, yieldSize: yieldSize, + infoTreeUpdater: infoTreeUpdater, } } @@ -168,10 +173,10 @@ func (sCfg *SequenceBlockCfg) toErigonExecuteBlockCfg() stagedsync.ExecuteBlockC func validateIfDatastreamIsAheadOfExecution( s *stagedsync.StageState, - // u stagedsync.Unwinder, +// u stagedsync.Unwinder, ctx context.Context, cfg SequenceBlockCfg, - // historyCfg stagedsync.HistoryCfg, +// historyCfg 
stagedsync.HistoryCfg, ) error { roTx, err := cfg.db.BeginRo(ctx) if err != nil { @@ -336,12 +341,13 @@ func prepareL1AndInfoTreeRelatedStuff(sdb *stageDb, batchState *BatchState, prop return } -func prepareTickers(cfg *SequenceBlockCfg) (*time.Ticker, *time.Ticker, *time.Ticker) { +func prepareTickers(cfg *SequenceBlockCfg) (*time.Ticker, *time.Ticker, *time.Ticker, *time.Ticker) { batchTicker := time.NewTicker(cfg.zk.SequencerBatchSealTime) logTicker := time.NewTicker(10 * time.Second) blockTicker := time.NewTicker(cfg.zk.SequencerBlockSealTime) + infoTreeTicker := time.NewTicker(cfg.zk.InfoTreeUpdateInterval) - return batchTicker, logTicker, blockTicker + return batchTicker, logTicker, blockTicker, infoTreeTicker } // will be called at the start of every new block created within a batch to figure out if there is a new GER From 7990afa9389911f181918c87e2aeacb48b1251de Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Mon, 4 Nov 2024 15:35:50 +0000 Subject: [PATCH 04/88] tweak(ci): kurtosis upgrade prep (#1361) --- .github/workflows/ci_zkevm.yml | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/.github/workflows/ci_zkevm.yml b/.github/workflows/ci_zkevm.yml index 26da9a0d3de..f1803011bfd 100644 --- a/.github/workflows/ci_zkevm.yml +++ b/.github/workflows/ci_zkevm.yml @@ -106,15 +106,10 @@ jobs: sed -i '/zkevm.sequencer-non-empty-batch-seal-time:/d' templates/cdk-erigon/config.yml sed -i '/sentry.drop-useless-peers:/d' templates/cdk-erigon/config.yml - - name: Configure Kurtosis CDK - working-directory: ./kurtosis-cdk - run: | - /usr/local/bin/yq -i '.args.data_availability_mode = "${{ matrix.da-mode }}"' params.yml - /usr/local/bin/yq -i '.args.cdk_erigon_node_image = "cdk-erigon:local"' params.yml - - name: Deploy Kurtosis CDK package working-directory: ./kurtosis-cdk - run: kurtosis run --enclave cdk-v1 --args-file params.yml --image-download always . + run: | + kurtosis run --enclave cdk-v1 --image-download always . '{"args": {"data_availability_mode": "${{ matrix.da-mode }}", "cdk_erigon_node_image": "cdk-erigon:local"}}' - name: Monitor verified batches working-directory: ./kurtosis-cdk @@ -233,8 +228,6 @@ jobs: - name: Configure Kurtosis CDK working-directory: ./kurtosis-cdk run: | - /usr/local/bin/yq -i '.args.cdk_erigon_node_image = "cdk-erigon:local"' params.yml - /usr/local/bin/yq -i '.args.erigon_strict_mode = false' params.yml sed -i 's/"londonBlock": [0-9]\+/"londonBlock": 0/' ./templates/cdk-erigon/chainspec.json sed -i 's/"normalcyBlock": [0-9]\+/"normalcyBlock": 0/' ./templates/cdk-erigon/chainspec.json sed -i 's/"shanghaiTime": [0-9]\+/"shanghaiTime": 0/' ./templates/cdk-erigon/chainspec.json @@ -243,7 +236,8 @@ jobs: - name: Deploy Kurtosis CDK package working-directory: ./kurtosis-cdk - run: kurtosis run --enclave cdk-v1 --args-file params.yml --image-download always . + run: | + kurtosis run --enclave cdk-v1 --image-download always . 
'{"args": {"erigon_strict_mode": false, "cdk_erigon_node_image": "cdk-erigon:local"}}' - name: Dynamic gas fee tx load test working-directory: ./kurtosis-cdk From 4d57f5df64fed47180385fb4b2d062d3a7b43bab Mon Sep 17 00:00:00 2001 From: Ji Hwan KIM <125336262+jhkimqd@users.noreply.github.com> Date: Tue, 5 Nov 2024 00:56:26 +0900 Subject: [PATCH 05/88] test: add unit tests for zk/txpool/pool.go (#1396) * test: testnoncefromaddress Signed-off-by: Ji Hwan * test: new test testnoncefromaddress Signed-off-by: Ji Hwan * test: fix insufficient funds to test address Signed-off-by: Ji Hwan * chore: cleanup Signed-off-by: Ji Hwan * test: add test for processRemoteTxs() Signed-off-by: Ji Hwan * test: change expected nonce to match cdk erigon pool behaviour Signed-off-by: Ji Hwan * fix: revert nonce calculation logic in queued pool Signed-off-by: Ji Hwan * chore: lint Signed-off-by: Ji Hwan * fix: use []string to test all possible combinations for policy output Signed-off-by: Ji Hwan --------- Signed-off-by: Ji Hwan --- zk/txpool/policy_test.go | 35 ++++++-- zk/txpool/pool_test.go | 171 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 200 insertions(+), 6 deletions(-) create mode 100644 zk/txpool/pool_test.go diff --git a/zk/txpool/policy_test.go b/zk/txpool/policy_test.go index 63cf02914ce..b639cc4b1eb 100644 --- a/zk/txpool/policy_test.go +++ b/zk/txpool/policy_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "os" + "strings" "testing" "time" @@ -94,6 +95,15 @@ func policyTransactionSliceEqual(a, b []PolicyTransaction) bool { return true } +func containsSubstring(slice []string, substring string) bool { + for _, str := range slice { + if strings.Contains(str, substring) { + return true + } + } + return false +} + func TestCheckDBsCreation(t *testing.T) { t.Parallel() @@ -239,20 +249,33 @@ func TestPolicyMapping(t *testing.T) { var policiesNone []byte var pListNone []Policy + // Expected outcomes - these are stored in []string, because reading policies doesn't guarantee order, and the returned values may be in arbitrary order. + // Therefore a []string is used to check if the returned values are within the expected combinations stored within the string slice. 
+ var expectedAll []string + var expectedSendTx []string + var expectedDeploy []string + var expectedNone []string + + expectedAll = append(expectedAll, "\tsendTx: true\n\tdeploy: true") + expectedAll = append(expectedAll, "\tdeploy: true\n\tsendTx: true") + expectedSendTx = append(expectedSendTx, "\tsendTx: true") + expectedDeploy = append(expectedDeploy, "\tdeploy: true") + expectedNone = append(expectedNone, "") + var tests = []struct { policies []byte pList []Policy - want string + want []string }{ - {policiesAll, pListAll, "\tsendTx: true\n\tdeploy: true"}, - {policiesSendTx, pListSendTx, "\tsendTx: true"}, - {policiesDeploy, pListDeploy, "\tdeploy: true"}, - {policiesNone, pListNone, ""}, + {policiesAll, pListAll, expectedAll}, + {policiesSendTx, pListSendTx, expectedSendTx}, + {policiesDeploy, pListDeploy, expectedDeploy}, + {policiesNone, pListNone, expectedNone}, } for _, tt := range tests { t.Run("PolicyMapping", func(t *testing.T) { ans := policyMapping(tt.policies, tt.pList) - if ans != tt.want { + if !containsSubstring(tt.want, ans) { t.Errorf("got %v, want %v", ans, tt.want) } }) diff --git a/zk/txpool/pool_test.go b/zk/txpool/pool_test.go new file mode 100644 index 00000000000..ef80af15139 --- /dev/null +++ b/zk/txpool/pool_test.go @@ -0,0 +1,171 @@ +package txpool + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/datadir" + "github.com/ledgerwatch/erigon-lib/common/u256" + "github.com/ledgerwatch/erigon-lib/gointerfaces" + "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" + "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon-lib/kv/temporal/temporaltest" + "github.com/ledgerwatch/erigon-lib/txpool/txpoolcfg" + "github.com/ledgerwatch/erigon-lib/types" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNonceFromAddress(t *testing.T) { + assert, require := assert.New(t), require.New(t) + ch := make(chan types.Announcements, 100) + _, coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + defer coreDB.Close() + + db := memdb.NewTestPoolDB(t) + path := fmt.Sprintf("/tmp/db-test-%v", time.Now().UTC().Format(time.RFC3339Nano)) + txPoolDB := newTestTxPoolDB(t, path) + defer txPoolDB.Close() + aclsDB := newTestACLDB(t, path) + defer aclsDB.Close() + + // Check if the dbs are created. + require.NotNil(t, db) + require.NotNil(t, txPoolDB) + require.NotNil(t, aclsDB) + + cfg := txpoolcfg.DefaultConfig + ethCfg := ðconfig.Defaults + sendersCache := kvcache.New(kvcache.DefaultCoherentConfig) + pool, err := New(ch, coreDB, cfg, ethCfg, sendersCache, *u256.N1, nil, nil, aclsDB) + assert.NoError(err) + require.True(pool != nil) + ctx := context.Background() + var stateVersionID uint64 = 0 + pendingBaseFee := uint64(200000) + h1 := gointerfaces.ConvertHashToH256([32]byte{}) + + // Create address for testing. + var addr [20]byte + addr[0] = 1 + + // Fund addr with 18 Ether for sending transactions. 
+ v := make([]byte, types.EncodeSenderLengthForStorage(2, *uint256.NewInt(18 * common.Ether))) + types.EncodeSender(2, *uint256.NewInt(18 * common.Ether), v) + + change := &remote.StateChangeBatch{ + StateVersionId: stateVersionID, + PendingBlockBaseFee: pendingBaseFee, + BlockGasLimit: 1000000, + ChangeBatch: []*remote.StateChange{ + {BlockHeight: 0, BlockHash: h1}, + }, + } + change.ChangeBatch[0].Changes = append(change.ChangeBatch[0].Changes, &remote.AccountChange{ + Action: remote.Action_UPSERT, + Address: gointerfaces.ConvertAddressToH160(addr), + Data: v, + }) + tx, err := db.BeginRw(ctx) + require.NoError(err) + defer tx.Rollback() + err = pool.OnNewBlock(ctx, change, types.TxSlots{}, types.TxSlots{}, tx) + assert.NoError(err) + + { + var txSlots types.TxSlots + txSlot1 := &types.TxSlot{ + Tip: *uint256.NewInt(300000), + FeeCap: *uint256.NewInt(300000), + Gas: 100000, + Nonce: 3, + } + txSlot1.IDHash[0] = 1 + txSlots.Append(txSlot1, addr[:], true) + + reasons, err := pool.AddLocalTxs(ctx, txSlots, tx) + assert.NoError(err) + for _, reason := range reasons { + assert.Equal(Success, reason, reason.String()) + } + + // Add remote transactions, and check it processes. + pool.AddRemoteTxs(ctx, txSlots) + err = pool.processRemoteTxs(ctx) + assert.NoError(err) + + } + + // Test sending normal transactions with expected nonces. + { + txSlots := types.TxSlots{} + txSlot2 := &types.TxSlot{ + Tip: *uint256.NewInt(300000), + FeeCap: *uint256.NewInt(300000), + Gas: 100000, + Nonce: 4, + } + txSlot2.IDHash[0] = 2 + txSlot3 := &types.TxSlot{ + Tip: *uint256.NewInt(300000), + FeeCap: *uint256.NewInt(300000), + Gas: 100000, + Nonce: 6, + } + txSlot3.IDHash[0] = 3 + txSlots.Append(txSlot2, addr[:], true) + txSlots.Append(txSlot3, addr[:], true) + reasons, err := pool.AddLocalTxs(ctx, txSlots, tx) + assert.NoError(err) + for _, reason := range reasons { + assert.Equal(Success, reason, reason.String()) + } + + // Test NonceFromAddress function to check if the address' nonce is being properly tracked. + nonce, _ := pool.NonceFromAddress(addr) + // CDK Erigon will return 0, Upstream Erigon will return latest nonce including txns in the queued pool. + assert.Equal(uint64(0), nonce) + } + + // Test sending transactions without having enough balance for it. + { + var txSlots types.TxSlots + txSlot1 := &types.TxSlot{ + Tip: *uint256.NewInt(300000), + FeeCap: *uint256.NewInt(9 * common.Ether), + Gas: 100000, + Nonce: 3, + } + txSlot1.IDHash[0] = 4 + txSlots.Append(txSlot1, addr[:], true) + reasons, err := pool.AddLocalTxs(ctx, txSlots, tx) + assert.NoError(err) + for _, reason := range reasons { + assert.Equal(InsufficientFunds, reason, reason.String()) + } + } + + // Test sending transactions with too low nonce. 
+ { + var txSlots types.TxSlots + txSlot1 := &types.TxSlot{ + Tip: *uint256.NewInt(300000), + FeeCap: *uint256.NewInt(300000), + Gas: 100000, + Nonce: 1, + } + txSlot1.IDHash[0] = 5 + txSlots.Append(txSlot1, addr[:], true) + reasons, err := pool.AddLocalTxs(ctx, txSlots, tx) + assert.NoError(err) + for _, reason := range reasons { + assert.Equal(NonceTooLow, reason, reason.String()) + } + } +} From f7f7615adc04704d23afa041a90944d6ab310f0d Mon Sep 17 00:00:00 2001 From: Scott Fairclough <70711990+hexoscott@users.noreply.github.com> Date: Mon, 4 Nov 2024 15:58:03 +0000 Subject: [PATCH 06/88] agg layer network sync fixes (#1388) handles pre etrog events properly and filters on rollup Co-authored-by: Max Revitt --- cmd/utils/flags.go | 5 ++++ eth/ethconfig/config_zkevm.go | 1 + turbo/cli/default_flags.go | 1 + turbo/cli/flags_zkevm.go | 17 ++++++++++++++ zk/stages/stage_sequence_execute.go | 20 ++++++++++++++-- zk/stages/stage_sequence_execute_batch.go | 28 +++++++++++------------ zk/stages/stage_sequence_execute_state.go | 2 +- 7 files changed, 57 insertions(+), 17 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 273a5a9b1d5..45290535b50 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -733,6 +733,11 @@ var ( Usage: "The multiplier to reduce the SMT depth by when calculating virtual counters", Value: 0.6, } + BadBatches = cli.StringFlag{ + Name: "zkevm.bad-batches", + Usage: "A comma separated list of batch numbers that are known bad on the L1. These will automatically be marked as bad during L1 recovery", + Value: "", + } InitialBatchCfgFile = cli.StringFlag{ Name: "zkevm.initial-batch.config", Usage: "The file that contains the initial (injected) batch data.", diff --git a/eth/ethconfig/config_zkevm.go b/eth/ethconfig/config_zkevm.go index 490aaa62613..89ff70fc810 100644 --- a/eth/ethconfig/config_zkevm.go +++ b/eth/ethconfig/config_zkevm.go @@ -90,6 +90,7 @@ type Zk struct { InitialBatchCfgFile string ACLPrintHistory int InfoTreeUpdateInterval time.Duration + BadBatches []uint64 } var DefaultZkConfig = &Zk{} diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index b3cd3537cd6..682bbbf2bec 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -281,6 +281,7 @@ var DefaultFlags = []cli.Flag{ &utils.DisableVirtualCounters, &utils.DAUrl, &utils.VirtualCountersSmtReduction, + &utils.BadBatches, &utils.InitialBatchCfgFile, &utils.ACLPrintHistory, diff --git a/turbo/cli/flags_zkevm.go b/turbo/cli/flags_zkevm.go index 2f459516a9e..02d773dc363 100644 --- a/turbo/cli/flags_zkevm.go +++ b/turbo/cli/flags_zkevm.go @@ -14,6 +14,7 @@ import ( "github.com/ledgerwatch/erigon/zk/sequencer" utils2 "github.com/ledgerwatch/erigon/zk/utils" "github.com/urfave/cli/v2" + "strconv" ) var DeprecatedFlags = map[string]string{ @@ -112,6 +113,21 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { witnessMemSize := utils.DatasizeFlagValue(ctx, utils.WitnessMemdbSize.Name) + badBatchStrings := strings.Split(ctx.String(utils.BadBatches.Name), ",") + badBatches := make([]uint64, 0) + for _, s := range badBatchStrings { + if s == "" { + // if there are no entries then we can just ignore it and move on + continue + } + // parse the string as uint64 + val, err := strconv.ParseUint(s, 10, 64) + if err != nil { + panic(fmt.Sprintf("could not parse bad batch number %s", s)) + } + badBatches = append(badBatches, val) + } + cfg.Zk = ðconfig.Zk{ L2ChainId: ctx.Uint64(utils.L2ChainIdFlag.Name), L2RpcUrl: 
ctx.String(utils.L2RpcUrlFlag.Name), @@ -184,6 +200,7 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { DataStreamWriteTimeout: ctx.Duration(utils.DataStreamWriteTimeout.Name), DataStreamInactivityTimeout: ctx.Duration(utils.DataStreamInactivityTimeout.Name), VirtualCountersSmtReduction: ctx.Float64(utils.VirtualCountersSmtReduction.Name), + BadBatches: badBatches, InitialBatchCfgFile: ctx.String(utils.InitialBatchCfgFile.Name), ACLPrintHistory: ctx.Int(utils.ACLPrintHistory.Name), InfoTreeUpdateInterval: ctx.Duration(utils.InfoTreeUpdateInterval.Name), diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index 82b397ee8df..5387b30fe91 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -195,8 +195,24 @@ func sequencingBatchStep( return nil } - if handled, err := doCheckForBadBatch(batchContext, batchState, executionAt); err != nil || handled { - return err + bad := false + for _, batch := range cfg.zk.BadBatches { + if batch == batchState.batchNumber { + bad = true + break + } + } + + // if we aren't forcing a bad batch then check it + if !bad { + bad, err = doCheckForBadBatch(batchContext, batchState, executionAt) + if err != nil { + return err + } + } + + if bad { + return writeBadBatchDetails(batchContext, batchState, executionAt) } } diff --git a/zk/stages/stage_sequence_execute_batch.go b/zk/stages/stage_sequence_execute_batch.go index b4bf892b639..5f110235fe4 100644 --- a/zk/stages/stage_sequence_execute_batch.go +++ b/zk/stages/stage_sequence_execute_batch.go @@ -62,28 +62,28 @@ func doCheckForBadBatch(batchContext *BatchContext, batchState *BatchState, this return false, err } - if !badBatch { - return false, nil - } + return badBatch, nil +} +func writeBadBatchDetails(batchContext *BatchContext, batchState *BatchState, blockNumber uint64) error { log.Info(fmt.Sprintf("[%s] Skipping bad batch %d...", batchContext.s.LogPrefix(), batchState.batchNumber)) // store the fact that this batch was invalid during recovery - will be used for the stream later - if err = batchContext.sdb.hermezDb.WriteInvalidBatch(batchState.batchNumber); err != nil { - return false, err + if err := batchContext.sdb.hermezDb.WriteInvalidBatch(batchState.batchNumber); err != nil { + return err } - if err = batchContext.sdb.hermezDb.WriteBatchCounters(currentBlock.NumberU64(), []int{}); err != nil { - return false, err + if err := batchContext.sdb.hermezDb.WriteBatchCounters(blockNumber, []int{}); err != nil { + return err } - if err = stages.SaveStageProgress(batchContext.sdb.tx, stages.HighestSeenBatchNumber, batchState.batchNumber); err != nil { - return false, err + if err := stages.SaveStageProgress(batchContext.sdb.tx, stages.HighestSeenBatchNumber, batchState.batchNumber); err != nil { + return err } - if err = batchContext.sdb.hermezDb.WriteForkId(batchState.batchNumber, batchState.forkId); err != nil { - return false, err + if err := batchContext.sdb.hermezDb.WriteForkId(batchState.batchNumber, batchState.forkId); err != nil { + return err } - if err = batchContext.sdb.tx.Commit(); err != nil { - return false, err + if err := batchContext.sdb.tx.Commit(); err != nil { + return err } - return true, nil + return nil } func updateStreamAndCheckRollback( diff --git a/zk/stages/stage_sequence_execute_state.go b/zk/stages/stage_sequence_execute_state.go index 87b4c72bc78..4e74f6210a8 100644 --- a/zk/stages/stage_sequence_execute_state.go +++ b/zk/stages/stage_sequence_execute_state.go @@ -157,7 +157,7 @@ func 
(bs *BatchState) getBlockHeaderForcedTimestamp() uint64 { } func (bs *BatchState) getCoinbase(cfg *SequenceBlockCfg) common.Address { - if bs.isL1Recovery() { + if bs.batchNumber > 1 && bs.isL1Recovery() { return bs.batchL1RecoveryData.recoveredBatchData.Coinbase } From d1079c0d56985cafd340077b722a7856e13f1051 Mon Sep 17 00:00:00 2001 From: Ji Hwan KIM <125336262+jhkimqd@users.noreply.github.com> Date: Tue, 5 Nov 2024 01:49:03 +0900 Subject: [PATCH 07/88] fix: change return value of ListContentAtACL and write test for it (#1373) * fix: change return value of ListContentAtACL and write test for it Signed-off-by: Ji Hwan * chore: cleanup + fix test cases Signed-off-by: Ji Hwan --------- Signed-off-by: Ji Hwan --- zk/txpool/policy.go | 28 ++++++++++++++++++---- zk/txpool/policy_test.go | 50 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+), 5 deletions(-) diff --git a/zk/txpool/policy.go b/zk/txpool/policy.go index 5755928eb37..920003b94cb 100644 --- a/zk/txpool/policy.go +++ b/zk/txpool/policy.go @@ -561,12 +561,15 @@ func RemovePolicy(ctx context.Context, aclDB kv.RwDB, aclType string, addr commo return err } -func ListContentAtACL(ctx context.Context, db kv.RwDB) (string, error) { - +func ListContentAtACL(ctx context.Context, db kv.RwDB) ([]string, error) { + var combinedBuffers []string var buffer bytes.Buffer + var bufferConfig bytes.Buffer + var bufferBlockList bytes.Buffer + var bufferAllowlist bytes.Buffer tables := db.AllTables() - buffer.WriteString("ListContentAtACL\n") + buffer.WriteString(" \n") buffer.WriteString("Tables\nTable - { Flags, AutoDupSortKeysConversion, IsDeprecated, DBI, DupFromLen, DupToLen }\n") for key, config := range tables { buffer.WriteString(fmt.Sprint(key, config, "\n")) @@ -577,6 +580,7 @@ func ListContentAtACL(ctx context.Context, db kv.RwDB) (string, error) { buffer.WriteString("\nConfig\n") err := tx.ForEach(Config, nil, func(k, v []byte) error { buffer.WriteString(fmt.Sprintf("Key: %s, Value: %s\n", string(k), string(v))) + bufferConfig.WriteString(fmt.Sprintf("Key: %s, Value: %s\n", string(k), string(v))) return nil }) @@ -598,10 +602,14 @@ func ListContentAtACL(ctx context.Context, db kv.RwDB) (string, error) { "\nBlocklist\n%s", BlockListContent.String(), )) + bufferBlockList.WriteString(fmt.Sprintf( + "\nBlocklist\n%s", + BlockListContent.String(), + )) } else { buffer.WriteString("\nBlocklist is empty") + bufferBlockList.WriteString("\nBlocklist is empty") } - // Allowlist table var AllowlistContent strings.Builder err = tx.ForEach(Allowlist, nil, func(k, v []byte) error { @@ -620,14 +628,24 @@ func ListContentAtACL(ctx context.Context, db kv.RwDB) (string, error) { "\nAllowlist\n%s", AllowlistContent.String(), )) + bufferAllowlist.WriteString(fmt.Sprintf( + "\nAllowlist\n%s", + AllowlistContent.String(), + )) } else { buffer.WriteString("\nAllowlist is empty") + bufferAllowlist.WriteString("\nAllowlist is empty") } return err }) - return buffer.String(), err + combinedBuffers = append(combinedBuffers, buffer.String()) + combinedBuffers = append(combinedBuffers, bufferConfig.String()) + combinedBuffers = append(combinedBuffers, bufferBlockList.String()) + combinedBuffers = append(combinedBuffers, bufferAllowlist.String()) + + return combinedBuffers, err } // SetMode sets the mode of the ACL diff --git a/zk/txpool/policy_test.go b/zk/txpool/policy_test.go index b639cc4b1eb..d12d59eea39 100644 --- a/zk/txpool/policy_test.go +++ b/zk/txpool/policy_test.go @@ -647,3 +647,53 @@ func TestIsActionAllowed(t *testing.T) { 
require.True(t, allowed) // In disabled mode, all actions are allowed }) } + +func TestListContentAtACL(t *testing.T) { + db := newTestACLDB(t, "") + ctx := context.Background() + + // Populate different tables in ACL + // Create a test address and policy for allowlist table + addrAllowlist := common.HexToAddress("0x1234567890abcdef") + policyAllowlist := SendTx + + err := AddPolicy(ctx, db, "allowlist", addrAllowlist, policyAllowlist) + require.NoError(t, err) + + // Create a test address and policy for blocklist table + addrBlocklist := common.HexToAddress("0x1234567890abcdef") + policyBlocklist := SendTx + + err = AddPolicy(ctx, db, "blocklist", addrBlocklist, policyBlocklist) + require.NoError(t, err) + + var tests = []struct { + wantAllowlist string + wantBlockList string + }{ + {"\nAllowlist\nKey: 0000000000000000000000001234567890abcdef, Value: {\n\tdeploy: false\n\tsendTx: true\n}\n", "\nBlocklist\nKey: 0000000000000000000000001234567890abcdef, Value: {\n\tsendTx: true\n\tdeploy: false\n}\n"}, + } + // ListContentAtACL will return []string in the following order: + // [buffer.String(), bufferConfig.String(), bufferBlockList.String(), bufferAllowlist.String()] + ans, err := ListContentAtACL(ctx, db) + for _, tt := range tests { + t.Run("ListContentAtACL", func(t *testing.T) { + switch { + case err != nil: + t.Errorf("ListContentAtACL did not execute successfully: %v", err) + case !strings.Contains(ans[3], "\nAllowlist\nKey: 0000000000000000000000001234567890abcdef"): + t.Errorf("got %v, want %v", ans, tt.wantAllowlist) + case !strings.Contains(ans[3], "sendTx: true"): + t.Errorf("got %v, want %v", ans, tt.wantAllowlist) + case !strings.Contains(ans[3], "deploy: false"): + t.Errorf("got %v, want %v", ans, tt.wantAllowlist) + case !strings.Contains(ans[2], "\nBlocklist\nKey: 0000000000000000000000001234567890abcdef"): + t.Errorf("got %v, want %v", ans, tt.wantBlockList) + case !strings.Contains(ans[2], "sendTx: true"): + t.Errorf("got %v, want %v", ans, tt.wantBlockList) + case !strings.Contains(ans[2], "deploy: false"): + t.Errorf("got %v, want %v", ans, tt.wantBlockList) + } + }) + } +} From df6709f065b59a299e893b30db714b82b8a0e41e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= <93934272+Stefan-Ethernal@users.noreply.github.com> Date: Mon, 4 Nov 2024 20:36:32 +0100 Subject: [PATCH 08/88] feat: Decode witness to SMT (#1363) * feat: Decode witness to SMT * chore: warning fixes and simplifications * test: use require in witness unit tests * feat: simplifications in SMT state reader * fix: address comment * test: use requires * Allocate array in getValueInBytes --- smt/pkg/db/mdbx.go | 14 +++ smt/pkg/smt/entity_storage.go | 69 +++++++----- smt/pkg/smt/smt.go | 94 +++++++++++++++- smt/pkg/smt/smt_state_reader.go | 193 ++++++++++++++++++++++++++++++++ smt/pkg/smt/witness.go | 169 +++++++++++++++++++++++++++- smt/pkg/smt/witness_test.go | 160 +++++++++++++++++++------- 6 files changed, 628 insertions(+), 71 deletions(-) create mode 100644 smt/pkg/smt/smt_state_reader.go diff --git a/smt/pkg/db/mdbx.go b/smt/pkg/db/mdbx.go index 18351d0fe6c..adca963eaac 100644 --- a/smt/pkg/db/mdbx.go +++ b/smt/pkg/db/mdbx.go @@ -2,6 +2,7 @@ package db import ( "context" + "encoding/hex" "math/big" "fmt" @@ -304,6 +305,19 @@ func (m *EriRoDb) GetCode(codeHash []byte) ([]byte, error) { return data, nil } +func (m *EriDb) AddCode(code []byte) error { + codeHash := utils.HashContractBytecode(hex.EncodeToString(code)) + + codeHashBytes, err := hex.DecodeString(strings.TrimPrefix(codeHash, 
"0x")) + if err != nil { + return err + } + + codeHashBytes = utils.ResizeHashTo32BytesByPrefixingWithZeroes(codeHashBytes) + + return m.tx.Put(kv.Code, codeHashBytes, code) +} + func (m *EriRoDb) PrintDb() { err := m.kvTxRo.ForEach(TableSmt, []byte{}, func(k, v []byte) error { println(string(k), string(v)) diff --git a/smt/pkg/smt/entity_storage.go b/smt/pkg/smt/entity_storage.go index e33a6d06357..261b27103cd 100644 --- a/smt/pkg/smt/entity_storage.go +++ b/smt/pkg/smt/entity_storage.go @@ -14,30 +14,55 @@ import ( "github.com/ledgerwatch/erigon/smt/pkg/utils" ) +// SetAccountState sets the balance and nonce of an account func (s *SMT) SetAccountState(ethAddr string, balance, nonce *big.Int) (*big.Int, error) { + _, err := s.SetAccountBalance(ethAddr, balance) + if err != nil { + return nil, err + } + + auxOut, err := s.SetAccountNonce(ethAddr, nonce) + if err != nil { + return nil, err + } + + return auxOut, nil +} + +// SetAccountBalance sets the balance of an account +func (s *SMT) SetAccountBalance(ethAddr string, balance *big.Int) (*big.Int, error) { keyBalance := utils.KeyEthAddrBalance(ethAddr) - keyNonce := utils.KeyEthAddrNonce(ethAddr) - if _, err := s.InsertKA(keyBalance, balance); err != nil { + response, err := s.InsertKA(keyBalance, balance) + if err != nil { return nil, err } ks := utils.EncodeKeySource(utils.KEY_BALANCE, utils.ConvertHexToAddress(ethAddr), common.Hash{}) - if err := s.Db.InsertKeySource(keyBalance, ks); err != nil { + err = s.Db.InsertKeySource(keyBalance, ks) + if err != nil { return nil, err } - auxRes, err := s.InsertKA(keyNonce, nonce) + return response.NewRootScalar.ToBigInt(), err +} + +// SetAccountNonce sets the nonce of an account +func (s *SMT) SetAccountNonce(ethAddr string, nonce *big.Int) (*big.Int, error) { + keyNonce := utils.KeyEthAddrNonce(ethAddr) + + response, err := s.InsertKA(keyNonce, nonce) if err != nil { return nil, err } - ks = utils.EncodeKeySource(utils.KEY_NONCE, utils.ConvertHexToAddress(ethAddr), common.Hash{}) - if err := s.Db.InsertKeySource(keyNonce, ks); err != nil { + ks := utils.EncodeKeySource(utils.KEY_NONCE, utils.ConvertHexToAddress(ethAddr), common.Hash{}) + err = s.Db.InsertKeySource(keyNonce, ks) + if err != nil { return nil, err } - return auxRes.NewRootScalar.ToBigInt(), nil + return response.NewRootScalar.ToBigInt(), nil } func (s *SMT) SetAccountStorage(addr libcommon.Address, acc *accounts.Account) error { @@ -80,13 +105,7 @@ func (s *SMT) SetContractBytecode(ethAddr string, bytecode string) error { ks = utils.EncodeKeySource(utils.SC_LENGTH, utils.ConvertHexToAddress(ethAddr), common.Hash{}) - err = s.Db.InsertKeySource(keyContractLength, ks) - - if err != nil { - return err - } - - return err + return s.Db.InsertKeySource(keyContractLength, ks) } func (s *SMT) SetContractStorage(ethAddr string, storage map[string]string, progressChan chan uint64) (*big.Int, error) { @@ -203,7 +222,7 @@ func (s *SMT) SetStorage(ctx context.Context, logPrefix string, accChanges map[l for addr, acc := range accChanges { select { case <-ctx.Done(): - return nil, nil, fmt.Errorf(fmt.Sprintf("[%s] Context done", logPrefix)) + return nil, nil, fmt.Errorf("[%s] Context done", logPrefix) default: } ethAddr := addr.String() @@ -250,7 +269,7 @@ func (s *SMT) SetStorage(ctx context.Context, logPrefix string, accChanges map[l for addr, code := range codeChanges { select { case <-ctx.Done(): - return nil, nil, fmt.Errorf(fmt.Sprintf("[%s] Context done", logPrefix)) + return nil, nil, fmt.Errorf("[%s] Context done", logPrefix) default: 
} @@ -295,7 +314,7 @@ func (s *SMT) SetStorage(ctx context.Context, logPrefix string, accChanges map[l for addr, storage := range storageChanges { select { case <-ctx.Done(): - return nil, nil, fmt.Errorf(fmt.Sprintf("[%s] Context done", logPrefix)) + return nil, nil, fmt.Errorf("[%s] Context done", logPrefix) default: } ethAddr := addr.String() @@ -304,7 +323,7 @@ func (s *SMT) SetStorage(ctx context.Context, logPrefix string, accChanges map[l for k, v := range storage { keyStoragePosition := utils.KeyContractStorage(ethAddrBigIngArray, k) - valueBigInt := convertStrintToBigInt(v) + valueBigInt := convertStringToBigInt(v) keysBatchStorage = append(keysBatchStorage, &keyStoragePosition) if valuesBatchStorage, isDelete, err = appendToValuesBatchStorageBigInt(valuesBatchStorage, valueBigInt); err != nil { return nil, nil, err @@ -341,7 +360,7 @@ func (s *SMT) DeleteKeySource(nodeKey *utils.NodeKey) error { } func calcHashVal(v string) (*utils.NodeValue8, [4]uint64, error) { - val := convertStrintToBigInt(v) + val := convertStringToBigInt(v) x := utils.ScalarToArrayBig(val) value, err := utils.NodeValue8FromBigIntArray(x) @@ -354,10 +373,10 @@ func calcHashVal(v string) (*utils.NodeValue8, [4]uint64, error) { return value, h, nil } -func convertStrintToBigInt(v string) *big.Int { +func convertStringToBigInt(v string) *big.Int { base := 10 if strings.HasPrefix(v, "0x") { - v = v[2:] + v = strings.TrimPrefix(v, "0x") base = 16 } @@ -374,14 +393,8 @@ func appendToValuesBatchStorageBigInt(valuesBatchStorage []*utils.NodeValue8, va } func convertBytecodeToBigInt(bytecode string) (*big.Int, int, error) { - var parsedBytecode string bi := utils.HashContractBytecodeBigInt(bytecode) - - if strings.HasPrefix(bytecode, "0x") { - parsedBytecode = bytecode[2:] - } else { - parsedBytecode = bytecode - } + parsedBytecode := strings.TrimPrefix(bytecode, "0x") if len(parsedBytecode)%2 != 0 { parsedBytecode = "0" + parsedBytecode diff --git a/smt/pkg/smt/smt.go b/smt/pkg/smt/smt.go index 1c2a8a386e4..50d0221916d 100644 --- a/smt/pkg/smt/smt.go +++ b/smt/pkg/smt/smt.go @@ -23,6 +23,7 @@ type DB interface { InsertKeySource(key utils.NodeKey, value []byte) error DeleteKeySource(key utils.NodeKey) error InsertHashKey(key utils.NodeKey, value utils.NodeKey) error + AddCode(code []byte) error DeleteHashKey(key utils.NodeKey) error Delete(string) error DeleteByNodeKey(key utils.NodeKey) error @@ -297,7 +298,9 @@ func (s *SMT) insert(k utils.NodeKey, v utils.NodeValue8, newValH [4]uint64, old if err != nil { return nil, err } - s.Db.InsertHashKey(newLeafHash, k) + if err := s.Db.InsertHashKey(newLeafHash, k); err != nil { + return nil, err + } if level >= 0 { for j := 0; j < 4; j++ { siblings[level][keys[level]*4+j] = new(big.Int).SetUint64(newLeafHash[j]) @@ -649,7 +652,7 @@ func (s *SMT) updateDepth(newDepth int) { newDepthAsByte := byte(newDepth & 0xFF) if oldDepth < newDepthAsByte { - s.Db.SetDepth(newDepthAsByte) + _ = s.Db.SetDepth(newDepthAsByte) } } @@ -728,3 +731,90 @@ func (s *RoSMT) traverseAndMark(ctx context.Context, node *big.Int, visited Visi return true, nil }) } + +// InsertHashNode inserts a hash node into the SMT. The SMT should not contain any other leaf nodes with the same path prefix. Otherwise, the new root hash will be incorrect. +// TODO: Support insertion of hash nodes even if there are leaf nodes with the same path prefix in SMT. 
+func (s *SMT) InsertHashNode(path []int, hash *big.Int) (*big.Int, error) { + s.clearUpMutex.Lock() + defer s.clearUpMutex.Unlock() + + or, err := s.getLastRoot() + if err != nil { + return nil, err + } + + h := utils.ScalarToArray(hash) + + var nodeHash [4]uint64 + copy(nodeHash[:], h[:4]) + + lastRoot, err := s.insertHashNode(path, nodeHash, or) + if err != nil { + return nil, err + } + + if err = s.setLastRoot(lastRoot); err != nil { + return nil, err + } + + return lastRoot.ToBigInt(), nil +} + +func (s *SMT) insertHashNode(path []int, hash [4]uint64, root utils.NodeKey) (utils.NodeKey, error) { + if len(path) == 0 { + newValHBig := utils.ArrayToScalar(hash[:]) + v := utils.ScalarToNodeValue8(newValHBig) + + err := s.hashSave(v.ToUintArray(), utils.LeafCapacity, hash) + if err != nil { + return utils.NodeKey{}, err + } + + return hash, nil + } + + rootVal := utils.NodeValue12{} + + if !root.IsZero() { + v, err := s.Db.Get(root) + if err != nil { + return utils.NodeKey{}, err + } + + rootVal = v + } + + childIndex := path[0] + + childOldRoot := rootVal[childIndex*4 : childIndex*4+4] + + childNewRoot, err := s.insertHashNode(path[1:], hash, utils.NodeKeyFromBigIntArray(childOldRoot)) + + if err != nil { + return utils.NodeKey{}, err + } + + var newIn [8]uint64 + + emptyRootVal := utils.NodeValue12{} + + if childIndex == 0 { + var sibling [4]uint64 + if rootVal == emptyRootVal { + sibling = [4]uint64{0, 0, 0, 0} + } else { + sibling = *rootVal.Get4to8() + } + newIn = utils.ConcatArrays4(childNewRoot, sibling) + } else { + var sibling [4]uint64 + if rootVal == emptyRootVal { + sibling = [4]uint64{0, 0, 0, 0} + } else { + sibling = *rootVal.Get0to4() + } + newIn = utils.ConcatArrays4(sibling, childNewRoot) + } + + return s.hashcalcAndSave(newIn, utils.BranchCapacity) +} diff --git a/smt/pkg/smt/smt_state_reader.go b/smt/pkg/smt/smt_state_reader.go new file mode 100644 index 00000000000..4e1b4849497 --- /dev/null +++ b/smt/pkg/smt/smt_state_reader.go @@ -0,0 +1,193 @@ +package smt + +import ( + "bytes" + "context" + "errors" + "math/big" + + "github.com/holiman/uint256" + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/types/accounts" + "github.com/ledgerwatch/erigon/smt/pkg/utils" + "github.com/ledgerwatch/erigon/zkevm/log" +) + +var _ state.StateReader = (*SMT)(nil) + +// ReadAccountData reads account data from the SMT +func (s *SMT) ReadAccountData(address libcommon.Address) (*accounts.Account, error) { + balance, err := s.GetAccountBalance(address) + if err != nil { + return nil, err + } + + nonce, err := s.GetAccountNonce(address) + if err != nil { + return nil, err + } + + codeHash, err := s.GetAccountCodeHash(address) + if err != nil { + return nil, err + } + + account := &accounts.Account{ + Balance: *balance, + Nonce: nonce.Uint64(), + CodeHash: codeHash, + Root: libcommon.Hash{}, + } + + return account, nil +} + +// ReadAccountStorage reads account storage from the SMT (not implemented for SMT) +func (s *SMT) ReadAccountStorage(address libcommon.Address, incarnation uint64, key *libcommon.Hash) ([]byte, error) { + value, err := s.getValue(0, address, key) + if err != nil { + return []byte{}, err + } + + return value, nil +} + +// ReadAccountCode reads account code from the SMT +func (s *SMT) ReadAccountCode(address libcommon.Address, incarnation uint64, codeHash libcommon.Hash) ([]byte, error) { + code, err := s.Db.GetCode(codeHash.Bytes()) + if err != nil { + return []byte{}, err + } + + 
return code, nil +} + +// ReadAccountCodeSize reads account code size from the SMT +func (s *SMT) ReadAccountCodeSize(address libcommon.Address, _ uint64, _ libcommon.Hash) (int, error) { + valueInBytes, err := s.getValue(utils.SC_LENGTH, address, nil) + if err != nil { + return 0, err + } + + sizeBig := big.NewInt(0).SetBytes(valueInBytes) + + if !sizeBig.IsInt64() { + err = errors.New("code size value is too large to fit into an int") + return 0, err + } + + sizeInt64 := sizeBig.Int64() + if sizeInt64 > int64(^uint(0)>>1) { + err = errors.New("code size value overflows int") + log.Error("failed to get account code size", "error", err) + return 0, err + } + + return int(sizeInt64), nil +} + +// ReadAccountIncarnation reads account incarnation from the SMT (not implemented for SMT) +func (s *SMT) ReadAccountIncarnation(_ libcommon.Address) (uint64, error) { + return 0, errors.New("ReadAccountIncarnation not implemented for SMT") +} + +// GetAccountBalance returns the balance of an account from the SMT +func (s *SMT) GetAccountBalance(address libcommon.Address) (*uint256.Int, error) { + valueInBytes, err := s.getValue(utils.KEY_BALANCE, address, nil) + if err != nil { + log.Error("failed to get balance", "error", err) + return nil, err + } + + balance := uint256.NewInt(0).SetBytes(valueInBytes) + + return balance, nil +} + +// GetAccountNonce returns the nonce of an account from the SMT +func (s *SMT) GetAccountNonce(address libcommon.Address) (*uint256.Int, error) { + valueInBytes, err := s.getValue(utils.KEY_NONCE, address, nil) + if err != nil { + log.Error("failed to get nonce", "error", err) + return nil, err + } + + nonce := uint256.NewInt(0).SetBytes(valueInBytes) + + return nonce, nil +} + +// GetAccountCodeHash returns the code hash of an account from the SMT +func (s *SMT) GetAccountCodeHash(address libcommon.Address) (libcommon.Hash, error) { + valueInBytes, err := s.getValue(utils.SC_CODE, address, nil) + if err != nil { + log.Error("failed to get code hash", "error", err) + return libcommon.Hash{}, err + } + + codeHash := libcommon.Hash{} + codeHash.SetBytes(valueInBytes) + + return codeHash, nil +} + +// getValue returns the value of a key from SMT by traversing the SMT +func (s *SMT) getValue(key int, address libcommon.Address, storageKey *libcommon.Hash) ([]byte, error) { + var kn utils.NodeKey + + if storageKey == nil { + kn = utils.Key(address.String(), key) + } else { + a := utils.ConvertHexToBigInt(address.String()) + add := utils.ScalarToArrayBig(a) + + kn = utils.KeyContractStorage(add, storageKey.String()) + } + + return s.getValueInBytes(kn) +} + +// getValueInBytes returns the value of a key from SMT in bytes by traversing the SMT +func (s *SMT) getValueInBytes(nodeKey utils.NodeKey) ([]byte, error) { + value := []byte{} + + keyPath := nodeKey.GetPath() + + keyPathBytes := make([]byte, len(keyPath)) + for i, k := range keyPath { + keyPathBytes[i] = byte(k) + } + + action := func(prefix []byte, _ utils.NodeKey, v utils.NodeValue12) (bool, error) { + if !bytes.HasPrefix(keyPathBytes, prefix) { + return false, nil + } + + if v.IsFinalNode() { + valHash := v.Get4to8() + v, err := s.Db.Get(*valHash) + if err != nil { + return false, err + } + vInBytes := utils.ArrayBigToScalar(utils.BigIntArrayFromNodeValue8(v.GetNodeValue8())).Bytes() + + value = vInBytes + return false, nil + } + + return true, nil + } + + root, err := s.Db.GetLastRoot() + if err != nil { + return nil, err + } + + err = s.Traverse(context.Background(), root, action) + if err != nil { + return nil, err 
+ } + + return value, nil +} diff --git a/smt/pkg/smt/witness.go b/smt/pkg/smt/witness.go index ce64d08107e..5fc7d64e336 100644 --- a/smt/pkg/smt/witness.go +++ b/smt/pkg/smt/witness.go @@ -2,12 +2,16 @@ package smt import ( "context" + "fmt" + "math/big" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/smt/pkg/utils" "github.com/ledgerwatch/erigon/turbo/trie" + "github.com/status-im/keycard-go/hexutils" ) +// BuildWitness creates a witness from the SMT func BuildWitness(s *SMT, rd trie.RetainDecider, ctx context.Context) (*trie.Witness, error) { operands := make([]trie.WitnessOperator, 0) @@ -33,7 +37,7 @@ func BuildWitness(s *SMT, rd trie.RetainDecider, ctx context.Context) (*trie.Wit This algorithm adds a little bit more nodes to the witness but it ensures that all requiring nodes are included. */ - retain := true + var retain bool prefixLen := len(prefix) if prefixLen > 0 { @@ -112,3 +116,166 @@ func BuildWitness(s *SMT, rd trie.RetainDecider, ctx context.Context) (*trie.Wit return trie.NewWitness(operands), err } + +// BuildSMTfromWitness builds SMT from witness +func BuildSMTfromWitness(w *trie.Witness) (*SMT, error) { + // using memdb + s := NewSMT(nil, false) + + balanceMap := make(map[string]*big.Int) + nonceMap := make(map[string]*big.Int) + contractMap := make(map[string]string) + storageMap := make(map[string]map[string]string) + + path := make([]int, 0) + + firstNode := true + NodeChildCountMap := make(map[string]uint32) + NodesBranchValueMap := make(map[string]uint32) + + type nodeHash struct { + path []int + hash libcommon.Hash + } + + nodeHashes := make([]nodeHash, 0) + + for i, operator := range w.Operators { + switch op := operator.(type) { + case *trie.OperatorSMTLeafValue: + valScaler := big.NewInt(0).SetBytes(op.Value) + addr := libcommon.BytesToAddress(op.Address) + + switch op.NodeType { + case utils.KEY_BALANCE: + balanceMap[addr.String()] = valScaler + + case utils.KEY_NONCE: + nonceMap[addr.String()] = valScaler + + case utils.SC_STORAGE: + if _, ok := storageMap[addr.String()]; !ok { + storageMap[addr.String()] = make(map[string]string) + } + + stKey := hexutils.BytesToHex(op.StorageKey) + if len(stKey) > 0 { + stKey = fmt.Sprintf("0x%s", stKey) + } + + storageMap[addr.String()][stKey] = valScaler.String() + } + + path = path[:len(path)-1] + NodeChildCountMap[intArrayToString(path)] += 1 + + for len(path) != 0 && NodeChildCountMap[intArrayToString(path)] == NodesBranchValueMap[intArrayToString(path)] { + path = path[:len(path)-1] + } + if NodeChildCountMap[intArrayToString(path)] < NodesBranchValueMap[intArrayToString(path)] { + path = append(path, 1) + } + + case *trie.OperatorCode: + addr := libcommon.BytesToAddress(w.Operators[i+1].(*trie.OperatorSMTLeafValue).Address) + + code := hexutils.BytesToHex(op.Code) + if len(code) > 0 { + if err := s.Db.AddCode(hexutils.HexToBytes(code)); err != nil { + return nil, err + } + code = fmt.Sprintf("0x%s", code) + } + + contractMap[addr.String()] = code + + case *trie.OperatorBranch: + if firstNode { + firstNode = false + } else { + NodeChildCountMap[intArrayToString(path[:len(path)-1])] += 1 + } + + switch op.Mask { + case 1: + NodesBranchValueMap[intArrayToString(path)] = 1 + path = append(path, 0) + case 2: + NodesBranchValueMap[intArrayToString(path)] = 1 + path = append(path, 1) + case 3: + NodesBranchValueMap[intArrayToString(path)] = 2 + path = append(path, 0) + } + + case *trie.OperatorHash: + pathCopy := make([]int, len(path)) + copy(pathCopy, path) + nodeHashes = 
append(nodeHashes, nodeHash{path: pathCopy, hash: op.Hash}) + + path = path[:len(path)-1] + NodeChildCountMap[intArrayToString(path)] += 1 + + for len(path) != 0 && NodeChildCountMap[intArrayToString(path)] == NodesBranchValueMap[intArrayToString(path)] { + path = path[:len(path)-1] + } + if NodeChildCountMap[intArrayToString(path)] < NodesBranchValueMap[intArrayToString(path)] { + path = append(path, 1) + } + + default: + // Unsupported operator type + return nil, fmt.Errorf("unsupported operator type: %T", op) + } + } + + for _, nodeHash := range nodeHashes { + _, err := s.InsertHashNode(nodeHash.path, nodeHash.hash.Big()) + if err != nil { + return nil, err + } + + _, err = s.Db.GetLastRoot() + if err != nil { + return nil, err + } + } + + for addr, balance := range balanceMap { + _, err := s.SetAccountBalance(addr, balance) + if err != nil { + return nil, err + } + } + + for addr, nonce := range nonceMap { + _, err := s.SetAccountNonce(addr, nonce) + if err != nil { + return nil, err + } + } + + for addr, code := range contractMap { + err := s.SetContractBytecode(addr, code) + if err != nil { + return nil, err + } + } + + for addr, storage := range storageMap { + _, err := s.SetContractStorage(addr, storage, nil) + if err != nil { + fmt.Println("error : unable to set contract storage", err) + } + } + + return s, nil +} + +func intArrayToString(a []int) string { + s := "" + for _, v := range a { + s += fmt.Sprintf("%d", v) + } + return s +} diff --git a/smt/pkg/smt/witness_test.go b/smt/pkg/smt/witness_test.go index ca0dcdf2f91..b055f375e78 100644 --- a/smt/pkg/smt/witness_test.go +++ b/smt/pkg/smt/witness_test.go @@ -16,9 +16,12 @@ import ( "github.com/ledgerwatch/erigon/smt/pkg/smt" "github.com/ledgerwatch/erigon/smt/pkg/utils" "github.com/ledgerwatch/erigon/turbo/trie" + "github.com/stretchr/testify/require" ) func prepareSMT(t *testing.T) (*smt.SMT, *trie.RetainList) { + t.Helper() + contract := libcommon.HexToAddress("0x71dd1027069078091B3ca48093B00E4735B20624") balance := uint256.NewInt(1000000000) sKey := libcommon.HexToHash("0x5") @@ -43,46 +46,46 @@ func prepareSMT(t *testing.T) (*smt.SMT, *trie.RetainList) { intraBlockState.AddBalance(contract, balance) intraBlockState.SetState(contract, &sKey, *sVal) - if err := intraBlockState.FinalizeTx(&chain.Rules{}, tds.TrieStateWriter()); err != nil { - t.Errorf("error finalising 1st tx: %v", err) - } - if err := intraBlockState.CommitBlock(&chain.Rules{}, w); err != nil { - t.Errorf("error committing block: %v", err) - } + err := intraBlockState.FinalizeTx(&chain.Rules{}, tds.TrieStateWriter()) + require.NoError(t, err, "error finalising 1st tx") - rl, err := tds.ResolveSMTRetainList() + err = intraBlockState.CommitBlock(&chain.Rules{}, w) + require.NoError(t, err, "error committing block") - if err != nil { - t.Errorf("error resolving state trie: %v", err) - } + rl, err := tds.ResolveSMTRetainList() + require.NoError(t, err, "error resolving state trie") memdb := db.NewMemDb() smtTrie := smt.NewSMT(memdb, false) - smtTrie.SetAccountState(contract.String(), balance.ToBig(), uint256.NewInt(1).ToBig()) - smtTrie.SetContractBytecode(contract.String(), hex.EncodeToString(code)) - err = memdb.AddCode(code) + _, err = smtTrie.SetAccountState(contract.String(), balance.ToBig(), uint256.NewInt(1).ToBig()) + require.NoError(t, err) - if err != nil { - t.Errorf("error adding code to memdb: %v", err) - } + err = smtTrie.SetContractBytecode(contract.String(), hex.EncodeToString(code)) + require.NoError(t, err) + + err = memdb.AddCode(code) + 
require.NoError(t, err, "error adding code to memdb") storage := make(map[string]string, 0) for i := 0; i < 100; i++ { - k := libcommon.HexToHash(fmt.Sprintf("0x%d", i)) - storage[k.String()] = k.String() + k := libcommon.HexToHash(fmt.Sprintf("0x%d", i)).String() + storage[k] = k } storage[sKey.String()] = sVal.String() - smtTrie.SetContractStorage(contract.String(), storage, nil) + _, err = smtTrie.SetContractStorage(contract.String(), storage, nil) + require.NoError(t, err) return smtTrie, rl } func findNode(t *testing.T, w *trie.Witness, addr libcommon.Address, storageKey libcommon.Hash, nodeType int) []byte { + t.Helper() + for _, operator := range w.Operators { switch op := operator.(type) { case *trie.OperatorSMTLeafValue: @@ -109,23 +112,19 @@ func TestSMTWitnessRetainList(t *testing.T) { sVal := uint256.NewInt(0xdeadbeef) witness, err := smt.BuildWitness(smtTrie, rl, context.Background()) - - if err != nil { - t.Errorf("error building witness: %v", err) - } + require.NoError(t, err, "error building witness") foundCode := findNode(t, witness, contract, libcommon.Hash{}, utils.SC_CODE) foundBalance := findNode(t, witness, contract, libcommon.Hash{}, utils.KEY_BALANCE) foundNonce := findNode(t, witness, contract, libcommon.Hash{}, utils.KEY_NONCE) foundStorage := findNode(t, witness, contract, sKey, utils.SC_STORAGE) - if foundCode == nil || foundBalance == nil || foundNonce == nil || foundStorage == nil { - t.Errorf("witness does not contain all expected operators") - } + require.NotNil(t, foundCode) + require.NotNil(t, foundBalance) + require.NotNil(t, foundNonce) + require.NotNil(t, foundStorage) - if !bytes.Equal(foundStorage, sVal.Bytes()) { - t.Errorf("witness contains unexpected storage value") - } + require.Equal(t, foundStorage, sVal.Bytes(), "witness contains unexpected storage value") } func TestSMTWitnessRetainListEmptyVal(t *testing.T) { @@ -136,25 +135,106 @@ func TestSMTWitnessRetainListEmptyVal(t *testing.T) { sKey := libcommon.HexToHash("0x5") // Set nonce to 0 - smtTrie.SetAccountState(contract.String(), balance.ToBig(), uint256.NewInt(0).ToBig()) + _, err := smtTrie.SetAccountState(contract.String(), balance.ToBig(), uint256.NewInt(0).ToBig()) + require.NoError(t, err) witness, err := smt.BuildWitness(smtTrie, rl, context.Background()) - - if err != nil { - t.Errorf("error building witness: %v", err) - } + require.NoError(t, err, "error building witness") foundCode := findNode(t, witness, contract, libcommon.Hash{}, utils.SC_CODE) foundBalance := findNode(t, witness, contract, libcommon.Hash{}, utils.KEY_BALANCE) foundNonce := findNode(t, witness, contract, libcommon.Hash{}, utils.KEY_NONCE) foundStorage := findNode(t, witness, contract, sKey, utils.SC_STORAGE) - if foundCode == nil || foundBalance == nil || foundStorage == nil { - t.Errorf("witness does not contain all expected operators") - } + // Code, balance and storage should be present in the witness + require.NotNil(t, foundCode) + require.NotNil(t, foundBalance) + require.NotNil(t, foundStorage) // Nonce should not be in witness - if foundNonce != nil { - t.Errorf("witness contains unexpected operator") - } + require.Nil(t, foundNonce, "witness contains unexpected operator") +} + +// TestWitnessToSMT tests that the SMT built from a witness matches the original SMT +func TestWitnessToSMT(t *testing.T) { + smtTrie, rl := prepareSMT(t) + + witness, err := smt.BuildWitness(smtTrie, rl, context.Background()) + require.NoError(t, err, "error building witness") + + newSMT, err := smt.BuildSMTfromWitness(witness) 
+ require.NoError(t, err, "error building SMT from witness") + + root, err := newSMT.Db.GetLastRoot() + require.NoError(t, err, "error getting last root from db") + + // newSMT.Traverse(context.Background(), root, func(prefix []byte, k utils.NodeKey, v utils.NodeValue12) (bool, error) { + // fmt.Printf("[After] path: %v, hash: %x\n", prefix, libcommon.BigToHash(k.ToBigInt())) + // return true, nil + // }) + + expectedRoot, err := smtTrie.Db.GetLastRoot() + require.NoError(t, err, "error getting last root") + + // assert that the roots are the same + require.Equal(t, expectedRoot, root, "SMT root mismatch") +} + +// TestWitnessToSMTStateReader tests that the SMT built from a witness matches the state +func TestWitnessToSMTStateReader(t *testing.T) { + smtTrie, rl := prepareSMT(t) + + sKey := libcommon.HexToHash("0x5") + + expectedRoot, err := smtTrie.Db.GetLastRoot() + require.NoError(t, err, "error getting last root") + + witness, err := smt.BuildWitness(smtTrie, rl, context.Background()) + require.NoError(t, err, "error building witness") + + newSMT, err := smt.BuildSMTfromWitness(witness) + require.NoError(t, err, "error building SMT from witness") + + root, err := newSMT.Db.GetLastRoot() + require.NoError(t, err, "error getting the last root from db") + + require.Equal(t, expectedRoot, root, "SMT root mismatch") + + contract := libcommon.HexToAddress("0x71dd1027069078091B3ca48093B00E4735B20624") + + expectedAcc, err := smtTrie.ReadAccountData(contract) + require.NoError(t, err) + + newAcc, err := newSMT.ReadAccountData(contract) + require.NoError(t, err) + + expectedAccCode, err := smtTrie.ReadAccountCode(contract, 0, expectedAcc.CodeHash) + require.NoError(t, err) + + newAccCode, err := newSMT.ReadAccountCode(contract, 0, newAcc.CodeHash) + require.NoError(t, err) + + expectedAccCodeSize, err := smtTrie.ReadAccountCodeSize(contract, 0, expectedAcc.CodeHash) + require.NoError(t, err) + + newAccCodeSize, err := newSMT.ReadAccountCodeSize(contract, 0, newAcc.CodeHash) + require.NoError(t, err) + + expectedStorageValue, err := smtTrie.ReadAccountStorage(contract, 0, &sKey) + require.NoError(t, err) + + newStorageValue, err := newSMT.ReadAccountStorage(contract, 0, &sKey) + require.NoError(t, err) + + // assert that the account data is the same + require.Equal(t, expectedAcc, newAcc) + + // assert that account code is the same + require.Equal(t, expectedAccCode, newAccCode) + + // assert that the account code size is the same + require.Equal(t, expectedAccCodeSize, newAccCodeSize) + + // assert that the storage value is the same + require.Equal(t, expectedStorageValue, newStorageValue) } From 0715bc50712206a4542cfadc62a1c53f9fc5dd3e Mon Sep 17 00:00:00 2001 From: Alonso Rodriguez Date: Tue, 5 Nov 2024 10:55:24 +0100 Subject: [PATCH 09/88] Fix query body in JSONRPCCall (#1410) --- zkevm/jsonrpc/client/client.go | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/zkevm/jsonrpc/client/client.go b/zkevm/jsonrpc/client/client.go index 950731e947f..ddadae7575e 100644 --- a/zkevm/jsonrpc/client/client.go +++ b/zkevm/jsonrpc/client/client.go @@ -37,9 +37,13 @@ func (e *HTTPError) Error() string { func JSONRPCCall(url, method string, parameters ...interface{}) (types.Response, error) { const jsonRPCVersion = "2.0" - params, err := json.Marshal(parameters) - if err != nil { - return types.Response{}, err + params := []byte{} + if len(parameters) != 0 { + var err error + params, err = json.Marshal(parameters) + if err != nil { + return types.Response{}, err + } } 
req := types.Request{ @@ -96,9 +100,13 @@ func JSONRPCBatchCall(url string, methods []string, parameterGroups ...[]interfa batchRequest := make([]types.Request, 0, len(methods)) for i, method := range methods { - params, err := json.Marshal(parameterGroups[i]) - if err != nil { - return nil, err + params := []byte{} + if len(parameterGroups[i]) != 0 { + var err error + params, err = json.Marshal(parameterGroups[i]) + if err != nil { + return nil, err + } } req := types.Request{ From 1dcac1bf185dba55a9014de314f04975ccfd566c Mon Sep 17 00:00:00 2001 From: Scott Fairclough <70711990+hexoscott@users.noreply.github.com> Date: Tue, 5 Nov 2024 10:28:50 +0000 Subject: [PATCH 10/88] quiet logging for witness generation unwinds (#1407) * quiet logging for witness generation unwinds * fix - integration --------- Co-authored-by: Max Revitt --- cmd/integration/commands/stages.go | 2 +- cmd/integration/commands/state_stages.go | 2 +- eth/stagedsync/default_stages.go | 8 ++++---- eth/stagedsync/stage_hashstate.go | 18 ++++++++++-------- eth/stagedsync/stage_hashstate_test.go | 4 ++-- turbo/jsonrpc/eth_call.go | 2 +- zk/stages/stages.go | 4 ++-- zk/witness/witness.go | 2 +- 8 files changed, 22 insertions(+), 20 deletions(-) diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index 8779d440348..a0536042f92 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -1213,7 +1213,7 @@ func stageHashState(db kv.RwDB, ctx context.Context, logger log.Logger) error { cfg := stagedsync.StageHashStateCfg(db, dirs, historyV3, agg) if unwind > 0 { u := sync.NewUnwindState(stages.HashState, s.BlockNumber-unwind, s.BlockNumber) - err = stagedsync.UnwindHashStateStage(u, s, tx, cfg, ctx, logger) + err = stagedsync.UnwindHashStateStage(u, s, tx, cfg, ctx, logger, false) if err != nil { return err } diff --git a/cmd/integration/commands/state_stages.go b/cmd/integration/commands/state_stages.go index 998cc56e6b2..6aaf4c9933f 100644 --- a/cmd/integration/commands/state_stages.go +++ b/cmd/integration/commands/state_stages.go @@ -473,7 +473,7 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) e to := execStage.BlockNumber - unwind _ = sync.SetCurrentStage(stages.HashState) u := &stagedsync.UnwindState{ID: stages.HashState, UnwindPoint: to} - if err = stagedsync.UnwindHashStateStage(u, stage(sync, tx, nil, stages.HashState), tx, stagedsync.StageHashStateCfg(db, dirs, historyV3, agg), ctx, logger); err != nil { + if err = stagedsync.UnwindHashStateStage(u, stage(sync, tx, nil, stages.HashState), tx, stagedsync.StageHashStateCfg(db, dirs, historyV3, agg), ctx, logger, false); err != nil { return err } _ = sync.SetCurrentStage(stages.IntermediateHashes) diff --git a/eth/stagedsync/default_stages.go b/eth/stagedsync/default_stages.go index 6473732be4e..458d1236dfa 100644 --- a/eth/stagedsync/default_stages.go +++ b/eth/stagedsync/default_stages.go @@ -138,7 +138,7 @@ func DefaultStages(ctx context.Context, return SpawnHashStateStage(s, txc.Tx, hashState, ctx, logger) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { - return UnwindHashStateStage(u, s, txc.Tx, hashState, ctx, logger) + return UnwindHashStateStage(u, s, txc.Tx, hashState, ctx, logger, false) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneHashStateStage(p, tx, hashState, ctx) @@ -318,7 +318,7 @@ func PipelineStages(ctx context.Context, snapshots 
SnapshotsCfg, blockHashCfg Bl return SpawnHashStateStage(s, txc.Tx, hashState, ctx, logger) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { - return UnwindHashStateStage(u, s, txc.Tx, hashState, ctx, logger) + return UnwindHashStateStage(u, s, txc.Tx, hashState, ctx, logger, false) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneHashStateStage(p, tx, hashState, ctx) @@ -527,7 +527,7 @@ func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers return SpawnHashStateStage(s, txc.Tx, hashState, ctx, logger) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { - return UnwindHashStateStage(u, s, txc.Tx, hashState, ctx, logger) + return UnwindHashStateStage(u, s, txc.Tx, hashState, ctx, logger, false) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error { return PruneHashStateStage(p, tx, hashState, ctx) @@ -701,7 +701,7 @@ func StateStages(ctx context.Context, headers HeadersCfg, bodies BodiesCfg, bloc return SpawnHashStateStage(s, txc.Tx, hashState, ctx, logger) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { - return UnwindHashStateStage(u, s, txc.Tx, hashState, ctx, logger) + return UnwindHashStateStage(u, s, txc.Tx, hashState, ctx, logger, false) }, }, { diff --git a/eth/stagedsync/stage_hashstate.go b/eth/stagedsync/stage_hashstate.go index 8cfc58bc8b0..fc733015f0f 100644 --- a/eth/stagedsync/stage_hashstate.go +++ b/eth/stagedsync/stage_hashstate.go @@ -102,7 +102,7 @@ func SpawnHashStateStage(s *StageState, tx kv.RwTx, cfg HashStateCfg, ctx contex return nil } -func UnwindHashStateStage(u *UnwindState, s *StageState, tx kv.RwTx, cfg HashStateCfg, ctx context.Context, logger log.Logger) (err error) { +func UnwindHashStateStage(u *UnwindState, s *StageState, tx kv.RwTx, cfg HashStateCfg, ctx context.Context, logger log.Logger, quiet bool) (err error) { useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) @@ -113,7 +113,7 @@ func UnwindHashStateStage(u *UnwindState, s *StageState, tx kv.RwTx, cfg HashSta } logPrefix := u.LogPrefix() - if err = unwindHashStateStageImpl(logPrefix, u, s, tx, cfg, ctx, logger); err != nil { + if err = unwindHashStateStageImpl(logPrefix, u, s, tx, cfg, ctx, logger, quiet); err != nil { return err } if err = u.Done(tx); err != nil { @@ -127,7 +127,7 @@ func UnwindHashStateStage(u *UnwindState, s *StageState, tx kv.RwTx, cfg HashSta return nil } -func unwindHashStateStageImpl(logPrefix string, u *UnwindState, s *StageState, tx kv.RwTx, cfg HashStateCfg, ctx context.Context, logger log.Logger) error { +func unwindHashStateStageImpl(logPrefix string, u *UnwindState, s *StageState, tx kv.RwTx, cfg HashStateCfg, ctx context.Context, logger log.Logger, quiet bool) error { // Currently it does not require unwinding because it does not create any Intermediate Hash records // and recomputes the state root from scratch prom := NewPromoter(tx, cfg.dirs, ctx, logger) @@ -143,13 +143,13 @@ func unwindHashStateStageImpl(logPrefix string, u *UnwindState, s *StageState, t } return nil } - if err := prom.Unwind(logPrefix, s, u, false /* storage */, true /* codes */); err != nil { + if err := prom.Unwind(logPrefix, s, u, false /* storage */, true /* codes */, quiet); err != nil { return err } - if err := prom.Unwind(logPrefix, s, u, false /* storage */, false /* codes 
*/); err != nil { + if err := prom.Unwind(logPrefix, s, u, false /* storage */, false /* codes */, quiet); err != nil { return err } - if err := prom.Unwind(logPrefix, s, u, true /* storage */, false /* codes */); err != nil { + if err := prom.Unwind(logPrefix, s, u, true /* storage */, false /* codes */, quiet); err != nil { return err } return nil @@ -844,7 +844,7 @@ func (p *Promoter) UnwindOnHistoryV3(logPrefix string, agg *state.Aggregator, un return collector.Load(p.tx, kv.HashedAccounts, etl.IdentityLoadFunc, etl.TransformArgs{Quit: p.ctx.Done()}) } -func (p *Promoter) Unwind(logPrefix string, s *StageState, u *UnwindState, storage bool, codes bool) error { +func (p *Promoter) Unwind(logPrefix string, s *StageState, u *UnwindState, storage bool, codes bool, quiet bool) error { var changeSetBucket string if storage { changeSetBucket = kv.StorageChangeSet @@ -854,7 +854,9 @@ func (p *Promoter) Unwind(logPrefix string, s *StageState, u *UnwindState, stora from := s.BlockNumber to := u.UnwindPoint - p.logger.Info(fmt.Sprintf("[%s] Unwinding started", logPrefix), "from", from, "to", to, "storage", storage, "codes", codes) + if !quiet { + p.logger.Info(fmt.Sprintf("[%s] Unwinding started", logPrefix), "from", from, "to", to, "storage", storage, "codes", codes) + } startkey := hexutility.EncodeTs(to + 1) diff --git a/eth/stagedsync/stage_hashstate_test.go b/eth/stagedsync/stage_hashstate_test.go index 878ed95b2e4..3d3ab508228 100644 --- a/eth/stagedsync/stage_hashstate_test.go +++ b/eth/stagedsync/stage_hashstate_test.go @@ -106,7 +106,7 @@ func TestUnwindHashed(t *testing.T) { } u := &UnwindState{UnwindPoint: 50} s := &StageState{BlockNumber: 100} - err = unwindHashStateStageImpl("logPrefix", u, s, tx2, StageHashStateCfg(db2, dirs, historyV3, nil), context.Background(), logger) + err = unwindHashStateStageImpl("logPrefix", u, s, tx2, StageHashStateCfg(db2, dirs, historyV3, nil), context.Background(), logger, false) if err != nil { t.Errorf("error while unwind state: %v", err) } @@ -227,7 +227,7 @@ func TestUnwindHashStateShutdown(t *testing.T) { u := &UnwindState{UnwindPoint: 5} s := &StageState{BlockNumber: 10} - if err = unwindHashStateStageImpl("logPrefix", u, s, tx, cfg, ctx, logger); !errors.Is(err, tc.errExp) { + if err = unwindHashStateStageImpl("logPrefix", u, s, tx, cfg, ctx, logger, false); !errors.Is(err, tc.errExp) { t.Errorf("error does not match expected error while shutdown unwindHashStateStageImpl, got: %v, expected: %v", err, tc.errExp) } diff --git a/turbo/jsonrpc/eth_call.go b/turbo/jsonrpc/eth_call.go index 15c80a69747..690bb746fc3 100644 --- a/turbo/jsonrpc/eth_call.go +++ b/turbo/jsonrpc/eth_call.go @@ -362,7 +362,7 @@ func (api *APIImpl) GetProof(ctx context.Context, address libcommon.Address, sto stageState := &stagedsync.StageState{BlockNumber: latestBlock} hashStageCfg := stagedsync.StageHashStateCfg(nil, api.dirs, api.historyV3(batch), api._agg) - if err := stagedsync.UnwindHashStateStage(unwindState, stageState, batch, hashStageCfg, ctx, api.logger); err != nil { + if err := stagedsync.UnwindHashStateStage(unwindState, stageState, batch, hashStageCfg, ctx, api.logger, true); err != nil { return nil, err } diff --git a/zk/stages/stages.go b/zk/stages/stages.go index e4921de764f..f1335b12164 100644 --- a/zk/stages/stages.go +++ b/zk/stages/stages.go @@ -125,7 +125,7 @@ func SequencerZkStages( return stages.SpawnHashStateStage(s, txc.Tx, hashState, ctx, logger) }, Unwind: func(firstCycle bool, u *stages.UnwindState, s *stages.StageState, txc wrap.TxContainer, 
logger log.Logger) error { - return stages.UnwindHashStateStage(u, s, txc.Tx, hashState, ctx, logger) + return stages.UnwindHashStateStage(u, s, txc.Tx, hashState, ctx, logger, false) }, Prune: func(firstCycle bool, p *stages.PruneState, tx kv.RwTx, logger log.Logger) error { return stages.PruneHashStateStage(p, tx, hashState, ctx) @@ -328,7 +328,7 @@ func DefaultZkStages( return stages.SpawnHashStateStage(s, txc.Tx, hashState, ctx, logger) }, Unwind: func(firstCycle bool, u *stages.UnwindState, s *stages.StageState, txc wrap.TxContainer, logger log.Logger) error { - return stages.UnwindHashStateStage(u, s, txc.Tx, hashState, ctx, logger) + return stages.UnwindHashStateStage(u, s, txc.Tx, hashState, ctx, logger, false) }, Prune: func(firstCycle bool, p *stages.PruneState, tx kv.RwTx, logger log.Logger) error { return stages.PruneHashStateStage(p, tx, hashState, ctx) diff --git a/zk/witness/witness.go b/zk/witness/witness.go index 4cae4233ee8..2350fd250fc 100644 --- a/zk/witness/witness.go +++ b/zk/witness/witness.go @@ -223,7 +223,7 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, batchNum uint stageState := &stagedsync.StageState{BlockNumber: latestBlock} hashStageCfg := stagedsync.StageHashStateCfg(nil, g.dirs, g.historyV3, g.agg) - if err := stagedsync.UnwindHashStateStage(unwindState, stageState, batch, hashStageCfg, ctx, log.New()); err != nil { + if err := stagedsync.UnwindHashStateStage(unwindState, stageState, batch, hashStageCfg, ctx, log.New(), true); err != nil { return nil, fmt.Errorf("unwind hash state: %w", err) } From b6cac453c8f60be26e1a85561fd315c88f7f6870 Mon Sep 17 00:00:00 2001 From: Thiago Coimbra Lemos Date: Tue, 5 Nov 2024 07:44:53 -0300 Subject: [PATCH 11/88] add unit test for stage l1_info_tree (#1408) --- .gitignore | 5 +- zk/l1infotree/updater.go | 23 ++-- zk/stages/stage_l1_info_tree.go | 1 + zk/stages/stage_l1_info_tree_test.go | 151 +++++++++++++++++++++++++++ 4 files changed, 168 insertions(+), 12 deletions(-) create mode 100644 zk/stages/stage_l1_info_tree_test.go diff --git a/.gitignore b/.gitignore index 8b2987eaec7..5c4d0b47c8e 100644 --- a/.gitignore +++ b/.gitignore @@ -101,7 +101,7 @@ jwt.hex .tool-versions -*__debug_bin* +**/*__debug_bin* yarn.lock node_modules @@ -110,3 +110,6 @@ node_modules /config.yml vendor + +**/cover.out +**/cover.html \ No newline at end of file diff --git a/zk/l1infotree/updater.go b/zk/l1infotree/updater.go index 4f51c861312..ecfebc7c76d 100644 --- a/zk/l1infotree/updater.go +++ b/zk/l1infotree/updater.go @@ -1,20 +1,21 @@ package l1infotree import ( + "errors" + "fmt" + "sort" + "time" + + "github.com/iden3/go-iden3-crypto/keccak256" + "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/ethconfig" - "github.com/ledgerwatch/erigon/zk/hermez_db" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - zkTypes "github.com/ledgerwatch/erigon/zk/types" - "github.com/ledgerwatch/erigon/core/types" - "time" "github.com/ledgerwatch/erigon/zk/contracts" + "github.com/ledgerwatch/erigon/zk/hermez_db" + zkTypes "github.com/ledgerwatch/erigon/zk/types" "github.com/ledgerwatch/log/v3" - "fmt" - "sort" - "github.com/ledgerwatch/erigon-lib/common" - "github.com/iden3/go-iden3-crypto/keccak256" - "errors" ) type Syncer interface { @@ -137,7 +138,7 @@ LOOP: defer ticker.Stop() processed := 0 - tree, err := initialiseL1InfoTree(hermezDb) + tree, err := InitialiseL1InfoTree(hermezDb) if err != nil { return nil, err 
} @@ -238,7 +239,7 @@ func chunkLogs(slice []types.Log, chunkSize int) [][]types.Log { return chunks } -func initialiseL1InfoTree(hermezDb *hermez_db.HermezDb) (*L1InfoTree, error) { +func InitialiseL1InfoTree(hermezDb *hermez_db.HermezDb) (*L1InfoTree, error) { leaves, err := hermezDb.GetAllL1InfoTreeLeaves() if err != nil { return nil, err diff --git a/zk/stages/stage_l1_info_tree.go b/zk/stages/stage_l1_info_tree.go index 19c2202ad35..7547d240230 100644 --- a/zk/stages/stage_l1_info_tree.go +++ b/zk/stages/stage_l1_info_tree.go @@ -3,6 +3,7 @@ package stages import ( "context" "fmt" + "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync" diff --git a/zk/stages/stage_l1_info_tree_test.go b/zk/stages/stage_l1_info_tree_test.go new file mode 100644 index 00000000000..c2e3a93d511 --- /dev/null +++ b/zk/stages/stage_l1_info_tree_test.go @@ -0,0 +1,151 @@ +package stages + +import ( + "context" + "math/big" + "testing" + "time" + + "github.com/iden3/go-iden3-crypto/keccak256" + ethereum "github.com/ledgerwatch/erigon" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon/commands/mocks" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/stagedsync" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/smt/pkg/db" + "github.com/ledgerwatch/erigon/zk/contracts" + "github.com/ledgerwatch/erigon/zk/hermez_db" + "github.com/ledgerwatch/erigon/zk/l1infotree" + "github.com/ledgerwatch/erigon/zk/syncer" + "github.com/ledgerwatch/log/v3" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +func TestSpawnL1InfoTreeStage(t *testing.T) { + // arrange + ctx, db1 := context.Background(), memdb.NewTestDB(t) + tx := memdb.BeginRw(t, db1) + err := hermez_db.CreateHermezBuckets(tx) + require.NoError(t, err) + err = db.CreateEriDbBuckets(tx) + require.NoError(t, err) + + hDB := hermez_db.NewHermezDb(tx) + err = hDB.WriteBlockBatch(0, 0) + require.NoError(t, err) + err = stages.SaveStageProgress(tx, stages.L1InfoTree, 20) + require.NoError(t, err) + + s := &stagedsync.StageState{ID: stages.L1InfoTree, BlockNumber: 0} + u := &stagedsync.Sync{} + + // mocks + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + EthermanMock := mocks.NewMockIEtherman(mockCtrl) + + l1ContractAddresses := []common.Address{ + common.HexToAddress("0x1"), + common.HexToAddress("0x2"), + common.HexToAddress("0x3"), + } + l1ContractTopics := [][]common.Hash{ + []common.Hash{common.HexToHash("0x1")}, + []common.Hash{common.HexToHash("0x2")}, + []common.Hash{common.HexToHash("0x3")}, + } + + latestBlockParentHash := common.HexToHash("0x123456789") + latestBlockTime := uint64(time.Now().Unix()) + latestBlockNumber := big.NewInt(21) + latestBlockHeader := &types.Header{ParentHash: latestBlockParentHash, Number: latestBlockNumber, Time: latestBlockTime} + latestBlock := types.NewBlockWithHeader(latestBlockHeader) + + EthermanMock.EXPECT().HeaderByNumber(gomock.Any(), latestBlockNumber).Return(latestBlockHeader, nil).AnyTimes() + EthermanMock.EXPECT().BlockByNumber(gomock.Any(), nil).Return(latestBlock, nil).AnyTimes() + filterQuery := ethereum.FilterQuery{ + FromBlock: latestBlockNumber, + ToBlock: latestBlockNumber, + Addresses: l1ContractAddresses, + Topics: l1ContractTopics, + } + mainnetExitRoot := 
common.HexToHash("0x111") + rollupExitRoot := common.HexToHash("0x222") + + l1InfoTreeLog := types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.UpdateL1InfoTreeTopic, mainnetExitRoot, rollupExitRoot}, + } + filteredLogs := []types.Log{l1InfoTreeLog} + EthermanMock.EXPECT().FilterLogs(gomock.Any(), filterQuery).Return(filteredLogs, nil).AnyTimes() + + l1Syncer := syncer.NewL1Syncer(ctx, []syncer.IEtherman{EthermanMock}, l1ContractAddresses, l1ContractTopics, 10, 0, "latest") + updater := l1infotree.NewUpdater(ðconfig.Zk{}, l1Syncer) + cfg := StageL1InfoTreeCfg(db1, ðconfig.Zk{}, updater) + + // act + err = SpawnL1InfoTreeStage(s, u, tx, cfg, ctx, log.New()) + require.NoError(t, err) + + // assert + // check tree + tree, err := l1infotree.InitialiseL1InfoTree(hDB) + require.NoError(t, err) + + combined := append(mainnetExitRoot.Bytes(), rollupExitRoot.Bytes()...) + gerBytes := keccak256.Hash(combined) + ger := common.BytesToHash(gerBytes) + leafBytes := l1infotree.HashLeafData(ger, latestBlockParentHash, latestBlockTime) + + assert.True(t, tree.LeafExists(leafBytes)) + + // check WriteL1InfoTreeLeaf + leaves, err := hDB.GetAllL1InfoTreeLeaves() + require.NoError(t, err) + + leafHash := common.BytesToHash(leafBytes[:]) + assert.Len(t, leaves, 1) + assert.Equal(t, leafHash.String(), leaves[0].String()) + + // check WriteL1InfoTreeUpdate + l1InfoTreeUpdate, err := hDB.GetL1InfoTreeUpdate(0) + require.NoError(t, err) + + assert.Equal(t, uint64(0), l1InfoTreeUpdate.Index) + assert.Equal(t, ger, l1InfoTreeUpdate.GER) + assert.Equal(t, mainnetExitRoot, l1InfoTreeUpdate.MainnetExitRoot) + assert.Equal(t, rollupExitRoot, l1InfoTreeUpdate.RollupExitRoot) + assert.Equal(t, latestBlockNumber.Uint64(), l1InfoTreeUpdate.BlockNumber) + assert.Equal(t, latestBlockTime, l1InfoTreeUpdate.Timestamp) + assert.Equal(t, latestBlockParentHash, l1InfoTreeUpdate.ParentHash) + + //check WriteL1InfoTreeUpdateToGer + l1InfoTreeUpdateToGer, err := hDB.GetL1InfoTreeUpdateByGer(ger) + require.NoError(t, err) + + assert.Equal(t, uint64(0), l1InfoTreeUpdateToGer.Index) + assert.Equal(t, ger, l1InfoTreeUpdateToGer.GER) + assert.Equal(t, mainnetExitRoot, l1InfoTreeUpdateToGer.MainnetExitRoot) + assert.Equal(t, rollupExitRoot, l1InfoTreeUpdateToGer.RollupExitRoot) + assert.Equal(t, latestBlockNumber.Uint64(), l1InfoTreeUpdateToGer.BlockNumber) + assert.Equal(t, latestBlockTime, l1InfoTreeUpdateToGer.Timestamp) + assert.Equal(t, latestBlockParentHash, l1InfoTreeUpdateToGer.ParentHash) + + // check WriteL1InfoTreeRoot + root, _, _ := tree.GetCurrentRootCountAndSiblings() + index, found, err := hDB.GetL1InfoTreeIndexByRoot(root) + assert.NoError(t, err) + assert.Equal(t, uint64(0), index) + assert.True(t, found) + + // check SaveStageProgress + progress, err := stages.GetStageProgress(tx, stages.L1InfoTree) + require.NoError(t, err) + assert.Equal(t, latestBlockNumber.Uint64()+1, progress) +} From 642df8f3853684dd3d7d1a881c4f5eb521d76ef6 Mon Sep 17 00:00:00 2001 From: Scott Fairclough <70711990+hexoscott@users.noreply.github.com> Date: Tue, 5 Nov 2024 14:42:06 +0000 Subject: [PATCH 12/88] limit modexp call to 8192 bit inputs (#1391) * limit modexp call to 8192 bit inputs * logic change to mod exp revert rules * mod len 0 logic in modExp * remove comment from modexp * more mod exp zk tweaks --- core/vm/contracts_zkevm.go | 65 +++++++++++++++++++++++++------ core/vm/contracts_zkevm_test.go | 69 +++++++++++++++++++++++++++++++++ 2 files changed, 123 
insertions(+), 11 deletions(-) create mode 100644 core/vm/contracts_zkevm_test.go diff --git a/core/vm/contracts_zkevm.go b/core/vm/contracts_zkevm.go index 10047230e26..037f2bc7c42 100644 --- a/core/vm/contracts_zkevm.go +++ b/core/vm/contracts_zkevm.go @@ -299,6 +299,34 @@ func (c *bigModExp_zkevm) RequiredGas(input []byte) uint64 { } else { input = input[:0] } + + // Retrieve the operands and execute the exponentiation + var ( + base = new(big.Int).SetBytes(getData(input, 0, baseLen.Uint64())) + exp = new(big.Int).SetBytes(getData(input, baseLen.Uint64(), expLen.Uint64())) + mod = new(big.Int).SetBytes(getData(input, baseLen.Uint64()+expLen.Uint64(), modLen.Uint64())) + baseBitLen = base.BitLen() + expBitLen = exp.BitLen() + modBitLen = mod.BitLen() + ) + + // zk special cases + // - if mod = 0 we consume gas as normal + // - if base is 0 and mod < 8192 we consume gas as normal + // - if neither of the above are true we check for reverts and return 0 gas fee + + if modBitLen == 0 { + // consume as normal - will return 0 + } else if baseBitLen == 0 { + if modBitLen > 8192 { + return 0 + } else { + // consume as normal - will return 0 + } + } else if baseBitLen > 8192 || expBitLen > 8192 || modBitLen > 8192 { + return 0 + } + // Retrieve the head 32 bytes of exp for the adjusted exponent length var expHead *big.Int if big.NewInt(int64(len(input))).Cmp(baseLen) <= 0 { @@ -373,21 +401,36 @@ func (c *bigModExp_zkevm) Run(input []byte) ([]byte, error) { } else { input = input[:0] } - // Handle a special case when both the base and mod length is zero - if baseLen == 0 && modLen == 0 { - return []byte{}, nil - } + // Retrieve the operands and execute the exponentiation var ( - base = new(big.Int).SetBytes(getData(input, 0, baseLen)) - exp = new(big.Int).SetBytes(getData(input, baseLen, expLen)) - mod = new(big.Int).SetBytes(getData(input, baseLen+expLen, modLen)) - v []byte + base = new(big.Int).SetBytes(getData(input, 0, baseLen)) + exp = new(big.Int).SetBytes(getData(input, baseLen, expLen)) + mod = new(big.Int).SetBytes(getData(input, baseLen+expLen, modLen)) + v []byte + baseBitLen = base.BitLen() + expBitLen = exp.BitLen() + modBitLen = mod.BitLen() ) + + if modBitLen == 0 { + return []byte{}, nil + } + + if baseBitLen == 0 { + if modBitLen > 8192 { + return nil, ErrExecutionReverted + } else { + return common.LeftPadBytes([]byte{}, int(modLen)), nil + } + } + + // limit to 8192 bits for base, exp, and mod in ZK + if baseBitLen > 8192 || expBitLen > 8192 || modBitLen > 8192 { + return nil, ErrExecutionReverted + } + switch { - case mod.BitLen() == 0: - // Modulo 0 is undefined, return zero - return common.LeftPadBytes([]byte{}, int(modLen)), nil case base.Cmp(libcommon.Big1) == 0: //If base == 1, then we can just return base % mod (if mod >= 1, which it is) v = base.Mod(base, mod).Bytes() diff --git a/core/vm/contracts_zkevm_test.go b/core/vm/contracts_zkevm_test.go new file mode 100644 index 00000000000..e3a8d27d3c1 --- /dev/null +++ b/core/vm/contracts_zkevm_test.go @@ -0,0 +1,69 @@ +package vm + +import ( + "testing" + "math/big" +) + +var ( + big0 = big.NewInt(0) + big10 = big.NewInt(10) + big8194 = big.NewInt(0).Lsh(big.NewInt(1), 8194) +) + +func Test_ModExpZkevm_Gas(t *testing.T) { + modExp := bigModExp_zkevm{enabled: true, eip2565: true} + + cases := map[string]struct { + base *big.Int + exp *big.Int + mod *big.Int + expected uint64 + }{ + "simple test": {big10, big10, big10, 200}, + "0 mod - normal gas": {big10, big10, big0, 200}, + "base 0 - mod < 8192 - normal gas": {big0, 
big10, big10, 200}, + "base 0 - mod > 8192 - 0 gas": {big0, big10, big8194, 0}, + "base over 8192 - 0 gas": {big8194, big10, big10, 0}, + "exp over 8192 - 0 gas": {big10, big8194, big10, 0}, + "mod over 8192 - 0 gas": {big10, big10, big8194, 0}, + } + + for name, test := range cases { + t.Run(name, func(t *testing.T) { + input := make([]byte, 0) + + base := len(test.base.Bytes()) + exp := len(test.exp.Bytes()) + mod := len(test.mod.Bytes()) + + input = append(input, uint64To32Bytes(base)...) + input = append(input, uint64To32Bytes(exp)...) + input = append(input, uint64To32Bytes(mod)...) + input = append(input, uint64ToDeterminedBytes(test.base, base)...) + input = append(input, uint64ToDeterminedBytes(test.exp, exp)...) + input = append(input, uint64ToDeterminedBytes(test.mod, mod)...) + + gas := modExp.RequiredGas(input) + + if gas != test.expected { + t.Errorf("Expected %d, got %d", test.expected, gas) + } + }) + } +} + +func uint64To32Bytes(input int) []byte { + bigInt := new(big.Int).SetUint64(uint64(input)) + bytes := bigInt.Bytes() + result := make([]byte, 32) + copy(result[32-len(bytes):], bytes) + return result +} + +func uint64ToDeterminedBytes(input *big.Int, length int) []byte { + bytes := input.Bytes() + result := make([]byte, length) + copy(result[length-len(bytes):], bytes) + return result +} From 71eaa3d1e9d9b23709c5bdae9b2e7716729992cd Mon Sep 17 00:00:00 2001 From: Jerry Date: Tue, 5 Nov 2024 12:20:57 -0800 Subject: [PATCH 13/88] Fix resequence script due to new cast version (#1414) --- .github/scripts/test_resequence.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/scripts/test_resequence.sh b/.github/scripts/test_resequence.sh index 9e2bef413c9..e80cf7188a4 100755 --- a/.github/scripts/test_resequence.sh +++ b/.github/scripts/test_resequence.sh @@ -18,7 +18,7 @@ get_latest_l2_batch() { } get_latest_l1_verified_batch() { - current_batch=$(cast logs --rpc-url "$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)" --address 0x1Fe038B54aeBf558638CA51C91bC8cCa06609e91 --from-block 0 -j | jq -r '.[] | select(.topics[0] == "0x9c72852172521097ba7e1482e6b44b351323df0155f97f4ea18fcec28e1f5966" or .topics[0] == "0xd1ec3a1216f08b6eff72e169ceb548b782db18a6614852618d86bb19f3f9b0d3") | .topics[1]' | tail -n 1 | sed 's/^0x//') + current_batch=$(cast logs --rpc-url "$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)" --address 0x1Fe038B54aeBf558638CA51C91bC8cCa06609e91 --from-block 0 --json | jq -r '.[] | select(.topics[0] == "0x9c72852172521097ba7e1482e6b44b351323df0155f97f4ea18fcec28e1f5966" or .topics[0] == "0xd1ec3a1216f08b6eff72e169ceb548b782db18a6614852618d86bb19f3f9b0d3") | .topics[1]' | tail -n 1 | sed 's/^0x//') current_batch_dec=$((16#$current_batch)) echo "$current_batch_dec" } @@ -46,7 +46,7 @@ wait_for_l1_batch() { fi if [ "$batch_type" = "virtual" ]; then - current_batch=$(cast logs --rpc-url "$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)" --address 0x1Fe038B54aeBf558638CA51C91bC8cCa06609e91 --from-block 0 -j | jq -r '.[] | select(.topics[0] == "0x3e54d0825ed78523037d00a81759237eb436ce774bd546993ee67a1b67b6e766") | .topics[1]' | tail -n 1 | sed 's/^0x//') + current_batch=$(cast logs --rpc-url "$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)" --address 0x1Fe038B54aeBf558638CA51C91bC8cCa06609e91 --from-block 0 --json | jq -r '.[] | select(.topics[0] == "0x3e54d0825ed78523037d00a81759237eb436ce774bd546993ee67a1b67b6e766") | .topics[1]' | tail -n 1 | sed 's/^0x//') current_batch=$((16#$current_batch)) elif [ "$batch_type" = 
"verified" ]; then current_batch=$(cast rpc zkevm_verifiedBatchNumber --rpc-url "$(kurtosis port print cdk-v1 cdk-erigon-node-001 rpc)" | sed 's/^"//;s/"$//') From 26fd344b1b4868f44098f3cb26ae07ab5a04a36d Mon Sep 17 00:00:00 2001 From: Moretti Georgiev Date: Wed, 6 Nov 2024 12:51:46 +0200 Subject: [PATCH 14/88] fix: healthcheck EthAPI interface (#1416) * fix: healthcheck EthAPI interface * fix: GetBlockByNumber pointer for fullTx param in rpcdaemon health test --- cmd/rpcdaemon/health/check_block.go | 3 ++- cmd/rpcdaemon/health/check_time.go | 7 +++---- cmd/rpcdaemon/health/health_test.go | 2 +- cmd/rpcdaemon/health/interfaces.go | 3 ++- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/cmd/rpcdaemon/health/check_block.go b/cmd/rpcdaemon/health/check_block.go index 93e8d71fc7e..cd3c973c2e7 100644 --- a/cmd/rpcdaemon/health/check_block.go +++ b/cmd/rpcdaemon/health/check_block.go @@ -11,7 +11,8 @@ func checkBlockNumber(blockNumber rpc.BlockNumber, api EthAPI) error { if api == nil { return fmt.Errorf("no connection to the Erigon server or `eth` namespace isn't enabled") } - data, err := api.GetBlockByNumber(context.TODO(), blockNumber, false) + fullTx := false + data, err := api.GetBlockByNumber(context.TODO(), blockNumber, &fullTx) if err != nil { return err } diff --git a/cmd/rpcdaemon/health/check_time.go b/cmd/rpcdaemon/health/check_time.go index e78f8aee858..ffdfde24bde 100644 --- a/cmd/rpcdaemon/health/check_time.go +++ b/cmd/rpcdaemon/health/check_time.go @@ -8,16 +8,15 @@ import ( "github.com/ledgerwatch/erigon/rpc" ) -var ( - errTimestampTooOld = errors.New("timestamp too old") -) +var errTimestampTooOld = errors.New("timestamp too old") func checkTime( r *http.Request, seconds int, ethAPI EthAPI, ) error { - i, err := ethAPI.GetBlockByNumber(r.Context(), rpc.LatestBlockNumber, false) + fullTx := false + i, err := ethAPI.GetBlockByNumber(r.Context(), rpc.LatestBlockNumber, &fullTx) if err != nil { return err } diff --git a/cmd/rpcdaemon/health/health_test.go b/cmd/rpcdaemon/health/health_test.go index f46146a4feb..419c7b9912b 100644 --- a/cmd/rpcdaemon/health/health_test.go +++ b/cmd/rpcdaemon/health/health_test.go @@ -32,7 +32,7 @@ type ethApiStub struct { syncingError error } -func (e *ethApiStub) GetBlockByNumber(_ context.Context, _ rpc.BlockNumber, _ bool) (map[string]interface{}, error) { +func (e *ethApiStub) GetBlockByNumber(_ context.Context, _ rpc.BlockNumber, _ *bool) (map[string]interface{}, error) { return e.blockResult, e.blockError } diff --git a/cmd/rpcdaemon/health/interfaces.go b/cmd/rpcdaemon/health/interfaces.go index 2fabf8d5de4..441def69bfd 100644 --- a/cmd/rpcdaemon/health/interfaces.go +++ b/cmd/rpcdaemon/health/interfaces.go @@ -2,6 +2,7 @@ package health import ( "context" + "github.com/ledgerwatch/erigon-lib/common/hexutil" "github.com/ledgerwatch/erigon/rpc" @@ -12,6 +13,6 @@ type NetAPI interface { } type EthAPI interface { - GetBlockByNumber(_ context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) + GetBlockByNumber(_ context.Context, number rpc.BlockNumber, fullTx *bool) (map[string]interface{}, error) Syncing(ctx context.Context) (interface{}, error) } From 8cc6d54a7b5f4605cee1ecfe5c4ebc871ba8ccea Mon Sep 17 00:00:00 2001 From: Scott Fairclough <70711990+hexoscott@users.noreply.github.com> Date: Wed, 6 Nov 2024 11:43:50 +0000 Subject: [PATCH 15/88] immediate batch seal flag for counter overflows (#1417) # Conflicts: # eth/ethconfig/config_zkevm.go # turbo/cli/default_flags.go # turbo/cli/flags_zkevm.go # 
zk/debug_tools/test-contracts/package.json # zk/stages/stage_sequence_execute.go --- cmd/utils/flags.go | 5 ++++ eth/ethconfig/config_zkevm.go | 9 ++++--- turbo/cli/default_flags.go | 1 + turbo/cli/flags_zkevm.go | 1 + .../test-contracts/contracts/KeccakLoop.sol | 10 +++++++ zk/debug_tools/test-contracts/package.json | 3 ++- .../test-contracts/scripts/keccak-loop.js | 26 +++++++++++++++++++ zk/stages/stage_sequence_execute.go | 4 +-- 8 files changed, 52 insertions(+), 7 deletions(-) create mode 100644 zk/debug_tools/test-contracts/contracts/KeccakLoop.sol create mode 100644 zk/debug_tools/test-contracts/scripts/keccak-loop.js diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 45290535b50..96fe7d2042f 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -748,6 +748,11 @@ var ( Usage: "The interval at which the sequencer checks the L1 for new GER information", Value: 1 * time.Minute, } + SealBatchImmediatelyOnOverflow = cli.BoolFlag{ + Name: "zkevm.seal-batch-immediately-on-overflow", + Usage: "Seal the batch immediately when detecting a counter overflow", + Value: false, + } ACLPrintHistory = cli.IntFlag{ Name: "acl.print-history", Usage: "Number of entries to print from the ACL history on node start up", diff --git a/eth/ethconfig/config_zkevm.go b/eth/ethconfig/config_zkevm.go index 89ff70fc810..8cd78e64753 100644 --- a/eth/ethconfig/config_zkevm.go +++ b/eth/ethconfig/config_zkevm.go @@ -87,10 +87,11 @@ type Zk struct { TxPoolRejectSmartContractDeployments bool - InitialBatchCfgFile string - ACLPrintHistory int - InfoTreeUpdateInterval time.Duration - BadBatches []uint64 + InitialBatchCfgFile string + ACLPrintHistory int + InfoTreeUpdateInterval time.Duration + BadBatches []uint64 + SealBatchImmediatelyOnOverflow bool } var DefaultZkConfig = &Zk{} diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index 682bbbf2bec..b8dd0f6463e 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -286,4 +286,5 @@ var DefaultFlags = []cli.Flag{ &utils.ACLPrintHistory, &utils.InfoTreeUpdateInterval, + &utils.SealBatchImmediatelyOnOverflow, } diff --git a/turbo/cli/flags_zkevm.go b/turbo/cli/flags_zkevm.go index 02d773dc363..56ec8ec0bee 100644 --- a/turbo/cli/flags_zkevm.go +++ b/turbo/cli/flags_zkevm.go @@ -204,6 +204,7 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { InitialBatchCfgFile: ctx.String(utils.InitialBatchCfgFile.Name), ACLPrintHistory: ctx.Int(utils.ACLPrintHistory.Name), InfoTreeUpdateInterval: ctx.Duration(utils.InfoTreeUpdateInterval.Name), + SealBatchImmediatelyOnOverflow: ctx.Bool(utils.SealBatchImmediatelyOnOverflow.Name), } utils2.EnableTimer(cfg.DebugTimers) diff --git a/zk/debug_tools/test-contracts/contracts/KeccakLoop.sol b/zk/debug_tools/test-contracts/contracts/KeccakLoop.sol new file mode 100644 index 00000000000..23c1349fb25 --- /dev/null +++ b/zk/debug_tools/test-contracts/contracts/KeccakLoop.sol @@ -0,0 +1,10 @@ +pragma solidity >=0.8.10; + +// 1616 for legacy, 226 for erigon -> 1198 -> 246 +contract KeccakLoop { + constructor () { + for(uint256 i = 0; i < 200; i++) { + keccak256(new bytes(i)); + } + } +} \ No newline at end of file diff --git a/zk/debug_tools/test-contracts/package.json b/zk/debug_tools/test-contracts/package.json index 06514e69be1..7022ac7abf5 100644 --- a/zk/debug_tools/test-contracts/package.json +++ b/zk/debug_tools/test-contracts/package.json @@ -20,7 +20,8 @@ "erc20Revert:sepolia": "npx hardhat compile && npx hardhat run scripts/ERC20-revert.js --network sepolia", 
"chainCall:local": "npx hardhat compile && npx hardhat run scripts/chain-call.js --network local", "chainCall:sepolia": "npx hardhat compile && npx hardhat run scripts/chain-call.js --network sepolia", - "create:local": "npx hardhat compile && npx hardhat run scripts/create.js --network local" + "create:local": "npx hardhat compile && npx hardhat run scripts/create.js --network local", + "keccak:local": "npx hardhat compile && npx hardhat run scripts/keccak-loop.js --network local" }, "keywords": [], "author": "", diff --git a/zk/debug_tools/test-contracts/scripts/keccak-loop.js b/zk/debug_tools/test-contracts/scripts/keccak-loop.js new file mode 100644 index 00000000000..5fe60b34bc3 --- /dev/null +++ b/zk/debug_tools/test-contracts/scripts/keccak-loop.js @@ -0,0 +1,26 @@ +async function main() { +try { + // Get the ContractFactory of your KeccakLoopContract + const KeccakLoopContract = await hre.ethers.getContractFactory("KeccakLoop"); + + // Deploy the contract + const contract = await KeccakLoopContract.deploy(); + // Wait for the deployment transaction to be mined + await contract.waitForDeployment(); + + console.log(`KeccakLoop deployed to: ${await contract.getAddress()}`); + + // const result = await contract.bigLoop(10000); + // console.log(result); + } catch (error) { + console.error(error); + process.exit(1); + } +} + +main() + .then(() => process.exit(0)) + .catch(error => { + console.error(error); + process.exit(1); + }); \ No newline at end of file diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index 5387b30fe91..e55f17e41f6 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -440,8 +440,8 @@ func sequencingBatchStep( ocs, _ := batchCounters.CounterStats(l1TreeUpdateIndex != 0) // was not included in this batch because it overflowed: counter x, counter y log.Info(transactionNotAddedText, "Counters context:", ocs, "overflow transactions", batchState.overflowTransactions) - if batchState.reachedOverflowTransactionLimit() { - log.Info(fmt.Sprintf("[%s] closing batch due to counters", logPrefix), "counters: ", batchState.overflowTransactions) + if batchState.reachedOverflowTransactionLimit() || cfg.zk.SealBatchImmediatelyOnOverflow { + log.Info(fmt.Sprintf("[%s] closing batch due to counters", logPrefix), "counters: ", batchState.overflowTransactions, "immediate", cfg.zk.SealBatchImmediatelyOnOverflow) runLoopBlocks = false break LOOP_TRANSACTIONS } From 1d05e317925e9c167b8c62b6e7ecb32291bd49d0 Mon Sep 17 00:00:00 2001 From: Jerry Date: Wed, 6 Nov 2024 08:28:15 -0800 Subject: [PATCH 16/88] Prevent zero tracer from panic in edge cases (#1411) --- eth/tracers/native/zero.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth/tracers/native/zero.go b/eth/tracers/native/zero.go index 3d4c05b51a6..65c32c30069 100644 --- a/eth/tracers/native/zero.go +++ b/eth/tracers/native/zero.go @@ -115,7 +115,7 @@ func (t *zeroTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, sco slot := libcommon.Hash(stackData[stackLen-1].Bytes32()) t.addAccountToTrace(caller) t.addSLOADToAccount(caller, slot) - case stackLen >= 1 && op == vm.SSTORE: + case stackLen >= 2 && op == vm.SSTORE: slot := libcommon.Hash(stackData[stackLen-1].Bytes32()) t.addAccountToTrace(caller) t.addSSTOREToAccount(caller, slot, stackData[stackLen-2].Clone()) From fd88d871ddd3ce118d838ebc02cb474ce90de489 Mon Sep 17 00:00:00 2001 From: tclemos Date: Wed, 6 Nov 2024 16:55:26 -0300 Subject: [PATCH 17/88] WIP: add unit test to 
stage l1_sequencer_sync --- zk/stages/stage_l1_sequencer_sync.go | 1 - zk/stages/stage_l1_sequencer_sync_test.go | 193 ++++++++++++++++++++++ 2 files changed, 193 insertions(+), 1 deletion(-) create mode 100644 zk/stages/stage_l1_sequencer_sync_test.go diff --git a/zk/stages/stage_l1_sequencer_sync.go b/zk/stages/stage_l1_sequencer_sync.go index ee2e12f83ca..187946c1dda 100644 --- a/zk/stages/stage_l1_sequencer_sync.go +++ b/zk/stages/stage_l1_sequencer_sync.go @@ -64,7 +64,6 @@ func SpawnL1SequencerSyncStage( } if progress == 0 { progress = cfg.zkCfg.L1FirstBlock - 1 - } // if the flag is set - wait for that block to be finalized on L1 before continuing diff --git a/zk/stages/stage_l1_sequencer_sync_test.go b/zk/stages/stage_l1_sequencer_sync_test.go new file mode 100644 index 00000000000..d5482804c89 --- /dev/null +++ b/zk/stages/stage_l1_sequencer_sync_test.go @@ -0,0 +1,193 @@ +package stages + +import ( + "context" + "math/big" + "testing" + "time" + + ethereum "github.com/ledgerwatch/erigon" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon/commands/mocks" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/stagedsync" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/smt/pkg/db" + "github.com/ledgerwatch/erigon/zk/contracts" + "github.com/ledgerwatch/erigon/zk/hermez_db" + "github.com/ledgerwatch/erigon/zk/syncer" + "github.com/ledgerwatch/log/v3" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +func TestSpawnL1SequencerSyncStage(t *testing.T) { + // arrange + ctx, db1 := context.Background(), memdb.NewTestDB(t) + tx := memdb.BeginRw(t, db1) + err := hermez_db.CreateHermezBuckets(tx) + require.NoError(t, err) + err = db.CreateEriDbBuckets(tx) + require.NoError(t, err) + + hDB := hermez_db.NewHermezDb(tx) + err = hDB.WriteBlockBatch(0, 0) + require.NoError(t, err) + err = stages.SaveStageProgress(tx, stages.L1SequencerSync, 0) + require.NoError(t, err) + + s := &stagedsync.StageState{ID: stages.L1SequencerSync, BlockNumber: 0} + u := &stagedsync.Sync{} + + // mocks + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + EthermanMock := mocks.NewMockIEtherman(mockCtrl) + + l1ContractAddresses := []common.Address{ + common.HexToAddress("0x1"), + common.HexToAddress("0x2"), + common.HexToAddress("0x3"), + } + l1ContractTopics := [][]common.Hash{ + []common.Hash{common.HexToHash("0x1")}, + []common.Hash{common.HexToHash("0x2")}, + []common.Hash{common.HexToHash("0x3")}, + } + + l1FirstBlock := big.NewInt(20) + + finalizedBlockParentHash := common.HexToHash("0x123456789") + finalizedBlockTime := uint64(time.Now().Unix()) + finalizedBlockNumber := big.NewInt(21) + finalizedBlockHeader := &types.Header{ParentHash: finalizedBlockParentHash, Number: finalizedBlockNumber, Time: finalizedBlockTime} + finalizedBlock := types.NewBlockWithHeader(finalizedBlockHeader) + + latestBlockParentHash := finalizedBlock.Hash() + latestBlockTime := uint64(time.Now().Unix()) + latestBlockNumber := big.NewInt(22) + latestBlockHeader := &types.Header{ParentHash: latestBlockParentHash, Number: latestBlockNumber, Time: latestBlockTime} + latestBlock := types.NewBlockWithHeader(latestBlockHeader) + + EthermanMock.EXPECT().HeaderByNumber(gomock.Any(), finalizedBlockNumber).Return(finalizedBlockHeader, nil).AnyTimes() + 
EthermanMock.EXPECT().BlockByNumber(gomock.Any(), big.NewInt(rpc.FinalizedBlockNumber.Int64())).Return(finalizedBlock, nil).AnyTimes() + EthermanMock.EXPECT().HeaderByNumber(gomock.Any(), latestBlockNumber).Return(latestBlockHeader, nil).AnyTimes() + EthermanMock.EXPECT().BlockByNumber(gomock.Any(), nil).Return(latestBlock, nil).AnyTimes() + + filterQuery := ethereum.FilterQuery{ + FromBlock: l1FirstBlock, + ToBlock: latestBlockNumber, + Addresses: l1ContractAddresses, + Topics: l1ContractTopics, + } + + filteredLogs := []types.Log{ + types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.InitialSequenceBatchesTopic}, + }, + + types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.InitialSequenceBatchesTopic}, + }, + + types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.AddNewRollupTypeTopic}, + }, + + types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.AddNewRollupTypeTopicBanana}, + }, + + types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.CreateNewRollupTopic}, + }, + + types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.UpdateRollupTopic}, + }, + } + EthermanMock.EXPECT().FilterLogs(gomock.Any(), filterQuery).Return(filteredLogs, nil).AnyTimes() + + l1Syncer := syncer.NewL1Syncer(ctx, []syncer.IEtherman{EthermanMock}, l1ContractAddresses, l1ContractTopics, 10, 0, "latest") + // updater := l1infotree.NewUpdater(ðconfig.Zk{}, l1Syncer) + zkCfg := ðconfig.Zk{ + L1FirstBlock: l1FirstBlock.Uint64(), + L1FinalizedBlockRequirement: uint64(21), + } + cfg := StageL1SequencerSyncCfg(db1, zkCfg, l1Syncer) + + // act + err = SpawnL1SequencerSyncStage(s, u, tx, cfg, ctx, log.New()) + require.NoError(t, err) + + // // assert + // // check tree + // tree, err := l1infotree.InitialiseL1InfoTree(hDB) + // require.NoError(t, err) + + // combined := append(mainnetExitRoot.Bytes(), rollupExitRoot.Bytes()...) 
+ // gerBytes := keccak256.Hash(combined) + // ger := common.BytesToHash(gerBytes) + // leafBytes := l1infotree.HashLeafData(ger, latestBlockParentHash, latestBlockTime) + + // assert.True(t, tree.LeafExists(leafBytes)) + + // // check WriteL1InfoTreeLeaf + // leaves, err := hDB.GetAllL1InfoTreeLeaves() + // require.NoError(t, err) + + // leafHash := common.BytesToHash(leafBytes[:]) + // assert.Len(t, leaves, 1) + // assert.Equal(t, leafHash.String(), leaves[0].String()) + + // // check WriteL1InfoTreeUpdate + // l1InfoTreeUpdate, err := hDB.GetL1InfoTreeUpdate(0) + // require.NoError(t, err) + + // assert.Equal(t, uint64(0), l1InfoTreeUpdate.Index) + // assert.Equal(t, ger, l1InfoTreeUpdate.GER) + // assert.Equal(t, mainnetExitRoot, l1InfoTreeUpdate.MainnetExitRoot) + // assert.Equal(t, rollupExitRoot, l1InfoTreeUpdate.RollupExitRoot) + // assert.Equal(t, latestBlockNumber.Uint64(), l1InfoTreeUpdate.BlockNumber) + // assert.Equal(t, latestBlockTime, l1InfoTreeUpdate.Timestamp) + // assert.Equal(t, latestBlockParentHash, l1InfoTreeUpdate.ParentHash) + + // //check WriteL1InfoTreeUpdateToGer + // l1InfoTreeUpdateToGer, err := hDB.GetL1InfoTreeUpdateByGer(ger) + // require.NoError(t, err) + + // assert.Equal(t, uint64(0), l1InfoTreeUpdateToGer.Index) + // assert.Equal(t, ger, l1InfoTreeUpdateToGer.GER) + // assert.Equal(t, mainnetExitRoot, l1InfoTreeUpdateToGer.MainnetExitRoot) + // assert.Equal(t, rollupExitRoot, l1InfoTreeUpdateToGer.RollupExitRoot) + // assert.Equal(t, latestBlockNumber.Uint64(), l1InfoTreeUpdateToGer.BlockNumber) + // assert.Equal(t, latestBlockTime, l1InfoTreeUpdateToGer.Timestamp) + // assert.Equal(t, latestBlockParentHash, l1InfoTreeUpdateToGer.ParentHash) + + // // check WriteL1InfoTreeRoot + // root, _, _ := tree.GetCurrentRootCountAndSiblings() + // index, found, err := hDB.GetL1InfoTreeIndexByRoot(root) + // assert.NoError(t, err) + // assert.Equal(t, uint64(0), index) + // assert.True(t, found) + + // // check SaveStageProgress + // progress, err := stages.GetStageProgress(tx, stages.L1InfoTree) + // require.NoError(t, err) + // assert.Equal(t, latestBlockNumber.Uint64()+1, progress) +} From c86a851533d94e75c961fde74c6563d0b2981a3e Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Thu, 7 Nov 2024 13:53:39 +0000 Subject: [PATCH 18/88] feat(fork): banana acc input hash (#1419) --- turbo/jsonrpc/zkevm_api.go | 4 +- zk/syncer/utils.go | 100 +++++++++++++++++++++++++++++++++++-- zk/utils/acc_input_hash.go | 34 +++++++++++++ 3 files changed, 133 insertions(+), 5 deletions(-) diff --git a/turbo/jsonrpc/zkevm_api.go b/turbo/jsonrpc/zkevm_api.go index 7523214d8b4..dada5250041 100644 --- a/turbo/jsonrpc/zkevm_api.go +++ b/turbo/jsonrpc/zkevm_api.go @@ -733,12 +733,12 @@ func (api *ZkEvmAPIImpl) getAccInputHash(ctx context.Context, db SequenceReader, return nil, fmt.Errorf("failed to get old acc input hash for batch %d: %w", prevSequenceBatch, err) } - decodedSequenceInteerface, err := syncer.DecodeSequenceBatchesCalldata(sequenceBatchesCalldata) + decodedSequenceInterface, err := syncer.DecodeSequenceBatchesCalldata(sequenceBatchesCalldata) if err != nil { return nil, fmt.Errorf("failed to decode calldata for tx %s: %w", batchSequence.L1TxHash, err) } - accInputHashCalcFn, totalSequenceBatches, err := syncer.GetAccInputDataCalcFunction(batchSequence.L1InfoRoot, decodedSequenceInteerface) + accInputHashCalcFn, totalSequenceBatches, err := syncer.GetAccInputDataCalcFunction(batchSequence.L1InfoRoot, decodedSequenceInterface) if err != nil { return nil, fmt.Errorf("failed to 
get accInputHash calculation func: %w", err) } diff --git a/zk/syncer/utils.go b/zk/syncer/utils.go index 203405ad2a1..a6ad885af74 100644 --- a/zk/syncer/utils.go +++ b/zk/syncer/utils.go @@ -16,8 +16,8 @@ const ( sequenceBatchesValidiumMethodName = "sequenceBatchesValidium" ) -func GetAccInputDataCalcFunction(l1InfoRoot common.Hash, decodedSequenceInteerface interface{}) (accInputHashCalcFn func(prevAccInputHash common.Hash, index int) *common.Hash, totalSequenceBatches int, err error) { - switch decodedSequence := decodedSequenceInteerface.(type) { +func GetAccInputDataCalcFunction(l1InfoRoot common.Hash, decodedSequenceInterface interface{}) (accInputHashCalcFn func(prevAccInputHash common.Hash, index int) *common.Hash, totalSequenceBatches int, err error) { + switch decodedSequence := decodedSequenceInterface.(type) { case *SequenceBatchesCalldataPreEtrog: accInputHashCalcFn = func(prevAccInputHash common.Hash, index int) *common.Hash { return utils.CalculatePreEtrogAccInputHash(prevAccInputHash, decodedSequence.Batches[index].Transactions, decodedSequence.Batches[index].GlobalExitRoot, decodedSequence.Batches[index].Timestamp, decodedSequence.L2Coinbase) @@ -33,6 +33,11 @@ func GetAccInputDataCalcFunction(l1InfoRoot common.Hash, decodedSequenceInteerfa return utils.CalculateEtrogAccInputHash(prevAccInputHash, decodedSequence.Batches[index].Transactions, l1InfoRoot, decodedSequence.MaxSequenceTimestamp, decodedSequence.L2Coinbase, decodedSequence.Batches[index].ForcedBlockHashL1) } totalSequenceBatches = len(decodedSequence.Batches) + case *SequenceBatchesCalldataBanana: + accInputHashCalcFn = func(prevAccInputHash common.Hash, index int) *common.Hash { + return utils.CalculateBananaAccInputHash(prevAccInputHash, decodedSequence.Batches[index].Transactions, l1InfoRoot, decodedSequence.MaxSequenceTimestamp, decodedSequence.L2Coinbase, decodedSequence.Batches[index].ForcedBlockHashL1) + } + totalSequenceBatches = len(decodedSequence.Batches) case *SequenceBatchesCalldataValidiumPreEtrog: accInputHashCalcFn = func(prevAccInputHash common.Hash, index int) *common.Hash { return utils.CalculatePreEtrogValidiumAccInputHash(prevAccInputHash, decodedSequence.Batches[index].TransactionsHash, decodedSequence.Batches[index].GlobalExitRoot, decodedSequence.Batches[index].Timestamp, decodedSequence.L2Coinbase) @@ -48,8 +53,13 @@ func GetAccInputDataCalcFunction(l1InfoRoot common.Hash, decodedSequenceInteerfa return utils.CalculateEtrogValidiumAccInputHash(prevAccInputHash, decodedSequence.Batches[index].TransactionsHash, l1InfoRoot, decodedSequence.MaxSequenceTimestamp, decodedSequence.L2Coinbase, decodedSequence.Batches[index].ForcedBlockHashL1) } totalSequenceBatches = len(decodedSequence.Batches) + case *SequenceBatchesCalldataValidiumBanana: + accInputHashCalcFn = func(prevAccInputHash common.Hash, index int) *common.Hash { + return utils.CalculateBananaValidiumAccInputHash(prevAccInputHash, decodedSequence.Batches[index].TransactionHash, l1InfoRoot, decodedSequence.MaxSequenceTimestamp, decodedSequence.L2Coinbase, decodedSequence.Batches[index].ForcedBlockHashL1) + } + totalSequenceBatches = len(decodedSequence.Batches) default: - return nil, 0, fmt.Errorf("unexpected type of decoded sequence calldata: %T", decodedSequenceInteerface) + return nil, 0, fmt.Errorf("unexpected type of decoded sequence calldata: %T", decodedSequenceInterface) } return accInputHashCalcFn, totalSequenceBatches, nil @@ -101,11 +111,95 @@ func DecodeSequenceBatchesCalldata(data []byte) (calldata interface{}, err error } 
else { return decodeElderberryBatchesValidiumCallData(unpackedCalldata), nil } + case contracts.SequenceBatchesBanana: + if method.Name == sequenceBatchesMethodName { + return decodeBananaSequenceBatchesCallData(unpackedCalldata), nil + } else { + return decodeBananaSequenceBatchesValidiumCallData(unpackedCalldata), nil + } default: return nil, fmt.Errorf("no decoder found for method signature: %s", methodSig) } } +type SequencedBatchBanana struct { + Transactions []byte + ForcedGlobalExitRoot common.Hash + ForcedTimestamp uint64 + ForcedBlockHashL1 common.Hash +} + +type SequenceBatchesCalldataBanana struct { + Batches []SequencedBatchBanana + L2Coinbase common.Address + MaxSequenceTimestamp uint64 +} + +func decodeBananaSequenceBatchesCallData(unpackedCalldata map[string]interface{}) *SequenceBatchesCalldataBanana { + unpackedbatches := unpackedCalldata["batches"].([]struct { + Transactions []uint8 `json:"transactions"` + ForcedGlobalExitRoot [32]uint8 `json:"forcedGlobalExitRoot"` + ForcedTimestamp uint64 `json:"forcedTimestamp"` + ForcedBlockHashL1 [32]uint8 `json:"forcedBlockHashL1"` + }) + + calldata := &SequenceBatchesCalldataBanana{ + Batches: make([]SequencedBatchBanana, len(unpackedbatches)), + L2Coinbase: unpackedCalldata["l2Coinbase"].(common.Address), + MaxSequenceTimestamp: unpackedCalldata["maxSequenceTimestamp"].(uint64), + } + + for i, batch := range unpackedbatches { + calldata.Batches[i] = SequencedBatchBanana{ + Transactions: batch.Transactions, + ForcedGlobalExitRoot: common.BytesToHash(batch.ForcedGlobalExitRoot[:]), + ForcedTimestamp: batch.ForcedTimestamp, + ForcedBlockHashL1: common.BytesToHash(batch.ForcedBlockHashL1[:]), + } + } + + return calldata +} + +type SequencedBatchValidiumBanana struct { + TransactionHash common.Hash + ForcedGlobalExitRoot common.Hash + ForcedTimestamp uint64 + ForcedBlockHashL1 common.Hash +} + +type SequenceBatchesCalldataValidiumBanana struct { + Batches []SequencedBatchValidiumBanana + L2Coinbase common.Address + MaxSequenceTimestamp uint64 +} + +func decodeBananaSequenceBatchesValidiumCallData(unpackedCalldata map[string]interface{}) *SequenceBatchesCalldataValidiumBanana { + unpackedbatches := unpackedCalldata["batches"].([]struct { + TransactionHash [32]uint8 `json:"transactionHash"` + ForcedGlobalExitRoot [32]uint8 `json:"forcedGlobalExitRoot"` + ForcedTimestamp uint64 `json:"forcedTimestamp"` + ForcedBlockHashL1 [32]uint8 `json:"forcedBlockHashL1"` + }) + + calldata := &SequenceBatchesCalldataValidiumBanana{ + Batches: make([]SequencedBatchValidiumBanana, len(unpackedbatches)), + L2Coinbase: unpackedCalldata["l2Coinbase"].(common.Address), + MaxSequenceTimestamp: unpackedCalldata["maxSequenceTimestamp"].(uint64), + } + + for i, batch := range unpackedbatches { + calldata.Batches[i] = SequencedBatchValidiumBanana{ + TransactionHash: common.BytesToHash(batch.TransactionHash[:]), + ForcedGlobalExitRoot: common.BytesToHash(batch.ForcedGlobalExitRoot[:]), + ForcedTimestamp: batch.ForcedTimestamp, + ForcedBlockHashL1: common.BytesToHash(batch.ForcedBlockHashL1[:]), + } + } + + return calldata +} + type SequencedBatchElderberry struct { Transactions []byte ForcedGlobalExitRoot common.Hash diff --git a/zk/utils/acc_input_hash.go b/zk/utils/acc_input_hash.go index 2a987015cdb..7a66b899ee9 100644 --- a/zk/utils/acc_input_hash.go +++ b/zk/utils/acc_input_hash.go @@ -8,6 +8,40 @@ import ( "github.com/ledgerwatch/erigon/crypto" ) +func CalculateBananaAccInputHash( + oldAccInputHash common.Hash, + batchTransactionData []byte, + l1InfoRoot 
common.Hash, + limitTimestamp uint64, + sequencerAddress common.Address, + forcedBlockHashL1 common.Hash, +) *common.Hash { + return CalculateEtrogAccInputHash( + oldAccInputHash, + batchTransactionData, + l1InfoRoot, + limitTimestamp, + sequencerAddress, + forcedBlockHashL1) +} + +func CalculateBananaValidiumAccInputHash( + oldAccInputHash common.Hash, + batchTransactionData common.Hash, + l1InfoRoot common.Hash, + limitTimestamp uint64, + sequencerAddress common.Address, + forcedBlockHashL1 common.Hash, +) *common.Hash { + return CalculateEtrogValidiumAccInputHash( + oldAccInputHash, + batchTransactionData, + l1InfoRoot, + limitTimestamp, + sequencerAddress, + forcedBlockHashL1) +} + // calculates the new accInputHash based on the old one and data frem one new batch // this returns the accInputHash for the current batch // oldAccInputHash - the accInputHash from the previous batch From 050949ad7fe4782c867f1a42cf75f40450514e88 Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Thu, 7 Nov 2024 15:33:49 +0000 Subject: [PATCH 19/88] tweak(zkevm_api): allow 0x00 accinputhash (#1421) --- turbo/jsonrpc/zkevm_api.go | 3 +++ turbo/jsonrpc/zkevm_api_test.go | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/turbo/jsonrpc/zkevm_api.go b/turbo/jsonrpc/zkevm_api.go index dada5250041..f72ae85d9a0 100644 --- a/turbo/jsonrpc/zkevm_api.go +++ b/turbo/jsonrpc/zkevm_api.go @@ -748,6 +748,9 @@ func (api *ZkEvmAPIImpl) getAccInputHash(ctx context.Context, db SequenceReader, } accInputHash = &prevSequenceAccinputHash + if prevSequenceBatch == 0 { + return + } // calculate acc input hash for i := 0; i < int(batchNum-prevSequenceBatch); i++ { accInputHash = accInputHashCalcFn(prevSequenceAccinputHash, i) diff --git a/turbo/jsonrpc/zkevm_api_test.go b/turbo/jsonrpc/zkevm_api_test.go index 4b5736c0cf6..98d48ab2e24 100644 --- a/turbo/jsonrpc/zkevm_api_test.go +++ b/turbo/jsonrpc/zkevm_api_test.go @@ -480,7 +480,7 @@ func TestGetBatchByNumber(t *testing.T) { assert.Equal(gers[len(gers)-1], batch.GlobalExitRoot) assert.Equal(mainnetExitRoots[len(mainnetExitRoots)-1], batch.MainnetExitRoot) assert.Equal(rollupExitRoots[len(rollupExitRoots)-1], batch.RollupExitRoot) - assert.Equal(common.HexToHash("0x97d1524156ccb46723e5c3c87951da9a390499ba288161d879df1dbc03d49afc"), batch.AccInputHash) + assert.Equal(common.HexToHash(common.Hash{}.String()), batch.AccInputHash) assert.Equal(common.HexToHash("0x22ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba97"), *batch.SendSequencesTxHash) assert.Equal(rpctypes.ArgUint64(1714427009), batch.Timestamp) assert.Equal(true, batch.Closed) From 6e8775f714f848780f0782b9a0538aad23c7d73e Mon Sep 17 00:00:00 2001 From: Valentin Staykov <79150443+V-Staykov@users.noreply.github.com> Date: Thu, 7 Nov 2024 17:35:40 +0200 Subject: [PATCH 20/88] fix(stage_baches): rpc resequence stop stage on unwind (#1297) * fix(stage_baches): rpc resequence stop stage on unwind * fix: tests * fix: datastream channel write blocking * fix: datastream blocking test * fix: add wait on the datastream connect loop * fix: merge problems * fix: blockhash comparison in stage batches processor * fix: download entries till reaching the amount in header * fix: add go sum package * feat: internal reconnect on each method in datastream client * fix: do not disconnect on stage batches end * feat: add ctx close in datastream reconnections * fix: send stop command after normal stop of reading * feat: retry a fixed number of times in stage batches * fix: return error on ctx done * fix: reverse daastream 
server version * feat: print ds found block * feat: added more logs in stage batches * fix: check for sync limit in stage batches * fix: sync limit in stage batches * refactor: make unwind test erros a bit more readable * refactor: make unwind tests erorrs more readable * refactor(ds_client): wrap connection read and write to set timeout * fix: add timeout to test clients * fix: stage batches limit * feat: up datastream server version * fix: up datastream server version * fix: go sum * fix: add error handling for set timeouts in datastream client * fix: handle zero checkTImeout value * fix: remove flag setting for datastream timeout * fix: ci config * fix: resequence test timeout * fix: remove timeout from pre-london ci config * refactor: error handling * fix: stop stage on unwind * fix: missing id in client * fix: tests * fix: tests * fix: finish processing blocks on last entry reached * feat: send stop command at start of new cycle to not get timedout by server * fix: remove accidental commit folder * fix: remove unneeded commit * fix: tests * fix: remove unnneeded return * fix: get correct parent block hash * fix: read correct blockhash * fix: unwind on ds block unwind * refactor: error handling in datastream and stage batches * fix: remove unneeded sleep * fix: add a small sleep interval in the entry loop * fix: stop streaming on querying new stuff from ds client * fix: buffer clear before new reads * fix: sleep more in resequence test * fix: cast call * fix: remove wrong flag on cast * fix: cast json flags in test * feat: added wait time for block to be available on sync node * fix: resequence block check test * Fix 'client already started' error on finding common ancestor * Add timeout --------- Co-authored-by: Scott Fairclough <70711990+hexoscott@users.noreply.github.com> Co-authored-by: Jerry --- .github/scripts/test_resequence.sh | 37 ++ .github/workflows/ci_zkevm.yml | 8 + .github/workflows/test-resequence.yml | 1 + cmd/utils/flags.go | 2 +- go.mod | 9 +- go.sum | 12 + zk/datastream/client/commands.go | 52 +- zk/datastream/client/stream_client.go | 538 ++++++++++++++------- zk/datastream/client/stream_client_test.go | 84 ++-- zk/datastream/client/utils.go | 39 +- zk/datastream/client/utils_test.go | 95 ++-- zk/datastream/server/data_stream_server.go | 6 +- zk/datastream/types/result.go | 8 + zk/stages/stage_batches.go | 172 ++++--- zk/stages/stage_batches_datastream.go | 69 +-- zk/stages/stage_batches_processor.go | 174 ++++--- zk/stages/stage_batches_test.go | 1 - zk/stages/test_utils.go | 30 +- zk/tests/unwinds/unwind.sh | 32 +- 19 files changed, 815 insertions(+), 554 deletions(-) diff --git a/.github/scripts/test_resequence.sh b/.github/scripts/test_resequence.sh index e80cf7188a4..b36bc878236 100755 --- a/.github/scripts/test_resequence.sh +++ b/.github/scripts/test_resequence.sh @@ -46,6 +46,7 @@ wait_for_l1_batch() { fi if [ "$batch_type" = "virtual" ]; then + current_batch=$(cast logs --rpc-url "$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)" --address 0x1Fe038B54aeBf558638CA51C91bC8cCa06609e91 --from-block 0 --json | jq -r '.[] | select(.topics[0] == "0x3e54d0825ed78523037d00a81759237eb436ce774bd546993ee67a1b67b6e766") | .topics[1]' | tail -n 1 | sed 's/^0x//') current_batch=$((16#$current_batch)) elif [ "$batch_type" = "verified" ]; then @@ -70,6 +71,33 @@ wait_for_l1_batch() { done } + +wait_for_l2_block_number() { + local block_number=$1 + local node_url=$2 + local latest_block=0 + local tries=0 + + #while latest_block lower than block_number + #if more than 5 
attempts - throw error + while [ "$latest_block" -lt "$block_number" ]; do + latest_block=$(cast block latest --rpc-url "$node_url" | grep "number" | awk '{print $2}') + if [[ $? -ne 0 ]]; then + echo "Error: Failed to get latest block number" >&2 + return 1 + fi + + if [ "$tries" -ge 5 ]; then + echo "Error: Failed to get block number $block_number" >&2 + return 1 + fi + tries=$((tries + 1)) + + echo "Current block number on $node_url: $latest_block, needed: $block_number. Waiting to try again." + sleep 60 + done +} + stop_cdk_erigon_sequencer() { echo "Stopping cdk-erigon" kurtosis service exec cdk-v1 cdk-erigon-sequencer-001 "pkill -SIGTRAP proc-runner.sh" || true @@ -139,9 +167,18 @@ echo "Calculating comparison block number" comparison_block=$((latest_block - 10)) echo "Block number to compare (10 blocks behind): $comparison_block" +echo "Waiting some time for the syncer to catch up" +sleep 30 + echo "Getting block hash from sequencer" sequencer_hash=$(cast block $comparison_block --rpc-url "$(kurtosis port print cdk-v1 cdk-erigon-sequencer-001 rpc)" | grep "hash" | awk '{print $2}') +# wait for block to be available on sync node +if ! wait_for_l2_block_number $comparison_block "$(kurtosis port print cdk-v1 cdk-erigon-node-001 rpc)"; then + echo "Failed to wait for batch verification" + exit 1 +fi + echo "Getting block hash from node" node_hash=$(cast block $comparison_block --rpc-url "$(kurtosis port print cdk-v1 cdk-erigon-node-001 rpc)" | grep "hash" | awk '{print $2}') diff --git a/.github/workflows/ci_zkevm.yml b/.github/workflows/ci_zkevm.yml index f1803011bfd..877f30f08a0 100644 --- a/.github/workflows/ci_zkevm.yml +++ b/.github/workflows/ci_zkevm.yml @@ -105,6 +105,12 @@ jobs: sed -i '/zkevm.sequencer-batch-seal-time:/d' templates/cdk-erigon/config.yml sed -i '/zkevm.sequencer-non-empty-batch-seal-time:/d' templates/cdk-erigon/config.yml sed -i '/sentry.drop-useless-peers:/d' templates/cdk-erigon/config.yml + sed -i '/zkevm.l2-datastreamer-timeout:/d' templates/cdk-erigon/config.yml + - name: Configure Kurtosis CDK + working-directory: ./kurtosis-cdk + run: | + /usr/local/bin/yq -i '.args.data_availability_mode = "${{ matrix.da-mode }}"' params.yml + /usr/local/bin/yq -i '.args.cdk_erigon_node_image = "cdk-erigon:local"' params.yml - name: Deploy Kurtosis CDK package working-directory: ./kurtosis-cdk @@ -224,6 +230,8 @@ jobs: sed -i '/sentry.drop-useless-peers:/d' templates/cdk-erigon/config.yml sed -i '/zkevm\.pool-manager-url/d' ./templates/cdk-erigon/config.yml sed -i '$a\zkevm.disable-virtual-counters: true' ./templates/cdk-erigon/config.yml + sed -i '/zkevm.l2-datastreamer-timeout:/d' templates/cdk-erigon/config.yml + - name: Configure Kurtosis CDK working-directory: ./kurtosis-cdk diff --git a/.github/workflows/test-resequence.yml b/.github/workflows/test-resequence.yml index 3b273d7cec4..63029d21c56 100644 --- a/.github/workflows/test-resequence.yml +++ b/.github/workflows/test-resequence.yml @@ -55,6 +55,7 @@ jobs: sed -i '/zkevm.sequencer-non-empty-batch-seal-time:/d' templates/cdk-erigon/config.yml sed -i '/sentry.drop-useless-peers:/d' templates/cdk-erigon/config.yml sed -i '/zkevm.pool-manager-url/d' templates/cdk-erigon/config.yml + sed -i '/zkevm.l2-datastreamer-timeout:/d' templates/cdk-erigon/config.yml - name: Configure Kurtosis CDK working-directory: ./kurtosis-cdk run: | diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 96fe7d2042f..1c8ece0b9c6 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -408,7 +408,7 @@ var ( L2DataStreamerTimeout = 
cli.StringFlag{ Name: "zkevm.l2-datastreamer-timeout", Usage: "The time to wait for data to arrive from the stream before reporting an error (0s doesn't check)", - Value: "0s", + Value: "3s", } L1SyncStartBlock = cli.Uint64Flag{ Name: "zkevm.l1-sync-start-block", diff --git a/go.mod b/go.mod index 4be1aea6a83..8e2906a27bb 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ replace github.com/ledgerwatch/erigon-lib => ./erigon-lib require ( gfx.cafe/util/go/generic v0.0.0-20230721185457-c559e86c829c - github.com/0xPolygonHermez/zkevm-data-streamer v0.2.5 + github.com/0xPolygonHermez/zkevm-data-streamer v0.2.7 github.com/99designs/gqlgen v0.17.40 github.com/Giulio2002/bls v0.0.0-20240315151443-652e18a3d188 github.com/Masterminds/sprig/v3 v3.2.3 @@ -62,7 +62,7 @@ require ( github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/hashicorp/golang-lru/arc/v2 v2.0.6 github.com/hashicorp/golang-lru/v2 v2.0.7 - github.com/holiman/uint256 v1.2.4 + github.com/holiman/uint256 v1.3.1 github.com/huandu/xstrings v1.4.0 github.com/huin/goupnp v1.2.0 github.com/iden3/go-iden3-crypto v0.0.15 @@ -110,11 +110,11 @@ require ( golang.org/x/exp v0.0.0-20231226003508-02704c960a9b golang.org/x/net v0.24.0 golang.org/x/sync v0.7.0 - golang.org/x/sys v0.19.0 + golang.org/x/sys v0.20.0 golang.org/x/time v0.5.0 google.golang.org/grpc v1.63.2 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 - google.golang.org/protobuf v1.33.0 + google.golang.org/protobuf v1.34.2 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/yaml.v2 v2.4.0 @@ -174,6 +174,7 @@ require ( github.com/francoispqt/gojay v1.2.13 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/garslo/gogen v0.0.0-20170307003452-d6ebae628c7c // indirect + github.com/go-delve/delve v1.21.2 // indirect github.com/go-llsqlite/adapter v0.0.0-20230927005056-7f5ce7f0c916 // indirect github.com/go-llsqlite/crawshaw v0.4.0 // indirect github.com/go-logr/logr v1.2.4 // indirect diff --git a/go.sum b/go.sum index cd90cd9206b..2dc4ef179ba 100644 --- a/go.sum +++ b/go.sum @@ -51,6 +51,10 @@ gfx.cafe/util/go/generic v0.0.0-20230721185457-c559e86c829c/go.mod h1:WvSX4JsCRB git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/0xPolygonHermez/zkevm-data-streamer v0.2.5 h1:p0epAhai44c34G+nzX0CZ67q3vkJtOXlO07lbhAEe9g= github.com/0xPolygonHermez/zkevm-data-streamer v0.2.5/go.mod h1:RC6ouyNsUtJrv5aGPcM6Dm5xhXN209tRSzcsJsaOtZI= +github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 h1:BSO1uu6dmLQ5kKb3uyDvsUxbnIoyumKvlwr0OtpTYMo= +github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6/go.mod h1:RC6ouyNsUtJrv5aGPcM6Dm5xhXN209tRSzcsJsaOtZI= +github.com/0xPolygonHermez/zkevm-data-streamer v0.2.7 h1:73sYxRQ9cOmtYBEyHePgEwrVULR+YruSQxVXCt/SmzU= +github.com/0xPolygonHermez/zkevm-data-streamer v0.2.7/go.mod h1:7nM7Ihk+fTG1TQPwdZoGOYd3wprqqyIyjtS514uHzWE= github.com/99designs/gqlgen v0.17.40 h1:/l8JcEVQ93wqIfmH9VS1jsAkwm6eAF1NwQn3N+SDqBY= github.com/99designs/gqlgen v0.17.40/go.mod h1:b62q1USk82GYIVjC60h02YguAZLqYZtvWml8KkhJps4= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -327,6 +331,8 @@ github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s= github.com/go-chi/chi/v5 v5.0.12/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= github.com/go-chi/cors v1.2.1 h1:xEC8UT3Rlp2QuWNEr4Fs/c2EAGVKBwy/1vHx3bppil4= github.com/go-chi/cors v1.2.1/go.mod 
h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58= +github.com/go-delve/delve v1.21.2 h1:eaS+ziJo+660mi3D2q/VP8RxW5GcF4Y1zyKSi82alsU= +github.com/go-delve/delve v1.21.2/go.mod h1:FgTAiRUe43RS5EexL06RPyMtP8AMZVL/t9Qqgy3qUe4= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -490,6 +496,8 @@ github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSo github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= +github.com/holiman/uint256 v1.3.1 h1:JfTzmih28bittyHM8z360dCjIA9dbPIBlcTI6lmctQs= +github.com/holiman/uint256 v1.3.1/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= @@ -1333,6 +1341,8 @@ golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1547,6 +1557,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo4Y= gopkg.in/cenkalti/backoff.v1 v1.1.0/go.mod h1:J6Vskwqd+OMVJl8C33mmtxTBs2gyzfv7UDAkHu8BrjI= diff --git a/zk/datastream/client/commands.go b/zk/datastream/client/commands.go index 8676a2807eb..d871fb798ab 100644 --- a/zk/datastream/client/commands.go +++ b/zk/datastream/client/commands.go @@ -1,26 +1,19 @@ package client -import "fmt" - const ( // Commands - CmdUnknown Command = 0 - CmdStart Command = 1 - CmdStop Command = 2 - CmdHeader Command = 3 - CmdStartBookmark Command = 4 // CmdStartBookmark for the start from bookmark TCP client command - CmdEntry Command = 5 // CmdEntry for the get entry TCP client command - CmdBookmark Command = 6 // CmdBookmark for the get 
bookmark TCP client command + CmdUnknown Command = iota + CmdStart + CmdStop + CmdHeader + CmdStartBookmark // CmdStartBookmark for the start from bookmark TCP client command + CmdEntry // CmdEntry for the get entry TCP client command + CmdBookmark // CmdBookmark for the get bookmark TCP client command ) // sendHeaderCmd sends the header command to the server. func (c *StreamClient) sendHeaderCmd() error { - err := c.sendCommand(CmdHeader) - if err != nil { - return fmt.Errorf("%s %v", c.id, err) - } - - return nil + return c.sendCommand(CmdHeader) } // sendBookmarkCmd sends either CmdStartBookmark or CmdBookmark for the provided bookmark value. @@ -38,24 +31,23 @@ func (c *StreamClient) sendBookmarkCmd(bookmark []byte, streaming bool) error { } // Send bookmark length - if err := writeFullUint32ToConn(c.conn, uint32(len(bookmark))); err != nil { + if err := c.writeToConn(uint32(len(bookmark))); err != nil { return err } // Send the bookmark to retrieve - return writeBytesToConn(c.conn, bookmark) + return c.writeToConn(bookmark) } // sendStartCmd sends a start command to the server, indicating // that the client wishes to start streaming from the given entry number. func (c *StreamClient) sendStartCmd(from uint64) error { - err := c.sendCommand(CmdStart) - if err != nil { + if err := c.sendCommand(CmdStart); err != nil { return err } // Send starting/from entry number - return writeFullUint64ToConn(c.conn, from) + return c.writeToConn(from) } // sendEntryCmd sends the get data stream entry by number command to a TCP connection @@ -66,29 +58,21 @@ func (c *StreamClient) sendEntryCmd(entryNum uint64) error { } // Send entry number - return writeFullUint64ToConn(c.conn, entryNum) + return c.writeToConn(entryNum) } // sendHeaderCmd sends the header command to the server. 
func (c *StreamClient) sendStopCmd() error { - err := c.sendCommand(CmdStop) - if err != nil { - return fmt.Errorf("%s %v", c.id, err) - } - - return nil + return c.sendCommand(CmdStop) } func (c *StreamClient) sendCommand(cmd Command) error { + // Send command - if err := writeFullUint64ToConn(c.conn, uint64(cmd)); err != nil { - return fmt.Errorf("%s %v", c.id, err) + if err := c.writeToConn(uint64(cmd)); err != nil { + return err } // Send stream type - if err := writeFullUint64ToConn(c.conn, uint64(c.streamType)); err != nil { - return fmt.Errorf("%s %v", c.id, err) - } - - return nil + return c.writeToConn(uint64(c.streamType)) } diff --git a/zk/datastream/client/stream_client.go b/zk/datastream/client/stream_client.go index ba85dd8c8ff..e8e96ed9b29 100644 --- a/zk/datastream/client/stream_client.go +++ b/zk/datastream/client/stream_client.go @@ -41,9 +41,10 @@ type StreamClient struct { version int streamType StreamType conn net.Conn - id string // Client id checkTimeout time.Duration // time to wait for data before reporting an error + header *types.HeaderEntry + // atomic lastWrittenTime atomic.Int64 streaming atomic.Bool @@ -78,7 +79,6 @@ func NewClient(ctx context.Context, server string, version int, checkTimeout tim server: server, version: version, streamType: StSequencer, - id: "", entryChan: make(chan interface{}, 100000), currentFork: uint64(latestDownloadedForkId), } @@ -94,50 +94,80 @@ func (c *StreamClient) GetEntryChan() *chan interface{} { return &c.entryChan } +func (c *StreamClient) GetEntryNumberLimit() uint64 { + return c.header.TotalEntries +} + +var ( + ErrFailedAttempts = errors.New("failed to get the L2 block within 5 attempts") +) + // GetL2BlockByNumber queries the data stream by sending the L2 block start bookmark for the certain block number // and streams the changes for that block (including the transactions). // Note that this function is intended for on demand querying and it disposes the connection after it ends. 
-func (c *StreamClient) GetL2BlockByNumber(blockNum uint64) (*types.FullL2Block, int, error) { - if _, err := c.EnsureConnected(); err != nil { - return nil, -1, err - } - defer c.Stop() - +func (c *StreamClient) GetL2BlockByNumber(blockNum uint64) (fullBLock *types.FullL2Block, err error) { var ( - l2Block *types.FullL2Block - err error - isL2Block bool + connected bool = c.conn != nil ) + count := 0 + for { + select { + case <-c.ctx.Done(): + return nil, fmt.Errorf("context done - stopping") + + default: + } + if count > 5 { + return nil, ErrFailedAttempts + } + if connected { + if err := c.stopStreamingIfStarted(); err != nil { + return nil, fmt.Errorf("stopStreamingIfStarted: %w", err) + } + + if fullBLock, err = c.getL2BlockByNumber(blockNum); err == nil { + break + } + + if errors.Is(err, types.ErrAlreadyStarted) { + // if the client is already started, we can stop the client and try again + c.Stop() + } else if !errors.Is(err, ErrSocket) { + return nil, fmt.Errorf("getL2BlockByNumber: %w", err) + } + + } + time.Sleep(1 * time.Second) + connected = c.handleSocketError(err) + count++ + } + + return fullBLock, nil +} + +func (c *StreamClient) getL2BlockByNumber(blockNum uint64) (l2Block *types.FullL2Block, err error) { + var isL2Block bool bookmark := types.NewBookmarkProto(blockNum, datastream.BookmarkType_BOOKMARK_TYPE_L2_BLOCK) bookmarkRaw, err := bookmark.Marshal() if err != nil { - return nil, -1, err + return nil, fmt.Errorf("bookmark.Marshal: %w", err) } - re, err := c.initiateDownloadBookmark(bookmarkRaw) - if err != nil { - errorCode := -1 - if re != nil { - errorCode = int(re.ErrorNum) - } - return nil, errorCode, err + if _, err := c.initiateDownloadBookmark(bookmarkRaw); err != nil { + return nil, fmt.Errorf("initiateDownloadBookmark: %w", err) } for l2Block == nil { select { case <-c.ctx.Done(): - errorCode := -1 - if re != nil { - errorCode = int(re.ErrorNum) - } - return l2Block, errorCode, nil + return l2Block, fmt.Errorf("context done - stopping") default: } - parsedEntry, err := ReadParsedProto(c) + parsedEntry, _, err := ReadParsedProto(c) if err != nil { - return nil, -1, err + return nil, fmt.Errorf("ReadParsedProto: %w", err) } l2Block, isL2Block = parsedEntry.(*types.FullL2Block) @@ -147,41 +177,88 @@ func (c *StreamClient) GetL2BlockByNumber(blockNum uint64) (*types.FullL2Block, } if l2Block.L2BlockNumber != blockNum { - return nil, -1, fmt.Errorf("expected block number %d but got %d", blockNum, l2Block.L2BlockNumber) + return nil, fmt.Errorf("expected block number %d but got %d", blockNum, l2Block.L2BlockNumber) } - return l2Block, types.CmdErrOK, nil + return l2Block, nil } // GetLatestL2Block queries the data stream by reading the header entry and based on total entries field, // it retrieves the latest File entry that is of EntryTypeL2Block type. // Note that this function is intended for on demand querying and it disposes the connection after it ends. 
func (c *StreamClient) GetLatestL2Block() (l2Block *types.FullL2Block, err error) { - if _, err := c.EnsureConnected(); err != nil { - return nil, err + var ( + connected bool = c.conn != nil + ) + count := 0 + for { + select { + case <-c.ctx.Done(): + return nil, errors.New("context done - stopping") + default: + } + if count > 5 { + return nil, ErrFailedAttempts + } + if connected { + if err := c.stopStreamingIfStarted(); err != nil { + return nil, fmt.Errorf("stopStreamingIfStarted: %w", err) + } + + if l2Block, err = c.getLatestL2Block(); err == nil { + break + } + if !errors.Is(err, ErrSocket) { + return nil, fmt.Errorf("getLatestL2Block: %w", err) + } + } + + time.Sleep(1 * time.Second) + connected = c.handleSocketError(err) + count++ + } + return l2Block, nil +} + +// don't check for errors here, we just need to empty the socket for next reads +func (c *StreamClient) stopStreamingIfStarted() error { + if c.streaming.Load() { + c.sendStopCmd() + c.streaming.Store(false) + } + + // empty the socket buffer + for { + c.conn.SetReadDeadline(time.Now().Add(100)) + if _, err := c.readBuffer(100); err != nil { + break + } } - defer c.Stop() + return nil +} + +func (c *StreamClient) getLatestL2Block() (l2Block *types.FullL2Block, err error) { h, err := c.GetHeader() if err != nil { - return nil, err + return nil, fmt.Errorf("GetHeader: %w", err) } latestEntryNum := h.TotalEntries - 1 for l2Block == nil && latestEntryNum > 0 { if err := c.sendEntryCmdWrapper(latestEntryNum); err != nil { - return nil, err + return nil, fmt.Errorf("sendEntryCmdWrapper: %w", err) } entry, err := c.NextFileEntry() if err != nil { - return nil, err + return nil, fmt.Errorf("NextFileEntry: %w", err) } if entry.EntryType == types.EntryTypeL2Block { if l2Block, err = types.UnmarshalL2Block(entry.Data); err != nil { - return nil, err + return nil, fmt.Errorf("UnmarshalL2Block: %w", err) } } @@ -189,7 +266,7 @@ func (c *StreamClient) GetLatestL2Block() (l2Block *types.FullL2Block, err error } if latestEntryNum == 0 { - return nil, errors.New("failed to retrieve the latest block from the data stream") + return nil, errors.New("no block found") } return l2Block, nil @@ -198,9 +275,7 @@ func (c *StreamClient) GetLatestL2Block() (l2Block *types.FullL2Block, err error func (c *StreamClient) GetLastWrittenTimeAtomic() *atomic.Int64 { return &c.lastWrittenTime } -func (c *StreamClient) GetStreamingAtomic() *atomic.Bool { - return &c.streaming -} + func (c *StreamClient) GetProgressAtomic() *atomic.Uint64 { return &c.progress } @@ -211,11 +286,9 @@ func (c *StreamClient) Start() error { var err error c.conn, err = net.Dial("tcp", c.server) if err != nil { - return fmt.Errorf("error connecting to server %s: %v", c.server, err) + return fmt.Errorf("connecting to server %s: %w", c.server, err) } - c.id = c.conn.LocalAddr().String() - return nil } @@ -224,61 +297,59 @@ func (c *StreamClient) Stop() { return } if err := c.sendStopCmd(); err != nil { - log.Warn(fmt.Sprintf("Failed to send the stop command to the data stream server: %s", err)) + log.Warn(fmt.Sprintf("send stop command: %v", err)) } - c.conn.Close() - c.conn = nil - - c.clearEntryCHannel() + // c.conn.Close() + // c.conn = nil } // Command header: Get status // Returns the current status of the header. // If started, terminate the connection. 
func (c *StreamClient) GetHeader() (*types.HeaderEntry, error) { + if err := c.stopStreamingIfStarted(); err != nil { + return nil, fmt.Errorf("stopStreamingIfStarted: %w", err) + } + if err := c.sendHeaderCmd(); err != nil { - return nil, fmt.Errorf("%s send header error: %v", c.id, err) + return nil, fmt.Errorf("sendHeaderCmd: %w", err) } // Read packet - packet, err := readBuffer(c.conn, 1) + packet, err := c.readBuffer(1) if err != nil { - return nil, fmt.Errorf("%s read buffer: %v", c.id, err) + return nil, fmt.Errorf("readBuffer: %w", err) } // Check packet type if packet[0] != PtResult { - return nil, fmt.Errorf("%s error expecting result packet type %d and received %d", c.id, PtResult, packet[0]) + return nil, fmt.Errorf("expecting result packet type %d and received %d", PtResult, packet[0]) } // Read server result entry for the command - r, err := c.readResultEntry(packet) - if err != nil { - return nil, fmt.Errorf("%s read result entry error: %v", c.id, err) - } - if err := r.GetError(); err != nil { - return nil, fmt.Errorf("%s got Result error code %d: %v", c.id, r.ErrorNum, err) + if _, err := c.readResultEntry(packet); err != nil { + return nil, fmt.Errorf("readResultEntry: %w", err) } // Read header entry h, err := c.readHeaderEntry() if err != nil { - return nil, fmt.Errorf("%s read header entry error: %v", c.id, err) + return nil, fmt.Errorf("readHeaderEntry: %w", err) } + c.header = h + return h, nil } // sendEntryCmdWrapper sends CmdEntry command and reads packet type and decodes result entry. func (c *StreamClient) sendEntryCmdWrapper(entryNum uint64) error { if err := c.sendEntryCmd(entryNum); err != nil { - return err + return fmt.Errorf("sendEntryCmd: %w", err) } - if re, err := c.readPacketAndDecodeResultEntry(); err != nil { - return fmt.Errorf("failed to retrieve the result entry: %w", err) - } else if err := re.GetError(); err != nil { - return err + if _, err := c.readPacketAndDecodeResultEntry(); err != nil { + return fmt.Errorf("readPacketAndDecodeResultEntry: %w", err) } return nil @@ -288,16 +359,16 @@ func (c *StreamClient) ExecutePerFile(bookmark *types.BookmarkProto, function fu // Get header from server header, err := c.GetHeader() if err != nil { - return fmt.Errorf("%s get header error: %v", c.id, err) + return fmt.Errorf("GetHeader: %w", err) } protoBookmark, err := bookmark.Marshal() if err != nil { - return fmt.Errorf("failed to marshal bookmark: %v", err) + return fmt.Errorf("bookmark.Marshal: %w", err) } if _, err := c.initiateDownloadBookmark(protoBookmark); err != nil { - return err + return fmt.Errorf("initiateDownloadBookmark: %w", err) } count := uint64(0) logTicker := time.NewTicker(10 * time.Second) @@ -313,10 +384,10 @@ func (c *StreamClient) ExecutePerFile(bookmark *types.BookmarkProto, function fu } file, err := c.NextFileEntry() if err != nil { - return fmt.Errorf("reading file entry: %v", err) + return fmt.Errorf("NextFileEntry: %w", err) } if err := function(file); err != nil { - return fmt.Errorf("executing function: %v", err) + return fmt.Errorf("execute function: %w", err) } count++ @@ -326,45 +397,83 @@ func (c *StreamClient) ExecutePerFile(bookmark *types.BookmarkProto, function fu } func (c *StreamClient) clearEntryCHannel() { - select { - case <-c.entryChan: - close(c.entryChan) + defer func() { for range c.entryChan { } - default: - } + }() + defer func() { + if r := recover(); r != nil { + log.Warn("[datastream_client] Channel is already closed") + } + }() + + close(c.entryChan) } // close old entry chan and read all elements 
before opening a new one -func (c *StreamClient) renewEntryChannel() { +func (c *StreamClient) RenewEntryChannel() { c.clearEntryCHannel() c.entryChan = make(chan interface{}, entryChannelSize) } -func (c *StreamClient) EnsureConnected() (bool, error) { - if c.conn == nil { - if err := c.tryReConnect(); err != nil { - return false, fmt.Errorf("failed to reconnect the datastream client: %w", err) +func (c *StreamClient) ReadAllEntriesToChannel() (err error) { + var ( + connected bool = c.conn != nil + ) + count := 0 + for { + select { + case <-c.ctx.Done(): + return fmt.Errorf("context done - stopping") + default: + } + if connected { + if err := c.stopStreamingIfStarted(); err != nil { + return fmt.Errorf("stopStreamingIfStarted: %w", err) + } + + if err = c.readAllEntriesToChannel(); err == nil { + break + } + if !errors.Is(err, ErrSocket) { + return fmt.Errorf("readAllEntriesToChannel: %w", err) + } } - c.renewEntryChannel() + time.Sleep(1 * time.Second) + connected = c.handleSocketError(err) + count++ } - return true, nil + return nil +} + +func (c *StreamClient) handleSocketError(socketErr error) bool { + if socketErr != nil { + log.Warn(fmt.Sprintf("%v", socketErr)) + } + if err := c.tryReConnect(); err != nil { + log.Warn(fmt.Sprintf("try reconnect: %v", err)) + return false + } + + c.RenewEntryChannel() + + return true } // reads entries to the end of the stream // at end will wait for new entries to arrive -func (c *StreamClient) ReadAllEntriesToChannel() error { +func (c *StreamClient) readAllEntriesToChannel() (err error) { c.streaming.Store(true) - defer c.streaming.Store(false) + c.stopReadingToChannel.Store(false) var bookmark *types.BookmarkProto progress := c.progress.Load() if progress == 0 { bookmark = types.NewBookmarkProto(0, datastream.BookmarkType_BOOKMARK_TYPE_BATCH) } else { - bookmark = types.NewBookmarkProto(progress, datastream.BookmarkType_BOOKMARK_TYPE_L2_BLOCK) + bookmark = types.NewBookmarkProto(progress+1, datastream.BookmarkType_BOOKMARK_TYPE_L2_BLOCK) } protoBookmark, err := bookmark.Marshal() @@ -374,64 +483,50 @@ func (c *StreamClient) ReadAllEntriesToChannel() error { // send start command if _, err := c.initiateDownloadBookmark(protoBookmark); err != nil { - return err + return fmt.Errorf("initiateDownloadBookmark: %w", err) } if err := c.readAllFullL2BlocksToChannel(); err != nil { - err2 := fmt.Errorf("%s read full L2 blocks error: %v", c.id, err) - - if c.conn != nil { - if err2 := c.conn.Close(); err2 != nil { - log.Error("failed to close connection after error", "original-error", err, "new-error", err2) - } - c.conn = nil - } - - return err2 + return fmt.Errorf("readAllFullL2BlocksToChannel: %w", err) } - return nil + return } // runs the prerequisites for entries download func (c *StreamClient) initiateDownloadBookmark(bookmark []byte) (*types.ResultEntry, error) { // send CmdStartBookmark command if err := c.sendBookmarkCmd(bookmark, true); err != nil { - return nil, err + return nil, fmt.Errorf("sendBookmarkCmd: %w", err) } re, err := c.afterStartCommand() if err != nil { - return re, fmt.Errorf("after start command error: %v", err) + return re, fmt.Errorf("afterStartCommand: %w", err) } return re, nil } func (c *StreamClient) afterStartCommand() (*types.ResultEntry, error) { - re, err := c.readPacketAndDecodeResultEntry() - if err != nil { - return nil, err - } - - if err := re.GetError(); err != nil { - return re, fmt.Errorf("got Result error code %d: %v", re.ErrorNum, err) - } - - return re, nil + return c.readPacketAndDecodeResultEntry() } 
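For orientation only (not part of the patch): a minimal sketch of how a caller might drive the reworked ReadAllEntriesToChannel loop above. The constructor arguments mirror client.NewClient(ctx, url, version, timeout, forkId) as used elsewhere in this series; the URL, import paths and the block handling are placeholder assumptions, and error handling is reduced to logging.

package main

import (
	"context"
	"time"

	"github.com/ledgerwatch/erigon/zk/datastream/client"
	"github.com/ledgerwatch/erigon/zk/datastream/types"
	"github.com/ledgerwatch/log/v3"
)

func main() {
	ctx := context.Background()
	// placeholder endpoint; version and fork id left at zero for the sketch
	c := client.NewClient(ctx, "127.0.0.1:6900", 0, 2*time.Second, 0)
	if err := c.Start(); err != nil {
		log.Error("failed to start datastream client", "err", err)
		return
	}
	defer c.Stop()

	// ReadAllEntriesToChannel keeps retrying on ErrSocket and only returns on
	// context cancellation or a non-socket error, so it runs in its own goroutine.
	go func() {
		if err := c.ReadAllEntriesToChannel(); err != nil {
			log.Warn("datastream read stopped", "err", err)
		}
	}()

	// Drain the entry channel; a nil entry marks the current end of the stream
	// (see readAllFullL2BlocksToChannel below).
	for entry := range *c.GetEntryChan() {
		switch e := entry.(type) {
		case *types.FullL2Block:
			log.Info("received block", "blockNumber", e.L2BlockNumber)
		case nil:
			return
		}
	}
}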
// reads all entries from the server and sends them to a channel // sends the parsed FullL2Blocks with transactions to a channel -func (c *StreamClient) readAllFullL2BlocksToChannel() error { - var err error - +func (c *StreamClient) readAllFullL2BlocksToChannel() (err error) { + readNewProto := true + entryNum := uint64(0) + parsedProto := interface{}(nil) LOOP: for { select { default: case <-c.ctx.Done(): - log.Warn("[Datastream client] Context done - stopping") + return fmt.Errorf("context done - stopping") + } + + if c.stopReadingToChannel.Load() { break LOOP } @@ -439,52 +534,70 @@ LOOP: c.conn.SetReadDeadline(time.Now().Add(c.checkTimeout)) } - parsedProto, localErr := ReadParsedProto(c) - if localErr != nil { - err = localErr - break + if readNewProto { + if parsedProto, entryNum, err = ReadParsedProto(c); err != nil { + return err + } + readNewProto = false } c.lastWrittenTime.Store(time.Now().UnixNano()) switch parsedProto := parsedProto.(type) { case *types.BookmarkProto: + readNewProto = true continue case *types.BatchStart: c.currentFork = parsedProto.ForkId - c.entryChan <- parsedProto case *types.GerUpdate: - c.entryChan <- parsedProto case *types.BatchEnd: - c.entryChan <- parsedProto case *types.FullL2Block: parsedProto.ForkId = c.currentFork - log.Trace("writing block to channel", "blockNumber", parsedProto.L2BlockNumber, "batchNumber", parsedProto.BatchNumber) - c.entryChan <- parsedProto + log.Trace("[Datastream client] writing block to channel", "blockNumber", parsedProto.L2BlockNumber, "batchNumber", parsedProto.BatchNumber) + default: + return fmt.Errorf("unexpected entry type: %v", parsedProto) + } + select { + case c.entryChan <- parsedProto: + readNewProto = true default: - err = fmt.Errorf("unexpected entry type: %v", parsedProto) + time.Sleep(10 * time.Microsecond) + } + + if c.header.TotalEntries == entryNum+1 { + log.Trace("[Datastream client] reached the current end of the stream", "header_totalEntries", c.header.TotalEntries, "entryNum", entryNum) + + retries := 0 + INTERNAL_LOOP: + for { + select { + case c.entryChan <- nil: + break INTERNAL_LOOP + default: + if retries > 5 { + return errors.New("[Datastream client] failed to write final entry to channel after 5 retries") + } + retries++ + log.Warn("[Datastream client] Channel is full, waiting to write nil and end stream client read") + time.Sleep(1 * time.Second) + } + } break LOOP } } - return err + return nil } -func (c *StreamClient) tryReConnect() error { - var err error - for i := 0; i < 50; i++ { - if c.conn != nil { - if err := c.conn.Close(); err != nil { - log.Warn(fmt.Sprintf("[%d. iteration] failed to close the DS connection: %s", i+1, err)) - return err - } - c.conn = nil - } - if err = c.Start(); err != nil { - log.Warn(fmt.Sprintf("[%d. 
iteration] failed to start the DS connection: %s", i+1, err)) - time.Sleep(5 * time.Second) - continue +func (c *StreamClient) tryReConnect() (err error) { + if c.conn != nil { + if err := c.conn.Close(); err != nil { + log.Warn(fmt.Sprintf("close DS connection: %v", err)) + return err } - return nil + c.conn = nil + } + if err = c.Start(); err != nil { + log.Warn(fmt.Sprintf("start DS connection: %v", err)) } return err @@ -496,21 +609,24 @@ func (c *StreamClient) StopReadingToChannel() { type FileEntryIterator interface { NextFileEntry() (*types.FileEntry, error) + GetEntryNumberLimit() uint64 } func ReadParsedProto(iterator FileEntryIterator) ( parsedEntry interface{}, + entryNum uint64, err error, ) { file, err := iterator.NextFileEntry() if err != nil { - err = fmt.Errorf("read file entry error: %w", err) + err = fmt.Errorf("NextFileEntry: %w", err) return } if file == nil { - return nil, nil + return } + entryNum = file.EntryNum switch file.EntryType { case types.BookmarkEntryType: @@ -524,6 +640,7 @@ func ReadParsedProto(iterator FileEntryIterator) ( case types.EntryTypeL2Block: var l2Block *types.FullL2Block if l2Block, err = types.UnmarshalL2Block(file.Data); err != nil { + err = fmt.Errorf("UnmarshalL2Block: %w", err) return } @@ -534,17 +651,20 @@ func ReadParsedProto(iterator FileEntryIterator) ( LOOP: for { if innerFile, err = iterator.NextFileEntry(); err != nil { + err = fmt.Errorf("NextFileEntry: %w", err) return } - + entryNum = innerFile.EntryNum if innerFile.IsL2Tx() { if l2Tx, err = types.UnmarshalTx(innerFile.Data); err != nil { + err = fmt.Errorf("UnmarshalTx: %w", err) return } txs = append(txs, *l2Tx) } else if innerFile.IsL2BlockEnd() { var l2BlockEnd *types.L2BlockEndProto if l2BlockEnd, err = types.UnmarshalL2BlockEnd(innerFile.Data); err != nil { + err = fmt.Errorf("UnmarshalL2BlockEnd: %w", err) return } if l2BlockEnd.GetBlockNumber() != l2Block.L2BlockNumber { @@ -555,6 +675,11 @@ func ReadParsedProto(iterator FileEntryIterator) ( } else if innerFile.IsBookmark() { var bookmark *types.BookmarkProto if bookmark, err = types.UnmarshalBookmark(innerFile.Data); err != nil || bookmark == nil { + if err != nil { + err = fmt.Errorf("UnmarshalBookmark: %w", err) + } else { + err = fmt.Errorf("unexpected nil bookmark") + } return } if bookmark.BookmarkType() == datastream.BookmarkType_BOOKMARK_TYPE_L2_BLOCK { @@ -565,6 +690,7 @@ func ReadParsedProto(iterator FileEntryIterator) ( } } else if innerFile.IsBatchEnd() { if _, err = types.UnmarshalBatchEnd(file.Data); err != nil { + err = fmt.Errorf("UnmarshalBatchEnd: %w", err) return } break LOOP @@ -572,6 +698,9 @@ func ReadParsedProto(iterator FileEntryIterator) ( err = fmt.Errorf("unexpected entry type inside a block: %d", innerFile.EntryType) return } + if entryNum == iterator.GetEntryNumberLimit() { + break LOOP + } } l2Block.L2Txs = txs @@ -579,12 +708,17 @@ func ReadParsedProto(iterator FileEntryIterator) ( return case types.EntryTypeL2BlockEnd: log.Debug(fmt.Sprintf("retrieved EntryTypeL2BlockEnd: %+v", file)) + parsedEntry, err = types.UnmarshalL2BlockEnd(file.Data) + if err != nil { + err = fmt.Errorf("UnmarshalL2BlockEnd: %w", err) + } return case types.EntryTypeL2Tx: err = errors.New("unexpected L2 tx entry, found outside of block") default: err = fmt.Errorf("unexpected entry type: %d", file.EntryType) } + return } @@ -592,21 +726,17 @@ func ReadParsedProto(iterator FileEntryIterator) ( // returns the parsed FileEntry func (c *StreamClient) NextFileEntry() (file *types.FileEntry, err error) { // Read packet 
type - packet, err := readBuffer(c.conn, 1) + packet, err := c.readBuffer(1) if err != nil { - return file, fmt.Errorf("failed to read packet type: %v", err) + return file, fmt.Errorf("readBuffer: %w", err) } packetType := packet[0] // Check packet type if packetType == PtResult { // Read server result entry for the command - r, err := c.readResultEntry(packet) - if err != nil { - return file, err - } - if err := r.GetError(); err != nil { - return file, fmt.Errorf("got Result error code %d: %v", r.ErrorNum, err) + if _, err := c.readResultEntry(packet); err != nil { + return file, fmt.Errorf("readResultEntry: %w", err) } return file, nil } else if packetType != PtData && packetType != PtDataRsp { @@ -614,9 +744,9 @@ func (c *StreamClient) NextFileEntry() (file *types.FileEntry, err error) { } // Read the rest of fixed size fields - buffer, err := readBuffer(c.conn, types.FileEntryMinSize-1) + buffer, err := c.readBuffer(types.FileEntryMinSize - 1) if err != nil { - return file, fmt.Errorf("error reading file bytes: %v", err) + return file, fmt.Errorf("reading file bytes: readBuffer: %w", err) } if packetType != PtData { @@ -627,19 +757,19 @@ func (c *StreamClient) NextFileEntry() (file *types.FileEntry, err error) { // Read variable field (data) length := binary.BigEndian.Uint32(buffer[1:5]) if length < types.FileEntryMinSize { - return file, errors.New("error reading data entry: wrong data length") + return file, errors.New("reading data entry: wrong data length") } // Read rest of the file data - bufferAux, err := readBuffer(c.conn, length-types.FileEntryMinSize) + bufferAux, err := c.readBuffer(length - types.FileEntryMinSize) if err != nil { - return file, fmt.Errorf("error reading file data bytes: %v", err) + return file, fmt.Errorf("reading file data bytes: readBuffer: %w", err) } buffer = append(buffer, bufferAux...) // Decode binary data to data entry struct if file, err = types.DecodeFileEntry(buffer); err != nil { - return file, fmt.Errorf("decode file entry error: %v", err) + return file, fmt.Errorf("DecodeFileEntry: %w", err) } if file.EntryType == types.EntryTypeNotFound { @@ -654,24 +784,24 @@ func (c *StreamClient) NextFileEntry() (file *types.FileEntry, err error) { func (c *StreamClient) readHeaderEntry() (h *types.HeaderEntry, err error) { // Read header stream bytes - binaryHeader, err := readBuffer(c.conn, types.HeaderSizePreEtrog) + binaryHeader, err := c.readBuffer(types.HeaderSizePreEtrog) if err != nil { - return h, fmt.Errorf("failed to read header bytes %v", err) + return h, fmt.Errorf("read header bytes: readBuffer: %w", err) } headLength := binary.BigEndian.Uint32(binaryHeader[1:5]) if headLength == types.HeaderSize { // Read the rest of fixed size fields - buffer, err := readBuffer(c.conn, types.HeaderSize-types.HeaderSizePreEtrog) + buffer, err := c.readBuffer(types.HeaderSize - types.HeaderSizePreEtrog) if err != nil { - return h, fmt.Errorf("failed to read header bytes %v", err) + return h, fmt.Errorf("read header bytes: readBuffer: %w", err) } binaryHeader = append(binaryHeader, buffer...) 
} // Decode bytes stream to header entry struct if h, err = types.DecodeHeaderEntry(binaryHeader); err != nil { - return h, fmt.Errorf("error decoding binary header: %v", err) + return h, fmt.Errorf("DecodeHeaderEntry: %w", err) } return @@ -685,28 +815,45 @@ func (c *StreamClient) readResultEntry(packet []byte) (re *types.ResultEntry, er } // Read the rest of fixed size fields - buffer, err := readBuffer(c.conn, types.ResultEntryMinSize-1) + buffer, err := c.readBuffer(types.ResultEntryMinSize - 1) if err != nil { - return re, fmt.Errorf("failed to read main result bytes %v", err) + return re, fmt.Errorf("read main result bytes: readBuffer: %w", err) } buffer = append(packet, buffer...) // Read variable field (errStr) length := binary.BigEndian.Uint32(buffer[1:5]) if length < types.ResultEntryMinSize { - return re, fmt.Errorf("%s Error reading result entry", c.id) + return re, errors.New("failed reading result entry") } // read the rest of the result - bufferAux, err := readBuffer(c.conn, length-types.ResultEntryMinSize) + bufferAux, err := c.readBuffer(length - types.ResultEntryMinSize) if err != nil { - return re, fmt.Errorf("failed to read result errStr bytes %v", err) + return re, fmt.Errorf("read result errStr bytes: readBuffer: %w", err) } buffer = append(buffer, bufferAux...) // Decode binary entry result if re, err = types.DecodeResultEntry(buffer); err != nil { - return re, fmt.Errorf("decode result entry error: %v", err) + return re, fmt.Errorf("DecodeResultEntry: %w", err) + } + + if !re.IsOk() { + switch re.ErrorNum { + case types.CmdErrAlreadyStarted: + return re, fmt.Errorf("%w: %s", types.ErrAlreadyStarted, re.ErrorStr) + case types.CmdErrAlreadyStopped: + return re, fmt.Errorf("%w: %s", types.ErrAlreadyStopped, re.ErrorStr) + case types.CmdErrBadFromEntry: + return re, fmt.Errorf("%w: %s", types.ErrBadFromEntry, re.ErrorStr) + case types.CmdErrBadFromBookmark: + return re, fmt.Errorf("%w: %s", types.ErrBadFromBookmark, re.ErrorStr) + case types.CmdErrInvalidCommand: + return re, fmt.Errorf("%w: %s", types.ErrInvalidCommand, re.ErrorStr) + default: + return re, fmt.Errorf("unknown error code: %s", re.ErrorStr) + } } return re, nil @@ -715,16 +862,71 @@ func (c *StreamClient) readResultEntry(packet []byte) (re *types.ResultEntry, er // readPacketAndDecodeResultEntry reads the packet from the connection and tries to decode the ResultEntry from it. 
func (c *StreamClient) readPacketAndDecodeResultEntry() (*types.ResultEntry, error) { // Read packet - packet, err := readBuffer(c.conn, 1) + packet, err := c.readBuffer(1) if err != nil { - return nil, fmt.Errorf("read buffer error: %w", err) + return nil, fmt.Errorf("read buffer: %w", err) } // Read server result entry for the command r, err := c.readResultEntry(packet) if err != nil { - return nil, fmt.Errorf("read result entry error: %w", err) + return nil, fmt.Errorf("readResultEntry: %w", err) } return r, nil } + +func (c *StreamClient) readBuffer(amount uint32) ([]byte, error) { + if err := c.resetReadTimeout(); err != nil { + return nil, fmt.Errorf("resetReadTimeout: %w", err) + } + return readBuffer(c.conn, amount) +} + +func (c *StreamClient) writeToConn(data interface{}) error { + if err := c.resetWriteTimeout(); err != nil { + return fmt.Errorf("resetWriteTimeout: %w", err) + } + switch parsed := data.(type) { + case []byte: + if err := writeBytesToConn(c.conn, parsed); err != nil { + return fmt.Errorf("writeBytesToConn: %w", err) + } + case uint32: + if err := writeFullUint32ToConn(c.conn, parsed); err != nil { + return fmt.Errorf("writeFullUint32ToConn: %w", err) + } + case uint64: + if err := writeFullUint64ToConn(c.conn, parsed); err != nil { + return fmt.Errorf("writeFullUint64ToConn: %w", err) + } + default: + return errors.New("unexpected write type") + } + + return nil +} + +func (c *StreamClient) resetWriteTimeout() error { + if c.checkTimeout == 0 { + return nil + } + + if err := c.conn.SetWriteDeadline(time.Now().Add(c.checkTimeout)); err != nil { + return fmt.Errorf("%w: conn.SetWriteDeadline: %v", ErrSocket, err) + } + + return nil +} + +func (c *StreamClient) resetReadTimeout() error { + if c.checkTimeout == 0 { + return nil + } + + if err := c.conn.SetReadDeadline(time.Now().Add(c.checkTimeout)); err != nil { + return fmt.Errorf("%w: conn.SetReadDeadline: %v", ErrSocket, err) + } + + return nil +} diff --git a/zk/datastream/client/stream_client_test.go b/zk/datastream/client/stream_client_test.go index 05e0cd80149..f8078889e6b 100644 --- a/zk/datastream/client/stream_client_test.go +++ b/zk/datastream/client/stream_client_test.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "encoding/binary" - "errors" "fmt" "net" "sync" @@ -27,7 +26,7 @@ func TestStreamClientReadHeaderEntry(t *testing.T) { name string input []byte expectedResult *types.HeaderEntry - expectedError error + expectedError string } testCases := []testCase{ { @@ -40,18 +39,18 @@ func TestStreamClientReadHeaderEntry(t *testing.T) { TotalLength: 24, TotalEntries: 64, }, - expectedError: nil, + expectedError: "", }, { name: "Invalid byte array length", input: []byte{20, 21, 22, 23, 24, 20}, expectedResult: nil, - expectedError: errors.New("failed to read header bytes reading from server: unexpected EOF"), + expectedError: "read header bytes: readBuffer: socket error: io.ReadFull: unexpected EOF", }, } for _, testCase := range testCases { - c := NewClient(context.Background(), "", 0, 0, 0) + c := NewClient(context.Background(), "", 0, 2*time.Second, 0) server, conn := net.Pipe() defer server.Close() defer c.Stop() @@ -64,7 +63,11 @@ func TestStreamClientReadHeaderEntry(t *testing.T) { }() header, err := c.readHeaderEntry() - require.Equal(t, testCase.expectedError, err) + if testCase.expectedError != "" { + require.EqualError(t, err, testCase.expectedError) + } else { + require.NoError(t, err) + } assert.DeepEqual(t, testCase.expectedResult, header) }) } @@ -75,7 +78,7 @@ func 
TestStreamClientReadResultEntry(t *testing.T) { name string input []byte expectedResult *types.ResultEntry - expectedError error + expectedError string } testCases := []testCase{ { @@ -87,7 +90,7 @@ func TestStreamClientReadResultEntry(t *testing.T) { ErrorNum: 0, ErrorStr: []byte{}, }, - expectedError: nil, + expectedError: "", }, { name: "Happy path - error str length", @@ -98,24 +101,24 @@ func TestStreamClientReadResultEntry(t *testing.T) { ErrorNum: 0, ErrorStr: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, }, - expectedError: nil, + expectedError: "", }, { name: "Invalid byte array length", input: []byte{20, 21, 22, 23, 24, 20}, expectedResult: nil, - expectedError: errors.New("failed to read main result bytes reading from server: unexpected EOF"), + expectedError: "read main result bytes: readBuffer: socket error: io.ReadFull: unexpected EOF", }, { name: "Invalid error length", input: []byte{0, 0, 0, 12, 0, 0, 0, 0, 20, 21}, expectedResult: nil, - expectedError: errors.New("failed to read result errStr bytes reading from server: unexpected EOF"), + expectedError: "read result errStr bytes: readBuffer: socket error: io.ReadFull: unexpected EOF", }, } for _, testCase := range testCases { - c := NewClient(context.Background(), "", 0, 0, 0) + c := NewClient(context.Background(), "", 0, 2*time.Second, 0) server, conn := net.Pipe() defer server.Close() defer c.Stop() @@ -128,7 +131,11 @@ func TestStreamClientReadResultEntry(t *testing.T) { }() result, err := c.readResultEntry([]byte{1}) - require.Equal(t, testCase.expectedError, err) + if testCase.expectedError != "" { + require.EqualError(t, err, testCase.expectedError) + } else { + require.NoError(t, err) + } assert.DeepEqual(t, testCase.expectedResult, result) }) } @@ -139,7 +146,7 @@ func TestStreamClientReadFileEntry(t *testing.T) { name string input []byte expectedResult *types.FileEntry - expectedError error + expectedError string } testCases := []testCase{ { @@ -152,7 +159,7 @@ func TestStreamClientReadFileEntry(t *testing.T) { EntryNum: 45, Data: []byte{0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0, 64}, }, - expectedError: nil, + expectedError: "", }, { name: "Happy path - no data", input: []byte{2, 0, 0, 0, 17, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 45}, @@ -163,28 +170,28 @@ func TestStreamClientReadFileEntry(t *testing.T) { EntryNum: 45, Data: []byte{}, }, - expectedError: nil, + expectedError: "", }, { name: "Invalid packet type", input: []byte{5, 0, 0, 0, 17, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 45}, expectedResult: nil, - expectedError: errors.New("expected data packet type 2 or 254 and received 5"), + expectedError: "expected data packet type 2 or 254 and received 5", }, { name: "Invalid byte array length", input: []byte{2, 21, 22, 23, 24, 20}, expectedResult: nil, - expectedError: errors.New("error reading file bytes: reading from server: unexpected EOF"), + expectedError: "reading file bytes: readBuffer: socket error: io.ReadFull: unexpected EOF", }, { name: "Invalid data length", input: []byte{2, 0, 0, 0, 31, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 45, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0, 64}, expectedResult: nil, - expectedError: errors.New("error reading file data bytes: reading from server: unexpected EOF"), + expectedError: "reading file data bytes: readBuffer: socket error: io.ReadFull: unexpected EOF", }, } for _, testCase := range testCases { - c := NewClient(context.Background(), "", 0, 0, 0) + c := NewClient(context.Background(), "", 0, 2*time.Second, 0) server, conn := net.Pipe() defer c.Stop() defer server.Close() @@ -197,16 +204,25 @@ func 
TestStreamClientReadFileEntry(t *testing.T) { }() result, err := c.NextFileEntry() - require.Equal(t, testCase.expectedError, err) + if testCase.expectedError != "" { + require.EqualError(t, err, testCase.expectedError) + } else { + require.NoError(t, err) + } assert.DeepEqual(t, testCase.expectedResult, result) }) } } func TestStreamClientReadParsedProto(t *testing.T) { - c := NewClient(context.Background(), "", 0, 0, 0) + c := NewClient(context.Background(), "", 0, 2*time.Second, 0) serverConn, clientConn := net.Pipe() c.conn = clientConn + c.checkTimeout = 1 * time.Second + + c.header = &types.HeaderEntry{ + TotalEntries: 3, + } defer func() { serverConn.Close() clientConn.Close() @@ -253,7 +269,7 @@ func TestStreamClientReadParsedProto(t *testing.T) { close(errCh) }() - parsedEntry, err := ReadParsedProto(c) + parsedEntry, entryNum, err := ReadParsedProto(c) require.NoError(t, err) serverErr := <-errCh require.NoError(t, serverErr) @@ -261,6 +277,7 @@ func TestStreamClientReadParsedProto(t *testing.T) { expectedL2Block := types.ConvertToFullL2Block(l2Block) expectedL2Block.L2Txs = append(expectedL2Block.L2Txs, *expectedL2Tx) require.Equal(t, expectedL2Block, parsedEntry) + require.Equal(t, uint64(3), entryNum) } func TestStreamClientGetLatestL2Block(t *testing.T) { @@ -270,9 +287,9 @@ func TestStreamClientGetLatestL2Block(t *testing.T) { clientConn.Close() }() - c := NewClient(context.Background(), "", 0, 0, 0) + c := NewClient(context.Background(), "", 0, 2*time.Second, 0) c.conn = clientConn - + c.checkTimeout = 1 * time.Second expectedL2Block, _ := createL2BlockAndTransactions(t, 5, 0) l2BlockProto := &types.L2BlockProto{L2Block: expectedL2Block} l2BlockRaw, err := l2BlockProto.Marshal() @@ -383,9 +400,12 @@ func TestStreamClientGetL2BlockByNumber(t *testing.T) { clientConn.Close() }() - c := NewClient(context.Background(), "", 0, 0, 0) + c := NewClient(context.Background(), "", 0, 2*time.Second, 0) + c.header = &types.HeaderEntry{ + TotalEntries: 4, + } c.conn = clientConn - + c.checkTimeout = 1 * time.Second bookmark := types.NewBookmarkProto(blockNum, datastream.BookmarkType_BOOKMARK_TYPE_L2_BLOCK) bookmarkRaw, err := bookmark.Marshal() require.NoError(t, err) @@ -472,13 +492,15 @@ func TestStreamClientGetL2BlockByNumber(t *testing.T) { go createServerResponses(t, serverConn, bookmarkRaw, l2BlockRaw, l2TxsRaw, l2BlockEndRaw, errCh) - l2Block, errCode, err := c.GetL2BlockByNumber(blockNum) + l2Block, err := c.GetL2BlockByNumber(blockNum) require.NoError(t, err) - require.Equal(t, types.CmdErrOK, errCode) - serverErr := <-errCh + var serverErr error + select { + case serverErr = <-errCh: + default: + } require.NoError(t, serverErr) - l2TxsProto := make([]types.L2TransactionProto, len(l2Txs)) for i, tx := range l2Txs { l2TxProto := types.ConvertToL2TransactionProto(tx) diff --git a/zk/datastream/client/utils.go b/zk/datastream/client/utils.go index e96e623e202..e27323865fe 100644 --- a/zk/datastream/client/utils.go +++ b/zk/datastream/client/utils.go @@ -8,32 +8,33 @@ import ( "net" ) -// writeFullUint64ToConn writes a uint64 to a connection +var ( + ErrSocket = errors.New("socket error") + ErrNilConnection = errors.New("nil connection") +) + func writeFullUint64ToConn(conn net.Conn, value uint64) error { buffer := make([]byte, 8) binary.BigEndian.PutUint64(buffer, value) if conn == nil { - return errors.New("error nil connection") + return fmt.Errorf("%w: %w", ErrSocket, ErrNilConnection) } - _, err := conn.Write(buffer) - if err != nil { - return fmt.Errorf("%s Error sending to 
server: %v", conn.RemoteAddr().String(), err) + if _, err := conn.Write(buffer); err != nil { + return fmt.Errorf("%w: conn.Write: %v", ErrSocket, err) } return nil } -// writeFullUint64ToConn writes a uint64 to a connection func writeBytesToConn(conn net.Conn, value []byte) error { if conn == nil { - return errors.New("error nil connection") + return fmt.Errorf("%w: %w", ErrSocket, ErrNilConnection) } - _, err := conn.Write(value) - if err != nil { - return fmt.Errorf("%s Error sending to server: %v", conn.RemoteAddr().String(), err) + if _, err := conn.Write(value); err != nil { + return fmt.Errorf("%w: conn.Write: %w", ErrSocket, err) } return nil @@ -45,12 +46,11 @@ func writeFullUint32ToConn(conn net.Conn, value uint32) error { binary.BigEndian.PutUint32(buffer, value) if conn == nil { - return errors.New("error nil connection") + return fmt.Errorf("%w: %w", ErrSocket, ErrNilConnection) } - _, err := conn.Write(buffer) - if err != nil { - return fmt.Errorf("%s Error sending to server: %v", conn.RemoteAddr().String(), err) + if _, err := conn.Write(buffer); err != nil { + return fmt.Errorf("%w: conn.Write: %w", ErrSocket, err) } return nil @@ -61,7 +61,7 @@ func readBuffer(conn net.Conn, n uint32) ([]byte, error) { buffer := make([]byte, n) rbc, err := io.ReadFull(conn, buffer) if err != nil { - return []byte{}, parseIoReadError(err) + return []byte{}, fmt.Errorf("%w: io.ReadFull: %w", ErrSocket, err) } if uint32(rbc) != n { @@ -70,12 +70,3 @@ func readBuffer(conn net.Conn, n uint32) ([]byte, error) { return buffer, nil } - -// parseIoReadError parses an error returned from io.ReadFull and returns a more concrete one -func parseIoReadError(err error) error { - if err == io.EOF { - return errors.New("server close connection") - } else { - return fmt.Errorf("reading from server: %v", err) - } -} diff --git a/zk/datastream/client/utils_test.go b/zk/datastream/client/utils_test.go index 3047bc25557..89ec1613137 100644 --- a/zk/datastream/client/utils_test.go +++ b/zk/datastream/client/utils_test.go @@ -1,8 +1,6 @@ package client import ( - "errors" - "fmt" "io" "net" "testing" @@ -27,10 +25,10 @@ func Test_WriteFullUint64ToConn(t *testing.T) { expectedError: nil, }, { - name: "happy path", + name: "nil connection", input: 10, shouldOpenConn: false, - expectedError: errors.New("error nil connection"), + expectedError: ErrNilConnection, }, } @@ -48,8 +46,9 @@ func Test_WriteFullUint64ToConn(t *testing.T) { err = writeFullUint64ToConn(client, testCase.input) } else { err = writeFullUint64ToConn(nil, testCase.input) + require.ErrorIs(t, err, ErrSocket) } - require.Equal(t, testCase.expectedError, err) + require.ErrorIs(t, err, testCase.expectedError) }) } } @@ -70,10 +69,10 @@ func Test_WriteFullUint32ToConn(t *testing.T) { expectedError: nil, }, { - name: "happy path", + name: "nil connection", input: 10, shouldOpenConn: false, - expectedError: errors.New("error nil connection"), + expectedError: ErrNilConnection, }, } @@ -91,8 +90,53 @@ func Test_WriteFullUint32ToConn(t *testing.T) { err = writeFullUint32ToConn(client, testCase.input) } else { err = writeFullUint32ToConn(nil, testCase.input) + require.ErrorIs(t, err, ErrSocket) + } + require.ErrorIs(t, err, testCase.expectedError) + }) + } +} + +func Test_WriteBytesToConn(t *testing.T) { + type testCase struct { + name string + input []byte + shouldOpenConn bool + expectedError error + } + + testCases := []testCase{ + { + name: "happy path", + input: []byte{1, 2, 3, 4, 5}, + shouldOpenConn: true, + expectedError: nil, + }, + { + name: "nil 
connection", + input: []byte{1, 2, 3, 4, 5}, + shouldOpenConn: false, + expectedError: ErrNilConnection, + }, + } + + for _, testCase := range testCases { + server, client := net.Pipe() + defer server.Close() + t.Run(testCase.name, func(t *testing.T) { + go func() { + buffer := make([]byte, len(testCase.input)) + io.ReadFull(server, buffer) + }() + + var err error + if testCase.shouldOpenConn { + err = writeBytesToConn(client, testCase.input) + } else { + err = writeBytesToConn(nil, testCase.input) + require.ErrorIs(t, err, ErrSocket) } - require.Equal(t, testCase.expectedError, err) + require.ErrorIs(t, err, testCase.expectedError) }) } } @@ -122,7 +166,7 @@ func Test_ReadBuffer(t *testing.T) { name: "test error", input: 6, expectedResult: []byte{}, - expectedError: fmt.Errorf("reading from server: %v", io.ErrUnexpectedEOF), + expectedError: io.ErrUnexpectedEOF, }, } @@ -136,36 +180,11 @@ func Test_ReadBuffer(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { result, err := readBuffer(client, testCase.input) - require.Equal(t, testCase.expectedError, err) + require.ErrorIs(t, err, testCase.expectedError) + if testCase.expectedError != nil { + require.ErrorIs(t, err, ErrSocket) + } assert.DeepEqual(t, testCase.expectedResult, result) }) } } - -func Test_ParseIoReadError(t *testing.T) { - type testCase struct { - name string - input error - expectedError error - } - - testCases := []testCase{ - { - name: "io error", - input: io.EOF, - expectedError: errors.New("server close connection"), - }, - { - name: "test error", - input: errors.New("test error"), - expectedError: errors.New("reading from server: test error"), - }, - } - - for _, testCase := range testCases { - t.Run(testCase.name, func(t *testing.T) { - result := parseIoReadError(testCase.input) - require.Equal(t, testCase.expectedError, result) - }) - } -} diff --git a/zk/datastream/server/data_stream_server.go b/zk/datastream/server/data_stream_server.go index 9c559d2da3c..93eb3c6c27c 100644 --- a/zk/datastream/server/data_stream_server.go +++ b/zk/datastream/server/data_stream_server.go @@ -624,6 +624,10 @@ func newDataStreamServerIterator(stream *datastreamer.StreamServer, start uint64 } } +func (it *dataStreamServerIterator) GetEntryNumberLimit() uint64 { + return it.header + 1 +} + func (it *dataStreamServerIterator) NextFileEntry() (entry *types.FileEntry, err error) { if it.curEntryNum > it.header { return nil, nil @@ -669,7 +673,7 @@ func ReadBatches(iterator client.FileEntryIterator, start uint64, end uint64) ([ LOOP_ENTRIES: for { - parsedProto, err := client.ReadParsedProto(iterator) + parsedProto, _, err := client.ReadParsedProto(iterator) if err != nil { return nil, err } diff --git a/zk/datastream/types/result.go b/zk/datastream/types/result.go index 1e6652cbb9d..6414acdf057 100644 --- a/zk/datastream/types/result.go +++ b/zk/datastream/types/result.go @@ -20,6 +20,14 @@ const ( CmdErrInvalidCommand = 9 // CmdErrInvalidCommand for invalid/unknown command error ) +var ( + ErrAlreadyStarted = errors.New("client already started") + ErrAlreadyStopped = errors.New("client already stopped") + ErrBadFromEntry = errors.New("invalid starting entry number") + ErrBadFromBookmark = errors.New("invalid starting bookmark") + ErrInvalidCommand = errors.New("invalid/unknown command") +) + type ResultEntry struct { PacketType uint8 // 0xff:Result Length uint32 diff --git a/zk/stages/stage_batches.go b/zk/stages/stage_batches.go index 2c0644e44c2..ed2b8291fa4 100644 --- a/zk/stages/stage_batches.go +++ b/zk/stages/stage_batches.go 
@@ -26,7 +26,6 @@ import (
 	"github.com/ledgerwatch/erigon/core/rawdb"
 	"github.com/ledgerwatch/erigon/core/state"
 	"github.com/ledgerwatch/erigon/eth/ethconfig"
-	"github.com/ledgerwatch/erigon/zk/datastream/client"
 
 	"github.com/ledgerwatch/log/v3"
 )
@@ -59,13 +58,13 @@ type HermezDb interface {
 }
 
 type DatastreamClient interface {
+	RenewEntryChannel()
 	ReadAllEntriesToChannel() error
+	StopReadingToChannel()
 	GetEntryChan() *chan interface{}
-	GetL2BlockByNumber(blockNum uint64) (*types.FullL2Block, int, error)
+	GetL2BlockByNumber(blockNum uint64) (*types.FullL2Block, error)
 	GetLatestL2Block() (*types.FullL2Block, error)
-	GetStreamingAtomic() *atomic.Bool
 	GetProgressAtomic() *atomic.Uint64
-	EnsureConnected() (bool, error)
 	Start() error
 	Stop()
 }
@@ -73,7 +72,6 @@ type DatastreamClient interface {
 type DatastreamReadRunner interface {
 	StartRead()
 	StopRead()
-	RestartReadFromBlock(fromBlock uint64)
 }
 
 type dsClientCreatorHandler func(context.Context, *ethconfig.Zk, uint64) (DatastreamClient, error)
@@ -153,6 +151,15 @@ func SpawnStageBatches(
 
 	//// BISECT ////
 	if cfg.zkCfg.DebugLimit > 0 && stageProgressBlockNo > cfg.zkCfg.DebugLimit {
+		log.Info(fmt.Sprintf("[%s] Debug limit reached", logPrefix), "stageProgressBlockNo", stageProgressBlockNo, "debugLimit", cfg.zkCfg.DebugLimit)
+		time.Sleep(2 * time.Second)
+		return nil
+	}
+
+	// the sync limit block itself is not included, so we sync up to limit-1
+	if cfg.zkCfg.SyncLimit > 0 && stageProgressBlockNo+1 >= cfg.zkCfg.SyncLimit {
+		log.Info(fmt.Sprintf("[%s] Sync limit reached", logPrefix), "stageProgressBlockNo", stageProgressBlockNo, "syncLimit", cfg.zkCfg.SyncLimit)
+		time.Sleep(2 * time.Second)
 		return nil
 	}
 
@@ -169,26 +176,55 @@ func SpawnStageBatches(
 		return err
 	}
 
-	dsQueryClient, err := newStreamClient(ctx, cfg, latestForkId)
+	dsQueryClient, stopDsClient, err := newStreamClient(ctx, cfg, latestForkId)
 	if err != nil {
 		log.Warn(fmt.Sprintf("[%s] %s", logPrefix, err))
 		return err
 	}
-	defer dsQueryClient.Stop()
+	defer stopDsClient()
 
-	highestDSL2Block, err := dsQueryClient.GetLatestL2Block()
-	if err != nil {
-		return fmt.Errorf("failed to retrieve the latest datastream l2 block: %w", err)
-	}
+	var highestDSL2Block *types.FullL2Block
+	newBlockCheckStartTime := time.Now()
+	for {
+		select {
+		case <-ctx.Done():
+			return nil
+		default:
+		}
+		highestDSL2Block, err = dsQueryClient.GetLatestL2Block()
+		if err != nil {
+			// if we return error, stage will replay and block all other stages
+			log.Warn(fmt.Sprintf("[%s] Failed to get latest l2 block from datastream: %v", logPrefix, err))
+			return nil
+		}
 
-	if highestDSL2Block.L2BlockNumber < stageProgressBlockNo {
-		stageProgressBlockNo = highestDSL2Block.L2BlockNumber
+		// a lower block should also break the loop because that means the datastream was unwound
+		// thus we should unwind as well and continue from there
+		if highestDSL2Block.L2BlockNumber != stageProgressBlockNo {
+			log.Info(fmt.Sprintf("[%s] Highest block in datastream", logPrefix), "datastreamBlock", highestDSL2Block.L2BlockNumber, "stageProgressBlockNo", stageProgressBlockNo)
+			break
+		}
+		if time.Since(newBlockCheckStartTime) > 10*time.Second {
+			log.Info(fmt.Sprintf("[%s] Waiting for at least one new block in datastream", logPrefix), "datastreamBlock", highestDSL2Block.L2BlockNumber, "last processed block", stageProgressBlockNo)
+			newBlockCheckStartTime = time.Now()
+		}
+		time.Sleep(1 * time.Second)
 	}
 
 	log.Debug(fmt.Sprintf("[%s] Highest block in db and datastream", logPrefix), "datastreamBlock",
stageProgressBlockNo) + unwindFn := func(unwindBlock uint64) (uint64, error) { + return rollback(logPrefix, eriDb, hermezDb, dsQueryClient, unwindBlock, tx, u) + } + if highestDSL2Block.L2BlockNumber < stageProgressBlockNo { + log.Info(fmt.Sprintf("[%s] Datastream behind, unwinding", logPrefix)) + if _, err := unwindFn(highestDSL2Block.L2BlockNumber); err != nil { + return err + } + return nil + } - dsClientProgress := cfg.dsClient.GetProgressAtomic() - dsClientProgress.Store(stageProgressBlockNo) + dsClientProgress := dsQueryClient.GetProgressAtomic() + dsClientProgress.Swap(stageProgressBlockNo) // start a routine to print blocks written progress progressChan, stopProgressPrinter := zk.ProgressPrinterWithoutTotal(fmt.Sprintf("[%s] Downloaded blocks from datastream progress", logPrefix)) @@ -212,25 +248,25 @@ func SpawnStageBatches( log.Info(fmt.Sprintf("[%s] Reading blocks from the datastream.", logPrefix)) - unwindFn := func(unwindBlock uint64) error { - return rollback(logPrefix, eriDb, hermezDb, dsQueryClient, unwindBlock, tx, u) + lastProcessedBlockHash, err := eriDb.ReadCanonicalHash(stageProgressBlockNo) + if err != nil { + return fmt.Errorf("failed to read canonical hash for block %d: %w", stageProgressBlockNo, err) } - batchProcessor, err := NewBatchesProcessor(ctx, logPrefix, tx, hermezDb, eriDb, cfg.zkCfg.SyncLimit, cfg.zkCfg.DebugLimit, cfg.zkCfg.DebugStepAfter, cfg.zkCfg.DebugStep, stageProgressBlockNo, stageProgressBatchNo, dsQueryClient, progressChan, cfg.chainConfig, cfg.miningConfig, unwindFn) + batchProcessor, err := NewBatchesProcessor(ctx, logPrefix, tx, hermezDb, eriDb, cfg.zkCfg.SyncLimit, cfg.zkCfg.DebugLimit, cfg.zkCfg.DebugStepAfter, cfg.zkCfg.DebugStep, stageProgressBlockNo, stageProgressBatchNo, lastProcessedBlockHash, dsQueryClient, progressChan, cfg.chainConfig, cfg.miningConfig, unwindFn) if err != nil { return err } // start routine to download blocks and push them in a channel - dsClientRunner := NewDatastreamClientRunner(cfg.dsClient, logPrefix) + dsClientRunner := NewDatastreamClientRunner(dsQueryClient, logPrefix) dsClientRunner.StartRead() defer dsClientRunner.StopRead() - entryChan := cfg.dsClient.GetEntryChan() + entryChan := dsQueryClient.GetEntryChan() - prevAmountBlocksWritten, restartDatastreamBlock := uint64(0), uint64(0) + prevAmountBlocksWritten := uint64(0) endLoop := false - unwound := false for { // get batch start and use to update forkid @@ -240,40 +276,19 @@ func SpawnStageBatches( // if both download routine stopped and channel empty - stop loop select { case entry := <-*entryChan: - if restartDatastreamBlock, endLoop, unwound, err = batchProcessor.ProcessEntry(entry); err != nil { + if endLoop, err = batchProcessor.ProcessEntry(entry); err != nil { + // if we triggered an unwind somewhere we need to return from the stage + if err == ErrorTriggeredUnwind { + return nil + } return err } dsClientProgress.Store(batchProcessor.LastBlockHeight()) - - if restartDatastreamBlock > 0 { - if err = dsClientRunner.RestartReadFromBlock(restartDatastreamBlock); err != nil { - return err - } - } - - // if we triggered an unwind somewhere we need to return from the stage - if unwound { - return nil - } case <-ctx.Done(): log.Warn(fmt.Sprintf("[%s] Context done", logPrefix)) endLoop = true default: - time.Sleep(1 * time.Second) - } - - // if ds end reached check again for new blocks in the stream - // if there are too many new blocks get them as well before ending stage - if batchProcessor.LastBlockHeight() >= highestDSL2Block.L2BlockNumber { - 
newLatestDSL2Block, err := dsQueryClient.GetLatestL2Block() - if err != nil { - return fmt.Errorf("failed to retrieve the latest datastream l2 block: %w", err) - } - if newLatestDSL2Block.L2BlockNumber > highestDSL2Block.L2BlockNumber+NEW_BLOCKS_ON_DS_LIMIT { - highestDSL2Block = newLatestDSL2Block - } else { - endLoop = true - } + time.Sleep(10 * time.Millisecond) } if endLoop { @@ -606,25 +621,33 @@ func PruneBatchesStage(s *stagedsync.PruneState, tx kv.RwTx, cfg BatchesCfg, ctx // 1. queries the latest common ancestor for datastream and db, // 2. resolves the unwind block (as the latest block in the previous batch, comparing to the found ancestor block) // 3. triggers the unwinding -func rollback(logPrefix string, eriDb *erigon_db.ErigonDb, hermezDb *hermez_db.HermezDb, - dsQueryClient DatastreamClient, latestDSBlockNum uint64, tx kv.RwTx, u stagedsync.Unwinder) error { +func rollback( + logPrefix string, + eriDb *erigon_db.ErigonDb, + hermezDb *hermez_db.HermezDb, + dsQueryClient DatastreamClient, + latestDSBlockNum uint64, + tx kv.RwTx, + u stagedsync.Unwinder, +) (uint64, error) { ancestorBlockNum, ancestorBlockHash, err := findCommonAncestor(eriDb, hermezDb, dsQueryClient, latestDSBlockNum) if err != nil { - return err + return 0, err } log.Debug(fmt.Sprintf("[%s] The common ancestor for datastream and db is block %d (%s)", logPrefix, ancestorBlockNum, ancestorBlockHash)) unwindBlockNum, unwindBlockHash, batchNum, err := getUnwindPoint(eriDb, hermezDb, ancestorBlockNum, ancestorBlockHash) if err != nil { - return err + return 0, err } if err = stages.SaveStageProgress(tx, stages.HighestSeenBatchNumber, batchNum-1); err != nil { - return err + return 0, err } log.Warn(fmt.Sprintf("[%s] Unwinding to block %d (%s)", logPrefix, unwindBlockNum, unwindBlockHash)) + u.UnwindTo(unwindBlockNum, stagedsync.BadBlock(unwindBlockHash, fmt.Errorf("unwind to block %d", unwindBlockNum))) - return nil + return unwindBlockNum, nil } // findCommonAncestor searches the latest common ancestor block number and hash between the data stream and the local db. 
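As context for the next hunk (illustrative only, not part of the patch): findCommonAncestor bisects over block numbers, comparing the datastream block against the canonical hash and batch number in the local DB at the midpoint. A simplified standalone sketch of that search, with a hypothetical sameBlock predicate standing in for the datastream/DB comparison and a hypothetical function name:

// latestCommonAncestor returns the highest block number in [lo, hi] for which
// sameBlock reports true, assuming agreement is monotone (the two views match
// up to some height and diverge afterwards). sameBlock is a stand-in for
// "datastream block hash == canonical DB hash".
func latestCommonAncestor(lo, hi uint64, sameBlock func(num uint64) (bool, error)) (uint64, error) {
	var ancestor uint64
	for lo <= hi {
		mid := (lo + hi) / 2
		ok, err := sameBlock(mid)
		if err != nil {
			return 0, err
		}
		if ok {
			// the two views agree at mid, so the ancestor is at least mid
			ancestor = mid
			lo = mid + 1
		} else {
			if mid == 0 {
				break
			}
			// they diverge at mid, so the ancestor must be below mid
			hi = mid - 1
		}
	}
	return ancestor, nil
}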
@@ -651,21 +674,21 @@ func findCommonAncestor( } midBlockNum := (startBlockNum + endBlockNum) / 2 - midBlockDataStream, errCode, err := dsClient.GetL2BlockByNumber(midBlockNum) + midBlockDataStream, err := dsClient.GetL2BlockByNumber(midBlockNum) if err != nil && // the required block might not be in the data stream, so ignore that error - errCode != types.CmdErrBadFromBookmark { - return 0, emptyHash, err + !errors.Is(err, types.ErrBadFromBookmark) { + return 0, emptyHash, fmt.Errorf("GetL2BlockByNumber: failed to get l2 block %d from datastream: %w", midBlockNum, err) } midBlockDbHash, err := db.ReadCanonicalHash(midBlockNum) if err != nil { - return 0, emptyHash, err + return 0, emptyHash, fmt.Errorf("ReadCanonicalHash: failed to get canonical hash for block %d: %w", midBlockNum, err) } dbBatchNum, err := hermezDb.GetBatchNoByL2Block(midBlockNum) if err != nil { - return 0, emptyHash, err + return 0, emptyHash, fmt.Errorf("GetBatchNoByL2Block: failed to get batch number for block %d: %w", midBlockNum, err) } if midBlockDataStream != nil && @@ -701,37 +724,34 @@ func getUnwindPoint(eriDb erigon_db.ReadOnlyErigonDb, hermezDb state.ReadOnlyHer unwindBlockNum, _, err := hermezDb.GetHighestBlockInBatch(batchNum - 1) if err != nil { - return 0, emptyHash, 0, err + return 0, emptyHash, 0, fmt.Errorf("GetHighestBlockInBatch: batch %d: %w", batchNum-1, err) } unwindBlockHash, err := eriDb.ReadCanonicalHash(unwindBlockNum) if err != nil { - return 0, emptyHash, 0, err + return 0, emptyHash, 0, fmt.Errorf("ReadCanonicalHash: block %d: %w", unwindBlockNum, err) } return unwindBlockNum, unwindBlockHash, batchNum, nil } // newStreamClient instantiates new datastreamer client and starts it. -func newStreamClient(ctx context.Context, cfg BatchesCfg, latestForkId uint64) (DatastreamClient, error) { - var ( - dsClient DatastreamClient - err error - ) - +func newStreamClient(ctx context.Context, cfg BatchesCfg, latestForkId uint64) (dsClient DatastreamClient, stopFn func(), err error) { if cfg.dsQueryClientCreator != nil { dsClient, err = cfg.dsQueryClientCreator(ctx, cfg.zkCfg, latestForkId) if err != nil { - return nil, fmt.Errorf("failed to create a datastream client. Reason: %w", err) + return nil, nil, fmt.Errorf("dsQueryClientCreator: %w", err) + } + if err := dsClient.Start(); err != nil { + return nil, nil, fmt.Errorf("dsClient.Start: %w", err) + } + stopFn = func() { + dsClient.Stop() } } else { - zkCfg := cfg.zkCfg - dsClient = client.NewClient(ctx, zkCfg.L2DataStreamerUrl, zkCfg.DatastreamVersion, zkCfg.L2DataStreamerTimeout, uint16(latestForkId)) - } - - if err := dsClient.Start(); err != nil { - return nil, fmt.Errorf("failed to start a datastream client. 
Reason: %w", err) + dsClient = cfg.dsClient + stopFn = func() {} } - return dsClient, nil + return dsClient, stopFn, nil } diff --git a/zk/stages/stage_batches_datastream.go b/zk/stages/stage_batches_datastream.go index fefd7c17188..a1f9926e067 100644 --- a/zk/stages/stage_batches_datastream.go +++ b/zk/stages/stage_batches_datastream.go @@ -4,7 +4,6 @@ import ( "fmt" "math/rand" "sync/atomic" - "time" "github.com/ledgerwatch/log/v3" ) @@ -24,10 +23,13 @@ func NewDatastreamClientRunner(dsClient DatastreamClient, logPrefix string) *Dat } func (r *DatastreamClientRunner) StartRead() error { + r.dsClient.RenewEntryChannel() if r.isReading.Load() { return fmt.Errorf("tried starting datastream client runner thread while another is running") } + r.stopRunner.Store(false) + go func() { routineId := rand.Intn(1000000) @@ -37,27 +39,8 @@ func (r *DatastreamClientRunner) StartRead() error { r.isReading.Store(true) defer r.isReading.Store(false) - for { - if r.stopRunner.Load() { - log.Info(fmt.Sprintf("[%s] Downloading L2Blocks routine stopped intentionally", r.logPrefix)) - break - } - - // start routine to download blocks and push them in a channel - if !r.dsClient.GetStreamingAtomic().Load() { - log.Info(fmt.Sprintf("[%s] Starting stream", r.logPrefix)) - // this will download all blocks from datastream and push them in a channel - // if no error, break, else continue trying to get them - // Create bookmark - - if err := r.connectDatastream(); err != nil { - log.Error(fmt.Sprintf("[%s] Error connecting to datastream", r.logPrefix), "error", err) - } - - if err := r.dsClient.ReadAllEntriesToChannel(); err != nil { - log.Error(fmt.Sprintf("[%s] Error downloading blocks from datastream", r.logPrefix), "error", err) - } - } + if err := r.dsClient.ReadAllEntriesToChannel(); err != nil { + log.Warn(fmt.Sprintf("[%s] Error downloading blocks from datastream", r.logPrefix), "error", err) } }() @@ -65,44 +48,6 @@ func (r *DatastreamClientRunner) StartRead() error { } func (r *DatastreamClientRunner) StopRead() { - r.stopRunner.Store(true) -} - -func (r *DatastreamClientRunner) RestartReadFromBlock(fromBlock uint64) error { - r.StopRead() - - //wait for the old routine to be finished before continuing - counter := 0 - for { - if !r.isReading.Load() { - break - } - counter++ - if counter > 100 { - return fmt.Errorf("failed to stop reader routine correctly") - } - time.Sleep(100 * time.Millisecond) - } - - // set new block - r.dsClient.GetProgressAtomic().Store(fromBlock) - - log.Info(fmt.Sprintf("[%s] Restarting datastream from block %d", r.logPrefix, fromBlock)) - - return r.StartRead() -} - -func (r *DatastreamClientRunner) connectDatastream() (err error) { - var connected bool - for i := 0; i < 5; i++ { - if connected, err = r.dsClient.EnsureConnected(); err != nil { - log.Error(fmt.Sprintf("[%s] Error connecting to datastream", r.logPrefix), "error", err) - continue - } - if connected { - return nil - } - } - - return fmt.Errorf("failed to connect to datastream") + r.stopRunner.Swap(true) + r.dsClient.StopReadingToChannel() } diff --git a/zk/stages/stage_batches_processor.go b/zk/stages/stage_batches_processor.go index 404812f9320..0cb20853111 100644 --- a/zk/stages/stage_batches_processor.go +++ b/zk/stages/stage_batches_processor.go @@ -6,7 +6,6 @@ import ( "fmt" "math/big" "sync/atomic" - "time" "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" @@ -21,6 +20,11 @@ import ( "github.com/ledgerwatch/log/v3" ) +var ( + ErrorTriggeredUnwind = errors.New("triggered 
unwind") + ErrorSkippedBlock = errors.New("skipped block") +) + type ProcessorErigonDb interface { WriteHeader(batchNo *big.Int, blockHash common.Hash, stateRoot, txHash, parentHash common.Hash, coinbase common.Address, ts, gasLimit uint64, chainConfig *chain.Config) (*ethTypes.Header, error) WriteBody(batchNo *big.Int, headerHash common.Hash, txs []ethTypes.Transaction) error @@ -34,6 +38,7 @@ type ProcessorHermezDb interface { WriteEffectiveGasPricePercentage(txHash common.Hash, effectiveGasPricePercentage uint8) error WriteStateRoot(l2BlockNumber uint64, rpcRoot common.Hash) error + GetStateRoot(l2BlockNumber uint64) (common.Hash, error) CheckGlobalExitRootWritten(ger common.Hash) (bool, error) WriteBlockGlobalExitRoot(l2BlockNo uint64, ger common.Hash) error @@ -51,7 +56,7 @@ type ProcessorHermezDb interface { } type DsQueryClient interface { - GetL2BlockByNumber(blockNum uint64) (*types.FullL2Block, int, error) + GetL2BlockByNumber(blockNum uint64) (*types.FullL2Block, error) GetProgressAtomic() *atomic.Uint64 } @@ -66,20 +71,21 @@ type BatchesProcessor struct { debugStepAfter, debugStep, stageProgressBlockNo, - lastForkId, highestHashableL2BlockNo, - lastBlockHeight, + lastForkId uint64 + highestL1InfoTreeIndex uint32 + dsQueryClient DsQueryClient + progressChan chan uint64 + unwindFn func(uint64) (uint64, error) + highestSeenBatchNo, + lastBlockHeight, blocksWritten, highestVerifiedBatch uint64 - highestL1InfoTreeIndex uint32 lastBlockRoot, lastBlockHash common.Hash - dsQueryClient DsQueryClient - progressChan chan uint64 - unwindFn func(uint64) error - chainConfig *chain.Config - miningConfig *params.MiningConfig + chainConfig *chain.Config + miningConfig *params.MiningConfig } func NewBatchesProcessor( @@ -89,11 +95,12 @@ func NewBatchesProcessor( hermezDb ProcessorHermezDb, eriDb ProcessorErigonDb, syncBlockLimit, debugBlockLimit, debugStepAfter, debugStep, stageProgressBlockNo, stageProgressBatchNo uint64, + lastProcessedBlockHash common.Hash, dsQueryClient DsQueryClient, progressChan chan uint64, chainConfig *chain.Config, miningConfig *params.MiningConfig, - unwindFn func(uint64) error, + unwindFn func(uint64) (uint64, error), ) (*BatchesProcessor, error) { highestVerifiedBatch, err := stages.GetStageProgress(tx, stages.L1VerificationsBatchNo) if err != nil { @@ -121,7 +128,7 @@ func NewBatchesProcessor( highestVerifiedBatch: highestVerifiedBatch, dsQueryClient: dsQueryClient, progressChan: progressChan, - lastBlockHash: emptyHash, + lastBlockHash: lastProcessedBlockHash, lastBlockRoot: emptyHash, lastForkId: lastForkId, unwindFn: unwindFn, @@ -130,18 +137,20 @@ func NewBatchesProcessor( }, nil } -func (p *BatchesProcessor) ProcessEntry(entry interface{}) (rollbackBlock uint64, endLoop bool, unwound bool, err error) { +func (p *BatchesProcessor) ProcessEntry(entry interface{}) (endLoop bool, err error) { switch entry := entry.(type) { case *types.BatchStart: - return 0, false, false, p.processBatchStartEntry(entry) + return false, p.processBatchStartEntry(entry) case *types.BatchEnd: - return 0, false, false, p.processBatchEndEntry(entry) + return false, p.processBatchEndEntry(entry) case *types.FullL2Block: return p.processFullBlock(entry) case *types.GerUpdate: - return 0, false, false, p.processGerUpdate(entry) + return false, p.processGerUpdate(entry) + case nil: // we use nil to indicate the end of stream read + return true, nil default: - return 0, false, false, fmt.Errorf("unknown entry type: %T", entry) + return false, fmt.Errorf("unknown entry type: %T", entry) } } @@ 
-153,7 +162,7 @@ func (p *BatchesProcessor) processGerUpdate(gerUpdate *types.GerUpdate) error { // NB: we won't get these post Etrog (fork id 7) if err := p.hermezDb.WriteBatchGlobalExitRoot(gerUpdate.BatchNumber, gerUpdate); err != nil { - return fmt.Errorf("write batch global exit root error: %v", err) + return fmt.Errorf("write batch global exit root error: %w", err) } return nil @@ -187,113 +196,102 @@ func (p *BatchesProcessor) processBatchStartEntry(batchStart *types.BatchStart) return nil } -func (p *BatchesProcessor) processFullBlock(blockEntry *types.FullL2Block) (restartStreamFromBlock uint64, endLoop bool, unwound bool, err error) { +func (p *BatchesProcessor) unwind(blockNum uint64) (uint64, error) { + unwindBlock, err := p.unwindFn(blockNum) + if err != nil { + return 0, err + } + + return unwindBlock, nil +} + +func (p *BatchesProcessor) processFullBlock(blockEntry *types.FullL2Block) (endLoop bool, err error) { log.Debug(fmt.Sprintf("[%s] Retrieved %d (%s) block from stream", p.logPrefix, blockEntry.L2BlockNumber, blockEntry.L2Blockhash.String())) if p.syncBlockLimit > 0 && blockEntry.L2BlockNumber >= p.syncBlockLimit { // stop the node going into a crazy loop - time.Sleep(2 * time.Second) - return 0, true, false, nil + log.Info(fmt.Sprintf("[%s] Sync block limit reached, stopping stage", p.logPrefix), "blockLimit", p.syncBlockLimit, "block", blockEntry.L2BlockNumber) + return true, nil } - // handle batch boundary changes - we do this here instead of reading the batch start channel because - // channels can be read in random orders which then creates problems in detecting fork changes during - // execution if blockEntry.BatchNumber > p.highestSeenBatchNo && p.lastForkId < blockEntry.ForkId { if blockEntry.ForkId >= uint64(chain.ImpossibleForkId) { - message := fmt.Sprintf("unsupported fork id %v received from the data stream", blockEntry.ForkId) + message := fmt.Sprintf("unsupported fork id %d received from the data stream", blockEntry.ForkId) panic(message) } if err = stages.SaveStageProgress(p.tx, stages.ForkId, blockEntry.ForkId); err != nil { - return 0, false, false, fmt.Errorf("save stage progress error: %v", err) + return false, fmt.Errorf("save stage progress error: %w", err) } p.lastForkId = blockEntry.ForkId if err = p.hermezDb.WriteForkId(blockEntry.BatchNumber, blockEntry.ForkId); err != nil { - return 0, false, false, fmt.Errorf("write fork id error: %v", err) + return false, fmt.Errorf("write fork id error: %w", err) } // NOTE (RPC): avoided use of 'writeForkIdBlockOnce' by reading instead batch by forkId, and then lowest block number in batch } // ignore genesis or a repeat of the last block if blockEntry.L2BlockNumber == 0 { - return 0, false, false, nil + return false, nil } // skip but warn on already processed blocks if blockEntry.L2BlockNumber <= p.stageProgressBlockNo { - if blockEntry.L2BlockNumber < p.stageProgressBlockNo { + dbBatchNum, err := p.hermezDb.GetBatchNoByL2Block(blockEntry.L2BlockNumber) + if err != nil { + return false, err + } + + if blockEntry.L2BlockNumber == p.stageProgressBlockNo && dbBatchNum == blockEntry.BatchNumber { // only warn if the block is very old, we expect the very latest block to be requested // when the stage is fired up for the first time log.Warn(fmt.Sprintf("[%s] Skipping block %d, already processed", p.logPrefix, blockEntry.L2BlockNumber)) + return false, nil } - dbBatchNum, err := p.hermezDb.GetBatchNoByL2Block(blockEntry.L2BlockNumber) - if err != nil { - return 0, false, false, err - } - - if 
blockEntry.BatchNumber > dbBatchNum {
-			// if the batch number is higher than the one we know about, it means that we need to trigger an unwinding of blocks
-			log.Warn(fmt.Sprintf("[%s] Batch number mismatch detected. Triggering unwind...", p.logPrefix),
-				"block", blockEntry.L2BlockNumber, "ds batch", blockEntry.BatchNumber, "db batch", dbBatchNum)
-			if err := p.unwindFn(blockEntry.L2BlockNumber); err != nil {
-				return blockEntry.L2BlockNumber, false, false, err
-			}
+		// if the block is older or the batch number is different, we need to unwind because the block has definitely changed
+		log.Warn(fmt.Sprintf("[%s] Block already processed. Triggering unwind...", p.logPrefix),
+			"block", blockEntry.L2BlockNumber, "ds batch", blockEntry.BatchNumber, "db batch", dbBatchNum)
+		if _, err := p.unwind(blockEntry.L2BlockNumber); err != nil {
+			return false, err
 		}
-		return 0, false, false, nil
+		return false, ErrorTriggeredUnwind
 	}
 
 	var dbParentBlockHash common.Hash
 	if blockEntry.L2BlockNumber > 1 {
 		dbParentBlockHash, err = p.eriDb.ReadCanonicalHash(p.lastBlockHeight)
 		if err != nil {
-			return 0, false, false, fmt.Errorf("failed to retrieve parent block hash for datastream block %d: %w",
+			return false, fmt.Errorf("failed to retrieve parent block hash for datastream block %d: %w",
 				blockEntry.L2BlockNumber, err)
 		}
 	}
 
-	dsParentBlockHash := p.lastBlockHash
-	dsBlockNumber := p.lastBlockHeight
-	if dsParentBlockHash == emptyHash {
-		parentBlockDS, _, err := p.dsQueryClient.GetL2BlockByNumber(blockEntry.L2BlockNumber - 1)
-		if err != nil {
-			return 0, false, false, err
-		}
-
-		if parentBlockDS != nil {
-			dsParentBlockHash = parentBlockDS.L2Blockhash
-			if parentBlockDS.L2BlockNumber > 0 {
-				dsBlockNumber = parentBlockDS.L2BlockNumber
-			}
-		}
-	}
-
-	if blockEntry.L2BlockNumber > 1 && dbParentBlockHash != dsParentBlockHash {
+	if p.lastBlockHeight > 0 && dbParentBlockHash != p.lastBlockHash {
 		// unwind/rollback blocks until the latest common ancestor block
 		log.Warn(fmt.Sprintf("[%s] Parent block hashes mismatch on block %d.
Triggering unwind...", p.logPrefix, blockEntry.L2BlockNumber), "db parent block hash", dbParentBlockHash, - "ds parent block number", dsBlockNumber, - "ds parent block hash", dsParentBlockHash, + "ds parent block number", p.lastBlockHeight, + "ds parent block hash", p.lastBlockHash, "ds parent block number", blockEntry.L2BlockNumber-1, ) //parent blockhash is wrong, so unwind to it, then restat stream from it to get the correct one - if err = p.unwindFn(blockEntry.L2BlockNumber - 1); err != nil { - return 0, false, false, err + if _, err := p.unwind(blockEntry.L2BlockNumber - 1); err != nil { + return false, err } - return blockEntry.L2BlockNumber - 1, false, true, nil + return false, ErrorTriggeredUnwind } - // unwind if we already have this block - could be a re-sequence event + // unwind if we already have this block if blockEntry.L2BlockNumber < p.lastBlockHeight+1 { - log.Warn(fmt.Sprintf("[%s] Skipping block %d, already processed, triggering unwind...", p.logPrefix, blockEntry.L2BlockNumber)) - if err = p.unwindFn(blockEntry.L2BlockNumber); err != nil { - return 0, false, false, err + log.Warn(fmt.Sprintf("[%s] Block %d, already processed unwinding...", p.logPrefix, blockEntry.L2BlockNumber)) + if _, err := p.unwind(blockEntry.L2BlockNumber); err != nil { + return false, err } - return blockEntry.L2BlockNumber, false, true, nil + + return false, ErrorTriggeredUnwind } // check for sequential block numbers if blockEntry.L2BlockNumber > p.lastBlockHeight+1 { - log.Warn(fmt.Sprintf("[%s] Stream skipped ahead, restarting datastream to block %d", p.logPrefix, blockEntry.L2BlockNumber)) - return p.lastBlockHeight + 1, false, false, nil + return false, ErrorSkippedBlock } // batch boundary - record the highest hashable block number (last block in last full batch) @@ -329,13 +327,13 @@ func (p *BatchesProcessor) processFullBlock(blockEntry *types.FullL2Block) (rest // first block in the loop so read the parent hash previousHash, err := p.eriDb.ReadCanonicalHash(blockEntry.L2BlockNumber - 1) if err != nil { - return 0, false, false, fmt.Errorf("failed to get genesis header: %v", err) + return false, fmt.Errorf("failed to get genesis header: %w", err) } blockEntry.ParentHash = previousHash } if err := p.writeL2Block(blockEntry); err != nil { - return 0, false, false, fmt.Errorf("writeL2Block error: %v", err) + return false, fmt.Errorf("writeL2Block error: %w", err) } p.dsQueryClient.GetProgressAtomic().Store(blockEntry.L2BlockNumber) @@ -355,7 +353,7 @@ func (p *BatchesProcessor) processFullBlock(blockEntry *types.FullL2Block) (rest if p.debugBlockLimit == 0 { endLoop = false } - return 0, endLoop, false, nil + return endLoop, nil } // writeL2Block writes L2Block to ErigonDb and HermezDb @@ -366,20 +364,20 @@ func (p *BatchesProcessor) writeL2Block(l2Block *types.FullL2Block) error { for _, transaction := range l2Block.L2Txs { ltx, _, err := txtype.DecodeTx(transaction.Encoded, transaction.EffectiveGasPricePercentage, l2Block.ForkId) if err != nil { - return fmt.Errorf("decode tx error: %v", err) + return fmt.Errorf("decode tx error: %w", err) } txs = append(txs, ltx) if err := p.hermezDb.WriteEffectiveGasPricePercentage(ltx.Hash(), transaction.EffectiveGasPricePercentage); err != nil { - return fmt.Errorf("write effective gas price percentage error: %v", err) + return fmt.Errorf("write effective gas price percentage error: %w", err) } if err := p.hermezDb.WriteStateRoot(l2Block.L2BlockNumber, transaction.IntermediateStateRoot); err != nil { - return fmt.Errorf("write rpc root error: %v", err) 
+ return fmt.Errorf("write rpc root error: %w", err) } if err := p.hermezDb.WriteIntermediateTxStateRoot(l2Block.L2BlockNumber, ltx.Hash(), transaction.IntermediateStateRoot); err != nil { - return fmt.Errorf("write rpc root error: %v", err) + return fmt.Errorf("write rpc root error: %w", err) } } txCollection := ethTypes.Transactions(txs) @@ -393,7 +391,7 @@ func (p *BatchesProcessor) writeL2Block(l2Block *types.FullL2Block) error { } if _, err := p.eriDb.WriteHeader(bn, l2Block.L2Blockhash, l2Block.StateRoot, txHash, l2Block.ParentHash, l2Block.Coinbase, uint64(l2Block.Timestamp), gasLimit, p.chainConfig); err != nil { - return fmt.Errorf("write header error: %v", err) + return fmt.Errorf("write header error: %w", err) } didStoreGer := false @@ -402,16 +400,16 @@ func (p *BatchesProcessor) writeL2Block(l2Block *types.FullL2Block) error { if l2Block.GlobalExitRoot != emptyHash { gerWritten, err := p.hermezDb.CheckGlobalExitRootWritten(l2Block.GlobalExitRoot) if err != nil { - return fmt.Errorf("get global exit root error: %v", err) + return fmt.Errorf("get global exit root error: %w", err) } if !gerWritten { if err := p.hermezDb.WriteBlockGlobalExitRoot(l2Block.L2BlockNumber, l2Block.GlobalExitRoot); err != nil { - return fmt.Errorf("write block global exit root error: %v", err) + return fmt.Errorf("write block global exit root error: %w", err) } if err := p.hermezDb.WriteGlobalExitRoot(l2Block.GlobalExitRoot); err != nil { - return fmt.Errorf("write global exit root error: %v", err) + return fmt.Errorf("write global exit root error: %w", err) } didStoreGer = true } @@ -419,7 +417,7 @@ func (p *BatchesProcessor) writeL2Block(l2Block *types.FullL2Block) error { if l2Block.L1BlockHash != emptyHash { if err := p.hermezDb.WriteBlockL1BlockHash(l2Block.L2BlockNumber, l2Block.L1BlockHash); err != nil { - return fmt.Errorf("write block global exit root error: %v", err) + return fmt.Errorf("write block global exit root error: %w", err) } } @@ -457,19 +455,19 @@ func (p *BatchesProcessor) writeL2Block(l2Block *types.FullL2Block) error { } if err := p.eriDb.WriteBody(bn, l2Block.L2Blockhash, txs); err != nil { - return fmt.Errorf("write body error: %v", err) + return fmt.Errorf("write body error: %w", err) } if err := p.hermezDb.WriteForkId(l2Block.BatchNumber, l2Block.ForkId); err != nil { - return fmt.Errorf("write block batch error: %v", err) + return fmt.Errorf("write block batch error: %w", err) } if err := p.hermezDb.WriteForkIdBlockOnce(l2Block.ForkId, l2Block.L2BlockNumber); err != nil { - return fmt.Errorf("write fork id block error: %v", err) + return fmt.Errorf("write fork id block error: %w", err) } if err := p.hermezDb.WriteBlockBatch(l2Block.L2BlockNumber, l2Block.BatchNumber); err != nil { - return fmt.Errorf("write block batch error: %v", err) + return fmt.Errorf("write block batch error: %w", err) } return nil diff --git a/zk/stages/stage_batches_test.go b/zk/stages/stage_batches_test.go index 9f2578ae5dc..6299f75cc39 100644 --- a/zk/stages/stage_batches_test.go +++ b/zk/stages/stage_batches_test.go @@ -79,7 +79,6 @@ func TestUnwindBatches(t *testing.T) { err = SpawnStageBatches(s, u, ctx, tx, cfg) require.NoError(t, err) tx.Commit() - tx2 := memdb.BeginRw(t, db1) // unwind to zero and check if there is any data in the tables diff --git a/zk/stages/test_utils.go b/zk/stages/test_utils.go index af9ec190587..f24557522b4 100644 --- a/zk/stages/test_utils.go +++ b/zk/stages/test_utils.go @@ -11,6 +11,7 @@ type TestDatastreamClient struct { gerUpdates []types.GerUpdate 
lastWrittenTimeAtomic atomic.Int64 streamingAtomic atomic.Bool + stopReadingToChannel atomic.Bool progress atomic.Uint64 entriesChan chan interface{} errChan chan error @@ -28,10 +29,6 @@ func NewTestDatastreamClient(fullL2Blocks []types.FullL2Block, gerUpdates []type return client } -func (c *TestDatastreamClient) EnsureConnected() (bool, error) { - return true, nil -} - func (c *TestDatastreamClient) ReadAllEntriesToChannel() error { c.streamingAtomic.Store(true) defer c.streamingAtomic.Swap(false) @@ -43,9 +40,24 @@ func (c *TestDatastreamClient) ReadAllEntriesToChannel() error { c.entriesChan <- &c.gerUpdates[i] } + c.entriesChan <- nil // needed to stop processing + + for { + if c.stopReadingToChannel.Load() { + break + } + } + return nil } +func (c *TestDatastreamClient) RenewEntryChannel() { +} + +func (c *TestDatastreamClient) StopReadingToChannel() { + c.stopReadingToChannel.Store(true) +} + func (c *TestDatastreamClient) GetEntryChan() *chan interface{} { return &c.entriesChan } @@ -54,14 +66,14 @@ func (c *TestDatastreamClient) GetErrChan() chan error { return c.errChan } -func (c *TestDatastreamClient) GetL2BlockByNumber(blockNum uint64) (*types.FullL2Block, int, error) { +func (c *TestDatastreamClient) GetL2BlockByNumber(blockNum uint64) (*types.FullL2Block, error) { for _, l2Block := range c.fullL2Blocks { if l2Block.L2BlockNumber == blockNum { - return &l2Block, types.CmdErrOK, nil + return &l2Block, nil } } - return nil, -1, nil + return nil, nil } func (c *TestDatastreamClient) GetLatestL2Block() (*types.FullL2Block, error) { @@ -75,10 +87,6 @@ func (c *TestDatastreamClient) GetLastWrittenTimeAtomic() *atomic.Int64 { return &c.lastWrittenTimeAtomic } -func (c *TestDatastreamClient) GetStreamingAtomic() *atomic.Bool { - return &c.streamingAtomic -} - func (c *TestDatastreamClient) GetProgressAtomic() *atomic.Uint64 { return &c.progress } diff --git a/zk/tests/unwinds/unwind.sh b/zk/tests/unwinds/unwind.sh index d83eeebbdca..84b5f436180 100755 --- a/zk/tests/unwinds/unwind.sh +++ b/zk/tests/unwinds/unwind.sh @@ -60,19 +60,9 @@ go run ./cmd/integration state_stages_zkevm \ # now get a dump of the datadir at this point go run ./cmd/hack --action=dumpAll --chaindata="$dataPath/rpc-datadir/chaindata" --output="$dataPath/phase1-dump2" -# now sync again -timeout $secondTimeout ./build/bin/cdk-erigon \ - --datadir="$dataPath/rpc-datadir" \ - --config=./dynamic-integration8.yaml \ - --zkevm.sync-limit=${stopBlock} - -# dump the data again into the post folder -go run ./cmd/hack --action=dumpAll --chaindata="$dataPath/rpc-datadir/chaindata" --output="$dataPath/phase2-dump2" mkdir -p "$dataPath/phase1-diffs/pre" mkdir -p "$dataPath/phase1-diffs/post" -mkdir -p "$dataPath/phase2-diffs/pre" -mkdir -p "$dataPath/phase2-diffs/post" # iterate over the files in the pre-dump folder for file in $(ls $dataPath/phase1-dump1); do @@ -84,14 +74,26 @@ for file in $(ls $dataPath/phase1-dump1); do echo "No difference found in $filename" else if [ "$filename" = "Code.txt" ] || [ "$filename" = "HashedCodeHash.txt" ] || [ "$filename" = "hermez_l1Sequences.txt" ] || [ "$filename" = "hermez_l1Verifications.txt" ] || [ "$filename" = "HermezSmt.txt" ] || [ "$filename" = "PlainCodeHash.txt" ] || [ "$filename" = "SyncStage.txt" ] || [ "$filename" = "BadHeaderNumber.txt" ]; then - echo "Expected differences in $filename" + echo "Phase 1 Expected differences in $filename" else - echo "Unexpected differences in $filename" + echo "Phase 1 Unexpected differences in $filename" exit 1 fi fi done +# now sync again 
+timeout $secondTimeout ./build/bin/cdk-erigon \ + --datadir="$dataPath/rpc-datadir" \ + --config=./dynamic-integration8.yaml \ + --zkevm.sync-limit=${stopBlock} + +# dump the data again into the post folder +go run ./cmd/hack --action=dumpAll --chaindata="$dataPath/rpc-datadir/chaindata" --output="$dataPath/phase2-dump2" + +mkdir -p "$dataPath/phase2-diffs/pre" +mkdir -p "$dataPath/phase2-diffs/post" + # iterate over the files in the pre-dump folder for file in $(ls $dataPath/phase2-dump1); do # get the filename @@ -99,12 +101,12 @@ for file in $(ls $dataPath/phase2-dump1); do # diff the files and if there is a difference found copy the pre and post files into the diffs folder if cmp -s $dataPath/phase2-dump1/$filename $dataPath/phase2-dump2/$filename; then - echo "No difference found in $filename" + echo "Phase 2 No difference found in $filename" else if [ "$filename" = "BadHeaderNumber.txt" ]; then - echo "Expected differences in $filename" + echo "Phase 2 Expected differences in $filename" else - echo "Unexpected differences in $filename" + echo "Phase 2 Unexpected differences in $filename" exit 2 fi fi From 4312933929a6060ac7cf6868b82182ab4b7cf516 Mon Sep 17 00:00:00 2001 From: Rachit Sonthalia <54906134+rachit77@users.noreply.github.com> Date: Fri, 8 Nov 2024 01:28:32 +0530 Subject: [PATCH 21/88] Remove SMT logic for Zero Prover (#1374) * wip * wip * apply feedback * Fix nil state root when there isn't changes in-between blocks * apply feedback --------- Co-authored-by: Jerry --- consensus/ethash/consensus.go | 3 ++ core/blockchain_zkevm.go | 2 +- core/state/intra_block_state_zkevm.go | 44 ++++++++++++++++----------- smt/pkg/smt/entity_storage.go | 4 +++ 4 files changed, 35 insertions(+), 18 deletions(-) diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index a6bfdf617df..77fa625b411 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -663,6 +663,9 @@ func AccumulateRewards(config *chain.Config, header *types.Header, uncles []*typ // accumulateRewards retrieves rewards for a block and applies them to the coinbase accounts for miner and uncle miners func accumulateRewards(config *chain.Config, state *state.IntraBlockState, header *types.Header, uncles []*types.Header) { + if config.IsNormalcy(header.Number.Uint64()) { + return + } minerReward, uncleRewards := AccumulateRewards(config, header, uncles) for i, uncle := range uncles { if i < len(uncleRewards) { diff --git a/core/blockchain_zkevm.go b/core/blockchain_zkevm.go index de39378f17b..10b61a2972a 100644 --- a/core/blockchain_zkevm.go +++ b/core/blockchain_zkevm.go @@ -134,7 +134,7 @@ func ExecuteBlockEphemerallyZk( receipts = append(receipts, receipt) } } - if !chainConfig.IsForkID7Etrog(block.NumberU64()) { + if !chainConfig.IsForkID7Etrog(block.NumberU64()) && !chainConfig.IsNormalcy(block.NumberU64()) { if err := ibs.ScalableSetSmtRootHash(roHermezDb); err != nil { return nil, err } diff --git a/core/state/intra_block_state_zkevm.go b/core/state/intra_block_state_zkevm.go index 22a44c769ac..36eb275f270 100644 --- a/core/state/intra_block_state_zkevm.go +++ b/core/state/intra_block_state_zkevm.go @@ -58,6 +58,10 @@ func (sdb *IntraBlockState) GetTxCount() (uint64, error) { } func (sdb *IntraBlockState) PostExecuteStateSet(chainConfig *chain.Config, blockNum uint64, blockInfoRoot *libcommon.Hash) { + if chainConfig.IsNormalcy(blockNum) { + return + } + //ETROG if chainConfig.IsForkID7Etrog(blockNum) { sdb.scalableSetBlockInfoRoot(blockInfoRoot) @@ -70,18 +74,20 @@ func (sdb 
*IntraBlockState) PreExecuteStateSet(chainConfig *chain.Config, blockN sdb.CreateAccount(ADDRESS_SCALABLE_L2, true) } - //save block number - sdb.scalableSetBlockNum(blockNumber) + if !chainConfig.IsNormalcy(blockNumber) { + //save block number + sdb.scalableSetBlockNum(blockNumber) - //ETROG - if chainConfig.IsForkID7Etrog(blockNumber) { - currentTimestamp := sdb.ScalableGetTimestamp() - if blockTimestamp > currentTimestamp { - sdb.ScalableSetTimestamp(blockTimestamp) - } + //ETROG + if chainConfig.IsForkID7Etrog(blockNumber) { + currentTimestamp := sdb.ScalableGetTimestamp() + if blockTimestamp > currentTimestamp { + sdb.ScalableSetTimestamp(blockTimestamp) + } - //save prev block hash - sdb.scalableSetBlockHash(blockNumber-1, stateRoot) + //save prev block hash + sdb.scalableSetBlockHash(blockNumber-1, stateRoot) + } } } @@ -99,18 +105,22 @@ func (sdb *IntraBlockState) SyncerPreExecuteStateSet( } //save block number - sdb.scalableSetBlockNum(blockNumber) + if !chainConfig.IsNormalcy(blockNumber) { + sdb.scalableSetBlockNum(blockNumber) + } emptyHash := libcommon.Hash{} //ETROG if chainConfig.IsForkID7Etrog(blockNumber) { - currentTimestamp := sdb.ScalableGetTimestamp() - if blockTimestamp > currentTimestamp { - sdb.ScalableSetTimestamp(blockTimestamp) - } + if !chainConfig.IsNormalcy(blockNumber) { + currentTimestamp := sdb.ScalableGetTimestamp() + if blockTimestamp > currentTimestamp { + sdb.ScalableSetTimestamp(blockTimestamp) + } - //save prev block hash - sdb.scalableSetBlockHash(blockNumber-1, prevBlockHash) + //save prev block hash + sdb.scalableSetBlockHash(blockNumber-1, prevBlockHash) + } //save ger with l1blockhash - but only in the case that the l1 info tree index hasn't been // re-used. If it has been re-used we never write this to the contract storage diff --git a/smt/pkg/smt/entity_storage.go b/smt/pkg/smt/entity_storage.go index 261b27103cd..f115359a0e6 100644 --- a/smt/pkg/smt/entity_storage.go +++ b/smt/pkg/smt/entity_storage.go @@ -207,6 +207,10 @@ func (s *SMT) SetContractStorage(ethAddr string, storage map[string]string, prog } func (s *SMT) SetStorage(ctx context.Context, logPrefix string, accChanges map[libcommon.Address]*accounts.Account, codeChanges map[libcommon.Address]string, storageChanges map[libcommon.Address]map[string]string) ([]*utils.NodeKey, []*utils.NodeValue8, error) { + if len(storageChanges) == 0 && len(accChanges) == 0 && len(codeChanges) == 0 { + return nil, nil, nil + } + var isDelete bool var err error From dfd9249c499bd66ed6f5a6f0ee021039d08108b3 Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Fri, 8 Nov 2024 12:19:10 +0000 Subject: [PATCH 22/88] fix(zkevm_api): post etrog virtual batch seq no 0 (#1426) --- turbo/jsonrpc/zkevm_api.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/turbo/jsonrpc/zkevm_api.go b/turbo/jsonrpc/zkevm_api.go index f72ae85d9a0..fe7aee56b31 100644 --- a/turbo/jsonrpc/zkevm_api.go +++ b/turbo/jsonrpc/zkevm_api.go @@ -258,6 +258,16 @@ func (api *ZkEvmAPIImpl) VirtualBatchNumber(ctx context.Context) (hexutil.Uint64 } if latestSequencedBatch == nil { + forkId, err := hermezDb.GetForkId(0) + if err != nil { + return hexutil.Uint64(0), err + } + + // injected batch post etrog must be both virtual and verified + if forkId >= uint64(chain.ForkID7Etrog) { + return hexutil.Uint64(1), nil + } + return hexutil.Uint64(0), nil } From effcd235074ae062706e074f0537e195dbf7b9a3 Mon Sep 17 00:00:00 2001 From: Scott Fairclough <70711990+hexoscott@users.noreply.github.com> Date: Fri, 8 Nov 2024 15:18:42 +0000 Subject: [PATCH 
23/88] set a minimum stream client timeout (#1427) --- zk/datastream/client/stream_client.go | 31 ++++++++++++++++++++------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/zk/datastream/client/stream_client.go b/zk/datastream/client/stream_client.go index e8e96ed9b29..e10471863a1 100644 --- a/zk/datastream/client/stream_client.go +++ b/zk/datastream/client/stream_client.go @@ -33,6 +33,8 @@ const ( var ( // ErrFileEntryNotFound denotes error that is returned when the certain file entry is not found in the datastream ErrFileEntryNotFound = errors.New("file entry not found") + + minimumCheckTimeout = 500 * time.Millisecond ) type StreamClient struct { @@ -530,8 +532,15 @@ LOOP: break LOOP } - if c.checkTimeout > 0 { - c.conn.SetReadDeadline(time.Now().Add(c.checkTimeout)) + var timeout time.Time + if c.checkTimeout < minimumCheckTimeout { + timeout = time.Now().Add(minimumCheckTimeout) + } else { + timeout = time.Now().Add(c.checkTimeout) + } + + if err = c.conn.SetReadDeadline(timeout); err != nil { + return err } if readNewProto { @@ -908,11 +917,14 @@ func (c *StreamClient) writeToConn(data interface{}) error { } func (c *StreamClient) resetWriteTimeout() error { - if c.checkTimeout == 0 { - return nil + var timeout time.Time + if c.checkTimeout < minimumCheckTimeout { + timeout = time.Now().Add(minimumCheckTimeout) + } else { + timeout = time.Now().Add(c.checkTimeout) } - if err := c.conn.SetWriteDeadline(time.Now().Add(c.checkTimeout)); err != nil { + if err := c.conn.SetWriteDeadline(timeout); err != nil { return fmt.Errorf("%w: conn.SetWriteDeadline: %v", ErrSocket, err) } @@ -920,11 +932,14 @@ func (c *StreamClient) resetWriteTimeout() error { } func (c *StreamClient) resetReadTimeout() error { - if c.checkTimeout == 0 { - return nil + var timeout time.Time + if c.checkTimeout < minimumCheckTimeout { + timeout = time.Now().Add(minimumCheckTimeout) + } else { + timeout = time.Now().Add(c.checkTimeout) } - if err := c.conn.SetReadDeadline(time.Now().Add(c.checkTimeout)); err != nil { + if err := c.conn.SetReadDeadline(timeout); err != nil { return fmt.Errorf("%w: conn.SetReadDeadline: %v", ErrSocket, err) } From 48f7026f3b53ff48b51b566fbe3ca5ab850cbe7e Mon Sep 17 00:00:00 2001 From: Scott Fairclough <70711990+hexoscott@users.noreply.github.com> Date: Fri, 8 Nov 2024 16:36:45 +0000 Subject: [PATCH 24/88] only discard a transaction if the batch is completely empty (#1431) blocks take up counters so this tx might be valid at the start of the very next batch --- zk/stages/stage_sequence_execute.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index e55f17e41f6..3d06f8af642 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -429,7 +429,7 @@ func sequencingBatchStep( In this case we make note that we have had a transaction that overflowed and continue attempting to process transactions Once we reach the cap for these attempts we will stop producing blocks and consider the batch done */ - if !batchState.hasAnyTransactionsInThisBatch { + if !batchState.hasAnyTransactionsInThisBatch && len(batchState.builtBlocks) == 0 { // mark the transaction to be removed from the pool cfg.txPool.MarkForDiscardFromPendingBest(txHash) log.Info(fmt.Sprintf("[%s] single transaction %s cannot fit into batch", logPrefix, txHash)) From 41a354392f02021c4b5a449fcf97aef688faecf9 Mon Sep 17 00:00:00 2001 From: Scott Fairclough 
<70711990+hexoscott@users.noreply.github.com> Date: Fri, 8 Nov 2024 16:56:28 +0000 Subject: [PATCH 25/88] always use most recent info tree index for new blocks (#1429) --- zk/hermez_db/db.go | 14 ++-- zk/l1infotree/updater.go | 2 +- zk/stages/stage_sequence_execute_utils.go | 83 ++++++++++------------- 3 files changed, 42 insertions(+), 57 deletions(-) diff --git a/zk/hermez_db/db.go b/zk/hermez_db/db.go index cd93faaa798..01e8bdfe3e4 100644 --- a/zk/hermez_db/db.go +++ b/zk/hermez_db/db.go @@ -1255,32 +1255,32 @@ func (db *HermezDbReader) GetL1InfoTreeUpdate(idx uint64) (*types.L1InfoTreeUpda return update, nil } -func (db *HermezDbReader) GetLatestL1InfoTreeUpdate() (*types.L1InfoTreeUpdate, bool, error) { +func (db *HermezDbReader) GetLatestL1InfoTreeUpdate() (*types.L1InfoTreeUpdate, error) { cursor, err := db.tx.Cursor(L1_INFO_TREE_UPDATES) if err != nil { - return nil, false, err + return nil, err } defer cursor.Close() count, err := cursor.Count() if err != nil { - return nil, false, err + return nil, err } if count == 0 { - return nil, false, nil + return nil, nil } _, v, err := cursor.Last() if err != nil { - return nil, false, err + return nil, err } if len(v) == 0 { - return nil, false, nil + return nil, nil } result := &types.L1InfoTreeUpdate{} result.Unmarshall(v) - return result, true, nil + return result, nil } func (db *HermezDb) WriteBlockL1InfoTreeIndex(blockNumber uint64, l1Index uint64) error { diff --git a/zk/l1infotree/updater.go b/zk/l1infotree/updater.go index ecfebc7c76d..28a8e8176ff 100644 --- a/zk/l1infotree/updater.go +++ b/zk/l1infotree/updater.go @@ -74,7 +74,7 @@ func (u *Updater) WarmUp(tx kv.RwTx) (err error) { u.progress = progress - latestUpdate, _, err := hermezDb.GetLatestL1InfoTreeUpdate() + latestUpdate, err := hermezDb.GetLatestL1InfoTreeUpdate() if err != nil { return err } diff --git a/zk/stages/stage_sequence_execute_utils.go b/zk/stages/stage_sequence_execute_utils.go index 52045d2e03d..c72c7954de4 100644 --- a/zk/stages/stage_sequence_execute_utils.go +++ b/zk/stages/stage_sequence_execute_utils.go @@ -353,66 +353,51 @@ func prepareTickers(cfg *SequenceBlockCfg) (*time.Ticker, *time.Ticker, *time.Ti // will be called at the start of every new block created within a batch to figure out if there is a new GER // we can use or not. 
In the special case that this is the first block we just return 0 as we need to use the // 0 index first before we can use 1+ -func calculateNextL1TreeUpdateToUse(lastInfoIndex uint64, hermezDb *hermez_db.HermezDb, proposedTimestamp uint64) (uint64, *zktypes.L1InfoTreeUpdate, error) { +func calculateNextL1TreeUpdateToUse(recentlyUsed uint64, hermezDb *hermez_db.HermezDb, proposedTimestamp uint64) (uint64, *zktypes.L1InfoTreeUpdate, error) { // always default to 0 and only update this if the next available index has reached finality - var ( - nextL1Index uint64 = 0 - l1Info *zktypes.L1InfoTreeUpdate - err error - ) + var nextL1Index uint64 = 0 - if lastInfoIndex == 0 { - // potentially at the start of the chain so get the latest info tree index in the DB and work - // backwards until we find a valid one to use - l1Info, err = getNetworkStartInfoTreeIndex(hermezDb, proposedTimestamp) - if err != nil || l1Info == nil { - return 0, nil, err - } - nextL1Index = l1Info.Index - } else { - // check if the next index is there and if it has reached finality or not - l1Info, err = hermezDb.GetL1InfoTreeUpdate(lastInfoIndex + 1) - if err != nil { - return 0, nil, err - } + /* + get the progress of the chain so far, then get the latest available data for the next index. + If these values are the same info tree update we return 0 as no-change. If the next index is + higher we return that one so long as it is valid. + */ - // ensure that we are above the min timestamp for this index to use it - if l1Info != nil && l1Info.Timestamp <= proposedTimestamp { - nextL1Index = l1Info.Index - } + latestIndex, err := hermezDb.GetLatestL1InfoTreeUpdate() + if err != nil { + return 0, nil, err } - return nextL1Index, l1Info, nil -} - -func getNetworkStartInfoTreeIndex(hermezDb *hermez_db.HermezDb, proposedTimestamp uint64) (*zktypes.L1InfoTreeUpdate, error) { - l1Info, found, err := hermezDb.GetLatestL1InfoTreeUpdate() - if err != nil || !found || l1Info == nil { - return nil, err + if latestIndex == nil || latestIndex.Index <= recentlyUsed { + // no change + return 0, nil, nil } - if l1Info.Timestamp > proposedTimestamp { - // not valid so move back one index - we need one less than or equal to the proposed timestamp - lastIndex := l1Info.Index - for lastIndex > 0 { - lastIndex = lastIndex - 1 - l1Info, err = hermezDb.GetL1InfoTreeUpdate(lastIndex) - if err != nil { - return nil, err - } - if l1Info != nil && l1Info.Timestamp <= proposedTimestamp { - break - } + // now verify that the latest known index is valid and work backwards until we find one that is + // or, we reach the most recently used index or 0 + for { + if latestIndex.Timestamp <= proposedTimestamp { + nextL1Index = latestIndex.Index + break + } + + if latestIndex.Index == 0 || latestIndex.Index <= recentlyUsed { + // end of the line + return 0, nil, nil + } + + latestIndex, err = hermezDb.GetL1InfoTreeUpdate(latestIndex.Index - 1) + if err != nil { + return 0, nil, err + } + + if latestIndex == nil { + return 0, nil, nil } - } - // final check that the l1Info is actually valid before returning, index 0 or 1 might be invalid for - // some strange reason so just use index 0 in this case - it is always safer to use a 0 index - if l1Info == nil || l1Info.Timestamp > proposedTimestamp { - return nil, nil } - return l1Info, nil + return nextL1Index, latestIndex, nil } func updateSequencerProgress(tx kv.RwTx, newHeight uint64, newBatch uint64, unwinding bool) error { From 3f888033477722adfd2d9426029787ae81adca0a Mon Sep 17 00:00:00 2001 From: Jerry Date: 
Fri, 8 Nov 2024 11:11:35 -0800 Subject: [PATCH 26/88] Allow execution to short circuit to the last downloaded batch (#1432) --- cmd/utils/flags.go | 5 +++++ eth/ethconfig/config_zkevm.go | 1 + eth/stagedsync/stage_execute_zkevm.go | 2 +- turbo/cli/default_flags.go | 1 + turbo/cli/flags_zkevm.go | 6 +++++- zk/utils/utils.go | 10 +++++----- 6 files changed, 18 insertions(+), 7 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 1c8ece0b9c6..756b1b9628c 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -410,6 +410,11 @@ var ( Usage: "The time to wait for data to arrive from the stream before reporting an error (0s doesn't check)", Value: "3s", } + L2ShortCircuitToVerifiedBatchFlag = cli.BoolFlag{ + Name: "zkevm.l2-short-circuit-to-verified-batch", + Usage: "Short circuit block execution up to the batch after the latest verified batch (default: true). When disabled, the sequencer will execute all downloaded batches", + Value: true, + } L1SyncStartBlock = cli.Uint64Flag{ Name: "zkevm.l1-sync-start-block", Usage: "Designed for recovery of the network from the L1 batch data, slower mode of operation than the datastream. If set the datastream will not be used", diff --git a/eth/ethconfig/config_zkevm.go b/eth/ethconfig/config_zkevm.go index 8cd78e64753..93a539b49a1 100644 --- a/eth/ethconfig/config_zkevm.go +++ b/eth/ethconfig/config_zkevm.go @@ -12,6 +12,7 @@ type Zk struct { L2RpcUrl string L2DataStreamerUrl string L2DataStreamerTimeout time.Duration + L2ShortCircuitToVerifiedBatch bool L1SyncStartBlock uint64 L1SyncStopBatch uint64 L1ChainId uint64 diff --git a/eth/stagedsync/stage_execute_zkevm.go b/eth/stagedsync/stage_execute_zkevm.go index 2cbe839888b..6debcc4ed88 100644 --- a/eth/stagedsync/stage_execute_zkevm.go +++ b/eth/stagedsync/stage_execute_zkevm.go @@ -264,7 +264,7 @@ func getExecRange(cfg ExecuteBlockCfg, tx kv.RwTx, stageProgress, toBlock uint64 return to, total, nil } - shouldShortCircuit, noProgressTo, err := utils.ShouldShortCircuitExecution(tx, logPrefix) + shouldShortCircuit, noProgressTo, err := utils.ShouldShortCircuitExecution(tx, logPrefix, cfg.zk.L2ShortCircuitToVerifiedBatch) if err != nil { return 0, 0, err } diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index b8dd0f6463e..9e9c9ba7af9 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -175,6 +175,7 @@ var DefaultFlags = []cli.Flag{ &utils.L2RpcUrlFlag, &utils.L2DataStreamerUrlFlag, &utils.L2DataStreamerTimeout, + &utils.L2ShortCircuitToVerifiedBatchFlag, &utils.L1SyncStartBlock, &utils.L1SyncStopBatch, &utils.L1ChainIdFlag, diff --git a/turbo/cli/flags_zkevm.go b/turbo/cli/flags_zkevm.go index 56ec8ec0bee..cc8416d4e40 100644 --- a/turbo/cli/flags_zkevm.go +++ b/turbo/cli/flags_zkevm.go @@ -8,13 +8,14 @@ import ( "time" + "strconv" + libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/zk/sequencer" utils2 "github.com/ledgerwatch/erigon/zk/utils" "github.com/urfave/cli/v2" - "strconv" ) var DeprecatedFlags = map[string]string{ @@ -70,6 +71,8 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { panic(fmt.Sprintf("could not parse l2 datastreamer timeout value %s", l2DataStreamTimeoutVal)) } + l2ShortCircuitToVerifiedBatchVal := ctx.Bool(utils.L2ShortCircuitToVerifiedBatchFlag.Name) + sequencerBlockSealTimeVal := ctx.String(utils.SequencerBlockSealTime.Name) sequencerBlockSealTime, err := 
time.ParseDuration(sequencerBlockSealTimeVal) if err != nil { @@ -133,6 +136,7 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { L2RpcUrl: ctx.String(utils.L2RpcUrlFlag.Name), L2DataStreamerUrl: ctx.String(utils.L2DataStreamerUrlFlag.Name), L2DataStreamerTimeout: l2DataStreamTimeout, + L2ShortCircuitToVerifiedBatch: l2ShortCircuitToVerifiedBatchVal, L1SyncStartBlock: ctx.Uint64(utils.L1SyncStartBlock.Name), L1SyncStopBatch: ctx.Uint64(utils.L1SyncStopBatch.Name), L1ChainId: ctx.Uint64(utils.L1ChainIdFlag.Name), diff --git a/zk/utils/utils.go b/zk/utils/utils.go index 10eac499aed..4a10cfaa0a8 100644 --- a/zk/utils/utils.go +++ b/zk/utils/utils.go @@ -20,7 +20,7 @@ import ( // if current sync is before verified batch - short circuit to verified batch, otherwise to enx of next batch // if there is no new fully downloaded batch - do not short circuit // returns (shouldShortCircuit, blockNumber, error) -func ShouldShortCircuitExecution(tx kv.RwTx, logPrefix string) (bool, uint64, error) { +func ShouldShortCircuitExecution(tx kv.RwTx, logPrefix string, l2ShortCircuitToVerifiedBatch bool) (bool, uint64, error) { hermezDb := hermez_db.NewHermezDb(tx) // get highest verified batch @@ -48,10 +48,10 @@ func ShouldShortCircuitExecution(tx kv.RwTx, logPrefix string) (bool, uint64, er var shortCircuitBatch, shortCircuitBlock, cycle uint64 // this is so empty batches work - for shortCircuitBlock == 0 { + for shortCircuitBlock == 0 || (!l2ShortCircuitToVerifiedBatch && executedBatch+cycle < downloadedBatch) { cycle++ - // if executed lower than verified, short curcuit up to verified - if executedBatch < highestVerifiedBatchNo { + // if executed lower than verified, short circuit up to verified (only if l2ShortCircuitToVerifiedBatch is true) + if executedBatch < highestVerifiedBatchNo && l2ShortCircuitToVerifiedBatch { if downloadedBatch < highestVerifiedBatchNo { shortCircuitBatch = downloadedBatch } else { @@ -59,7 +59,7 @@ func ShouldShortCircuitExecution(tx kv.RwTx, logPrefix string) (bool, uint64, er } } else if executedBatch+cycle <= downloadedBatch { // else short circuit up to next downloaded batch shortCircuitBatch = executedBatch + cycle - } else { // if we don't have at least one more full downlaoded batch, don't short circuit and just execute to latest block + } else { // if we don't have at least one more full downloaded batch, don't short circuit and just execute to latest block return false, 0, nil } From db2fe26b408bffcda080b6b95e4c2d08bec8c145 Mon Sep 17 00:00:00 2001 From: tclemos Date: Fri, 8 Nov 2024 19:02:26 -0300 Subject: [PATCH 27/88] implement test cases for TestSpawnL1SequencerSyncStage --- zk/stages/stage_l1_sequencer_sync_test.go | 193 ++++++++++++---------- 1 file changed, 107 insertions(+), 86 deletions(-) diff --git a/zk/stages/stage_l1_sequencer_sync_test.go b/zk/stages/stage_l1_sequencer_sync_test.go index d5482804c89..62251bd33a8 100644 --- a/zk/stages/stage_l1_sequencer_sync_test.go +++ b/zk/stages/stage_l1_sequencer_sync_test.go @@ -20,6 +20,7 @@ import ( "github.com/ledgerwatch/erigon/zk/hermez_db" "github.com/ledgerwatch/erigon/zk/syncer" "github.com/ledgerwatch/log/v3" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" ) @@ -84,48 +85,119 @@ func TestSpawnL1SequencerSyncStage(t *testing.T) { Topics: l1ContractTopics, } - filteredLogs := []types.Log{ - types.Log{ - BlockNumber: latestBlockNumber.Uint64(), - Address: l1ContractAddresses[0], - Topics: 
[]common.Hash{contracts.InitialSequenceBatchesTopic}, - }, - - types.Log{ - BlockNumber: latestBlockNumber.Uint64(), - Address: l1ContractAddresses[0], - Topics: []common.Hash{contracts.InitialSequenceBatchesTopic}, - }, + type testCase struct { + name string + getLog func(hDB *hermez_db.HermezDb) (types.Log, error) + assert func(t *testing.T, hDB *hermez_db.HermezDb) + } - types.Log{ - BlockNumber: latestBlockNumber.Uint64(), - Address: l1ContractAddresses[0], - Topics: []common.Hash{contracts.AddNewRollupTypeTopic}, + const forkIdBytesStartPosition = 64 + const forkIdBytesEndPosition = 96 + const rollupDataSize = 100 + + testCases := []testCase{ + { + name: "AddNewRollupType", + getLog: func(hDB *hermez_db.HermezDb) (types.Log, error) { + rollupType := uint64(1) + rollupTypeHash := common.BytesToHash(big.NewInt(0).SetUint64(rollupType).Bytes()) + rollupData := make([]byte, rollupDataSize) + rollupForkId := uint64(111) + rollupForkIdHash := common.BytesToHash(big.NewInt(0).SetUint64(rollupForkId).Bytes()) + copy(rollupData[forkIdBytesStartPosition:forkIdBytesEndPosition], rollupForkIdHash.Bytes()) + return types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.AddNewRollupTypeTopic, rollupTypeHash}, + Data: rollupData, + }, nil + }, + assert: func(t *testing.T, hDB *hermez_db.HermezDb) { + forkID, err := hDB.GetForkFromRollupType(uint64(1)) + require.NoError(t, err) + + assert.Equal(t, forkID, uint64(111)) + }, }, - - types.Log{ - BlockNumber: latestBlockNumber.Uint64(), - Address: l1ContractAddresses[0], - Topics: []common.Hash{contracts.AddNewRollupTypeTopicBanana}, + { + name: "AddNewRollupTypeTopicBanana", + getLog: func(hDB *hermez_db.HermezDb) (types.Log, error) { + rollupType := uint64(2) + rollupTypeHash := common.BytesToHash(big.NewInt(0).SetUint64(rollupType).Bytes()) + rollupData := make([]byte, rollupDataSize) + rollupForkId := uint64(222) + rollupForkIdHash := common.BytesToHash(big.NewInt(0).SetUint64(rollupForkId).Bytes()) + copy(rollupData[forkIdBytesStartPosition:forkIdBytesEndPosition], rollupForkIdHash.Bytes()) + return types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.AddNewRollupTypeTopicBanana, rollupTypeHash}, + Data: rollupData, + }, nil + }, + assert: func(t *testing.T, hDB *hermez_db.HermezDb) { + forkID, err := hDB.GetForkFromRollupType(uint64(2)) + require.NoError(t, err) + + assert.Equal(t, forkID, uint64(222)) + }, }, - - types.Log{ - BlockNumber: latestBlockNumber.Uint64(), - Address: l1ContractAddresses[0], - Topics: []common.Hash{contracts.CreateNewRollupTopic}, + { + name: "CreateNewRollupTopic", + getLog: func(hDB *hermez_db.HermezDb) (types.Log, error) { + rollupID := uint64(99999) + rollupIDHash := common.BytesToHash(big.NewInt(0).SetUint64(rollupID).Bytes()) + rollupType := uint64(33) + rollupForkID := uint64(333) + if funcErr := hDB.WriteRollupType(rollupType, rollupForkID); funcErr != nil { + return types.Log{}, funcErr + } + newRollupDataCreation := common.BytesToHash(big.NewInt(0).SetUint64(rollupType).Bytes()).Bytes() + + return types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.CreateNewRollupTopic, rollupIDHash}, + Data: newRollupDataCreation, + }, nil + }, + assert: func(t *testing.T, hDB *hermez_db.HermezDb) { + forks, batches, err := hDB.GetAllForkHistory() + for i := 0; i < len(forks); i++ { + if forks[i] == uint64(333) { + 
assert.Equal(t, batches[i], uint64(0)) + break + } + } + require.NoError(t, err) + }, }, + // types.Log{ + // BlockNumber: latestBlockNumber.Uint64(), + // Address: l1ContractAddresses[0], + // Topics: []common.Hash{contracts.InitialSequenceBatchesTopic}, + // }, + + // types.Log{ + // BlockNumber: latestBlockNumber.Uint64(), + // Address: l1ContractAddresses[0], + // Topics: []common.Hash{contracts.UpdateRollupTopic}, + // }, + } - types.Log{ - BlockNumber: latestBlockNumber.Uint64(), - Address: l1ContractAddresses[0], - Topics: []common.Hash{contracts.UpdateRollupTopic}, - }, + filteredLogs := []types.Log{} + for _, tc := range testCases { + ll, err := tc.getLog(hDB) + require.NoError(t, err) + filteredLogs = append(filteredLogs, ll) } + EthermanMock.EXPECT().FilterLogs(gomock.Any(), filterQuery).Return(filteredLogs, nil).AnyTimes() l1Syncer := syncer.NewL1Syncer(ctx, []syncer.IEtherman{EthermanMock}, l1ContractAddresses, l1ContractTopics, 10, 0, "latest") // updater := l1infotree.NewUpdater(ðconfig.Zk{}, l1Syncer) zkCfg := ðconfig.Zk{ + L1RollupId: uint64(99999), L1FirstBlock: l1FirstBlock.Uint64(), L1FinalizedBlockRequirement: uint64(21), } @@ -135,59 +207,8 @@ func TestSpawnL1SequencerSyncStage(t *testing.T) { err = SpawnL1SequencerSyncStage(s, u, tx, cfg, ctx, log.New()) require.NoError(t, err) - // // assert - // // check tree - // tree, err := l1infotree.InitialiseL1InfoTree(hDB) - // require.NoError(t, err) - - // combined := append(mainnetExitRoot.Bytes(), rollupExitRoot.Bytes()...) - // gerBytes := keccak256.Hash(combined) - // ger := common.BytesToHash(gerBytes) - // leafBytes := l1infotree.HashLeafData(ger, latestBlockParentHash, latestBlockTime) - - // assert.True(t, tree.LeafExists(leafBytes)) - - // // check WriteL1InfoTreeLeaf - // leaves, err := hDB.GetAllL1InfoTreeLeaves() - // require.NoError(t, err) - - // leafHash := common.BytesToHash(leafBytes[:]) - // assert.Len(t, leaves, 1) - // assert.Equal(t, leafHash.String(), leaves[0].String()) - - // // check WriteL1InfoTreeUpdate - // l1InfoTreeUpdate, err := hDB.GetL1InfoTreeUpdate(0) - // require.NoError(t, err) - - // assert.Equal(t, uint64(0), l1InfoTreeUpdate.Index) - // assert.Equal(t, ger, l1InfoTreeUpdate.GER) - // assert.Equal(t, mainnetExitRoot, l1InfoTreeUpdate.MainnetExitRoot) - // assert.Equal(t, rollupExitRoot, l1InfoTreeUpdate.RollupExitRoot) - // assert.Equal(t, latestBlockNumber.Uint64(), l1InfoTreeUpdate.BlockNumber) - // assert.Equal(t, latestBlockTime, l1InfoTreeUpdate.Timestamp) - // assert.Equal(t, latestBlockParentHash, l1InfoTreeUpdate.ParentHash) - - // //check WriteL1InfoTreeUpdateToGer - // l1InfoTreeUpdateToGer, err := hDB.GetL1InfoTreeUpdateByGer(ger) - // require.NoError(t, err) - - // assert.Equal(t, uint64(0), l1InfoTreeUpdateToGer.Index) - // assert.Equal(t, ger, l1InfoTreeUpdateToGer.GER) - // assert.Equal(t, mainnetExitRoot, l1InfoTreeUpdateToGer.MainnetExitRoot) - // assert.Equal(t, rollupExitRoot, l1InfoTreeUpdateToGer.RollupExitRoot) - // assert.Equal(t, latestBlockNumber.Uint64(), l1InfoTreeUpdateToGer.BlockNumber) - // assert.Equal(t, latestBlockTime, l1InfoTreeUpdateToGer.Timestamp) - // assert.Equal(t, latestBlockParentHash, l1InfoTreeUpdateToGer.ParentHash) - - // // check WriteL1InfoTreeRoot - // root, _, _ := tree.GetCurrentRootCountAndSiblings() - // index, found, err := hDB.GetL1InfoTreeIndexByRoot(root) - // assert.NoError(t, err) - // assert.Equal(t, uint64(0), index) - // assert.True(t, found) - - // // check SaveStageProgress - // progress, err := 
stages.GetStageProgress(tx, stages.L1InfoTree) - // require.NoError(t, err) - // assert.Equal(t, latestBlockNumber.Uint64()+1, progress) + // assert + for _, tc := range testCases { + tc.assert(t, hDB) + } } From 97ec39cc86047ef773457d8a534709f578df4ccf Mon Sep 17 00:00:00 2001 From: tclemos Date: Sat, 9 Nov 2024 11:41:25 -0300 Subject: [PATCH 28/88] add more test cases for TestSpawnL1SequencerSyncStage --- zk/stages/stage_l1_sequencer_sync.go | 2 +- zk/stages/stage_l1_sequencer_sync_test.go | 104 +++++++++++++++++++--- 2 files changed, 91 insertions(+), 15 deletions(-) diff --git a/zk/stages/stage_l1_sequencer_sync.go b/zk/stages/stage_l1_sequencer_sync.go index 187946c1dda..cac85804941 100644 --- a/zk/stages/stage_l1_sequencer_sync.go +++ b/zk/stages/stage_l1_sequencer_sync.go @@ -199,7 +199,7 @@ Loop: const ( injectedBatchLogTransactionStartByte = 128 - injectedBatchLastGerStartByte = 31 + injectedBatchLastGerStartByte = 32 injectedBatchLastGerEndByte = 64 injectedBatchSequencerStartByte = 76 injectedBatchSequencerEndByte = 96 diff --git a/zk/stages/stage_l1_sequencer_sync_test.go b/zk/stages/stage_l1_sequencer_sync_test.go index 62251bd33a8..5dc1f836dbb 100644 --- a/zk/stages/stage_l1_sequencer_sync_test.go +++ b/zk/stages/stage_l1_sequencer_sync_test.go @@ -91,11 +91,54 @@ func TestSpawnL1SequencerSyncStage(t *testing.T) { assert func(t *testing.T, hDB *hermez_db.HermezDb) } - const forkIdBytesStartPosition = 64 - const forkIdBytesEndPosition = 96 - const rollupDataSize = 100 + const ( + forkIdBytesStartPosition = 64 + forkIdBytesEndPosition = 96 + rollupDataSize = 100 + + injectedBatchLogTransactionStartByte = 128 + injectedBatchLastGerStartByte = 32 + injectedBatchLastGerEndByte = 64 + injectedBatchSequencerStartByte = 76 + injectedBatchSequencerEndByte = 96 + ) testCases := []testCase{ + { + name: "InitialSequenceBatchesTopic", + getLog: func(hDB *hermez_db.HermezDb) (types.Log, error) { + ger := common.HexToHash("0x111111111") + sequencer := common.HexToAddress("0x222222222") + batchL2Data := common.HexToHash("0x333333333") + + initialSequenceBatchesData := make([]byte, 200) + copy(initialSequenceBatchesData[injectedBatchLastGerStartByte:injectedBatchLastGerEndByte], ger.Bytes()) + copy(initialSequenceBatchesData[injectedBatchSequencerStartByte:injectedBatchSequencerEndByte], sequencer.Bytes()) + copy(initialSequenceBatchesData[injectedBatchLogTransactionStartByte:], batchL2Data.Bytes()) + return types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.InitialSequenceBatchesTopic}, + Data: initialSequenceBatchesData, + }, nil + }, + assert: func(t *testing.T, hDB *hermez_db.HermezDb) { + ger := common.HexToHash("0x111111111") + sequencer := common.HexToAddress("0x222222222") + batchL2Data := common.HexToHash("0x333333333") + + l1InjectedBatch, err := hDB.GetL1InjectedBatch(0) + require.NoError(t, err) + + assert.Equal(t, l1InjectedBatch.L1BlockNumber, latestBlock.NumberU64()) + assert.Equal(t, l1InjectedBatch.Timestamp, latestBlock.Time()) + assert.Equal(t, l1InjectedBatch.L1BlockHash, latestBlock.Hash()) + assert.Equal(t, l1InjectedBatch.L1ParentHash, latestBlock.ParentHash()) + assert.Equal(t, l1InjectedBatch.LastGlobalExitRoot.String(), ger.String()) + assert.Equal(t, l1InjectedBatch.Sequencer.String(), sequencer.String()) + assert.ElementsMatch(t, l1InjectedBatch.Transaction, batchL2Data.Bytes()) + }, + }, { name: "AddNewRollupType", getLog: func(hDB *hermez_db.HermezDb) (types.Log, error) { @@ -172,17 +215,40 
@@ func TestSpawnL1SequencerSyncStage(t *testing.T) { require.NoError(t, err) }, }, - // types.Log{ - // BlockNumber: latestBlockNumber.Uint64(), - // Address: l1ContractAddresses[0], - // Topics: []common.Hash{contracts.InitialSequenceBatchesTopic}, - // }, - - // types.Log{ - // BlockNumber: latestBlockNumber.Uint64(), - // Address: l1ContractAddresses[0], - // Topics: []common.Hash{contracts.UpdateRollupTopic}, - // }, + { + name: "UpdateRollupTopic", + getLog: func(hDB *hermez_db.HermezDb) (types.Log, error) { + rollupID := uint64(99999) + rollupIDHash := common.BytesToHash(big.NewInt(0).SetUint64(rollupID).Bytes()) + rollupType := uint64(44) + rollupTypeHash := common.BytesToHash(big.NewInt(0).SetUint64(rollupType).Bytes()) + rollupForkID := uint64(444) + if funcErr := hDB.WriteRollupType(rollupType, rollupForkID); funcErr != nil { + return types.Log{}, funcErr + } + latestVerified := uint64(4444) + latestVerifiedHash := common.BytesToHash(big.NewInt(0).SetUint64(latestVerified).Bytes()) + updateRollupData := rollupTypeHash.Bytes() + updateRollupData = append(updateRollupData, latestVerifiedHash.Bytes()...) + + return types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.UpdateRollupTopic, rollupIDHash}, + Data: updateRollupData, + }, nil + }, + assert: func(t *testing.T, hDB *hermez_db.HermezDb) { + forks, batches, err := hDB.GetAllForkHistory() + for i := 0; i < len(forks); i++ { + if forks[i] == uint64(444) { + assert.Equal(t, batches[i], uint64(4444)) + break + } + } + require.NoError(t, err) + }, + }, } filteredLogs := []types.Log{} @@ -212,3 +278,13 @@ func TestSpawnL1SequencerSyncStage(t *testing.T) { tc.assert(t, hDB) } } + +func TestUnwindL1SequencerSyncStage(t *testing.T) { + err := UnwindL1SequencerSyncStage(nil, nil, L1SequencerSyncCfg{}, context.Background()) + assert.Nil(t, err) +} + +func TestPruneL1SequencerSyncStage(t *testing.T) { + err := PruneL1SequencerSyncStage(nil, nil, L1SequencerSyncCfg{}, context.Background()) + assert.Nil(t, err) +} From a3176d191288b09e71ff01de08693b5fb9b5d9e4 Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Mon, 11 Nov 2024 15:38:36 +0000 Subject: [PATCH 29/88] fix(zkevm_api): feed accinputhash back to accinputhash calc (#1438) --- turbo/jsonrpc/zkevm_api.go | 1 + 1 file changed, 1 insertion(+) diff --git a/turbo/jsonrpc/zkevm_api.go b/turbo/jsonrpc/zkevm_api.go index fe7aee56b31..44bd131b05d 100644 --- a/turbo/jsonrpc/zkevm_api.go +++ b/turbo/jsonrpc/zkevm_api.go @@ -764,6 +764,7 @@ func (api *ZkEvmAPIImpl) getAccInputHash(ctx context.Context, db SequenceReader, // calculate acc input hash for i := 0; i < int(batchNum-prevSequenceBatch); i++ { accInputHash = accInputHashCalcFn(prevSequenceAccinputHash, i) + prevSequenceAccinputHash = *accInputHash } return From 0f99eaf5426c4147690deba742f7bb5614cc0cbd Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Mon, 11 Nov 2024 16:22:26 +0000 Subject: [PATCH 30/88] tweak(zkevm_api): check if sequencer for closed batch (#1439) --- turbo/jsonrpc/zkevm_api.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/turbo/jsonrpc/zkevm_api.go b/turbo/jsonrpc/zkevm_api.go index 44bd131b05d..d58a2a7656c 100644 --- a/turbo/jsonrpc/zkevm_api.go +++ b/turbo/jsonrpc/zkevm_api.go @@ -551,10 +551,13 @@ func (api *ZkEvmAPIImpl) GetBatchByNumber(ctx context.Context, rpcBatchNumber rp batch.Timestamp = types.ArgUint64(block.Time()) } - // if we don't have a datastream available to verify that a batch is actually - // closed then 
we fall back to existing behaviour of checking if the next batch - // has any blocks in it - if api.datastreamServer != nil { + /* + if node is a sequencer it won't have the required data stored in the db, so use the datastream + server to figure out if the batch is closed, otherwise fall back. This ensures good performance + for RPC nodes in daisy chain node which do have a datastream (previous check was testing for + presence of datastream server). + */ + if sequencer.IsSequencer() { highestClosed, err := api.datastreamServer.GetHighestClosedBatchNoCache() if err != nil { return nil, err From 17ca7c9226471b81173fbc7a458c2948aacdb4ee Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Mon, 11 Nov 2024 16:32:40 +0000 Subject: [PATCH 31/88] tweak(zkevm_api): improve accinput missing batch log (#1441) --- turbo/jsonrpc/zkevm_api.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/turbo/jsonrpc/zkevm_api.go b/turbo/jsonrpc/zkevm_api.go index d58a2a7656c..7f4670e6732 100644 --- a/turbo/jsonrpc/zkevm_api.go +++ b/turbo/jsonrpc/zkevm_api.go @@ -721,7 +721,15 @@ func (api *ZkEvmAPIImpl) getAccInputHash(ctx context.Context, db SequenceReader, } if prevSequence == nil || batchSequence == nil { - return nil, fmt.Errorf("failed to get sequence data for batch %d", batchNum) + var missing string + if prevSequence == nil && batchSequence == nil { + missing = "previous and current batch sequences" + } else if prevSequence == nil { + missing = "previous batch sequence" + } else { + missing = "current batch sequence" + } + return nil, fmt.Errorf("failed to get %s for batch %d", missing, batchNum) } // get batch range for sequence From 59c1e3faf0a5aa1f962b7eabda62c366aae53159 Mon Sep 17 00:00:00 2001 From: Scott Fairclough <70711990+hexoscott@users.noreply.github.com> Date: Tue, 12 Nov 2024 09:22:02 +0000 Subject: [PATCH 32/88] fall back method for checking batch closed state (#1442) --- turbo/jsonrpc/zkevm_api.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/turbo/jsonrpc/zkevm_api.go b/turbo/jsonrpc/zkevm_api.go index 7f4670e6732..8825bbbe44a 100644 --- a/turbo/jsonrpc/zkevm_api.go +++ b/turbo/jsonrpc/zkevm_api.go @@ -576,6 +576,21 @@ func (api *ZkEvmAPIImpl) GetBatchByNumber(ctx context.Context, rpcBatchNumber rp return nil, err } + if batchNo <= latestClosedbatchNum { + // simple check if we have a closed batch entry higher than or equal to the one requested + batch.Closed = true + } else { + // we might be missing a batch end along the way so lets double check if we have a block + // from the next batch or not + _, foundHigher, err := hermezDb.GetLowestBlockInBatch(batchNo + 1) + if err != nil { + return nil, err + } + if foundHigher { + batch.Closed = true + } + } + batch.Closed = batchNo <= latestClosedbatchNum } From bcc110d47f6c952f723fca1ed85e1395d8a5a1a2 Mon Sep 17 00:00:00 2001 From: Valentin Staykov <79150443+V-Staykov@users.noreply.github.com> Date: Tue, 12 Nov 2024 11:30:19 +0200 Subject: [PATCH 33/88] fix: setting correct timeout on buffer emptying (#1437) * fix: setting correct timeout on buffer emptying * fix: handle sent stop command error * fix: send stop command if already started error occurs * fix: nullate errors on loops * fix: add stops after all start commands * fix datastream unit test after new timeout code is in place * fix for test DS client * DS test timeout updates * test helper for stream client * change to handling isStreaming logic in DS client * missing function on DS test client --------- Co-authored-by: Scott Fairclough --- 
zk/datastream/client/stream_client.go | 94 +++++++++++++++++----- zk/datastream/client/stream_client_test.go | 15 ++-- zk/stages/stage_batches.go | 10 ++- zk/stages/test_utils.go | 7 +- 4 files changed, 94 insertions(+), 32 deletions(-) diff --git a/zk/datastream/client/stream_client.go b/zk/datastream/client/stream_client.go index e10471863a1..1c536b87095 100644 --- a/zk/datastream/client/stream_client.go +++ b/zk/datastream/client/stream_client.go @@ -13,6 +13,7 @@ import ( "github.com/ledgerwatch/erigon/zk/datastream/proto/github.com/0xPolygonHermez/zkevm-node/state/datastream" "github.com/ledgerwatch/erigon/zk/datastream/types" "github.com/ledgerwatch/log/v3" + "sync" ) type StreamType uint64 @@ -49,7 +50,8 @@ type StreamClient struct { // atomic lastWrittenTime atomic.Int64 - streaming atomic.Bool + mtxStreaming *sync.Mutex + streaming bool progress atomic.Uint64 stopReadingToChannel atomic.Bool @@ -58,6 +60,11 @@ type StreamClient struct { // keeps track of the latest fork from the stream to assign to l2 blocks currentFork uint64 + + // used for testing, during normal execution lots of stop streaming commands are sent + // which makes sense for an active server listening for these things but in unit tests + // this makes behaviour very unpredictable and hard to test + allowStops bool } const ( @@ -83,6 +90,7 @@ func NewClient(ctx context.Context, server string, version int, checkTimeout tim streamType: StSequencer, entryChan: make(chan interface{}, 100000), currentFork: uint64(latestDownloadedForkId), + mtxStreaming: &sync.Mutex{}, } return c @@ -133,7 +141,9 @@ func (c *StreamClient) GetL2BlockByNumber(blockNum uint64) (fullBLock *types.Ful if errors.Is(err, types.ErrAlreadyStarted) { // if the client is already started, we can stop the client and try again - c.Stop() + if errStop := c.Stop(); errStop != nil { + log.Warn("failed to send stop command", "error", errStop) + } } else if !errors.Is(err, ErrSocket) { return nil, fmt.Errorf("getL2BlockByNumber: %w", err) } @@ -142,6 +152,7 @@ func (c *StreamClient) GetL2BlockByNumber(blockNum uint64) (fullBLock *types.Ful time.Sleep(1 * time.Second) connected = c.handleSocketError(err) count++ + err = nil } return fullBLock, nil @@ -182,6 +193,10 @@ func (c *StreamClient) getL2BlockByNumber(blockNum uint64) (l2Block *types.FullL return nil, fmt.Errorf("expected block number %d but got %d", blockNum, l2Block.L2BlockNumber) } + if err := c.Stop(); err != nil { + return nil, fmt.Errorf("Stop: %w", err) + } + return l2Block, nil } @@ -203,16 +218,25 @@ func (c *StreamClient) GetLatestL2Block() (l2Block *types.FullL2Block, err error return nil, ErrFailedAttempts } if connected { - if err := c.stopStreamingIfStarted(); err != nil { - return nil, fmt.Errorf("stopStreamingIfStarted: %w", err) + if err = c.stopStreamingIfStarted(); err != nil { + err = fmt.Errorf("stopStreamingIfStarted: %w", err) } - - if l2Block, err = c.getLatestL2Block(); err == nil { - break + if err == nil { + if l2Block, err = c.getLatestL2Block(); err == nil { + break + } + err = fmt.Errorf("getLatestL2Block: %w", err) } - if !errors.Is(err, ErrSocket) { - return nil, fmt.Errorf("getLatestL2Block: %w", err) + + if err != nil && !errors.Is(err, ErrSocket) { + return nil, err + } else if errors.Is(err, types.ErrAlreadyStarted) { + // if the client is already started, we can stop the client and try again + if errStop := c.Stop(); errStop != nil { + log.Warn("failed to send stop command", "error", errStop) + } } + err = nil } time.Sleep(1 * time.Second) @@ -222,17 +246,31 @@ 
func (c *StreamClient) GetLatestL2Block() (l2Block *types.FullL2Block, err error return l2Block, nil } +func (c *StreamClient) getStreaming() bool { + c.mtxStreaming.Lock() + defer c.mtxStreaming.Unlock() + return c.streaming +} + +func (c *StreamClient) setStreaming(val bool) { + c.mtxStreaming.Lock() + defer c.mtxStreaming.Unlock() + c.streaming = val +} + // don't check for errors here, we just need to empty the socket for next reads func (c *StreamClient) stopStreamingIfStarted() error { - if c.streaming.Load() { - c.sendStopCmd() - c.streaming.Store(false) + if c.getStreaming() { + if err := c.sendStopCmd(); err != nil { + return fmt.Errorf("sendStopCmd: %w", err) + } + c.setStreaming(false) } // empty the socket buffer for { - c.conn.SetReadDeadline(time.Now().Add(100)) - if _, err := c.readBuffer(100); err != nil { + c.conn.SetReadDeadline(time.Now().Add(1 * time.Millisecond)) + if _, err := readBuffer(c.conn, 1000 /* arbitrary number*/); err != nil { break } } @@ -271,6 +309,10 @@ func (c *StreamClient) getLatestL2Block() (l2Block *types.FullL2Block, err error return nil, errors.New("no block found") } + if err := c.Stop(); err != nil { + return nil, fmt.Errorf("Stop: %w", err) + } + return l2Block, nil } @@ -294,15 +336,15 @@ func (c *StreamClient) Start() error { return nil } -func (c *StreamClient) Stop() { - if c.conn == nil { - return +func (c *StreamClient) Stop() error { + if c.conn == nil || !c.allowStops { + return nil } if err := c.sendStopCmd(); err != nil { - log.Warn(fmt.Sprintf("send stop command: %v", err)) + return fmt.Errorf("sendStopCmd: %w", err) } - // c.conn.Close() - // c.conn = nil + + return nil } // Command header: Get status @@ -467,7 +509,7 @@ func (c *StreamClient) handleSocketError(socketErr error) bool { // reads entries to the end of the stream // at end will wait for new entries to arrive func (c *StreamClient) readAllEntriesToChannel() (err error) { - c.streaming.Store(true) + c.setStreaming(true) c.stopReadingToChannel.Store(false) var bookmark *types.BookmarkProto @@ -502,6 +544,8 @@ func (c *StreamClient) initiateDownloadBookmark(bookmark []byte) (*types.ResultE return nil, fmt.Errorf("sendBookmarkCmd: %w", err) } + c.setStreaming(true) + re, err := c.afterStartCommand() if err != nil { return re, fmt.Errorf("afterStartCommand: %w", err) @@ -945,3 +989,11 @@ func (c *StreamClient) resetReadTimeout() error { return nil } + +// PrepUnwind handles the state of the client prior to searching to the +// common ancestor block +func (c *StreamClient) PrepUnwind() { + // this is to ensure that the later call to stop streaming if streaming + // is activated. 
+ c.setStreaming(true) +} diff --git a/zk/datastream/client/stream_client_test.go b/zk/datastream/client/stream_client_test.go index f8078889e6b..db0f80e088a 100644 --- a/zk/datastream/client/stream_client_test.go +++ b/zk/datastream/client/stream_client_test.go @@ -50,7 +50,7 @@ func TestStreamClientReadHeaderEntry(t *testing.T) { } for _, testCase := range testCases { - c := NewClient(context.Background(), "", 0, 2*time.Second, 0) + c := NewClient(context.Background(), "", 0, 500*time.Millisecond, 0) server, conn := net.Pipe() defer server.Close() defer c.Stop() @@ -118,7 +118,7 @@ func TestStreamClientReadResultEntry(t *testing.T) { } for _, testCase := range testCases { - c := NewClient(context.Background(), "", 0, 2*time.Second, 0) + c := NewClient(context.Background(), "", 0, 500*time.Millisecond, 0) server, conn := net.Pipe() defer server.Close() defer c.Stop() @@ -191,7 +191,7 @@ func TestStreamClientReadFileEntry(t *testing.T) { }, } for _, testCase := range testCases { - c := NewClient(context.Background(), "", 0, 2*time.Second, 0) + c := NewClient(context.Background(), "", 0, 500*time.Millisecond, 0) server, conn := net.Pipe() defer c.Stop() defer server.Close() @@ -215,7 +215,7 @@ func TestStreamClientReadFileEntry(t *testing.T) { } func TestStreamClientReadParsedProto(t *testing.T) { - c := NewClient(context.Background(), "", 0, 2*time.Second, 0) + c := NewClient(context.Background(), "", 0, 500*time.Millisecond, 0) serverConn, clientConn := net.Pipe() c.conn = clientConn c.checkTimeout = 1 * time.Second @@ -287,9 +287,10 @@ func TestStreamClientGetLatestL2Block(t *testing.T) { clientConn.Close() }() - c := NewClient(context.Background(), "", 0, 2*time.Second, 0) + c := NewClient(context.Background(), "", 0, 500*time.Millisecond, 0) c.conn = clientConn c.checkTimeout = 1 * time.Second + c.allowStops = false expectedL2Block, _ := createL2BlockAndTransactions(t, 5, 0) l2BlockProto := &types.L2BlockProto{L2Block: expectedL2Block} l2BlockRaw, err := l2BlockProto.Marshal() @@ -400,11 +401,12 @@ func TestStreamClientGetL2BlockByNumber(t *testing.T) { clientConn.Close() }() - c := NewClient(context.Background(), "", 0, 2*time.Second, 0) + c := NewClient(context.Background(), "", 0, 500*time.Millisecond, 0) c.header = &types.HeaderEntry{ TotalEntries: 4, } c.conn = clientConn + c.allowStops = false c.checkTimeout = 1 * time.Second bookmark := types.NewBookmarkProto(blockNum, datastream.BookmarkType_BOOKMARK_TYPE_L2_BLOCK) bookmarkRaw, err := bookmark.Marshal() @@ -487,7 +489,6 @@ func TestStreamClientGetL2BlockByNumber(t *testing.T) { return } } - } go createServerResponses(t, serverConn, bookmarkRaw, l2BlockRaw, l2TxsRaw, l2BlockEndRaw, errCh) diff --git a/zk/stages/stage_batches.go b/zk/stages/stage_batches.go index ed2b8291fa4..0fbb448c16b 100644 --- a/zk/stages/stage_batches.go +++ b/zk/stages/stage_batches.go @@ -66,7 +66,8 @@ type DatastreamClient interface { GetLatestL2Block() (*types.FullL2Block, error) GetProgressAtomic() *atomic.Uint64 Start() error - Stop() + Stop() error + PrepUnwind() } type DatastreamReadRunner interface { @@ -208,7 +209,7 @@ func SpawnStageBatches( log.Info(fmt.Sprintf("[%s] Waiting for at least one new block in datastream", logPrefix), "datastreamBlock", highestDSL2Block.L2BlockNumber, "last processed block", stageProgressBlockNo) newBlockCheckStartTIme = time.Now() } - time.Sleep(1 * time.Second) + time.Sleep(50 * time.Millisecond) } log.Debug(fmt.Sprintf("[%s] Highest block in db and datastream", logPrefix), "datastreamBlock", 
highestDSL2Block.L2BlockNumber, "dbBlock", stageProgressBlockNo) @@ -630,6 +631,7 @@ func rollback( tx kv.RwTx, u stagedsync.Unwinder, ) (uint64, error) { + dsQueryClient.PrepUnwind() ancestorBlockNum, ancestorBlockHash, err := findCommonAncestor(eriDb, hermezDb, dsQueryClient, latestDSBlockNum) if err != nil { return 0, err @@ -746,7 +748,9 @@ func newStreamClient(ctx context.Context, cfg BatchesCfg, latestForkId uint64) ( return nil, nil, fmt.Errorf("dsClient.Start: %w", err) } stopFn = func() { - dsClient.Stop() + if err := dsClient.Stop(); err != nil { + log.Warn("Failed to stop datastream client", "err", err) + } } } else { dsClient = cfg.dsClient diff --git a/zk/stages/test_utils.go b/zk/stages/test_utils.go index f24557522b4..221ccc1734b 100644 --- a/zk/stages/test_utils.go +++ b/zk/stages/test_utils.go @@ -100,6 +100,11 @@ func (c *TestDatastreamClient) Start() error { return nil } -func (c *TestDatastreamClient) Stop() { +func (c *TestDatastreamClient) Stop() error { c.isStarted = false + return nil +} + +func (c *TestDatastreamClient) PrepUnwind() { + // do nothing } From 73591fe196097c83c5d5d08e6d0cfc405dc581ae Mon Sep 17 00:00:00 2001 From: Valentin Staykov <79150443+V-Staykov@users.noreply.github.com> Date: Tue, 12 Nov 2024 12:19:21 +0200 Subject: [PATCH 34/88] fix: use stage_execute block numbers for a sequencer in rpc calls (#1444) --- turbo/jsonrpc/bor_helper.go | 2 +- turbo/jsonrpc/debug_api.go | 5 +- turbo/jsonrpc/erigon_block.go | 4 +- turbo/jsonrpc/erigon_receipts.go | 2 +- turbo/jsonrpc/eth_api.go | 6 +- turbo/jsonrpc/eth_block.go | 6 +- turbo/jsonrpc/eth_block_zkevm.go | 2 +- turbo/jsonrpc/eth_call.go | 12 ++-- turbo/jsonrpc/eth_callMany.go | 2 +- turbo/jsonrpc/eth_callMany_zkevm.go | 2 +- turbo/jsonrpc/eth_receipts.go | 4 +- turbo/jsonrpc/eth_txs.go | 2 +- turbo/jsonrpc/eth_txs_zkevm.go | 2 +- turbo/jsonrpc/eth_uncles.go | 4 +- turbo/jsonrpc/graphql_api.go | 2 +- turbo/jsonrpc/otterscan_api.go | 2 +- turbo/jsonrpc/otterscan_has_code.go | 2 +- turbo/jsonrpc/overlay_api.go | 8 +-- turbo/jsonrpc/trace_adhoc.go | 8 +-- turbo/jsonrpc/trace_filtering.go | 2 +- turbo/jsonrpc/tracing.go | 6 +- turbo/jsonrpc/tracing_zkevm.go | 2 +- turbo/jsonrpc/zkevm_api.go | 6 +- turbo/jsonrpc/zkevm_counters.go | 2 +- turbo/rpchelper/helper_zkevm.go | 99 +++++++++++++++++++++++++++++ 25 files changed, 147 insertions(+), 47 deletions(-) diff --git a/turbo/jsonrpc/bor_helper.go b/turbo/jsonrpc/bor_helper.go index db0ad4ea60b..b9e826c00b7 100644 --- a/turbo/jsonrpc/bor_helper.go +++ b/turbo/jsonrpc/bor_helper.go @@ -58,7 +58,7 @@ func getHeaderByNumber(ctx context.Context, number rpc.BlockNumber, api *BorImpl return block.Header(), nil } - blockNum, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) + blockNum, _, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/debug_api.go b/turbo/jsonrpc/debug_api.go index 095dc8c0ec1..1f65b7d47c4 100644 --- a/turbo/jsonrpc/debug_api.go +++ b/turbo/jsonrpc/debug_api.go @@ -375,7 +375,8 @@ func (api *PrivateDebugAPIImpl) GetRawHeader(ctx context.Context, blockNrOrHash return nil, err } defer tx.Rollback() - n, h, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + + n, h, _, err := rpchelper.GetBlockNumber_zkevm(blockNrOrHash, tx, api.filters) if err != nil { return nil, err } @@ -395,7 +396,7 @@ func (api *PrivateDebugAPIImpl) GetRawBlock(ctx context.Context, blockNrOrHash r return nil, 
err } defer tx.Rollback() - n, h, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + n, h, _, err := rpchelper.GetBlockNumber_zkevm(blockNrOrHash, tx, api.filters) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/erigon_block.go b/turbo/jsonrpc/erigon_block.go index f6ef01ef1cb..32a401224ef 100644 --- a/turbo/jsonrpc/erigon_block.go +++ b/turbo/jsonrpc/erigon_block.go @@ -43,7 +43,7 @@ func (api *ErigonImpl) GetHeaderByNumber(ctx context.Context, blockNumber rpc.Bl } defer tx.Rollback() - blockNum, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(blockNumber), tx, api.filters) + blockNum, _, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(blockNumber), tx, api.filters) if err != nil { return nil, err } @@ -213,7 +213,7 @@ func (api *ErigonImpl) GetBalanceChangesInBlock(ctx context.Context, blockNrOrHa return nil, err } - blockNumber, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + blockNumber, _, _, err := rpchelper.GetBlockNumber_zkevm(blockNrOrHash, tx, api.filters) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/erigon_receipts.go b/turbo/jsonrpc/erigon_receipts.go index a738e19b509..17c6cb79b91 100644 --- a/turbo/jsonrpc/erigon_receipts.go +++ b/turbo/jsonrpc/erigon_receipts.go @@ -407,7 +407,7 @@ func (api *ErigonImpl) GetBlockReceiptsByBlockHash(ctx context.Context, cannonic } } - blockNum, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithHash(cannonicalBlockHash, true), tx, api.filters) + blockNum, _, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithHash(cannonicalBlockHash, true), tx, api.filters) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/eth_api.go b/turbo/jsonrpc/eth_api.go index 0c25eea5734..54aa076ee0e 100644 --- a/turbo/jsonrpc/eth_api.go +++ b/turbo/jsonrpc/eth_api.go @@ -286,7 +286,7 @@ func (api *BaseAPI) pendingBlock() *types.Block { } func (api *BaseAPI) blockByRPCNumber(ctx context.Context, number rpc.BlockNumber, tx kv.Tx) (*types.Block, error) { - n, h, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) + n, h, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) if err != nil { return nil, err } @@ -297,7 +297,7 @@ func (api *BaseAPI) blockByRPCNumber(ctx context.Context, number rpc.BlockNumber } func (api *BaseAPI) headerByRPCNumber(ctx context.Context, number rpc.BlockNumber, tx kv.Tx) (*types.Header, error) { - n, h, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) + n, h, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) if err != nil { return nil, err } @@ -318,7 +318,7 @@ func (api *BaseAPI) checkPruneHistory(tx kv.Tx, block uint64) error { return nil } if p.History.Enabled() { - latest, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber), tx, api.filters) + latest, _, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber), tx, api.filters) if err != nil { return err } diff --git a/turbo/jsonrpc/eth_block.go b/turbo/jsonrpc/eth_block.go index d0155d6c854..2c66239b379 100644 --- a/turbo/jsonrpc/eth_block.go +++ b/turbo/jsonrpc/eth_block.go @@ -77,7 +77,7 @@ func (api *APIImpl) deprecated_CallBundle(ctx context.Context, txHashes []common } defer func(start time.Time) { log.Trace("Executing EVM call finished", "runtime", time.Since(start)) 
}(time.Now()) - stateBlockNumber, hash, latest, err := rpchelper.GetBlockNumber(stateBlockNumberOrHash, tx, api.filters) + stateBlockNumber, hash, latest, err := rpchelper.GetBlockNumber_zkevm(stateBlockNumberOrHash, tx, api.filters) if err != nil { return nil, err } @@ -341,7 +341,7 @@ func (api *APIImpl) GetBlockTransactionCountByNumber(ctx context.Context, blockN return &n, nil } - blockNum, blockHash, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(blockNr), tx, api.filters) + blockNum, blockHash, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(blockNr), tx, api.filters) if err != nil { return nil, err } @@ -388,7 +388,7 @@ func (api *APIImpl) GetBlockTransactionCountByHash(ctx context.Context, blockHas } defer tx.Rollback() - blockNum, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHash{BlockHash: &blockHash}, tx, nil) + blockNum, _, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHash{BlockHash: &blockHash}, tx, nil) if err != nil { // (Compatibility) Every other node just return `null` for when the block does not exist. log.Debug("eth_getBlockTransactionCountByHash GetBlockNumber failed", "err", err) diff --git a/turbo/jsonrpc/eth_block_zkevm.go b/turbo/jsonrpc/eth_block_zkevm.go index d0a7d87c77f..6f82477f685 100644 --- a/turbo/jsonrpc/eth_block_zkevm.go +++ b/turbo/jsonrpc/eth_block_zkevm.go @@ -75,7 +75,7 @@ func (api *APIImpl) CallBundle(ctx context.Context, txHashes []common.Hash, stat } defer func(start time.Time) { log.Trace("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now()) - stateBlockNumber, hash, latest, err := rpchelper.GetBlockNumber(stateBlockNumberOrHash, tx, api.filters) + stateBlockNumber, hash, latest, err := rpchelper.GetBlockNumber_zkevm(stateBlockNumberOrHash, tx, api.filters) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/eth_call.go b/turbo/jsonrpc/eth_call.go index 690bb746fc3..188ff13331c 100644 --- a/turbo/jsonrpc/eth_call.go +++ b/turbo/jsonrpc/eth_call.go @@ -56,7 +56,7 @@ func (api *APIImpl) Call(ctx context.Context, args ethapi2.CallArgs, blockNrOrHa args.Gas = (*hexutil.Uint64)(&api.GasCap) } - blockNumber, hash, _, err := rpchelper.GetCanonicalBlockNumber(blockNrOrHash, tx, api.filters) // DoCall cannot be executed on non-canonical blocks + blockNumber, hash, _, err := rpchelper.GetCanonicalBlockNumber_zkevm(blockNrOrHash, tx, api.filters) // DoCall cannot be executed on non-canonical blocks if err != nil { return nil, err } @@ -92,7 +92,7 @@ func (api *APIImpl) Call(ctx context.Context, args ethapi2.CallArgs, blockNrOrHa // headerByNumberOrHash - intent to read recent headers only, tries from the lru cache before reading from the db func headerByNumberOrHash(ctx context.Context, tx kv.Tx, blockNrOrHash rpc.BlockNumberOrHash, api *APIImpl) (*types.Header, error) { - _, bNrOrHashHash, _, err := rpchelper.GetCanonicalBlockNumber(blockNrOrHash, tx, api.filters) + _, bNrOrHashHash, _, err := rpchelper.GetCanonicalBlockNumber_zkevm(blockNrOrHash, tx, api.filters) if err != nil { return nil, err } @@ -101,7 +101,7 @@ func headerByNumberOrHash(ctx context.Context, tx kv.Tx, blockNrOrHash rpc.Block return block.Header(), nil } - blockNum, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + blockNum, _, _, err := rpchelper.GetBlockNumber_zkevm(blockNrOrHash, tx, api.filters) if err != nil { return nil, err } @@ -227,7 +227,7 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs } engine := api.engine() - 
latestCanBlockNumber, latestCanHash, isLatest, err := rpchelper.GetCanonicalBlockNumber(bNrOrHash, dbtx, api.filters) // DoCall cannot be executed on non-canonical blocks + latestCanBlockNumber, latestCanHash, isLatest, err := rpchelper.GetCanonicalBlockNumber_zkevm(bNrOrHash, dbtx, api.filters) // DoCall cannot be executed on non-canonical blocks if err != nil { return 0, err } @@ -329,7 +329,7 @@ func (api *APIImpl) GetProof(ctx context.Context, address libcommon.Address, sto return nil, fmt.Errorf("not supported by Erigon3") } - blockNr, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + blockNr, _, _, err := rpchelper.GetBlockNumber_zkevm(blockNrOrHash, tx, api.filters) if err != nil { return nil, err } @@ -444,7 +444,7 @@ func (api *APIImpl) CreateAccessList(ctx context.Context, args ethapi2.CallArgs, } engine := api.engine() - blockNumber, hash, latest, err := rpchelper.GetCanonicalBlockNumber(bNrOrHash, tx, api.filters) // DoCall cannot be executed on non-canonical blocks + blockNumber, hash, latest, err := rpchelper.GetCanonicalBlockNumber_zkevm(bNrOrHash, tx, api.filters) // DoCall cannot be executed on non-canonical blocks if err != nil { return nil, err } diff --git a/turbo/jsonrpc/eth_callMany.go b/turbo/jsonrpc/eth_callMany.go index 5aa0d59bf9a..f1748f53a83 100644 --- a/turbo/jsonrpc/eth_callMany.go +++ b/turbo/jsonrpc/eth_callMany.go @@ -106,7 +106,7 @@ func (api *APIImpl) CallMany_deprecated(ctx context.Context, bundles []Bundle, s defer func(start time.Time) { log.Trace("Executing EVM callMany finished", "runtime", time.Since(start)) }(time.Now()) - blockNum, hash, _, err := rpchelper.GetBlockNumber(simulateContext.BlockNumber, tx, api.filters) + blockNum, hash, _, err := rpchelper.GetBlockNumber_zkevm(simulateContext.BlockNumber, tx, api.filters) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/eth_callMany_zkevm.go b/turbo/jsonrpc/eth_callMany_zkevm.go index d7b1c03f925..0c90a63e5f2 100644 --- a/turbo/jsonrpc/eth_callMany_zkevm.go +++ b/turbo/jsonrpc/eth_callMany_zkevm.go @@ -62,7 +62,7 @@ func (api *APIImpl) CallMany(ctx context.Context, bundles []Bundle, simulateCont defer func(start time.Time) { log.Trace("Executing EVM callMany finished", "runtime", time.Since(start)) }(time.Now()) - blockNum, hash, _, err := rpchelper.GetBlockNumber(simulateContext.BlockNumber, tx, api.filters) + blockNum, hash, _, err := rpchelper.GetBlockNumber_zkevm(simulateContext.BlockNumber, tx, api.filters) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/eth_receipts.go b/turbo/jsonrpc/eth_receipts.go index e95acd6dcab..1dd8b622468 100644 --- a/turbo/jsonrpc/eth_receipts.go +++ b/turbo/jsonrpc/eth_receipts.go @@ -118,7 +118,7 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) (t end = header.Number.Uint64() } else { // Convert the RPC block numbers into internal representations - latest, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber), tx, nil) + latest, _, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber), tx, nil) if err != nil { return nil, err } @@ -691,7 +691,7 @@ func (api *APIImpl) GetBlockReceipts(ctx context.Context, number rpc.BlockNumber } defer tx.Rollback() - blockNum, blockHash, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(*number.BlockNumber), tx, api.filters) + blockNum, blockHash, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(*number.BlockNumber), tx, api.filters) 
if err != nil { return nil, err } diff --git a/turbo/jsonrpc/eth_txs.go b/turbo/jsonrpc/eth_txs.go index 530fe2352a3..7a4473ab3ae 100644 --- a/turbo/jsonrpc/eth_txs.go +++ b/turbo/jsonrpc/eth_txs.go @@ -236,7 +236,7 @@ func (api *APIImpl) GetTransactionByBlockNumberAndIndex_deprecated(ctx context.C } // https://infura.io/docs/ethereum/json-rpc/eth-getTransactionByBlockNumberAndIndex - blockNum, hash, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(blockNr), tx, api.filters) + blockNum, hash, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(blockNr), tx, api.filters) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/eth_txs_zkevm.go b/turbo/jsonrpc/eth_txs_zkevm.go index ac4cb12de00..1fc0fc26788 100644 --- a/turbo/jsonrpc/eth_txs_zkevm.go +++ b/turbo/jsonrpc/eth_txs_zkevm.go @@ -197,7 +197,7 @@ func (api *APIImpl) GetTransactionByBlockNumberAndIndex(ctx context.Context, blo } // https://infura.io/docs/ethereum/json-rpc/eth-getTransactionByBlockNumberAndIndex - blockNum, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(blockNr), tx, api.filters) + blockNum, _, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(blockNr), tx, api.filters) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/eth_uncles.go b/turbo/jsonrpc/eth_uncles.go index f0fdeb646e2..66c3a7bcbf0 100644 --- a/turbo/jsonrpc/eth_uncles.go +++ b/turbo/jsonrpc/eth_uncles.go @@ -32,7 +32,7 @@ func (api *APIImpl) GetUncleByBlockNumberAndIndex(ctx context.Context, number rp } defer tx.Rollback() - blockNum, hash, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) + blockNum, hash, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) if err != nil { return nil, err } @@ -102,7 +102,7 @@ func (api *APIImpl) GetUncleCountByBlockNumber(ctx context.Context, number rpc.B } defer tx.Rollback() - blockNum, blockHash, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) + blockNum, blockHash, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) if err != nil { return &n, err } diff --git a/turbo/jsonrpc/graphql_api.go b/turbo/jsonrpc/graphql_api.go index 44eff638c60..e5b4034b30f 100644 --- a/turbo/jsonrpc/graphql_api.go +++ b/turbo/jsonrpc/graphql_api.go @@ -101,7 +101,7 @@ func (api *GraphQLAPIImpl) getBlockWithSenders(ctx context.Context, number rpc.B return api.pendingBlock(), nil, nil } - blockHeight, blockHash, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) + blockHeight, blockHash, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) if err != nil { return nil, nil, err } diff --git a/turbo/jsonrpc/otterscan_api.go b/turbo/jsonrpc/otterscan_api.go index 9b925903547..54b07f102bb 100644 --- a/turbo/jsonrpc/otterscan_api.go +++ b/turbo/jsonrpc/otterscan_api.go @@ -569,7 +569,7 @@ func (api *OtterscanAPIImpl) getBlockWithSenders(ctx context.Context, number rpc return api.pendingBlock(), nil, nil } - n, hash, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) + n, hash, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) if err != nil { return nil, nil, err } diff --git a/turbo/jsonrpc/otterscan_has_code.go b/turbo/jsonrpc/otterscan_has_code.go index af442e8d000..8f9bfd1fe55 100644 --- 
a/turbo/jsonrpc/otterscan_has_code.go +++ b/turbo/jsonrpc/otterscan_has_code.go @@ -17,7 +17,7 @@ func (api *OtterscanAPIImpl) HasCode(ctx context.Context, address common.Address } defer tx.Rollback() - blockNumber, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + blockNumber, _, _, err := rpchelper.GetBlockNumber_zkevm(blockNrOrHash, tx, api.filters) if err != nil { return false, err } diff --git a/turbo/jsonrpc/overlay_api.go b/turbo/jsonrpc/overlay_api.go index 0b6949f5b87..856452959c4 100644 --- a/turbo/jsonrpc/overlay_api.go +++ b/turbo/jsonrpc/overlay_api.go @@ -420,7 +420,7 @@ func (api *OverlayAPIImpl) replayBlock(ctx context.Context, blockNum uint64, sta overrideBlockHash = make(map[uint64]common.Hash) blockNumber := rpc.BlockNumber(blockNum) - blockNum, hash, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHash{BlockNumber: &blockNumber}, tx, api.filters) + blockNum, hash, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHash{BlockNumber: &blockNumber}, tx, api.filters) if err != nil { return nil, err } @@ -580,7 +580,7 @@ func getBeginEnd(ctx context.Context, tx kv.Tx, api *OverlayAPIImpl, crit filter end = num } else { // Convert the RPC block numbers into internal representations - latest, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(rpc.LatestExecutedBlockNumber), tx, nil) + latest, _, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(rpc.LatestExecutedBlockNumber), tx, nil) if err != nil { return 0, 0, err } @@ -592,7 +592,7 @@ func getBeginEnd(ctx context.Context, tx kv.Tx, api *OverlayAPIImpl, crit filter begin = uint64(fromBlock) } else { blockNum := rpc.BlockNumber(fromBlock) - begin, _, _, err = rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(blockNum), tx, api.filters) + begin, _, _, err = rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(blockNum), tx, api.filters) if err != nil { return 0, 0, err } @@ -606,7 +606,7 @@ func getBeginEnd(ctx context.Context, tx kv.Tx, api *OverlayAPIImpl, crit filter end = uint64(toBlock) } else { blockNum := rpc.BlockNumber(toBlock) - end, _, _, err = rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(blockNum), tx, api.filters) + end, _, _, err = rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(blockNum), tx, api.filters) if err != nil { return 0, 0, err } diff --git a/turbo/jsonrpc/trace_adhoc.go b/turbo/jsonrpc/trace_adhoc.go index 99f7d259932..224127f4429 100644 --- a/turbo/jsonrpc/trace_adhoc.go +++ b/turbo/jsonrpc/trace_adhoc.go @@ -848,7 +848,7 @@ func (api *TraceAPIImpl) ReplayBlockTransactions(ctx context.Context, blockNrOrH return nil, err } - blockNumber, blockHash, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + blockNumber, blockHash, _, err := rpchelper.GetBlockNumber_zkevm(blockNrOrHash, tx, api.filters) if err != nil { return nil, err } @@ -923,7 +923,7 @@ func (api *TraceAPIImpl) Call(ctx context.Context, args TraceCallParam, traceTyp blockNrOrHash = &rpc.BlockNumberOrHash{BlockNumber: &num} } - blockNumber, hash, _, err := rpchelper.GetBlockNumber(*blockNrOrHash, tx, api.filters) + blockNumber, hash, _, err := rpchelper.GetBlockNumber_zkevm(*blockNrOrHash, tx, api.filters) if err != nil { return nil, err } @@ -1095,7 +1095,7 @@ func (api *TraceAPIImpl) CallMany(ctx context.Context, calls json.RawMessage, pa var num = rpc.LatestBlockNumber parentNrOrHash = &rpc.BlockNumberOrHash{BlockNumber: &num} } - blockNumber, hash, _, err := rpchelper.GetBlockNumber(*parentNrOrHash, 
dbtx, api.filters) + blockNumber, hash, _, err := rpchelper.GetBlockNumber_zkevm(*parentNrOrHash, dbtx, api.filters) if err != nil { return nil, err } @@ -1141,7 +1141,7 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type var num = rpc.LatestBlockNumber parentNrOrHash = &rpc.BlockNumberOrHash{BlockNumber: &num} } - blockNumber, hash, _, err := rpchelper.GetBlockNumber(*parentNrOrHash, dbtx, api.filters) + blockNumber, hash, _, err := rpchelper.GetBlockNumber_zkevm(*parentNrOrHash, dbtx, api.filters) if err != nil { return nil, nil, err } diff --git a/turbo/jsonrpc/trace_filtering.go b/turbo/jsonrpc/trace_filtering.go index 66dd279e23a..2cf36b3768d 100644 --- a/turbo/jsonrpc/trace_filtering.go +++ b/turbo/jsonrpc/trace_filtering.go @@ -174,7 +174,7 @@ func (api *TraceAPIImpl) Block(ctx context.Context, blockNr rpc.BlockNumber, gas return nil, err } defer tx.Rollback() - blockNum, hash, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(blockNr), tx, api.filters) + blockNum, hash, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(blockNr), tx, api.filters) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/tracing.go b/turbo/jsonrpc/tracing.go index 4372a0692b7..31f1efb9d87 100644 --- a/turbo/jsonrpc/tracing.go +++ b/turbo/jsonrpc/tracing.go @@ -62,7 +62,7 @@ func (api *PrivateDebugAPIImpl) traceBlock_deprecated(ctx context.Context, block return fmt.Errorf("invalid arguments; neither block nor hash specified") } - blockNumber, hash, _, err := rpchelper.GetCanonicalBlockNumber(blockNrOrHash, tx, api.filters) + blockNumber, hash, _, err := rpchelper.GetCanonicalBlockNumber_zkevm(blockNrOrHash, tx, api.filters) if err != nil { stream.WriteNil() return err @@ -307,7 +307,7 @@ func (api *PrivateDebugAPIImpl) TraceCall(ctx context.Context, args ethapi.CallA } engine := api.engine() - blockNumber, hash, isLatest, err := rpchelper.GetBlockNumber(blockNrOrHash, dbtx, api.filters) + blockNumber, hash, isLatest, err := rpchelper.GetBlockNumber_zkevm(blockNrOrHash, dbtx, api.filters) if err != nil { return fmt.Errorf("get block number: %v", err) } @@ -405,7 +405,7 @@ func (api *PrivateDebugAPIImpl) TraceCallMany_deprecated(ctx context.Context, bu defer func(start time.Time) { log.Trace("Tracing CallMany finished", "runtime", time.Since(start)) }(time.Now()) - blockNum, hash, _, err := rpchelper.GetBlockNumber(simulateContext.BlockNumber, tx, api.filters) + blockNum, hash, _, err := rpchelper.GetBlockNumber_zkevm(simulateContext.BlockNumber, tx, api.filters) if err != nil { stream.WriteNil() return err diff --git a/turbo/jsonrpc/tracing_zkevm.go b/turbo/jsonrpc/tracing_zkevm.go index 3a0a2dd69fe..c54a10fe173 100644 --- a/turbo/jsonrpc/tracing_zkevm.go +++ b/turbo/jsonrpc/tracing_zkevm.go @@ -209,7 +209,7 @@ func (api *PrivateDebugAPIImpl) TraceCallMany(ctx context.Context, bundles []Bun defer func(start time.Time) { log.Trace("Tracing CallMany finished", "runtime", time.Since(start)) }(time.Now()) - blockNum, hash, _, err := rpchelper.GetBlockNumber(simulateContext.BlockNumber, tx, api.filters) + blockNum, hash, _, err := rpchelper.GetBlockNumber_zkevm(simulateContext.BlockNumber, tx, api.filters) if err != nil { stream.WriteNil() return err diff --git a/turbo/jsonrpc/zkevm_api.go b/turbo/jsonrpc/zkevm_api.go index 8825bbbe44a..22e10ff9e82 100644 --- a/turbo/jsonrpc/zkevm_api.go +++ b/turbo/jsonrpc/zkevm_api.go @@ -1036,12 +1036,12 @@ func (api *ZkEvmAPIImpl) getBlockRangeWitness(ctx context.Context, db kv.RoDB, s return 
nil, fmt.Errorf("not supported by Erigon3") } - blockNr, _, _, err := rpchelper.GetCanonicalBlockNumber(startBlockNrOrHash, tx, api.ethApi.filters) // DoCall cannot be executed on non-canonical blocks + blockNr, _, _, err := rpchelper.GetCanonicalBlockNumber_zkevm(startBlockNrOrHash, tx, api.ethApi.filters) // DoCall cannot be executed on non-canonical blocks if err != nil { return nil, err } - endBlockNr, _, _, err := rpchelper.GetCanonicalBlockNumber(endBlockNrOrHash, tx, api.ethApi.filters) // DoCall cannot be executed on non-canonical blocks + endBlockNr, _, _, err := rpchelper.GetCanonicalBlockNumber_zkevm(endBlockNrOrHash, tx, api.ethApi.filters) // DoCall cannot be executed on non-canonical blocks if err != nil { return nil, err @@ -1635,7 +1635,7 @@ func (zkapi *ZkEvmAPIImpl) GetProof(ctx context.Context, address common.Address, return nil, fmt.Errorf("not supported by Erigon3") } - blockNr, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + blockNr, _, _, err := rpchelper.GetBlockNumber_zkevm(blockNrOrHash, tx, api.filters) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/zkevm_counters.go b/turbo/jsonrpc/zkevm_counters.go index 7d28b26ad21..00106390d85 100644 --- a/turbo/jsonrpc/zkevm_counters.go +++ b/turbo/jsonrpc/zkevm_counters.go @@ -130,7 +130,7 @@ func (zkapi *ZkEvmAPIImpl) EstimateCounters(ctx context.Context, rpcTx *zkevmRPC } engine := api.engine() - latestCanBlockNumber, latestCanHash, isLatest, err := rpchelper.GetCanonicalBlockNumber(latestNumOrHash, dbtx, api.filters) // DoCall cannot be executed on non-canonical blocks + latestCanBlockNumber, latestCanHash, isLatest, err := rpchelper.GetCanonicalBlockNumber_zkevm(latestNumOrHash, dbtx, api.filters) // DoCall cannot be executed on non-canonical blocks if err != nil { return nil, err } diff --git a/turbo/rpchelper/helper_zkevm.go b/turbo/rpchelper/helper_zkevm.go index 754d42c1cbd..659c23df7de 100644 --- a/turbo/rpchelper/helper_zkevm.go +++ b/turbo/rpchelper/helper_zkevm.go @@ -1,12 +1,18 @@ package rpchelper import ( + "errors" "fmt" + libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + borfinality "github.com/ledgerwatch/erigon/polygon/bor/finality" + "github.com/ledgerwatch/erigon/polygon/bor/finality/whitelist" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/zk/hermez_db" + "github.com/ledgerwatch/erigon/zk/sequencer" ) func GetBatchNumber(rpcBatchNumber rpc.BlockNumber, tx kv.Tx, filters *Filters) (batchNumber uint64, latest bool, err error) { @@ -41,3 +47,96 @@ func GetBatchNumber(rpcBatchNumber rpc.BlockNumber, tx kv.Tx, filters *Filters) return batchNumber, latest, nil } + +func GetBlockNumber_zkevm(blockNrOrHash rpc.BlockNumberOrHash, tx kv.Tx, filters *Filters) (uint64, libcommon.Hash, bool, error) { + return _GetBlockNumber_zkevm(blockNrOrHash.RequireCanonical, blockNrOrHash, tx, filters) +} + +func GetCanonicalBlockNumber_zkevm(blockNrOrHash rpc.BlockNumberOrHash, tx kv.Tx, filters *Filters) (uint64, libcommon.Hash, bool, error) { + return _GetBlockNumber_zkevm(true, blockNrOrHash, tx, filters) +} + +func _GetBlockNumber_zkevm(requireCanonical bool, blockNrOrHash rpc.BlockNumberOrHash, tx kv.Tx, filters *Filters) (blockNumber uint64, hash libcommon.Hash, latest bool, err error) { + blockFinalizationType := stages.Finish + if sequencer.IsSequencer() { + blockFinalizationType = stages.Execution + } + + 
finishedBlockNumber, err := stages.GetStageProgress(tx, blockFinalizationType) + if err != nil { + return 0, libcommon.Hash{}, false, fmt.Errorf("getting finished block number: %w", err) + } + + var ok bool + hash, ok = blockNrOrHash.Hash() + if !ok { + number := *blockNrOrHash.BlockNumber + switch number { + case rpc.LatestBlockNumber: + if blockNumber, err = GetLatestFinishedBlockNumber(tx); err != nil { + return 0, libcommon.Hash{}, false, err + } + case rpc.EarliestBlockNumber: + blockNumber = 0 + case rpc.FinalizedBlockNumber: + if whitelist.GetWhitelistingService() != nil { + num := borfinality.GetFinalizedBlockNumber(tx) + if num == 0 { + // nolint + return 0, libcommon.Hash{}, false, errors.New("No finalized block") + } + + blockNum := borfinality.CurrentFinalizedBlock(tx, num).NumberU64() + blockHash := rawdb.ReadHeaderByNumber(tx, blockNum).Hash() + return blockNum, blockHash, false, nil + } + blockNumber, err = GetFinalizedBlockNumber(tx) + if err != nil { + return 0, libcommon.Hash{}, false, err + } + case rpc.SafeBlockNumber: + // [zkevm] safe not available, returns finilized instead + // blockNumber, err = GetSafeBlockNumber(tx) + blockNumber, err = GetFinalizedBlockNumber(tx) + if err != nil { + return 0, libcommon.Hash{}, false, err + } + case rpc.PendingBlockNumber: + pendingBlock := filters.LastPendingBlock() + if pendingBlock == nil { + blockNumber = finishedBlockNumber + } else { + return pendingBlock.NumberU64(), pendingBlock.Hash(), false, nil + } + case rpc.LatestExecutedBlockNumber: + blockNumber, err = stages.GetStageProgress(tx, stages.Execution) + if err != nil { + return 0, libcommon.Hash{}, false, fmt.Errorf("getting latest executed block number: %w", err) + } + default: + blockNumber = uint64(number.Int64()) + if blockNumber > finishedBlockNumber { + return 0, libcommon.Hash{}, false, fmt.Errorf("block with number %d not found", blockNumber) + } + } + hash, err = rawdb.ReadCanonicalHash(tx, blockNumber) + if err != nil { + return 0, libcommon.Hash{}, false, err + } + } else { + number := rawdb.ReadHeaderNumber(tx, hash) + if number == nil { + return 0, libcommon.Hash{}, false, fmt.Errorf("block %x not found", hash) + } + blockNumber = *number + + ch, err := rawdb.ReadCanonicalHash(tx, blockNumber) + if err != nil { + return 0, libcommon.Hash{}, false, err + } + if requireCanonical && ch != hash { + return 0, libcommon.Hash{}, false, nonCanonocalHashError{hash} + } + } + return blockNumber, hash, blockNumber == finishedBlockNumber, nil +} From 8884d2b033429e747c892db4a44b937b044749ef Mon Sep 17 00:00:00 2001 From: Scott Fairclough <70711990+hexoscott@users.noreply.github.com> Date: Tue, 12 Nov 2024 10:49:21 +0000 Subject: [PATCH 35/88] increase timeouts for unwind tests (#1445) * increase timeouts for unwind tests * remove timeout on unwind stream host --- zk/tests/unwinds/unwind.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/zk/tests/unwinds/unwind.sh b/zk/tests/unwinds/unwind.sh index 84b5f436180..b48f1c15c55 100755 --- a/zk/tests/unwinds/unwind.sh +++ b/zk/tests/unwinds/unwind.sh @@ -15,8 +15,8 @@ dataPath="./datadir" firstStop=11204 stopBlock=11315 unwindBatch=70 -firstTimeout=150s -secondTimeout=150s +firstTimeout=300s +secondTimeout=300s rm -rf "$dataPath/rpc-datadir" rm -rf "$dataPath/phase1-dump1" @@ -27,10 +27,10 @@ rm -rf "$dataPath/phase1-diffs" rm -rf "$dataPath/phase2-diffs" # run datastream server -timeout 600s go run ./zk/debug_tools/datastream-host 
--file="$(pwd)/zk/tests/unwinds/datastream/hermez-dynamic-integration8-datastream/data-stream.bin" & +go run ./zk/debug_tools/datastream-host --file="$(pwd)/zk/tests/unwinds/datastream/hermez-dynamic-integration8-datastream/data-stream.bin" & # in order to start the datastream server -sleep 5 +sleep 10 # run erigon for a while to sync to the unwind point to capture the dump timeout $firstTimeout ./build/bin/cdk-erigon \ From 4bc51c4d4107f69f4c3c42c5c9ac341071a6375c Mon Sep 17 00:00:00 2001 From: Moretti Georgiev Date: Tue, 12 Nov 2024 12:50:02 +0200 Subject: [PATCH 36/88] feat: add mock witness generation (#1436) * feat: add mock witness generation based on flag and executorUrls emptiness * chore: remove logging of index for mock witness generation * feat: log start and end block number, instead of each block for mock witness --- cmd/utils/flags.go | 5 +++++ eth/ethconfig/config_zkevm.go | 1 + turbo/cli/default_flags.go | 1 + turbo/cli/flags_zkevm.go | 1 + zk/witness/witness.go | 25 ++++++++++++++++++++++++- 5 files changed, 32 insertions(+), 1 deletion(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 756b1b9628c..59936ed948a 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -758,6 +758,11 @@ var ( Usage: "Seal the batch immediately when detecting a counter overflow", Value: false, } + MockWitnessGeneration = cli.BoolFlag{ + Name: "zkevm.mock-witness-generation", + Usage: "Mock the witness generation", + Value: false, + } ACLPrintHistory = cli.IntFlag{ Name: "acl.print-history", Usage: "Number of entries to print from the ACL history on node start up", diff --git a/eth/ethconfig/config_zkevm.go b/eth/ethconfig/config_zkevm.go index 93a539b49a1..375bbd10793 100644 --- a/eth/ethconfig/config_zkevm.go +++ b/eth/ethconfig/config_zkevm.go @@ -93,6 +93,7 @@ type Zk struct { InfoTreeUpdateInterval time.Duration BadBatches []uint64 SealBatchImmediatelyOnOverflow bool + MockWitnessGeneration bool } var DefaultZkConfig = &Zk{} diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index 9e9c9ba7af9..ffc404728dd 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -288,4 +288,5 @@ var DefaultFlags = []cli.Flag{ &utils.ACLPrintHistory, &utils.InfoTreeUpdateInterval, &utils.SealBatchImmediatelyOnOverflow, + &utils.MockWitnessGeneration, } diff --git a/turbo/cli/flags_zkevm.go b/turbo/cli/flags_zkevm.go index cc8416d4e40..5ad9ddec3d2 100644 --- a/turbo/cli/flags_zkevm.go +++ b/turbo/cli/flags_zkevm.go @@ -209,6 +209,7 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { ACLPrintHistory: ctx.Int(utils.ACLPrintHistory.Name), InfoTreeUpdateInterval: ctx.Duration(utils.InfoTreeUpdateInterval.Name), SealBatchImmediatelyOnOverflow: ctx.Bool(utils.SealBatchImmediatelyOnOverflow.Name), + MockWitnessGeneration: ctx.Bool(utils.MockWitnessGeneration.Name), } utils2.EnableTimer(cfg.DebugTimers) diff --git a/zk/witness/witness.go b/zk/witness/witness.go index 2350fd250fc..1c1bb3acf7c 100644 --- a/zk/witness/witness.go +++ b/zk/witness/witness.go @@ -191,6 +191,12 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, batchNum uint log.Info("Generating witness timing", "batch", batchNum, "blockFrom", blocks[0].NumberU64(), "blockTo", blocks[len(blocks)-1].NumberU64(), "taken", diff) }() + areExecutorUrlsEmpty := len(g.zkConfig.ExecutorUrls) == 0 || g.zkConfig.ExecutorUrls[0] == "" + shouldGenerateMockWitness := g.zkConfig.MockWitnessGeneration && areExecutorUrlsEmpty + if shouldGenerateMockWitness { + return
g.generateMockWitness(batchNum, blocks, debug) + } + endBlock := blocks[len(blocks)-1].NumberU64() startBlock := blocks[0].NumberU64() @@ -324,7 +330,6 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, batchNum uint chainReader := stagedsync.NewChainReaderImpl(g.chainCfg, tx, nil, log.New()) _, err = core.ExecuteBlockEphemerallyZk(g.chainCfg, &vmConfig, getHashFn, engine, block, tds, trieStateWriter, chainReader, nil, hermezDb, &prevStateRoot) - if err != nil { return nil, err } @@ -362,3 +367,21 @@ func getWitnessBytes(witness *trie.Witness, debug bool) ([]byte, error) { } return buf.Bytes(), nil } + +func (g *Generator) generateMockWitness(batchNum uint64, blocks []*eritypes.Block, debug bool) ([]byte, error) { + mockWitness := []byte("mockWitness") + startBlockNumber := blocks[0].NumberU64() + endBlockNumber := blocks[len(blocks)-1].NumberU64() + + if debug { + log.Info( + "Generated mock witness", + "witness", mockWitness, + "batch", batchNum, + "startBlockNumber", startBlockNumber, + "endBlockNumber", endBlockNumber, + ) + } + + return mockWitness, nil +} From 4792c13ee3304d13aca8e5a10efd74f0f393cf9d Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Tue, 12 Nov 2024 11:39:26 +0000 Subject: [PATCH 37/88] tweak(zkevm_api): accinput batch 0/1 0x00...0 (#1446) --- turbo/jsonrpc/zkevm_api.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/turbo/jsonrpc/zkevm_api.go b/turbo/jsonrpc/zkevm_api.go index 22e10ff9e82..ec25e9e3fab 100644 --- a/turbo/jsonrpc/zkevm_api.go +++ b/turbo/jsonrpc/zkevm_api.go @@ -747,6 +747,16 @@ func (api *ZkEvmAPIImpl) getAccInputHash(ctx context.Context, db SequenceReader, return nil, fmt.Errorf("failed to get %s for batch %d", missing, batchNum) } + // if we are asking for the injected batch or genesis return 0x0..0 + if (batchNum == 0 || batchNum == 1) && prevSequence.BatchNo == 0 { + return &common.Hash{}, nil + } + + // if prev is 0, set to 1 (injected batch) + if prevSequence.BatchNo == 0 { + prevSequence.BatchNo = 1 + } + // get batch range for sequence prevSequenceBatch, currentSequenceBatch := prevSequence.BatchNo, batchSequence.BatchNo // get call data for tx From c33bf57bfea650a6d01885eaa17873fc38bf9630 Mon Sep 17 00:00:00 2001 From: Ji Hwan Date: Tue, 12 Nov 2024 20:41:44 +0900 Subject: [PATCH 38/88] feat: rollupaddress and rollupmanageraddress rpc method Signed-off-by: Ji Hwan --- docs/endpoints/endpoints.md | 2 ++ turbo/jsonrpc/zkevm_api.go | 24 ++++++++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/docs/endpoints/endpoints.md b/docs/endpoints/endpoints.md index 20f27be1a59..81703a3d7b7 100644 --- a/docs/endpoints/endpoints.md +++ b/docs/endpoints/endpoints.md @@ -202,6 +202,8 @@ If the endpoint is not in the list below, it means this specific endpoint is not - zkevm_getProverInput - zkevm_getVersionHistory - zkevm_getWitness +- zkevm_getRollupAddress +- zkevm_getRollupManagerAddress - zkevm_isBlockConsolidated - zkevm_isBlockVirtualized - zkevm_verifiedBatchNumber diff --git a/turbo/jsonrpc/zkevm_api.go b/turbo/jsonrpc/zkevm_api.go index fe7aee56b31..b81c63f6f03 100644 --- a/turbo/jsonrpc/zkevm_api.go +++ b/turbo/jsonrpc/zkevm_api.go @@ -78,6 +78,8 @@ type ZkEvmAPI interface { GetForkById(ctx context.Context, forkId hexutil.Uint64) (res json.RawMessage, err error) GetForkIdByBatchNumber(ctx context.Context, batchNumber rpc.BlockNumber) (hexutil.Uint64, error) GetForks(ctx context.Context) (res json.RawMessage, err error) + GetRollupAddress(ctx context.Context) (res json.RawMessage, err error) + 
GetRollupManagerAddress(ctx context.Context) (res json.RawMessage, err error) } const getBatchWitness = "getBatchWitness" @@ -1846,3 +1848,25 @@ func (api *ZkEvmAPIImpl) GetForks(ctx context.Context) (res json.RawMessage, err return forksJson, err } + +func (api *ZkEvmAPIImpl) GetRollupAddress(ctx context.Context) (res json.RawMessage, err error) { + rollupAddress := api.config.AddressZkevm + + rollupAddressJson, err := json.Marshal(rollupAddress) + if err != nil { + return nil, err + } + + return rollupAddressJson, err +} + +func (api *ZkEvmAPIImpl) GetRollupManagerAddress(ctx context.Context) (res json.RawMessage, err error) { + rollupManagerAddress := api.config.AddressRollup + + rollupManagerAddressJson, err := json.Marshal(rollupManagerAddress) + if err != nil { + return nil, err + } + + return rollupManagerAddressJson, err +} From 4735decf2b82a8263dc9dfd441835102b66433b4 Mon Sep 17 00:00:00 2001 From: Ji Hwan Date: Tue, 12 Nov 2024 20:54:20 +0900 Subject: [PATCH 39/88] test: add unit tests for zkevm_getrollupaddress and zkevm_getrollupmanageraddress Signed-off-by: Ji Hwan --- turbo/jsonrpc/zkevm_api_test.go | 38 +++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/turbo/jsonrpc/zkevm_api_test.go b/turbo/jsonrpc/zkevm_api_test.go index 98d48ab2e24..9f3d5389b9f 100644 --- a/turbo/jsonrpc/zkevm_api_test.go +++ b/turbo/jsonrpc/zkevm_api_test.go @@ -1448,3 +1448,41 @@ func TestGetForks(t *testing.T) { assert.Equal(forks[2].Version, "") assert.Equal(forks[2].BlockNumber, hexutil.Uint64(3000)) } + +func TestGetRollupAddress(t *testing.T) { + assert := assert.New(t) + + // Init new ZkConfig + cfgZk := ethconfig.DefaultZkConfig + assert.NotNil(cfgZk) + + // Check rollup address of default ZkConfig + assert.Equal(cfgZk.AddressZkevm, common.HexToAddress("0x0")) + + // Modify ZkConfig + cfgZk.AddressZkevm = common.HexToAddress("0x1") + assert.Equal(cfgZk.AddressZkevm, common.HexToAddress("0x1")) + cfgZk.AddressZkevm = common.HexToAddress("0x9f77a1fB020Bf0980b75828e3fbdAB13A1D7824A") + assert.Equal(cfgZk.AddressZkevm, common.HexToAddress("0x9f77a1fB020Bf0980b75828e3fbdAB13A1D7824A")) + cfgZk.AddressZkevm = common.HexToAddress("0x5F5221e63CC430C00E65cb9D85066f710650faa9") + assert.Equal(cfgZk.AddressZkevm, common.HexToAddress("0x5F5221e63CC430C00E65cb9D85066f710650faa9")) +} + +func TestGetRollupManagerAddress(t *testing.T) { + assert := assert.New(t) + + // Init new ZkConfig + cfgZk := ethconfig.DefaultZkConfig + assert.NotNil(cfgZk) + + // Check rollup address of default ZkConfig + assert.Equal(cfgZk.AddressRollup, common.HexToAddress("0x0")) + + // Modify ZkConfig + cfgZk.AddressRollup = common.HexToAddress("0x1") + assert.Equal(cfgZk.AddressRollup, common.HexToAddress("0x1")) + cfgZk.AddressRollup = common.HexToAddress("0x9f77a1fB020Bf0980b75828e3fbdAB13A1D7824A") + assert.Equal(cfgZk.AddressRollup, common.HexToAddress("0x9f77a1fB020Bf0980b75828e3fbdAB13A1D7824A")) + cfgZk.AddressRollup = common.HexToAddress("0x5F5221e63CC430C00E65cb9D85066f710650faa9") + assert.Equal(cfgZk.AddressRollup, common.HexToAddress("0x5F5221e63CC430C00E65cb9D85066f710650faa9")) +} From eb66298d6725aa999de743f5220639568f4b97bb Mon Sep 17 00:00:00 2001 From: Ji Hwan Date: Tue, 12 Nov 2024 21:01:23 +0900 Subject: [PATCH 40/88] chore: cleanup Signed-off-by: Ji Hwan --- turbo/jsonrpc/zkevm_api_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/turbo/jsonrpc/zkevm_api_test.go b/turbo/jsonrpc/zkevm_api_test.go index 9f3d5389b9f..73d215d8190 100644 --- 
a/turbo/jsonrpc/zkevm_api_test.go +++ b/turbo/jsonrpc/zkevm_api_test.go @@ -1475,7 +1475,7 @@ func TestGetRollupManagerAddress(t *testing.T) { cfgZk := ethconfig.DefaultZkConfig assert.NotNil(cfgZk) - // Check rollup address of default ZkConfig + // Check rollup manager address of default ZkConfig assert.Equal(cfgZk.AddressRollup, common.HexToAddress("0x0")) // Modify ZkConfig From b1eed6d4d99855ad926a802174affa1e1aaab7c6 Mon Sep 17 00:00:00 2001 From: Ji Hwan Date: Tue, 12 Nov 2024 21:10:08 +0900 Subject: [PATCH 41/88] chore: lint Signed-off-by: Ji Hwan --- docs/endpoints/endpoints.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/endpoints/endpoints.md b/docs/endpoints/endpoints.md index 81703a3d7b7..e8756c070a7 100644 --- a/docs/endpoints/endpoints.md +++ b/docs/endpoints/endpoints.md @@ -200,10 +200,10 @@ If the endpoint is not in the list below, it means this specific endpoint is not - zkevm_getL2BlockInfoTree - zkevm_getLatestGlobalExitRoot - zkevm_getProverInput -- zkevm_getVersionHistory -- zkevm_getWitness - zkevm_getRollupAddress - zkevm_getRollupManagerAddress +- zkevm_getVersionHistory +- zkevm_getWitness - zkevm_isBlockConsolidated - zkevm_isBlockVirtualized - zkevm_verifiedBatchNumber From 83d4db25a8cfaac2d0ce0540da6b661ac423d262 Mon Sep 17 00:00:00 2001 From: Valentin Staykov <79150443+V-Staykov@users.noreply.github.com> Date: Tue, 12 Nov 2024 15:51:25 +0200 Subject: [PATCH 42/88] fix: health check block timestamp fix (#1448) --- cmd/rpcdaemon/health/check_time.go | 10 ++++++---- cmd/rpcdaemon/health/health_test.go | 8 ++++---- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/cmd/rpcdaemon/health/check_time.go b/cmd/rpcdaemon/health/check_time.go index ffdfde24bde..43ea4af63cb 100644 --- a/cmd/rpcdaemon/health/check_time.go +++ b/cmd/rpcdaemon/health/check_time.go @@ -5,6 +5,8 @@ import ( "fmt" "net/http" + "github.com/ledgerwatch/erigon-lib/common/hexutil" + "github.com/ledgerwatch/erigon/rpc" ) @@ -20,13 +22,13 @@ func checkTime( if err != nil { return err } - timestamp := 0 + timestamp := uint64(0) if ts, ok := i["timestamp"]; ok { - if cs, ok := ts.(uint64); ok { - timestamp = int(cs) + if cs, ok := ts.(hexutil.Uint64); ok { + timestamp = cs.Uint64() } } - if timestamp < seconds { + if timestamp < uint64(seconds) { return fmt.Errorf("%w: got ts: %d, need: %d", errTimestampTooOld, timestamp, seconds) } diff --git a/cmd/rpcdaemon/health/health_test.go b/cmd/rpcdaemon/health/health_test.go index 419c7b9912b..079bedb3165 100644 --- a/cmd/rpcdaemon/health/health_test.go +++ b/cmd/rpcdaemon/health/health_test.go @@ -245,7 +245,7 @@ func TestProcessHealthcheckIfNeeded_HeadersTests(t *testing.T) { netApiResponse: hexutil.Uint(1), netApiError: nil, ethApiBlockResult: map[string]interface{}{ - "timestamp": uint64(time.Now().Add(-10 * time.Second).Unix()), + "timestamp": hexutil.Uint64(time.Now().Add(-10 * time.Second).Unix()), }, ethApiBlockError: nil, ethApiSyncingResult: false, @@ -264,7 +264,7 @@ func TestProcessHealthcheckIfNeeded_HeadersTests(t *testing.T) { netApiResponse: hexutil.Uint(1), netApiError: nil, ethApiBlockResult: map[string]interface{}{ - "timestamp": uint64(time.Now().Add(-1 * time.Hour).Unix()), + "timestamp": hexutil.Uint64(time.Now().Add(-1 * time.Hour).Unix()), }, ethApiBlockError: nil, ethApiSyncingResult: false, @@ -283,7 +283,7 @@ func TestProcessHealthcheckIfNeeded_HeadersTests(t *testing.T) { netApiResponse: hexutil.Uint(1), netApiError: nil, ethApiBlockResult: map[string]interface{}{ - "timestamp": 
uint64(time.Now().Add(1 * time.Hour).Unix()), + "timestamp": hexutil.Uint64(time.Now().Add(1 * time.Hour).Unix()), }, ethApiBlockError: nil, ethApiSyncingResult: false, @@ -319,7 +319,7 @@ func TestProcessHealthcheckIfNeeded_HeadersTests(t *testing.T) { netApiResponse: hexutil.Uint(10), netApiError: nil, ethApiBlockResult: map[string]interface{}{ - "timestamp": uint64(time.Now().Add(1 * time.Second).Unix()), + "timestamp": hexutil.Uint64(time.Now().Add(1 * time.Second).Unix()), }, ethApiBlockError: nil, ethApiSyncingResult: false, From 797ea50a58df2575d9b5223652586a5e2f7a58d4 Mon Sep 17 00:00:00 2001 From: Jerry Date: Tue, 12 Nov 2024 07:58:20 -0800 Subject: [PATCH 43/88] Add CPU monitor in CI (#1389) * Add CPU monitor in CI * feat: adding make command to run it also. * feat: fixing script to also run on macOS, adding README to future newcomers --------- Co-authored-by: Arthur Abeilice --- .github/scripts/cpu_monitor.sh | 136 +++++++++++++++++++++++++++++++++ .github/workflows/ci_zkevm.yml | 17 +++++ Makefile | 3 + README.md | 9 +++ 4 files changed, 165 insertions(+) create mode 100755 .github/scripts/cpu_monitor.sh diff --git a/.github/scripts/cpu_monitor.sh b/.github/scripts/cpu_monitor.sh new file mode 100755 index 00000000000..563fb952273 --- /dev/null +++ b/.github/scripts/cpu_monitor.sh @@ -0,0 +1,136 @@ +#!/bin/bash + +# Configuration +THRESHOLD=80 +MEASUREMENTS_FILE="/tmp/cpu_measurements.txt" +MONITOR_INTERVAL=5 # seconds +PROCESS_NAME="cdk-erigon" +DETAILED_LOG="/tmp/cpu_detailed.log" + +# Function to get CPU usage for all matching processes +get_process_cpu() { + # Clear previous detailed log + > "$DETAILED_LOG" + + # Get PIDs of cdk-erigon processes + pids=$(pgrep -f "[c]dk-erigon") + + if [ -n "$pids" ]; then + # Use top in batch mode for each PID to get current CPU usage + for pid in $pids; do + # Get process command + if [[ "$OSTYPE" == "darwin"* ]]; then + cmd=$(ps -p $pid -o command=) + cpu=$(top -l 1 -pid $pid | tail -1 | awk '{print $3}') + else + cmd=$(ps -p $pid -o cmd=) + cpu=$(top -b -n 1 -p $pid | tail -1 | awk '{print $9}') + fi + # Get current CPU usage + echo "$pid $cpu $cmd" >> "$DETAILED_LOG" + done + fi + + # Sum total CPU usage + total_cpu=$(awk '{sum += $2} END {printf "%.1f", sum}' "$DETAILED_LOG") + + # Return 0 if no process found + if [ -z "$total_cpu" ]; then + echo "0.0" + else + echo "$total_cpu" + fi +} + +# Function to show current process details +show_process_details() { + if [ -s "$DETAILED_LOG" ]; then + echo "Individual process details:" + printf "%-10s %-8s %-s\n" "PID" "CPU%" "Command" + echo "----------------------------------------" + while read -r line; do + pid=$(echo "$line" | awk '{print $1}') + cpu=$(echo "$line" | awk '{print $2}') + cmd=$(echo "$line" | cut -d' ' -f3-) + printf "%-10s %-8.1f %-s\n" "$pid" "$cpu" "$cmd" + done < "$DETAILED_LOG" + echo "----------------------------------------" + else + echo "No $PROCESS_NAME processes found" + fi +} + +# Function to analyze CPU measurements +analyze_cpu() { + if [ -f "$MEASUREMENTS_FILE" ]; then + # Calculate statistics + avg_cpu=$(awk '{ sum += $1 } END { print sum/NR }' "$MEASUREMENTS_FILE") + avg_cpu_rounded=$(printf "%.1f" "$avg_cpu") + max_cpu=$(awk 'BEGIN{max=0} {if($1>max) max=$1} END{print max}' "$MEASUREMENTS_FILE") + measurement_count=$(wc -l < "$MEASUREMENTS_FILE") + + echo "" + echo "=== CPU Usage Analysis for all $PROCESS_NAME processes ===" + echo "Number of measurements: $measurement_count" + echo "Average Combined CPU Usage: $avg_cpu_rounded%" + echo "Peak Combined CPU 
Usage: $max_cpu%" + echo "Threshold: $THRESHOLD%" + + # Get final process details for the report + echo "" + echo "Final process state:" + show_process_details + + # Compare with threshold + if [ "$(echo "$avg_cpu > $THRESHOLD" | bc -l)" -eq 1 ]; then + echo "" + echo "ERROR: Average CPU usage ($avg_cpu_rounded%) exceeded threshold of $THRESHOLD%" + cleanup_and_exit 1 + else + echo "" + echo "SUCCESS: CPU usage ($avg_cpu_rounded%) is within threshold of $THRESHOLD%" + cleanup_and_exit 0 + fi + else + echo "ERROR: No CPU measurements found at $MEASUREMENTS_FILE" + cleanup_and_exit 1 + fi +} + +# Function to clean up and exit +cleanup_and_exit() { + exit_code=$1 + rm -f "$DETAILED_LOG" + exit $exit_code +} + +# Function to handle interruption +handle_interrupt() { + echo "" + echo "Monitoring interrupted. Analyzing collected data..." + analyze_cpu +} + +# Set up trap for various signals +trap handle_interrupt TERM INT + +# Clear measurements file +> "$MEASUREMENTS_FILE" +> "$DETAILED_LOG" + +echo "Starting CPU monitoring for all '$PROCESS_NAME' processes" +echo "Storing measurements in $MEASUREMENTS_FILE" +echo "Monitoring interval: ${MONITOR_INTERVAL}s" +echo "Press Ctrl+C to stop monitoring and see analysis" +echo "" + +# Start monitoring loop +while true; do + # Get CPU usage for all matching processes + cpu_usage=$(get_process_cpu) + echo "$cpu_usage" >> "$MEASUREMENTS_FILE" + echo "$(date '+%Y-%m-%d %H:%M:%S') - Combined CPU Usage: $cpu_usage%" + show_process_details + echo "" + sleep "$MONITOR_INTERVAL" +done \ No newline at end of file diff --git a/.github/workflows/ci_zkevm.yml b/.github/workflows/ci_zkevm.yml index 877f30f08a0..9b657266115 100644 --- a/.github/workflows/ci_zkevm.yml +++ b/.github/workflows/ci_zkevm.yml @@ -117,6 +117,23 @@ jobs: run: | kurtosis run --enclave cdk-v1 --image-download always . '{"args": {"data_availability_mode": "${{ matrix.da-mode }}", "cdk_erigon_node_image": "cdk-erigon:local"}}' + - name: Run process with CPU monitoring + working-directory: ./cdk-erigon + run: | + # Start monitoring in background + bash ./.github/scripts/cpu_monitor.sh & + monitor_pid=$! + + # Wait for 30 seconds + sleep 30 + + # Stop monitoring and get analysis + kill -TERM $monitor_pid + wait $monitor_pid || { + echo "CPU usage exceeded threshold!" + exit 1 + } + - name: Monitor verified batches working-directory: ./kurtosis-cdk shell: bash diff --git a/Makefile b/Makefile index c84c657c03f..e6d40c3c252 100644 --- a/Makefile +++ b/Makefile @@ -194,6 +194,9 @@ lint: @./erigon-lib/tools/golangci_lint.sh @./erigon-lib/tools/mod_tidy_check.sh +cpu_monitor: + @.github/scripts/cpu_monitor.sh + ## clean: cleans the go cache, build dir, libmdbx db dir clean: go clean -cache diff --git a/README.md b/README.md index 4e58a99cadb..4fc265aecdd 100644 --- a/README.md +++ b/README.md @@ -208,6 +208,15 @@ Useful config entries: - `zkevm.sync-limit`: This will ensure the network only syncs to a given block height. - `debug.timers`: This will enable debug timers in the logs to help with performance tuning. Recording timings of witness generation, etc. at INFO level. +Metrics and pprof configuration flags: + +- `metrics:` Enables or disables the metrics collection. Set to true to enable. +- `metrics.addr`: The address on which the metrics server will listen. Default is "0.0.0.0". +- `metrics.port`: The port on which the metrics server will listen. Default is 6060. +- `pprof`: Enables or disables the pprof profiling. Set to true to enable. 
+- `pprof.addr`: The address on which the pprof server will listen. Default is "0.0.0.0". +- `pprof.port`: The port on which the pprof server will listen. Default is 6061. + *** From 4dbfc3b6e8ef7a051f177169cc7d83635e9e1165 Mon Sep 17 00:00:00 2001 From: Scott Fairclough <70711990+hexoscott@users.noreply.github.com> Date: Tue, 12 Nov 2024 16:55:03 +0000 Subject: [PATCH 44/88] bug fix in close batch status for RPC node (#1449) --- turbo/jsonrpc/zkevm_api.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/turbo/jsonrpc/zkevm_api.go b/turbo/jsonrpc/zkevm_api.go index 0af21dda657..2d08e56004e 100644 --- a/turbo/jsonrpc/zkevm_api.go +++ b/turbo/jsonrpc/zkevm_api.go @@ -592,8 +592,6 @@ func (api *ZkEvmAPIImpl) GetBatchByNumber(ctx context.Context, rpcBatchNumber rp batch.Closed = true } } - - batch.Closed = batchNo <= latestClosedbatchNum } // verification - if we can't find one, maybe this batch was verified along with a higher batch number From 6ce43bd71b2ec88ddfbbd70c94e9473897173312 Mon Sep 17 00:00:00 2001 From: tclemos Date: Tue, 12 Nov 2024 15:43:53 -0300 Subject: [PATCH 45/88] WIP: data stream refactoring to testable code; add data stream catch up test; pending asserts --- cmd/rpcdaemon/cli/config_zkevm.go | 5 +- eth/backend.go | 35 +- turbo/jsonrpc/daemon.go | 7 +- turbo/jsonrpc/zkevm_api.go | 12 +- turbo/stages/zk_stages.go | 14 +- .../mock_services/data_stream_server_mock.go | 660 ++++++++++++++++++ .../mock_services/stream_server_mock.go | 576 +++++++++++++++ zk/datastream/server/data_stream_server.go | 87 ++- zk/datastream/server/datastream_populate.go | 38 +- zk/datastream/server/interfaces.go | 58 ++ zk/debug_tools/datastream-host/main.go | 8 +- .../legacy_executor_verifier.go | 8 +- ...tchup.go => stage_data_stream_catch_up.go} | 31 +- zk/stages/stage_data_stream_catch_up_test.go | 99 +++ zk/stages/stage_sequence_execute.go | 4 +- .../stage_sequence_execute_data_stream.go | 14 +- .../stage_sequence_execute_resequence.go | 4 +- zk/stages/stage_sequence_execute_utils.go | 17 +- zk/stages/stages.go | 5 + 19 files changed, 1548 insertions(+), 134 deletions(-) create mode 100644 zk/datastream/mock_services/data_stream_server_mock.go create mode 100644 zk/datastream/mock_services/stream_server_mock.go create mode 100644 zk/datastream/server/interfaces.go rename zk/stages/{stage_dataStreamCatchup.go => stage_data_stream_catch_up.go} (82%) create mode 100644 zk/stages/stage_data_stream_catch_up_test.go diff --git a/cmd/rpcdaemon/cli/config_zkevm.go b/cmd/rpcdaemon/cli/config_zkevm.go index 050dc1643c9..59e26d6c5bb 100644 --- a/cmd/rpcdaemon/cli/config_zkevm.go +++ b/cmd/rpcdaemon/cli/config_zkevm.go @@ -2,11 +2,12 @@ package cli import ( "fmt" - "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" + + "github.com/ledgerwatch/erigon/zk/datastream/server" "github.com/ledgerwatch/log/v3" ) -func StartDataStream(server *datastreamer.StreamServer) error { +func StartDataStream(server server.StreamServer) error { if server == nil { // no stream server to start, we might not have the right flags set to create one return nil diff --git a/eth/backend.go b/eth/backend.go index 7299e75844f..189eb4a0a58 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -129,8 +129,10 @@ import ( "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/ledgerwatch/erigon/zk/contracts" "github.com/ledgerwatch/erigon/zk/datastream/client" + "github.com/ledgerwatch/erigon/zk/datastream/server" "github.com/ledgerwatch/erigon/zk/hermez_db" "github.com/ledgerwatch/erigon/zk/l1_cache" + 
"github.com/ledgerwatch/erigon/zk/l1infotree" "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" zkStages "github.com/ledgerwatch/erigon/zk/stages" "github.com/ledgerwatch/erigon/zk/syncer" @@ -139,9 +141,10 @@ import ( "github.com/ledgerwatch/erigon/zk/utils" "github.com/ledgerwatch/erigon/zk/witness" "github.com/ledgerwatch/erigon/zkevm/etherman" - "github.com/ledgerwatch/erigon/zk/l1infotree" ) +var dataStreamServerFactory = server.NewZkEVMDataStreamServerFactory() + // Config contains the configuration options of the ETH protocol. // Deprecated: use ethconfig.Config instead. type Config = ethconfig.Config @@ -219,7 +222,7 @@ type Ethereum struct { logger log.Logger // zk - dataStream *datastreamer.StreamServer + streamServer server.StreamServer l1Syncer *syncer.L1Syncer etherManClients []*etherman.Client l1Cache *l1_cache.L1Cache @@ -977,8 +980,9 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger Level: "warn", Outputs: nil, } + // todo [zkevm] read the stream version from config and figure out what system id is used for - backend.dataStream, err = datastreamer.NewServer(uint16(httpCfg.DataStreamPort), uint8(backend.config.DatastreamVersion), 1, datastreamer.StreamType(1), file, httpCfg.DataStreamWriteTimeout, httpCfg.DataStreamInactivityTimeout, httpCfg.DataStreamInactivityCheckInterval, logConfig) + backend.streamServer, err = dataStreamServerFactory.CreateStreamServer(uint16(httpCfg.DataStreamPort), uint8(backend.config.DatastreamVersion), 1, datastreamer.StreamType(1), file, httpCfg.DataStreamWriteTimeout, httpCfg.DataStreamInactivityTimeout, httpCfg.DataStreamInactivityCheckInterval, logConfig) if err != nil { return nil, err } @@ -986,7 +990,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger // recovery here now, if the stream got into a bad state we want to be able to delete the file and have // the stream re-populated from scratch. So we check the stream for the latest header and if it is // 0 we can just set the datastream progress to 0 also which will force a re-population of the stream - latestHeader := backend.dataStream.GetHeader() + latestHeader := backend.streamServer.GetHeader() if latestHeader.TotalEntries == 0 { log.Info("[dataStream] setting the stream progress to 0") backend.preStartTasks.WarmUpDataStream = true @@ -1100,6 +1104,11 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger l1InfoTreeUpdater := l1infotree.NewUpdater(cfg.Zk, l1InfoTreeSyncer) + var dataStreamServer server.DataStreamServer + if backend.streamServer != nil { + dataStreamServer = dataStreamServerFactory.CreateDataStreamServer(backend.streamServer, backend.chainConfig.ChainID.Uint64()) + } + if isSequencer { // if we are sequencing transactions, we do the sequencing loop... 
witnessGenerator := witness.NewGenerator( @@ -1129,10 +1138,9 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger verifier := legacy_executor_verifier.NewLegacyExecutorVerifier( *cfg.Zk, legacyExecutors, - backend.chainConfig, backend.chainDB, witnessGenerator, - backend.dataStream, + dataStreamServer, ) if cfg.Zk.Limbo { @@ -1167,7 +1175,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger backend.agg, backend.forkValidator, backend.engine, - backend.dataStream, + dataStreamServer, backend.l1Syncer, seqVerSyncer, l1BlockSyncer, @@ -1209,7 +1217,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger backend.engine, backend.l1Syncer, streamClient, - backend.dataStream, + dataStreamServer, l1InfoTreeUpdater, ) @@ -1330,7 +1338,11 @@ func (s *Ethereum) Init(stack *node.Node, config *ethconfig.Config, chainConfig // apiList := jsonrpc.APIList(chainKv, borDb, ethRpcClient, txPoolRpcClient, miningRpcClient, ff, stateCache, blockReader, backend.agg, httpRpcCfg, backend.engine, config, backend.l1Syncer) // authApiList := jsonrpc.AuthAPIList(chainKv, ethRpcClient, txPoolRpcClient, miningRpcClient, ff, stateCache, blockReader, backend.agg, httpRpcCfg, backend.engine, config) - s.apiList = jsonrpc.APIList(chainKv, ethRpcClient, txPoolRpcClient, s.txPool2, miningRpcClient, ff, stateCache, blockReader, s.agg, &httpRpcCfg, s.engine, config, s.l1Syncer, s.logger, s.dataStream) + var dataStreamServer server.DataStreamServer + if s.streamServer != nil { + dataStreamServer = dataStreamServerFactory.CreateDataStreamServer(s.streamServer, config.Zk.L2ChainId) + } + s.apiList = jsonrpc.APIList(chainKv, ethRpcClient, txPoolRpcClient, s.txPool2, miningRpcClient, ff, stateCache, blockReader, s.agg, &httpRpcCfg, s.engine, config, s.l1Syncer, s.logger, dataStreamServer) if config.SilkwormRpcDaemon && httpRpcCfg.Enabled { interface_log_settings := silkworm.RpcInterfaceLogSettings{ @@ -1368,7 +1380,7 @@ func (s *Ethereum) Init(stack *node.Node, config *ethconfig.Config, chainConfig } go func() { - if err := cli.StartDataStream(s.dataStream); err != nil { + if err := cli.StartDataStream(s.streamServer); err != nil { log.Error(err.Error()) return } @@ -1391,8 +1403,9 @@ func (s *Ethereum) PreStart() error { // we don't know when the server has actually started as it doesn't expose a signal that is has spun up // so here we loop and take a brief pause waiting for it to be ready attempts := 0 + dataStreamServer := dataStreamServerFactory.CreateDataStreamServer(s.streamServer, s.chainConfig.ChainID.Uint64()) for { - _, err = zkStages.CatchupDatastream(s.sentryCtx, "stream-catchup", tx, s.dataStream, s.chainConfig.ChainID.Uint64()) + _, err = zkStages.CatchupDatastream(s.sentryCtx, "stream-catchup", tx, dataStreamServer) if err != nil { if errors.Is(err, datastreamer.ErrAtomicOpNotAllowed) { attempts++ diff --git a/turbo/jsonrpc/daemon.go b/turbo/jsonrpc/daemon.go index 90a86af8116..2d1736732f0 100644 --- a/turbo/jsonrpc/daemon.go +++ b/turbo/jsonrpc/daemon.go @@ -17,18 +17,17 @@ import ( "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/zk/datastream/server" "github.com/ledgerwatch/erigon/zk/sequencer" "github.com/ledgerwatch/erigon/zk/syncer" - txpool2 "github.com/ledgerwatch/erigon/zk/txpool" - "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" ) // APIList describes the list of available RPC apis func 
APIList(db kv.RoDB, eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, rawPool *txpool2.TxPool, mining txpool.MiningClient, filters *rpchelper.Filters, stateCache kvcache.Cache, blockReader services.FullBlockReader, agg *libstate.Aggregator, cfg *httpcfg.HttpCfg, engine consensus.EngineReader, - ethCfg *ethconfig.Config, l1Syncer *syncer.L1Syncer, logger log.Logger, datastreamServer *datastreamer.StreamServer, + ethCfg *ethconfig.Config, l1Syncer *syncer.L1Syncer, logger log.Logger, dataStreamServer server.DataStreamServer, ) (list []rpc.API) { // non-sequencer nodes should forward on requests to the sequencer rpcUrl := "" @@ -69,7 +68,7 @@ func APIList(db kv.RoDB, eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, r otsImpl := NewOtterscanAPI(base, db, cfg.OtsMaxPageSize) gqlImpl := NewGraphQLAPI(base, db) overlayImpl := NewOverlayAPI(base, db, cfg.Gascap, cfg.OverlayGetLogsTimeout, cfg.OverlayReplayBlockTimeout, otsImpl) - zkEvmImpl := NewZkEvmAPI(ethImpl, db, cfg.ReturnDataLimit, ethCfg, l1Syncer, rpcUrl, datastreamServer) + zkEvmImpl := NewZkEvmAPI(ethImpl, db, cfg.ReturnDataLimit, ethCfg, l1Syncer, rpcUrl, dataStreamServer) if cfg.GraphQLEnabled { list = append(list, rpc.API{ diff --git a/turbo/jsonrpc/zkevm_api.go b/turbo/jsonrpc/zkevm_api.go index ec25e9e3fab..bae27d47b59 100644 --- a/turbo/jsonrpc/zkevm_api.go +++ b/turbo/jsonrpc/zkevm_api.go @@ -16,7 +16,6 @@ import ( zktypes "github.com/ledgerwatch/erigon/zk/types" - "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/common/hexutil" "github.com/ledgerwatch/erigon-lib/kv/membatchwithdb" @@ -92,7 +91,7 @@ type ZkEvmAPIImpl struct { l1Syncer *syncer.L1Syncer l2SequencerUrl string semaphores map[string]chan struct{} - datastreamServer *server.DataStreamServer + datastreamServer server.DataStreamServer } func (api *ZkEvmAPIImpl) initializeSemaphores(functionLimits map[string]int) { @@ -113,14 +112,9 @@ func NewZkEvmAPI( zkConfig *ethconfig.Config, l1Syncer *syncer.L1Syncer, l2SequencerUrl string, - datastreamServer *datastreamer.StreamServer, + dataStreamServer server.DataStreamServer, ) *ZkEvmAPIImpl { - var streamServer *server.DataStreamServer - if datastreamServer != nil { - streamServer = server.NewDataStreamServer(datastreamServer, zkConfig.Zk.L2ChainId) - } - a := &ZkEvmAPIImpl{ ethApi: base, db: db, @@ -128,7 +122,7 @@ func NewZkEvmAPI( config: zkConfig, l1Syncer: l1Syncer, l2SequencerUrl: l2SequencerUrl, - datastreamServer: streamServer, + datastreamServer: dataStreamServer, } a.initializeSemaphores(map[string]int{ diff --git a/turbo/stages/zk_stages.go b/turbo/stages/zk_stages.go index 1a796e37250..a585503c0e0 100644 --- a/turbo/stages/zk_stages.go +++ b/turbo/stages/zk_stages.go @@ -3,7 +3,6 @@ package stages import ( "context" - "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/state" @@ -16,11 +15,12 @@ import ( "github.com/ledgerwatch/erigon/turbo/engineapi/engine_helpers" "github.com/ledgerwatch/erigon/turbo/shards" "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" + "github.com/ledgerwatch/erigon/zk/datastream/server" + "github.com/ledgerwatch/erigon/zk/l1infotree" "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" zkStages "github.com/ledgerwatch/erigon/zk/stages" "github.com/ledgerwatch/erigon/zk/syncer" "github.com/ledgerwatch/erigon/zk/txpool" - 
"github.com/ledgerwatch/erigon/zk/l1infotree" ) // NewDefaultZkStages creates stages for zk syncer (RPC mode) @@ -36,7 +36,7 @@ func NewDefaultZkStages(ctx context.Context, engine consensus.Engine, l1Syncer *syncer.L1Syncer, datastreamClient zkStages.DatastreamClient, - datastreamServer *datastreamer.StreamServer, + dataStreamServer server.DataStreamServer, infoTreeUpdater *l1infotree.Updater, ) []*stagedsync.Stage { dirs := cfg.Dirs @@ -54,7 +54,7 @@ func NewDefaultZkStages(ctx context.Context, zkStages.StageL1SyncerCfg(db, l1Syncer, cfg.Zk), zkStages.StageL1InfoTreeCfg(db, cfg.Zk, infoTreeUpdater), zkStages.StageBatchesCfg(db, datastreamClient, cfg.Zk, controlServer.ChainConfig, &cfg.Miner), - zkStages.StageDataStreamCatchupCfg(datastreamServer, db, cfg.Genesis.Config.ChainID.Uint64(), cfg.DatastreamVersion, cfg.HasExecutors()), + zkStages.StageDataStreamCatchupCfg(dataStreamServer, db, cfg.Genesis.Config.ChainID.Uint64(), cfg.DatastreamVersion, cfg.HasExecutors()), stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter), stagedsync.StageSendersCfg(db, controlServer.ChainConfig, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, nil), stagedsync.StageExecuteBlocksCfg( @@ -99,7 +99,7 @@ func NewSequencerZkStages(ctx context.Context, agg *state.Aggregator, forkValidator *engine_helpers.ForkValidator, engine consensus.Engine, - datastreamServer *datastreamer.StreamServer, + dataStreamServer server.DataStreamServer, sequencerStageSyncer *syncer.L1Syncer, l1Syncer *syncer.L1Syncer, l1BlockSyncer *syncer.L1Syncer, @@ -120,7 +120,7 @@ func NewSequencerZkStages(ctx context.Context, zkStages.StageL1SequencerSyncCfg(db, cfg.Zk, sequencerStageSyncer), zkStages.StageL1InfoTreeCfg(db, cfg.Zk, infoTreeUpdater), zkStages.StageSequencerL1BlockSyncCfg(db, cfg.Zk, l1BlockSyncer), - zkStages.StageDataStreamCatchupCfg(datastreamServer, db, cfg.Genesis.Config.ChainID.Uint64(), cfg.DatastreamVersion, cfg.HasExecutors()), + zkStages.StageDataStreamCatchupCfg(dataStreamServer, db, cfg.Genesis.Config.ChainID.Uint64(), cfg.DatastreamVersion, cfg.HasExecutors()), zkStages.StageSequenceBlocksCfg( db, cfg.Prune, @@ -138,7 +138,7 @@ func NewSequencerZkStages(ctx context.Context, cfg.Genesis, cfg.Sync, agg, - datastreamServer, + dataStreamServer, cfg.Zk, &cfg.Miner, txPool, diff --git a/zk/datastream/mock_services/data_stream_server_mock.go b/zk/datastream/mock_services/data_stream_server_mock.go new file mode 100644 index 00000000000..0ef44befd6d --- /dev/null +++ b/zk/datastream/mock_services/data_stream_server_mock.go @@ -0,0 +1,660 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ledgerwatch/erigon/zk/datastream/server (interfaces: DataStreamServer) +// +// Generated by this command: +// +// mockgen -typed=true -destination=../mocks/data_stream_server_mock.go -package=mocks . DataStreamServer +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + common "github.com/ledgerwatch/erigon-lib/common" + kv "github.com/ledgerwatch/erigon-lib/kv" + types "github.com/ledgerwatch/erigon/core/types" + server "github.com/ledgerwatch/erigon/zk/datastream/server" + types0 "github.com/ledgerwatch/erigon/zk/datastream/types" + hermez_db "github.com/ledgerwatch/erigon/zk/hermez_db" + gomock "go.uber.org/mock/gomock" +) + +// MockDataStreamServer is a mock of DataStreamServer interface. 
+type MockDataStreamServer struct { + ctrl *gomock.Controller + recorder *MockDataStreamServerMockRecorder +} + +// MockDataStreamServerMockRecorder is the mock recorder for MockDataStreamServer. +type MockDataStreamServerMockRecorder struct { + mock *MockDataStreamServer +} + +// NewMockDataStreamServer creates a new mock instance. +func NewMockDataStreamServer(ctrl *gomock.Controller) *MockDataStreamServer { + mock := &MockDataStreamServer{ctrl: ctrl} + mock.recorder = &MockDataStreamServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDataStreamServer) EXPECT() *MockDataStreamServerMockRecorder { + return m.recorder +} + +// GetChainId mocks base method. +func (m *MockDataStreamServer) GetChainId() uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetChainId") + ret0, _ := ret[0].(uint64) + return ret0 +} + +// GetChainId indicates an expected call of GetChainId. +func (mr *MockDataStreamServerMockRecorder) GetChainId() *MockDataStreamServerGetChainIdCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChainId", reflect.TypeOf((*MockDataStreamServer)(nil).GetChainId)) + return &MockDataStreamServerGetChainIdCall{Call: call} +} + +// MockDataStreamServerGetChainIdCall wrap *gomock.Call +type MockDataStreamServerGetChainIdCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerGetChainIdCall) Return(arg0 uint64) *MockDataStreamServerGetChainIdCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerGetChainIdCall) Do(f func() uint64) *MockDataStreamServerGetChainIdCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerGetChainIdCall) DoAndReturn(f func() uint64) *MockDataStreamServerGetChainIdCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetHighestBatchNumber mocks base method. +func (m *MockDataStreamServer) GetHighestBatchNumber() (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHighestBatchNumber") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHighestBatchNumber indicates an expected call of GetHighestBatchNumber. 
+func (mr *MockDataStreamServerMockRecorder) GetHighestBatchNumber() *MockDataStreamServerGetHighestBatchNumberCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHighestBatchNumber", reflect.TypeOf((*MockDataStreamServer)(nil).GetHighestBatchNumber)) + return &MockDataStreamServerGetHighestBatchNumberCall{Call: call} +} + +// MockDataStreamServerGetHighestBatchNumberCall wrap *gomock.Call +type MockDataStreamServerGetHighestBatchNumberCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerGetHighestBatchNumberCall) Return(arg0 uint64, arg1 error) *MockDataStreamServerGetHighestBatchNumberCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerGetHighestBatchNumberCall) Do(f func() (uint64, error)) *MockDataStreamServerGetHighestBatchNumberCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerGetHighestBatchNumberCall) DoAndReturn(f func() (uint64, error)) *MockDataStreamServerGetHighestBatchNumberCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetHighestBlockNumber mocks base method. +func (m *MockDataStreamServer) GetHighestBlockNumber() (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHighestBlockNumber") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHighestBlockNumber indicates an expected call of GetHighestBlockNumber. +func (mr *MockDataStreamServerMockRecorder) GetHighestBlockNumber() *MockDataStreamServerGetHighestBlockNumberCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHighestBlockNumber", reflect.TypeOf((*MockDataStreamServer)(nil).GetHighestBlockNumber)) + return &MockDataStreamServerGetHighestBlockNumberCall{Call: call} +} + +// MockDataStreamServerGetHighestBlockNumberCall wrap *gomock.Call +type MockDataStreamServerGetHighestBlockNumberCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerGetHighestBlockNumberCall) Return(arg0 uint64, arg1 error) *MockDataStreamServerGetHighestBlockNumberCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerGetHighestBlockNumberCall) Do(f func() (uint64, error)) *MockDataStreamServerGetHighestBlockNumberCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerGetHighestBlockNumberCall) DoAndReturn(f func() (uint64, error)) *MockDataStreamServerGetHighestBlockNumberCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetHighestClosedBatch mocks base method. +func (m *MockDataStreamServer) GetHighestClosedBatch() (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHighestClosedBatch") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHighestClosedBatch indicates an expected call of GetHighestClosedBatch. 
+func (mr *MockDataStreamServerMockRecorder) GetHighestClosedBatch() *MockDataStreamServerGetHighestClosedBatchCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHighestClosedBatch", reflect.TypeOf((*MockDataStreamServer)(nil).GetHighestClosedBatch)) + return &MockDataStreamServerGetHighestClosedBatchCall{Call: call} +} + +// MockDataStreamServerGetHighestClosedBatchCall wrap *gomock.Call +type MockDataStreamServerGetHighestClosedBatchCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerGetHighestClosedBatchCall) Return(arg0 uint64, arg1 error) *MockDataStreamServerGetHighestClosedBatchCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerGetHighestClosedBatchCall) Do(f func() (uint64, error)) *MockDataStreamServerGetHighestClosedBatchCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerGetHighestClosedBatchCall) DoAndReturn(f func() (uint64, error)) *MockDataStreamServerGetHighestClosedBatchCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetHighestClosedBatchNoCache mocks base method. +func (m *MockDataStreamServer) GetHighestClosedBatchNoCache() (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHighestClosedBatchNoCache") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHighestClosedBatchNoCache indicates an expected call of GetHighestClosedBatchNoCache. +func (mr *MockDataStreamServerMockRecorder) GetHighestClosedBatchNoCache() *MockDataStreamServerGetHighestClosedBatchNoCacheCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHighestClosedBatchNoCache", reflect.TypeOf((*MockDataStreamServer)(nil).GetHighestClosedBatchNoCache)) + return &MockDataStreamServerGetHighestClosedBatchNoCacheCall{Call: call} +} + +// MockDataStreamServerGetHighestClosedBatchNoCacheCall wrap *gomock.Call +type MockDataStreamServerGetHighestClosedBatchNoCacheCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerGetHighestClosedBatchNoCacheCall) Return(arg0 uint64, arg1 error) *MockDataStreamServerGetHighestClosedBatchNoCacheCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerGetHighestClosedBatchNoCacheCall) Do(f func() (uint64, error)) *MockDataStreamServerGetHighestClosedBatchNoCacheCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerGetHighestClosedBatchNoCacheCall) DoAndReturn(f func() (uint64, error)) *MockDataStreamServerGetHighestClosedBatchNoCacheCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetStreamServer mocks base method. +func (m *MockDataStreamServer) GetStreamServer() server.StreamServer { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetStreamServer") + ret0, _ := ret[0].(server.StreamServer) + return ret0 +} + +// GetStreamServer indicates an expected call of GetStreamServer. 
+func (mr *MockDataStreamServerMockRecorder) GetStreamServer() *MockDataStreamServerGetStreamServerCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStreamServer", reflect.TypeOf((*MockDataStreamServer)(nil).GetStreamServer)) + return &MockDataStreamServerGetStreamServerCall{Call: call} +} + +// MockDataStreamServerGetStreamServerCall wrap *gomock.Call +type MockDataStreamServerGetStreamServerCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerGetStreamServerCall) Return(arg0 server.StreamServer) *MockDataStreamServerGetStreamServerCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerGetStreamServerCall) Do(f func() server.StreamServer) *MockDataStreamServerGetStreamServerCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerGetStreamServerCall) DoAndReturn(f func() server.StreamServer) *MockDataStreamServerGetStreamServerCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// IsLastEntryBatchEnd mocks base method. +func (m *MockDataStreamServer) IsLastEntryBatchEnd() (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsLastEntryBatchEnd") + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsLastEntryBatchEnd indicates an expected call of IsLastEntryBatchEnd. +func (mr *MockDataStreamServerMockRecorder) IsLastEntryBatchEnd() *MockDataStreamServerIsLastEntryBatchEndCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsLastEntryBatchEnd", reflect.TypeOf((*MockDataStreamServer)(nil).IsLastEntryBatchEnd)) + return &MockDataStreamServerIsLastEntryBatchEndCall{Call: call} +} + +// MockDataStreamServerIsLastEntryBatchEndCall wrap *gomock.Call +type MockDataStreamServerIsLastEntryBatchEndCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerIsLastEntryBatchEndCall) Return(arg0 bool, arg1 error) *MockDataStreamServerIsLastEntryBatchEndCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerIsLastEntryBatchEndCall) Do(f func() (bool, error)) *MockDataStreamServerIsLastEntryBatchEndCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerIsLastEntryBatchEndCall) DoAndReturn(f func() (bool, error)) *MockDataStreamServerIsLastEntryBatchEndCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// ReadBatches mocks base method. +func (m *MockDataStreamServer) ReadBatches(arg0, arg1 uint64) ([][]*types0.FullL2Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReadBatches", arg0, arg1) + ret0, _ := ret[0].([][]*types0.FullL2Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReadBatches indicates an expected call of ReadBatches. 
+func (mr *MockDataStreamServerMockRecorder) ReadBatches(arg0, arg1 any) *MockDataStreamServerReadBatchesCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadBatches", reflect.TypeOf((*MockDataStreamServer)(nil).ReadBatches), arg0, arg1) + return &MockDataStreamServerReadBatchesCall{Call: call} +} + +// MockDataStreamServerReadBatchesCall wrap *gomock.Call +type MockDataStreamServerReadBatchesCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerReadBatchesCall) Return(arg0 [][]*types0.FullL2Block, arg1 error) *MockDataStreamServerReadBatchesCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerReadBatchesCall) Do(f func(uint64, uint64) ([][]*types0.FullL2Block, error)) *MockDataStreamServerReadBatchesCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerReadBatchesCall) DoAndReturn(f func(uint64, uint64) ([][]*types0.FullL2Block, error)) *MockDataStreamServerReadBatchesCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// UnwindIfNecessary mocks base method. +func (m *MockDataStreamServer) UnwindIfNecessary(arg0 string, arg1 server.DbReader, arg2, arg3, arg4 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnwindIfNecessary", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(error) + return ret0 +} + +// UnwindIfNecessary indicates an expected call of UnwindIfNecessary. +func (mr *MockDataStreamServerMockRecorder) UnwindIfNecessary(arg0, arg1, arg2, arg3, arg4 any) *MockDataStreamServerUnwindIfNecessaryCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnwindIfNecessary", reflect.TypeOf((*MockDataStreamServer)(nil).UnwindIfNecessary), arg0, arg1, arg2, arg3, arg4) + return &MockDataStreamServerUnwindIfNecessaryCall{Call: call} +} + +// MockDataStreamServerUnwindIfNecessaryCall wrap *gomock.Call +type MockDataStreamServerUnwindIfNecessaryCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerUnwindIfNecessaryCall) Return(arg0 error) *MockDataStreamServerUnwindIfNecessaryCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerUnwindIfNecessaryCall) Do(f func(string, server.DbReader, uint64, uint64, uint64) error) *MockDataStreamServerUnwindIfNecessaryCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerUnwindIfNecessaryCall) DoAndReturn(f func(string, server.DbReader, uint64, uint64, uint64) error) *MockDataStreamServerUnwindIfNecessaryCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// UnwindToBatchStart mocks base method. +func (m *MockDataStreamServer) UnwindToBatchStart(arg0 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnwindToBatchStart", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// UnwindToBatchStart indicates an expected call of UnwindToBatchStart. 
+func (mr *MockDataStreamServerMockRecorder) UnwindToBatchStart(arg0 any) *MockDataStreamServerUnwindToBatchStartCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnwindToBatchStart", reflect.TypeOf((*MockDataStreamServer)(nil).UnwindToBatchStart), arg0) + return &MockDataStreamServerUnwindToBatchStartCall{Call: call} +} + +// MockDataStreamServerUnwindToBatchStartCall wrap *gomock.Call +type MockDataStreamServerUnwindToBatchStartCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerUnwindToBatchStartCall) Return(arg0 error) *MockDataStreamServerUnwindToBatchStartCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerUnwindToBatchStartCall) Do(f func(uint64) error) *MockDataStreamServerUnwindToBatchStartCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerUnwindToBatchStartCall) DoAndReturn(f func(uint64) error) *MockDataStreamServerUnwindToBatchStartCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// UnwindToBlock mocks base method. +func (m *MockDataStreamServer) UnwindToBlock(arg0 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnwindToBlock", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// UnwindToBlock indicates an expected call of UnwindToBlock. +func (mr *MockDataStreamServerMockRecorder) UnwindToBlock(arg0 any) *MockDataStreamServerUnwindToBlockCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnwindToBlock", reflect.TypeOf((*MockDataStreamServer)(nil).UnwindToBlock), arg0) + return &MockDataStreamServerUnwindToBlockCall{Call: call} +} + +// MockDataStreamServerUnwindToBlockCall wrap *gomock.Call +type MockDataStreamServerUnwindToBlockCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerUnwindToBlockCall) Return(arg0 error) *MockDataStreamServerUnwindToBlockCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerUnwindToBlockCall) Do(f func(uint64) error) *MockDataStreamServerUnwindToBlockCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerUnwindToBlockCall) DoAndReturn(f func(uint64) error) *MockDataStreamServerUnwindToBlockCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// WriteBatchEnd mocks base method. +func (m *MockDataStreamServer) WriteBatchEnd(arg0 server.DbReader, arg1 uint64, arg2, arg3 *common.Hash) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteBatchEnd", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteBatchEnd indicates an expected call of WriteBatchEnd. 
+func (mr *MockDataStreamServerMockRecorder) WriteBatchEnd(arg0, arg1, arg2, arg3 any) *MockDataStreamServerWriteBatchEndCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteBatchEnd", reflect.TypeOf((*MockDataStreamServer)(nil).WriteBatchEnd), arg0, arg1, arg2, arg3) + return &MockDataStreamServerWriteBatchEndCall{Call: call} +} + +// MockDataStreamServerWriteBatchEndCall wrap *gomock.Call +type MockDataStreamServerWriteBatchEndCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerWriteBatchEndCall) Return(arg0 error) *MockDataStreamServerWriteBatchEndCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerWriteBatchEndCall) Do(f func(server.DbReader, uint64, *common.Hash, *common.Hash) error) *MockDataStreamServerWriteBatchEndCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerWriteBatchEndCall) DoAndReturn(f func(server.DbReader, uint64, *common.Hash, *common.Hash) error) *MockDataStreamServerWriteBatchEndCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// WriteBlockWithBatchStartToStream mocks base method. +func (m *MockDataStreamServer) WriteBlockWithBatchStartToStream(arg0 string, arg1 kv.Tx, arg2 server.DbReader, arg3, arg4, arg5 uint64, arg6, arg7 types.Block) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteBlockWithBatchStartToStream", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteBlockWithBatchStartToStream indicates an expected call of WriteBlockWithBatchStartToStream. +func (mr *MockDataStreamServerMockRecorder) WriteBlockWithBatchStartToStream(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 any) *MockDataStreamServerWriteBlockWithBatchStartToStreamCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteBlockWithBatchStartToStream", reflect.TypeOf((*MockDataStreamServer)(nil).WriteBlockWithBatchStartToStream), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + return &MockDataStreamServerWriteBlockWithBatchStartToStreamCall{Call: call} +} + +// MockDataStreamServerWriteBlockWithBatchStartToStreamCall wrap *gomock.Call +type MockDataStreamServerWriteBlockWithBatchStartToStreamCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerWriteBlockWithBatchStartToStreamCall) Return(arg0 error) *MockDataStreamServerWriteBlockWithBatchStartToStreamCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerWriteBlockWithBatchStartToStreamCall) Do(f func(string, kv.Tx, server.DbReader, uint64, uint64, uint64, types.Block, types.Block) error) *MockDataStreamServerWriteBlockWithBatchStartToStreamCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerWriteBlockWithBatchStartToStreamCall) DoAndReturn(f func(string, kv.Tx, server.DbReader, uint64, uint64, uint64, types.Block, types.Block) error) *MockDataStreamServerWriteBlockWithBatchStartToStreamCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// WriteBlocksToStreamConsecutively mocks base method. 
+func (m *MockDataStreamServer) WriteBlocksToStreamConsecutively(arg0 context.Context, arg1 string, arg2 kv.Tx, arg3 server.DbReader, arg4, arg5 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteBlocksToStreamConsecutively", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteBlocksToStreamConsecutively indicates an expected call of WriteBlocksToStreamConsecutively. +func (mr *MockDataStreamServerMockRecorder) WriteBlocksToStreamConsecutively(arg0, arg1, arg2, arg3, arg4, arg5 any) *MockDataStreamServerWriteBlocksToStreamConsecutivelyCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteBlocksToStreamConsecutively", reflect.TypeOf((*MockDataStreamServer)(nil).WriteBlocksToStreamConsecutively), arg0, arg1, arg2, arg3, arg4, arg5) + return &MockDataStreamServerWriteBlocksToStreamConsecutivelyCall{Call: call} +} + +// MockDataStreamServerWriteBlocksToStreamConsecutivelyCall wrap *gomock.Call +type MockDataStreamServerWriteBlocksToStreamConsecutivelyCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerWriteBlocksToStreamConsecutivelyCall) Return(arg0 error) *MockDataStreamServerWriteBlocksToStreamConsecutivelyCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerWriteBlocksToStreamConsecutivelyCall) Do(f func(context.Context, string, kv.Tx, server.DbReader, uint64, uint64) error) *MockDataStreamServerWriteBlocksToStreamConsecutivelyCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerWriteBlocksToStreamConsecutivelyCall) DoAndReturn(f func(context.Context, string, kv.Tx, server.DbReader, uint64, uint64) error) *MockDataStreamServerWriteBlocksToStreamConsecutivelyCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// WriteGenesisToStream mocks base method. +func (m *MockDataStreamServer) WriteGenesisToStream(arg0 *types.Block, arg1 *hermez_db.HermezDbReader, arg2 kv.Tx) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteGenesisToStream", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteGenesisToStream indicates an expected call of WriteGenesisToStream. 
+func (mr *MockDataStreamServerMockRecorder) WriteGenesisToStream(arg0, arg1, arg2 any) *MockDataStreamServerWriteGenesisToStreamCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteGenesisToStream", reflect.TypeOf((*MockDataStreamServer)(nil).WriteGenesisToStream), arg0, arg1, arg2) + return &MockDataStreamServerWriteGenesisToStreamCall{Call: call} +} + +// MockDataStreamServerWriteGenesisToStreamCall wrap *gomock.Call +type MockDataStreamServerWriteGenesisToStreamCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerWriteGenesisToStreamCall) Return(arg0 error) *MockDataStreamServerWriteGenesisToStreamCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerWriteGenesisToStreamCall) Do(f func(*types.Block, *hermez_db.HermezDbReader, kv.Tx) error) *MockDataStreamServerWriteGenesisToStreamCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerWriteGenesisToStreamCall) DoAndReturn(f func(*types.Block, *hermez_db.HermezDbReader, kv.Tx) error) *MockDataStreamServerWriteGenesisToStreamCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// WriteWholeBatchToStream mocks base method. +func (m *MockDataStreamServer) WriteWholeBatchToStream(arg0 string, arg1 kv.Tx, arg2 server.DbReader, arg3, arg4 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteWholeBatchToStream", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteWholeBatchToStream indicates an expected call of WriteWholeBatchToStream. +func (mr *MockDataStreamServerMockRecorder) WriteWholeBatchToStream(arg0, arg1, arg2, arg3, arg4 any) *MockDataStreamServerWriteWholeBatchToStreamCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteWholeBatchToStream", reflect.TypeOf((*MockDataStreamServer)(nil).WriteWholeBatchToStream), arg0, arg1, arg2, arg3, arg4) + return &MockDataStreamServerWriteWholeBatchToStreamCall{Call: call} +} + +// MockDataStreamServerWriteWholeBatchToStreamCall wrap *gomock.Call +type MockDataStreamServerWriteWholeBatchToStreamCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerWriteWholeBatchToStreamCall) Return(arg0 error) *MockDataStreamServerWriteWholeBatchToStreamCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerWriteWholeBatchToStreamCall) Do(f func(string, kv.Tx, server.DbReader, uint64, uint64) error) *MockDataStreamServerWriteWholeBatchToStreamCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerWriteWholeBatchToStreamCall) DoAndReturn(f func(string, kv.Tx, server.DbReader, uint64, uint64) error) *MockDataStreamServerWriteWholeBatchToStreamCall { + c.Call = c.Call.DoAndReturn(f) + return c +} diff --git a/zk/datastream/mock_services/stream_server_mock.go b/zk/datastream/mock_services/stream_server_mock.go new file mode 100644 index 00000000000..27d287a223c --- /dev/null +++ b/zk/datastream/mock_services/stream_server_mock.go @@ -0,0 +1,576 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ledgerwatch/erigon/zk/datastream/server (interfaces: StreamServer) +// +// Generated by this command: +// +// mockgen -typed=true -destination=../mocks/stream_server_mock.go -package=mocks . 
StreamServer +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + reflect "reflect" + + datastreamer "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" + gomock "go.uber.org/mock/gomock" +) + +// MockStreamServer is a mock of StreamServer interface. +type MockStreamServer struct { + ctrl *gomock.Controller + recorder *MockStreamServerMockRecorder +} + +// MockStreamServerMockRecorder is the mock recorder for MockStreamServer. +type MockStreamServerMockRecorder struct { + mock *MockStreamServer +} + +// NewMockStreamServer creates a new mock instance. +func NewMockStreamServer(ctrl *gomock.Controller) *MockStreamServer { + mock := &MockStreamServer{ctrl: ctrl} + mock.recorder = &MockStreamServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStreamServer) EXPECT() *MockStreamServerMockRecorder { + return m.recorder +} + +// AddStreamBookmark mocks base method. +func (m *MockStreamServer) AddStreamBookmark(arg0 []byte) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddStreamBookmark", arg0) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AddStreamBookmark indicates an expected call of AddStreamBookmark. +func (mr *MockStreamServerMockRecorder) AddStreamBookmark(arg0 any) *MockStreamServerAddStreamBookmarkCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddStreamBookmark", reflect.TypeOf((*MockStreamServer)(nil).AddStreamBookmark), arg0) + return &MockStreamServerAddStreamBookmarkCall{Call: call} +} + +// MockStreamServerAddStreamBookmarkCall wrap *gomock.Call +type MockStreamServerAddStreamBookmarkCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerAddStreamBookmarkCall) Return(arg0 uint64, arg1 error) *MockStreamServerAddStreamBookmarkCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerAddStreamBookmarkCall) Do(f func([]byte) (uint64, error)) *MockStreamServerAddStreamBookmarkCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerAddStreamBookmarkCall) DoAndReturn(f func([]byte) (uint64, error)) *MockStreamServerAddStreamBookmarkCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// AddStreamEntry mocks base method. +func (m *MockStreamServer) AddStreamEntry(arg0 datastreamer.EntryType, arg1 []byte) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddStreamEntry", arg0, arg1) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AddStreamEntry indicates an expected call of AddStreamEntry. 
+func (mr *MockStreamServerMockRecorder) AddStreamEntry(arg0, arg1 any) *MockStreamServerAddStreamEntryCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddStreamEntry", reflect.TypeOf((*MockStreamServer)(nil).AddStreamEntry), arg0, arg1) + return &MockStreamServerAddStreamEntryCall{Call: call} +} + +// MockStreamServerAddStreamEntryCall wrap *gomock.Call +type MockStreamServerAddStreamEntryCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerAddStreamEntryCall) Return(arg0 uint64, arg1 error) *MockStreamServerAddStreamEntryCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerAddStreamEntryCall) Do(f func(datastreamer.EntryType, []byte) (uint64, error)) *MockStreamServerAddStreamEntryCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerAddStreamEntryCall) DoAndReturn(f func(datastreamer.EntryType, []byte) (uint64, error)) *MockStreamServerAddStreamEntryCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// BookmarkPrintDump mocks base method. +func (m *MockStreamServer) BookmarkPrintDump() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "BookmarkPrintDump") +} + +// BookmarkPrintDump indicates an expected call of BookmarkPrintDump. +func (mr *MockStreamServerMockRecorder) BookmarkPrintDump() *MockStreamServerBookmarkPrintDumpCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BookmarkPrintDump", reflect.TypeOf((*MockStreamServer)(nil).BookmarkPrintDump)) + return &MockStreamServerBookmarkPrintDumpCall{Call: call} +} + +// MockStreamServerBookmarkPrintDumpCall wrap *gomock.Call +type MockStreamServerBookmarkPrintDumpCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerBookmarkPrintDumpCall) Return() *MockStreamServerBookmarkPrintDumpCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerBookmarkPrintDumpCall) Do(f func()) *MockStreamServerBookmarkPrintDumpCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerBookmarkPrintDumpCall) DoAndReturn(f func()) *MockStreamServerBookmarkPrintDumpCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// CommitAtomicOp mocks base method. +func (m *MockStreamServer) CommitAtomicOp() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CommitAtomicOp") + ret0, _ := ret[0].(error) + return ret0 +} + +// CommitAtomicOp indicates an expected call of CommitAtomicOp. 
+func (mr *MockStreamServerMockRecorder) CommitAtomicOp() *MockStreamServerCommitAtomicOpCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitAtomicOp", reflect.TypeOf((*MockStreamServer)(nil).CommitAtomicOp)) + return &MockStreamServerCommitAtomicOpCall{Call: call} +} + +// MockStreamServerCommitAtomicOpCall wrap *gomock.Call +type MockStreamServerCommitAtomicOpCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerCommitAtomicOpCall) Return(arg0 error) *MockStreamServerCommitAtomicOpCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerCommitAtomicOpCall) Do(f func() error) *MockStreamServerCommitAtomicOpCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerCommitAtomicOpCall) DoAndReturn(f func() error) *MockStreamServerCommitAtomicOpCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetBookmark mocks base method. +func (m *MockStreamServer) GetBookmark(arg0 []byte) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBookmark", arg0) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBookmark indicates an expected call of GetBookmark. +func (mr *MockStreamServerMockRecorder) GetBookmark(arg0 any) *MockStreamServerGetBookmarkCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBookmark", reflect.TypeOf((*MockStreamServer)(nil).GetBookmark), arg0) + return &MockStreamServerGetBookmarkCall{Call: call} +} + +// MockStreamServerGetBookmarkCall wrap *gomock.Call +type MockStreamServerGetBookmarkCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerGetBookmarkCall) Return(arg0 uint64, arg1 error) *MockStreamServerGetBookmarkCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerGetBookmarkCall) Do(f func([]byte) (uint64, error)) *MockStreamServerGetBookmarkCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerGetBookmarkCall) DoAndReturn(f func([]byte) (uint64, error)) *MockStreamServerGetBookmarkCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetDataBetweenBookmarks mocks base method. +func (m *MockStreamServer) GetDataBetweenBookmarks(arg0, arg1 []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDataBetweenBookmarks", arg0, arg1) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDataBetweenBookmarks indicates an expected call of GetDataBetweenBookmarks. 
+func (mr *MockStreamServerMockRecorder) GetDataBetweenBookmarks(arg0, arg1 any) *MockStreamServerGetDataBetweenBookmarksCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataBetweenBookmarks", reflect.TypeOf((*MockStreamServer)(nil).GetDataBetweenBookmarks), arg0, arg1) + return &MockStreamServerGetDataBetweenBookmarksCall{Call: call} +} + +// MockStreamServerGetDataBetweenBookmarksCall wrap *gomock.Call +type MockStreamServerGetDataBetweenBookmarksCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerGetDataBetweenBookmarksCall) Return(arg0 []byte, arg1 error) *MockStreamServerGetDataBetweenBookmarksCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerGetDataBetweenBookmarksCall) Do(f func([]byte, []byte) ([]byte, error)) *MockStreamServerGetDataBetweenBookmarksCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerGetDataBetweenBookmarksCall) DoAndReturn(f func([]byte, []byte) ([]byte, error)) *MockStreamServerGetDataBetweenBookmarksCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetEntry mocks base method. +func (m *MockStreamServer) GetEntry(arg0 uint64) (datastreamer.FileEntry, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEntry", arg0) + ret0, _ := ret[0].(datastreamer.FileEntry) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEntry indicates an expected call of GetEntry. +func (mr *MockStreamServerMockRecorder) GetEntry(arg0 any) *MockStreamServerGetEntryCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEntry", reflect.TypeOf((*MockStreamServer)(nil).GetEntry), arg0) + return &MockStreamServerGetEntryCall{Call: call} +} + +// MockStreamServerGetEntryCall wrap *gomock.Call +type MockStreamServerGetEntryCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerGetEntryCall) Return(arg0 datastreamer.FileEntry, arg1 error) *MockStreamServerGetEntryCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerGetEntryCall) Do(f func(uint64) (datastreamer.FileEntry, error)) *MockStreamServerGetEntryCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerGetEntryCall) DoAndReturn(f func(uint64) (datastreamer.FileEntry, error)) *MockStreamServerGetEntryCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetFirstEventAfterBookmark mocks base method. +func (m *MockStreamServer) GetFirstEventAfterBookmark(arg0 []byte) (datastreamer.FileEntry, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFirstEventAfterBookmark", arg0) + ret0, _ := ret[0].(datastreamer.FileEntry) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetFirstEventAfterBookmark indicates an expected call of GetFirstEventAfterBookmark. 
+func (mr *MockStreamServerMockRecorder) GetFirstEventAfterBookmark(arg0 any) *MockStreamServerGetFirstEventAfterBookmarkCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFirstEventAfterBookmark", reflect.TypeOf((*MockStreamServer)(nil).GetFirstEventAfterBookmark), arg0) + return &MockStreamServerGetFirstEventAfterBookmarkCall{Call: call} +} + +// MockStreamServerGetFirstEventAfterBookmarkCall wrap *gomock.Call +type MockStreamServerGetFirstEventAfterBookmarkCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerGetFirstEventAfterBookmarkCall) Return(arg0 datastreamer.FileEntry, arg1 error) *MockStreamServerGetFirstEventAfterBookmarkCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerGetFirstEventAfterBookmarkCall) Do(f func([]byte) (datastreamer.FileEntry, error)) *MockStreamServerGetFirstEventAfterBookmarkCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerGetFirstEventAfterBookmarkCall) DoAndReturn(f func([]byte) (datastreamer.FileEntry, error)) *MockStreamServerGetFirstEventAfterBookmarkCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetHeader mocks base method. +func (m *MockStreamServer) GetHeader() datastreamer.HeaderEntry { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHeader") + ret0, _ := ret[0].(datastreamer.HeaderEntry) + return ret0 +} + +// GetHeader indicates an expected call of GetHeader. +func (mr *MockStreamServerMockRecorder) GetHeader() *MockStreamServerGetHeaderCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHeader", reflect.TypeOf((*MockStreamServer)(nil).GetHeader)) + return &MockStreamServerGetHeaderCall{Call: call} +} + +// MockStreamServerGetHeaderCall wrap *gomock.Call +type MockStreamServerGetHeaderCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerGetHeaderCall) Return(arg0 datastreamer.HeaderEntry) *MockStreamServerGetHeaderCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerGetHeaderCall) Do(f func() datastreamer.HeaderEntry) *MockStreamServerGetHeaderCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerGetHeaderCall) DoAndReturn(f func() datastreamer.HeaderEntry) *MockStreamServerGetHeaderCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// RollbackAtomicOp mocks base method. +func (m *MockStreamServer) RollbackAtomicOp() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RollbackAtomicOp") + ret0, _ := ret[0].(error) + return ret0 +} + +// RollbackAtomicOp indicates an expected call of RollbackAtomicOp. 
+func (mr *MockStreamServerMockRecorder) RollbackAtomicOp() *MockStreamServerRollbackAtomicOpCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RollbackAtomicOp", reflect.TypeOf((*MockStreamServer)(nil).RollbackAtomicOp)) + return &MockStreamServerRollbackAtomicOpCall{Call: call} +} + +// MockStreamServerRollbackAtomicOpCall wrap *gomock.Call +type MockStreamServerRollbackAtomicOpCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerRollbackAtomicOpCall) Return(arg0 error) *MockStreamServerRollbackAtomicOpCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerRollbackAtomicOpCall) Do(f func() error) *MockStreamServerRollbackAtomicOpCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerRollbackAtomicOpCall) DoAndReturn(f func() error) *MockStreamServerRollbackAtomicOpCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// Start mocks base method. +func (m *MockStreamServer) Start() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Start") + ret0, _ := ret[0].(error) + return ret0 +} + +// Start indicates an expected call of Start. +func (mr *MockStreamServerMockRecorder) Start() *MockStreamServerStartCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockStreamServer)(nil).Start)) + return &MockStreamServerStartCall{Call: call} +} + +// MockStreamServerStartCall wrap *gomock.Call +type MockStreamServerStartCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerStartCall) Return(arg0 error) *MockStreamServerStartCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerStartCall) Do(f func() error) *MockStreamServerStartCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerStartCall) DoAndReturn(f func() error) *MockStreamServerStartCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// StartAtomicOp mocks base method. +func (m *MockStreamServer) StartAtomicOp() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StartAtomicOp") + ret0, _ := ret[0].(error) + return ret0 +} + +// StartAtomicOp indicates an expected call of StartAtomicOp. +func (mr *MockStreamServerMockRecorder) StartAtomicOp() *MockStreamServerStartAtomicOpCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartAtomicOp", reflect.TypeOf((*MockStreamServer)(nil).StartAtomicOp)) + return &MockStreamServerStartAtomicOpCall{Call: call} +} + +// MockStreamServerStartAtomicOpCall wrap *gomock.Call +type MockStreamServerStartAtomicOpCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerStartAtomicOpCall) Return(arg0 error) *MockStreamServerStartAtomicOpCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerStartAtomicOpCall) Do(f func() error) *MockStreamServerStartAtomicOpCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerStartAtomicOpCall) DoAndReturn(f func() error) *MockStreamServerStartAtomicOpCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// TruncateFile mocks base method. 
+func (m *MockStreamServer) TruncateFile(arg0 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TruncateFile", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// TruncateFile indicates an expected call of TruncateFile. +func (mr *MockStreamServerMockRecorder) TruncateFile(arg0 any) *MockStreamServerTruncateFileCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TruncateFile", reflect.TypeOf((*MockStreamServer)(nil).TruncateFile), arg0) + return &MockStreamServerTruncateFileCall{Call: call} +} + +// MockStreamServerTruncateFileCall wrap *gomock.Call +type MockStreamServerTruncateFileCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerTruncateFileCall) Return(arg0 error) *MockStreamServerTruncateFileCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerTruncateFileCall) Do(f func(uint64) error) *MockStreamServerTruncateFileCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerTruncateFileCall) DoAndReturn(f func(uint64) error) *MockStreamServerTruncateFileCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// UpdateEntryData mocks base method. +func (m *MockStreamServer) UpdateEntryData(arg0 uint64, arg1 datastreamer.EntryType, arg2 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateEntryData", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateEntryData indicates an expected call of UpdateEntryData. +func (mr *MockStreamServerMockRecorder) UpdateEntryData(arg0, arg1, arg2 any) *MockStreamServerUpdateEntryDataCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateEntryData", reflect.TypeOf((*MockStreamServer)(nil).UpdateEntryData), arg0, arg1, arg2) + return &MockStreamServerUpdateEntryDataCall{Call: call} +} + +// MockStreamServerUpdateEntryDataCall wrap *gomock.Call +type MockStreamServerUpdateEntryDataCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerUpdateEntryDataCall) Return(arg0 error) *MockStreamServerUpdateEntryDataCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerUpdateEntryDataCall) Do(f func(uint64, datastreamer.EntryType, []byte) error) *MockStreamServerUpdateEntryDataCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerUpdateEntryDataCall) DoAndReturn(f func(uint64, datastreamer.EntryType, []byte) error) *MockStreamServerUpdateEntryDataCall { + c.Call = c.Call.DoAndReturn(f) + return c +} diff --git a/zk/datastream/server/data_stream_server.go b/zk/datastream/server/data_stream_server.go index 93eb3c6c27c..9fb48630ca8 100644 --- a/zk/datastream/server/data_stream_server.go +++ b/zk/datastream/server/data_stream_server.go @@ -2,8 +2,10 @@ package server import ( "fmt" + "time" "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" + dslog "github.com/0xPolygonHermez/zkevm-data-streamer/log" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/core/rawdb" @@ -41,9 +43,9 @@ const ( EtrogBatchNumber = 7 ) -type DataStreamServer struct { - stream *datastreamer.StreamServer - chainId uint64 +type ZkEVMDataStreamServer struct { + streamServer StreamServer + chainId uint64 highestBlockWritten, highestClosedBatchWritten, 
highestBatchWritten *uint64 @@ -59,16 +61,31 @@ type DataStreamEntryProto interface { Type() types.EntryType } -func NewDataStreamServer(stream *datastreamer.StreamServer, chainId uint64) *DataStreamServer { - return &DataStreamServer{ - stream: stream, +type ZkEVMDataStreamServerFactory struct { +} + +func NewZkEVMDataStreamServerFactory() *ZkEVMDataStreamServerFactory { + return &ZkEVMDataStreamServerFactory{} +} + +func (f *ZkEVMDataStreamServerFactory) CreateStreamServer(port uint16, version uint8, systemID uint64, streamType datastreamer.StreamType, fileName string, writeTimeout time.Duration, inactivityTimeout time.Duration, inactivityCheckInterval time.Duration, cfg *dslog.Config) (StreamServer, error) { + return datastreamer.NewServer(port, version, systemID, streamType, fileName, writeTimeout, inactivityTimeout, inactivityCheckInterval, cfg) +} + +func (f *ZkEVMDataStreamServerFactory) CreateDataStreamServer(streamServer StreamServer, chainId uint64) DataStreamServer { + return &ZkEVMDataStreamServer{ + streamServer: streamServer, chainId: chainId, highestBlockWritten: nil, highestBatchWritten: nil, } } -func (srv *DataStreamServer) GetChainId() uint64 { +func (srv *ZkEVMDataStreamServer) GetStreamServer() StreamServer { + return srv.streamServer +} + +func (srv *ZkEVMDataStreamServer) GetChainId() uint64 { return srv.chainId } @@ -121,8 +138,8 @@ func NewDataStreamEntries(size int) *DataStreamEntries { } } -func (srv *DataStreamServer) commitAtomicOp(latestBlockNum, latestBatchNum, latestClosedBatch *uint64) error { - if err := srv.stream.CommitAtomicOp(); err != nil { +func (srv *ZkEVMDataStreamServer) commitAtomicOp(latestBlockNum, latestBatchNum, latestClosedBatch *uint64) error { + if err := srv.streamServer.CommitAtomicOp(); err != nil { return err } @@ -147,7 +164,7 @@ func (srv *DataStreamServer) commitAtomicOp(latestBlockNum, latestBatchNum, late return nil } -func (srv *DataStreamServer) commitEntriesToStreamProto(entries []DataStreamEntryProto) error { +func (srv *ZkEVMDataStreamServer) commitEntriesToStreamProto(entries []DataStreamEntryProto) error { for _, entry := range entries { entryType := entry.Type() @@ -157,11 +174,11 @@ func (srv *DataStreamServer) commitEntriesToStreamProto(entries []DataStreamEntr } if entryType == types.BookmarkEntryType { - if _, err = srv.stream.AddStreamBookmark(em); err != nil { + if _, err = srv.streamServer.AddStreamBookmark(em); err != nil { return err } } else { - if _, err = srv.stream.AddStreamEntry(datastreamer.EntryType(entryType), em); err != nil { + if _, err = srv.streamServer.AddStreamEntry(datastreamer.EntryType(entryType), em); err != nil { return err } } @@ -434,8 +451,8 @@ func BuildWholeBatchStreamEntriesProto( return allEntries, nil } -func (srv *DataStreamServer) IsLastEntryBatchEnd() (isBatchEnd bool, err error) { - header := srv.stream.GetHeader() +func (srv *ZkEVMDataStreamServer) IsLastEntryBatchEnd() (isBatchEnd bool, err error) { + header := srv.streamServer.GetHeader() if header.TotalEntries == 0 { return false, nil @@ -444,7 +461,7 @@ func (srv *DataStreamServer) IsLastEntryBatchEnd() (isBatchEnd bool, err error) //find end block entry to delete from it onward entryNum := header.TotalEntries - 1 var entry datastreamer.FileEntry - entry, err = srv.stream.GetEntry(entryNum) + entry, err = srv.streamServer.GetEntry(entryNum) if err != nil { return false, err } @@ -452,12 +469,12 @@ func (srv *DataStreamServer) IsLastEntryBatchEnd() (isBatchEnd bool, err error) return uint32(entry.Type) == 
uint32(types.EntryTypeBatchEnd), nil } -func (srv *DataStreamServer) GetHighestBlockNumber() (uint64, error) { +func (srv *ZkEVMDataStreamServer) GetHighestBlockNumber() (uint64, error) { if srv.highestBlockWritten != nil { return *srv.highestBlockWritten, nil } - header := srv.stream.GetHeader() + header := srv.streamServer.GetHeader() if header.TotalEntries == 0 { return 0, nil @@ -468,7 +485,7 @@ func (srv *DataStreamServer) GetHighestBlockNumber() (uint64, error) { var err error var entry datastreamer.FileEntry for { - entry, err = srv.stream.GetEntry(entryNum) + entry, err = srv.streamServer.GetEntry(entryNum) if err != nil { return 0, err } @@ -497,7 +514,7 @@ func (srv *DataStreamServer) GetHighestBlockNumber() (uint64, error) { return 0, nil } -func (srv *DataStreamServer) GetHighestBatchNumber() (uint64, error) { +func (srv *ZkEVMDataStreamServer) GetHighestBatchNumber() (uint64, error) { if srv.highestBatchWritten != nil { return *srv.highestBatchWritten, nil } @@ -520,7 +537,7 @@ func (srv *DataStreamServer) GetHighestBatchNumber() (uint64, error) { return batch.Number, nil } -func (srv *DataStreamServer) GetHighestClosedBatch() (uint64, error) { +func (srv *ZkEVMDataStreamServer) GetHighestClosedBatch() (uint64, error) { if srv.highestClosedBatchWritten != nil { return *srv.highestClosedBatchWritten, nil } @@ -535,7 +552,7 @@ func (srv *DataStreamServer) GetHighestClosedBatch() (uint64, error) { return number, nil } -func (srv *DataStreamServer) GetHighestClosedBatchNoCache() (uint64, error) { +func (srv *ZkEVMDataStreamServer) GetHighestClosedBatchNoCache() (uint64, error) { entry, found, err := srv.getLastEntryOfType(datastreamer.EntryType(types.EntryTypeBatchEnd)) if err != nil { return 0, err @@ -555,7 +572,7 @@ func (srv *DataStreamServer) GetHighestClosedBatchNoCache() (uint64, error) { // must be done on offline server // finds the position of the block bookmark entry and deletes from it onward // blockNumber 10 would return the stream to before block 10 bookmark -func (srv *DataStreamServer) UnwindToBlock(blockNumber uint64) error { +func (srv *ZkEVMDataStreamServer) UnwindToBlock(blockNumber uint64) error { // check if server is online // find blockend entry @@ -564,18 +581,18 @@ func (srv *DataStreamServer) UnwindToBlock(blockNumber uint64) error { if err != nil { return err } - entryNum, err := srv.stream.GetBookmark(marshalled) + entryNum, err := srv.streamServer.GetBookmark(marshalled) if err != nil { return err } - return srv.stream.TruncateFile(entryNum) + return srv.streamServer.TruncateFile(entryNum) } // must be done on offline server // finds the position of the endBlock entry for the given number // and unwinds the datastream file to it -func (srv *DataStreamServer) UnwindToBatchStart(batchNumber uint64) error { +func (srv *ZkEVMDataStreamServer) UnwindToBatchStart(batchNumber uint64) error { // check if server is online // find blockend entry @@ -584,21 +601,21 @@ func (srv *DataStreamServer) UnwindToBatchStart(batchNumber uint64) error { if err != nil { return err } - entryNum, err := srv.stream.GetBookmark(marshalled) + entryNum, err := srv.streamServer.GetBookmark(marshalled) if err != nil { return err } - return srv.stream.TruncateFile(entryNum) + return srv.streamServer.TruncateFile(entryNum) } -func (srv *DataStreamServer) getLastEntryOfType(entryType datastreamer.EntryType) (datastreamer.FileEntry, bool, error) { - header := srv.stream.GetHeader() +func (srv *ZkEVMDataStreamServer) getLastEntryOfType(entryType datastreamer.EntryType) 
(datastreamer.FileEntry, bool, error) { + header := srv.streamServer.GetHeader() emtryEntry := datastreamer.FileEntry{} // loop will become infinite if using unsigned type for entryNum := int64(header.TotalEntries - 1); entryNum >= 0; entryNum-- { - entry, err := srv.stream.GetEntry(uint64(entryNum)) + entry, err := srv.streamServer.GetEntry(uint64(entryNum)) if err != nil { return emtryEntry, false, err } @@ -611,12 +628,12 @@ func (srv *DataStreamServer) getLastEntryOfType(entryType datastreamer.EntryType } type dataStreamServerIterator struct { - stream *datastreamer.StreamServer + stream StreamServer curEntryNum uint64 header uint64 } -func newDataStreamServerIterator(stream *datastreamer.StreamServer, start uint64) *dataStreamServerIterator { +func newDataStreamServerIterator(stream StreamServer, start uint64) *dataStreamServerIterator { return &dataStreamServerIterator{ stream: stream, curEntryNum: start, @@ -650,20 +667,20 @@ func (it *dataStreamServerIterator) NextFileEntry() (entry *types.FileEntry, err }, nil } -func (srv *DataStreamServer) ReadBatches(start uint64, end uint64) ([][]*types.FullL2Block, error) { +func (srv *ZkEVMDataStreamServer) ReadBatches(start uint64, end uint64) ([][]*types.FullL2Block, error) { bookmark := types.NewBookmarkProto(start, datastream.BookmarkType_BOOKMARK_TYPE_BATCH) marshalled, err := bookmark.Marshal() if err != nil { return nil, err } - entryNum, err := srv.stream.GetBookmark(marshalled) + entryNum, err := srv.streamServer.GetBookmark(marshalled) if err != nil { return nil, err } - iterator := newDataStreamServerIterator(srv.stream, entryNum) + iterator := newDataStreamServerIterator(srv.streamServer, entryNum) return ReadBatches(iterator, start, end) } diff --git a/zk/datastream/server/datastream_populate.go b/zk/datastream/server/datastream_populate.go index 2e0ee750e83..68544d94b83 100644 --- a/zk/datastream/server/datastream_populate.go +++ b/zk/datastream/server/datastream_populate.go @@ -10,11 +10,11 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/core/rawdb" eritypes "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/zk/datastream/proto/github.com/0xPolygonHermez/zkevm-node/state/datastream" "github.com/ledgerwatch/erigon/zk/hermez_db" "github.com/ledgerwatch/erigon/zk/utils" "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/erigon/eth/stagedsync/stages" ) const ( @@ -28,7 +28,7 @@ const ( // basically writes a whole standalone batch // plus the GER updates if the batch gap is > 1 // starts atomicOp and commits it internally -func (srv *DataStreamServer) WriteWholeBatchToStream( +func (srv *ZkEVMDataStreamServer) WriteWholeBatchToStream( logPrefix string, tx kv.Tx, reader DbReader, @@ -55,10 +55,10 @@ func (srv *DataStreamServer) WriteWholeBatchToStream( return err } - if err = srv.stream.StartAtomicOp(); err != nil { + if err = srv.streamServer.StartAtomicOp(); err != nil { return err } - defer srv.stream.RollbackAtomicOp() + defer srv.streamServer.RollbackAtomicOp() blocks := make([]eritypes.Block, 0) txsPerBlock := make(map[uint64][]eritypes.Transaction) @@ -91,7 +91,7 @@ func (srv *DataStreamServer) WriteWholeBatchToStream( // writes consecutively blocks from-to // checks for all batch related stuff in the meantime - batch start, batche end, etc // starts atomicOp and commits it internally -func (srv *DataStreamServer) WriteBlocksToStreamConsecutively( +func (srv *ZkEVMDataStreamServer) 
WriteBlocksToStreamConsecutively( ctx context.Context, logPrefix string, tx kv.Tx, @@ -122,10 +122,10 @@ func (srv *DataStreamServer) WriteBlocksToStreamConsecutively( return err } - if err = srv.stream.StartAtomicOp(); err != nil { + if err = srv.streamServer.StartAtomicOp(); err != nil { return err } - defer srv.stream.RollbackAtomicOp() + defer srv.streamServer.RollbackAtomicOp() // check if a new batch starts and the old needs closing before that // if it is already closed with a batch end, do not add a new batch end @@ -201,10 +201,10 @@ LOOP: return err } entries = make([]DataStreamEntryProto, 0, insertEntryCount) - if err = srv.stream.CommitAtomicOp(); err != nil { + if err = srv.streamServer.CommitAtomicOp(); err != nil { return err } - if err = srv.stream.StartAtomicOp(); err != nil { + if err = srv.streamServer.StartAtomicOp(); err != nil { return err } } @@ -224,7 +224,7 @@ LOOP: // gets other needed data from the reader // writes a batchBookmark and batch start (if needed), block bookmark, block and txs in it // basically a full standalone block -func (srv *DataStreamServer) WriteBlockWithBatchStartToStream( +func (srv *ZkEVMDataStreamServer) WriteBlockWithBatchStartToStream( logPrefix string, tx kv.Tx, reader DbReader, @@ -241,10 +241,10 @@ func (srv *DataStreamServer) WriteBlockWithBatchStartToStream( return err } - if err = srv.stream.StartAtomicOp(); err != nil { + if err = srv.streamServer.StartAtomicOp(); err != nil { return err } - defer srv.stream.RollbackAtomicOp() + defer srv.streamServer.RollbackAtomicOp() // if start of new batch add batch start entries var batchStartEntries *DataStreamEntries @@ -285,7 +285,7 @@ func (srv *DataStreamServer) WriteBlockWithBatchStartToStream( // if there is something, try to unwind it // in the unwind chek if the block is at batch start // if it is - unwind to previous batch's end, so it deletes batch stat of current batch as well -func (srv *DataStreamServer) UnwindIfNecessary(logPrefix string, reader DbReader, blockNum, prevBlockBatchNum, batchNum uint64) error { +func (srv *ZkEVMDataStreamServer) UnwindIfNecessary(logPrefix string, reader DbReader, blockNum, prevBlockBatchNum, batchNum uint64) error { // if from is higher than the last datastream block number - unwind the stream highestDatastreamBlock, err := srv.GetHighestBlockNumber() if err != nil { @@ -323,7 +323,7 @@ func (srv *DataStreamServer) UnwindIfNecessary(logPrefix string, reader DbReader return nil } -func (srv *DataStreamServer) WriteBatchEnd( +func (srv *ZkEVMDataStreamServer) WriteBatchEnd( reader DbReader, batchNumber uint64, stateRoot *common.Hash, @@ -339,10 +339,10 @@ func (srv *DataStreamServer) WriteBatchEnd( return err } - if err = srv.stream.StartAtomicOp(); err != nil { + if err = srv.streamServer.StartAtomicOp(); err != nil { return err } - defer srv.stream.RollbackAtomicOp() + defer srv.streamServer.RollbackAtomicOp() batchEndEntries, err := addBatchEndEntriesProto(batchNumber, stateRoot, gers, localExitRoot) if err != nil { @@ -361,7 +361,7 @@ func (srv *DataStreamServer) WriteBatchEnd( return nil } -func (srv *DataStreamServer) WriteGenesisToStream( +func (srv *ZkEVMDataStreamServer) WriteGenesisToStream( genesis *eritypes.Block, reader *hermez_db.HermezDbReader, tx kv.Tx, @@ -376,11 +376,11 @@ func (srv *DataStreamServer) WriteGenesisToStream( return err } - err = srv.stream.StartAtomicOp() + err = srv.streamServer.StartAtomicOp() if err != nil { return err } - defer srv.stream.RollbackAtomicOp() + defer srv.streamServer.RollbackAtomicOp() 
batchBookmark := newBatchBookmarkEntryProto(genesis.NumberU64()) l2BlockBookmark := newL2BlockBookmarkEntryProto(genesis.NumberU64()) diff --git a/zk/datastream/server/interfaces.go b/zk/datastream/server/interfaces.go new file mode 100644 index 00000000000..ddc1a71d5c0 --- /dev/null +++ b/zk/datastream/server/interfaces.go @@ -0,0 +1,58 @@ +package server + +import ( + "context" + "time" + + "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" + dslog "github.com/0xPolygonHermez/zkevm-data-streamer/log" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv" + eritypes "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/zk/datastream/types" + "github.com/ledgerwatch/erigon/zk/hermez_db" +) + +//go:generate mockgen -typed=true -destination=../mocks/stream_server_mock.go -package=mocks . StreamServer +//go:generate mockgen -typed=true -destination=../mocks/data_stream_server_mock.go -package=mocks . DataStreamServer + +type StreamServer interface { + Start() error + StartAtomicOp() error + AddStreamEntry(etype datastreamer.EntryType, data []byte) (uint64, error) + AddStreamBookmark(bookmark []byte) (uint64, error) + CommitAtomicOp() error + RollbackAtomicOp() error + TruncateFile(entryNum uint64) error + UpdateEntryData(entryNum uint64, etype datastreamer.EntryType, data []byte) error + GetHeader() datastreamer.HeaderEntry + GetEntry(entryNum uint64) (datastreamer.FileEntry, error) + GetBookmark(bookmark []byte) (uint64, error) + GetFirstEventAfterBookmark(bookmark []byte) (datastreamer.FileEntry, error) + GetDataBetweenBookmarks(bookmarkFrom, bookmarkTo []byte) ([]byte, error) + BookmarkPrintDump() +} + +type DataStreamServer interface { + GetStreamServer() StreamServer + GetChainId() uint64 + IsLastEntryBatchEnd() (isBatchEnd bool, err error) + GetHighestBlockNumber() (uint64, error) + GetHighestBatchNumber() (uint64, error) + GetHighestClosedBatch() (uint64, error) + GetHighestClosedBatchNoCache() (uint64, error) + UnwindToBlock(blockNumber uint64) error + UnwindToBatchStart(batchNumber uint64) error + ReadBatches(start uint64, end uint64) ([][]*types.FullL2Block, error) + WriteWholeBatchToStream(logPrefix string, tx kv.Tx, reader DbReader, prevBatchNum, batchNum uint64) error + WriteBlocksToStreamConsecutively(ctx context.Context, logPrefix string, tx kv.Tx, reader DbReader, from, to uint64) error + WriteBlockWithBatchStartToStream(logPrefix string, tx kv.Tx, reader DbReader, forkId, batchNum, prevBlockBatchNum uint64, prevBlock, block eritypes.Block) (err error) + UnwindIfNecessary(logPrefix string, reader DbReader, blockNum, prevBlockBatchNum, batchNum uint64) error + WriteBatchEnd(reader DbReader, batchNumber uint64, stateRoot *common.Hash, localExitRoot *common.Hash) (err error) + WriteGenesisToStream(genesis *eritypes.Block, reader *hermez_db.HermezDbReader, tx kv.Tx) error +} + +type DataStreamServerFactory interface { + CreateStreamServer(port uint16, version uint8, systemID uint64, streamType datastreamer.StreamType, fileName string, writeTimeout time.Duration, inactivityTimeout time.Duration, inactivityCheckInterval time.Duration, cfg *dslog.Config) (StreamServer, error) + CreateDataStreamServer(stream StreamServer, chainId uint64) DataStreamServer +} diff --git a/zk/debug_tools/datastream-host/main.go b/zk/debug_tools/datastream-host/main.go index 003133c617a..41c9faa879f 100644 --- a/zk/debug_tools/datastream-host/main.go +++ b/zk/debug_tools/datastream-host/main.go @@ -9,9 +9,13 @@ import ( 
"github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" log2 "github.com/0xPolygonHermez/zkevm-data-streamer/log" + "github.com/ledgerwatch/erigon/zk/datastream/server" ) -var file = "" +var ( + file = "" + dataStreamServerFactory = server.NewZkEVMDataStreamServerFactory() +) func main() { flag.StringVar(&file, "file", "", "datastream file") @@ -23,7 +27,7 @@ func main() { Outputs: []string{"stdout"}, } - stream, err := datastreamer.NewServer(uint16(6900), uint8(3), 1, datastreamer.StreamType(1), file, 5*time.Second, 10*time.Second, 60*time.Second, logConfig) + stream, err := dataStreamServerFactory.CreateStreamServer(uint16(6900), uint8(3), 1, datastreamer.StreamType(1), file, 5*time.Second, 10*time.Second, 60*time.Second, logConfig) if err != nil { fmt.Println("Error creating datastream server:", err) return diff --git a/zk/legacy_executor_verifier/legacy_executor_verifier.go b/zk/legacy_executor_verifier/legacy_executor_verifier.go index 9415e4ea857..17f5446bf40 100644 --- a/zk/legacy_executor_verifier/legacy_executor_verifier.go +++ b/zk/legacy_executor_verifier/legacy_executor_verifier.go @@ -11,8 +11,6 @@ import ( "errors" "fmt" - "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" - "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/core/rawdb" @@ -123,7 +121,7 @@ type LegacyExecutorVerifier struct { executorNumber int cancelAllVerifications atomic.Bool - streamServer *server.DataStreamServer + streamServer server.DataStreamServer WitnessGenerator WitnessGenerator promises []*Promise[*VerifierBundle] @@ -133,12 +131,10 @@ type LegacyExecutorVerifier struct { func NewLegacyExecutorVerifier( cfg ethconfig.Zk, executors []*Executor, - chainCfg *chain.Config, db kv.RwDB, witnessGenerator WitnessGenerator, - stream *datastreamer.StreamServer, + streamServer server.DataStreamServer, ) *LegacyExecutorVerifier { - streamServer := server.NewDataStreamServer(stream, chainCfg.ChainID.Uint64()) return &LegacyExecutorVerifier{ db: db, cfg: cfg, diff --git a/zk/stages/stage_dataStreamCatchup.go b/zk/stages/stage_data_stream_catch_up.go similarity index 82% rename from zk/stages/stage_dataStreamCatchup.go rename to zk/stages/stage_data_stream_catch_up.go index 1b43a0c5681..0fd50d59029 100644 --- a/zk/stages/stage_dataStreamCatchup.go +++ b/zk/stages/stage_data_stream_catch_up.go @@ -4,7 +4,6 @@ import ( "context" "fmt" - "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/eth/stagedsync" @@ -16,20 +15,18 @@ import ( ) type DataStreamCatchupCfg struct { - db kv.RwDB - stream *datastreamer.StreamServer - chainId uint64 - streamVersion int - hasExecutors bool + db kv.RwDB + dataStreamServer server.DataStreamServer + streamVersion int + hasExecutors bool } -func StageDataStreamCatchupCfg(stream *datastreamer.StreamServer, db kv.RwDB, chainId uint64, streamVersion int, hasExecutors bool) DataStreamCatchupCfg { +func StageDataStreamCatchupCfg(dataStreamServer server.DataStreamServer, db kv.RwDB, chainId uint64, streamVersion int, hasExecutors bool) DataStreamCatchupCfg { return DataStreamCatchupCfg{ - stream: stream, - db: db, - chainId: chainId, - streamVersion: streamVersion, - hasExecutors: hasExecutors, + dataStreamServer: dataStreamServer, + db: db, + streamVersion: streamVersion, + hasExecutors: hasExecutors, } } @@ -41,9 +38,8 @@ func 
SpawnStageDataStreamCatchup( ) error { logPrefix := s.LogPrefix() log.Info(fmt.Sprintf("[%s] Starting...", logPrefix)) - stream := cfg.stream - if stream == nil { + if cfg.dataStreamServer == nil { // skip the stage if there is no streamer provided log.Info(fmt.Sprintf("[%s] no streamer provided, skipping stage", logPrefix)) return nil @@ -61,7 +57,7 @@ func SpawnStageDataStreamCatchup( createdTx = true } - finalBlockNumber, err := CatchupDatastream(ctx, logPrefix, tx, stream, cfg.chainId) + finalBlockNumber, err := CatchupDatastream(ctx, logPrefix, tx, cfg.dataStreamServer) if err != nil { return err } @@ -77,8 +73,7 @@ func SpawnStageDataStreamCatchup( return err } -func CatchupDatastream(ctx context.Context, logPrefix string, tx kv.RwTx, stream *datastreamer.StreamServer, chainId uint64) (uint64, error) { - srv := server.NewDataStreamServer(stream, chainId) +func CatchupDatastream(ctx context.Context, logPrefix string, tx kv.RwTx, srv server.DataStreamServer) (uint64, error) { reader := hermez_db.NewHermezDbReader(tx) var ( @@ -122,7 +117,7 @@ func CatchupDatastream(ctx context.Context, logPrefix string, tx kv.RwTx, stream // a quick check that we haven't written anything to the stream yet. Stage progress is a little misleading // for genesis as we are in fact at block 0 here! Getting the header has some performance overhead, so // we only want to do this when we know the previous progress is 0. - header := stream.GetHeader() + header := srv.GetStreamServer().GetHeader() if header.TotalEntries == 0 { genesis, err := rawdb.ReadBlockByNumber(tx, 0) if err != nil { diff --git a/zk/stages/stage_data_stream_catch_up_test.go b/zk/stages/stage_data_stream_catch_up_test.go new file mode 100644 index 00000000000..85f8b57b345 --- /dev/null +++ b/zk/stages/stage_data_stream_catch_up_test.go @@ -0,0 +1,99 @@ +package stages + +import ( + "context" + "math/big" + "os" + "testing" + + "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/stagedsync" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/smt/pkg/db" + mocks "github.com/ledgerwatch/erigon/zk/datastream/mock_services" + "github.com/ledgerwatch/erigon/zk/hermez_db" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +func TestSpawnStageDataStreamCatchup(t *testing.T) { + // Arrange + os.Setenv("CDK_ERIGON_SEQUENCER", "1") + + ctx, db1 := context.Background(), memdb.NewTestDB(t) + tx1 := memdb.BeginRw(t, db1) + err := hermez_db.CreateHermezBuckets(tx1) + require.NoError(t, err) + err = db.CreateEriDbBuckets(tx1) + require.NoError(t, err) + + s := &stagedsync.StageState{ID: stages.DataStream, BlockNumber: 0} + + hDB := hermez_db.NewHermezDb(tx1) + + err = hDB.WriteBlockBatch(0, 0) + require.NoError(t, err) + + genesisHeader := &types.Header{ + Number: big.NewInt(0), + Time: 0, + Difficulty: big.NewInt(1), + GasLimit: 8000000, + GasUsed: 0, + ParentHash: common.HexToHash("0x1"), + TxHash: common.HexToHash("0x2"), + ReceiptHash: common.HexToHash("0x3"), + } + + txs := []types.Transaction{} + uncles := []*types.Header{} + receipts := []*types.Receipt{} + withdrawals := []*types.Withdrawal{} + + genesisBlock := types.NewBlock(genesisHeader, txs, uncles, receipts, withdrawals) + + err = rawdb.WriteBlock(tx1, genesisBlock) + require.NoError(t, err) + err = 
rawdb.WriteCanonicalHash(tx1, genesisBlock.Hash(), genesisBlock.NumberU64()) + require.NoError(t, err) + + err = stages.SaveStageProgress(tx1, stages.DataStream, 0) + require.NoError(t, err) + err = stages.SaveStageProgress(tx1, stages.Execution, 20) + require.NoError(t, err) + + chainID := uint64(1) + streamVersion := 1 + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + streamServerMock := mocks.NewMockStreamServer(mockCtrl) + dataStreamServerMock := mocks.NewMockDataStreamServer(mockCtrl) + + streamServerHeader := datastreamer.HeaderEntry{TotalEntries: 0} + streamServerMock.EXPECT().GetHeader().Return(streamServerHeader) + + dataStreamServerMock.EXPECT().GetHighestBlockNumber().Return(uint64(0), nil) + dataStreamServerMock.EXPECT().GetStreamServer().Return(streamServerMock) + + hDBReaderMatcher := gomock.AssignableToTypeOf(&hermez_db.HermezDbReader{}) + + dataStreamServerMock.EXPECT().WriteGenesisToStream(gomock.Cond(func(x any) bool { + return x.(*types.Block).Hash() == genesisBlock.Hash() + }), hDBReaderMatcher, tx1).Return(nil) + + dataStreamServerMock.EXPECT().WriteBlocksToStreamConsecutively(ctx, s.LogPrefix(), tx1, hDBReaderMatcher, uint64(1), uint64(20)).Return(nil) + + cfg := StageDataStreamCatchupCfg(dataStreamServerMock, db1, chainID, streamVersion, true) + + // Act + err = SpawnStageDataStreamCatchup(s, ctx, tx1, cfg) + require.NoError(t, err) + + // Assert + +} diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index 3d06f8af642..3673d90f8cb 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -40,7 +40,7 @@ func SpawnSequencingStage( return err } - highestBatchInDs, err := cfg.datastreamServer.GetHighestBatchNumber() + highestBatchInDs, err := cfg.dataStreamServer.GetHighestBatchNumber() if err != nil { return err } @@ -125,7 +125,7 @@ func sequencingBatchStep( return err } - if err = cfg.datastreamServer.WriteWholeBatchToStream(logPrefix, sdb.tx, sdb.hermezDb.HermezDbReader, lastBatch, injectedBatchBatchNumber); err != nil { + if err = cfg.dataStreamServer.WriteWholeBatchToStream(logPrefix, sdb.tx, sdb.hermezDb.HermezDbReader, lastBatch, injectedBatchBatchNumber); err != nil { return err } if err = stages.SaveStageProgress(sdb.tx, stages.DataStream, 1); err != nil { diff --git a/zk/stages/stage_sequence_execute_data_stream.go b/zk/stages/stage_sequence_execute_data_stream.go index 00b32cc9393..20258f03fa4 100644 --- a/zk/stages/stage_sequence_execute_data_stream.go +++ b/zk/stages/stage_sequence_execute_data_stream.go @@ -20,7 +20,7 @@ type SequencerBatchStreamWriter struct { logPrefix string legacyVerifier *verifier.LegacyExecutorVerifier sdb *stageDb - streamServer *server.DataStreamServer + streamServer server.DataStreamServer hasExecutors bool } @@ -32,7 +32,7 @@ func newSequencerBatchStreamWriter(batchContext *BatchContext, batchState *Batch logPrefix: batchContext.s.LogPrefix(), legacyVerifier: batchContext.cfg.legacyVerifier, sdb: batchContext.sdb, - streamServer: batchContext.cfg.datastreamServer, + streamServer: batchContext.cfg.dataStreamServer, hasExecutors: batchState.hasExecutorForThisBatch, } } @@ -107,17 +107,17 @@ func (sbc *SequencerBatchStreamWriter) writeBlockDetailsToDatastream(verifiedBun } func alignExecutionToDatastream(batchContext *BatchContext, lastExecutedBlock uint64, u stagedsync.Unwinder) (bool, error) { - lastStartedDatastreamBatch, err := batchContext.cfg.datastreamServer.GetHighestBatchNumber() + lastStartedDatastreamBatch, err := 
batchContext.cfg.dataStreamServer.GetHighestBatchNumber() if err != nil { return false, err } - lastClosedDatastreamBatch, err := batchContext.cfg.datastreamServer.GetHighestClosedBatch() + lastClosedDatastreamBatch, err := batchContext.cfg.dataStreamServer.GetHighestClosedBatch() if err != nil { return false, err } - lastDatastreamBlock, err := batchContext.cfg.datastreamServer.GetHighestBlockNumber() + lastDatastreamBlock, err := batchContext.cfg.dataStreamServer.GetHighestBlockNumber() if err != nil { return false, err } @@ -147,7 +147,7 @@ func alignExecutionToDatastream(batchContext *BatchContext, lastExecutedBlock ui } func finalizeLastBatchInDatastreamIfNotFinalized(batchContext *BatchContext, batchToClose, blockToCloseAt uint64) error { - isLastEntryBatchEnd, err := batchContext.cfg.datastreamServer.IsLastEntryBatchEnd() + isLastEntryBatchEnd, err := batchContext.cfg.dataStreamServer.IsLastEntryBatchEnd() if err != nil { return err } @@ -168,7 +168,7 @@ func finalizeLastBatchInDatastream(batchContext *BatchContext, batchToClose, blo return err } root := lastBlock.Root() - if err = batchContext.cfg.datastreamServer.WriteBatchEnd(batchContext.sdb.hermezDb, batchToClose, &root, &ler); err != nil { + if err = batchContext.cfg.dataStreamServer.WriteBatchEnd(batchContext.sdb.hermezDb, batchToClose, &root, &ler); err != nil { return err } return nil diff --git a/zk/stages/stage_sequence_execute_resequence.go b/zk/stages/stage_sequence_execute_resequence.go index dee485079fc..d7fa2e18ab7 100644 --- a/zk/stages/stage_sequence_execute_resequence.go +++ b/zk/stages/stage_sequence_execute_resequence.go @@ -23,12 +23,12 @@ func resequence( log.Info(fmt.Sprintf("[%s] Last batch %d is lower than highest batch in datastream %d, resequencing...", s.LogPrefix(), lastBatch, highestBatchInDs)) - batches, err := cfg.datastreamServer.ReadBatches(lastBatch+1, highestBatchInDs) + batches, err := cfg.dataStreamServer.ReadBatches(lastBatch+1, highestBatchInDs) if err != nil { return err } - if err = cfg.datastreamServer.UnwindToBatchStart(lastBatch + 1); err != nil { + if err = cfg.dataStreamServer.UnwindToBatchStart(lastBatch + 1); err != nil { return err } diff --git a/zk/stages/stage_sequence_execute_utils.go b/zk/stages/stage_sequence_execute_utils.go index c72c7954de4..62460830fc5 100644 --- a/zk/stages/stage_sequence_execute_utils.go +++ b/zk/stages/stage_sequence_execute_utils.go @@ -14,7 +14,6 @@ import ( "fmt" - "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/consensus" @@ -33,13 +32,13 @@ import ( "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/ledgerwatch/erigon/zk/datastream/server" "github.com/ledgerwatch/erigon/zk/hermez_db" + "github.com/ledgerwatch/erigon/zk/l1infotree" verifier "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" zktx "github.com/ledgerwatch/erigon/zk/tx" "github.com/ledgerwatch/erigon/zk/txpool" zktypes "github.com/ledgerwatch/erigon/zk/types" "github.com/ledgerwatch/erigon/zk/utils" "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/erigon/zk/l1infotree" ) const ( @@ -75,8 +74,7 @@ type SequenceBlockCfg struct { syncCfg ethconfig.Sync genesis *types.Genesis agg *libstate.Aggregator - stream *datastreamer.StreamServer - datastreamServer *server.DataStreamServer + dataStreamServer server.DataStreamServer zk *ethconfig.Zk miningConfig *params.MiningConfig @@ -107,7 +105,7 @@ func StageSequenceBlocksCfg( 
genesis *types.Genesis, syncCfg ethconfig.Sync, agg *libstate.Aggregator, - stream *datastreamer.StreamServer, + dataStreamServer server.DataStreamServer, zk *ethconfig.Zk, miningConfig *params.MiningConfig, @@ -135,8 +133,7 @@ func StageSequenceBlocksCfg( historyV3: historyV3, syncCfg: syncCfg, agg: agg, - stream: stream, - datastreamServer: server.NewDataStreamServer(stream, chainConfig.ChainID.Uint64()), + dataStreamServer: dataStreamServer, zk: zk, miningConfig: miningConfig, txPool: txPool, @@ -173,10 +170,10 @@ func (sCfg *SequenceBlockCfg) toErigonExecuteBlockCfg() stagedsync.ExecuteBlockC func validateIfDatastreamIsAheadOfExecution( s *stagedsync.StageState, -// u stagedsync.Unwinder, + // u stagedsync.Unwinder, ctx context.Context, cfg SequenceBlockCfg, -// historyCfg stagedsync.HistoryCfg, + // historyCfg stagedsync.HistoryCfg, ) error { roTx, err := cfg.db.BeginRo(ctx) if err != nil { @@ -189,7 +186,7 @@ func validateIfDatastreamIsAheadOfExecution( return err } - lastDatastreamBlock, err := cfg.datastreamServer.GetHighestBlockNumber() + lastDatastreamBlock, err := cfg.dataStreamServer.GetHighestBlockNumber() if err != nil { return err } diff --git a/zk/stages/stages.go b/zk/stages/stages.go index f1335b12164..4ada15e99ec 100644 --- a/zk/stages/stages.go +++ b/zk/stages/stages.go @@ -10,6 +10,11 @@ import ( stages "github.com/ledgerwatch/erigon/eth/stagedsync" stages2 "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/zk/datastream/server" +) + +var ( + dataStreamServerFactory = server.NewZkEVMDataStreamServerFactory() ) func SequencerZkStages( From 4c3c3d180f6cc5a7edd13931280bc776cf8d7fb2 Mon Sep 17 00:00:00 2001 From: Ji Hwan KIM <125336262+jhkimqd@users.noreply.github.com> Date: Wed, 13 Nov 2024 17:53:16 +0900 Subject: [PATCH 46/88] test: unit tests for zk/txpool/send.go (#1420) * test: add initial unit tests for zk/txpool/send.go Signed-off-by: Ji Hwan * chore: temporarily remove failing tests Signed-off-by: Ji Hwan * test: use functions under zk directory Signed-off-by: Ji Hwan * fix: fix failing unit tests Signed-off-by: Ji Hwan * chore: cleanup tests Signed-off-by: Ji Hwan * chore: lint Signed-off-by: Ji Hwan --------- Signed-off-by: Ji Hwan Co-authored-by: Valentin Staykov <79150443+V-Staykov@users.noreply.github.com> --- zk/txpool/send_test.go | 176 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 176 insertions(+) create mode 100644 zk/txpool/send_test.go diff --git a/zk/txpool/send_test.go b/zk/txpool/send_test.go new file mode 100644 index 00000000000..256e0374c7e --- /dev/null +++ b/zk/txpool/send_test.go @@ -0,0 +1,176 @@ +package txpool + +import ( + "context" + "fmt" + "testing" + + "github.com/ledgerwatch/erigon-lib/direct" + "github.com/ledgerwatch/erigon-lib/gointerfaces" + "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" + "github.com/ledgerwatch/erigon-lib/gointerfaces/types" + "github.com/ledgerwatch/erigon-lib/txpool" + types2 "github.com/ledgerwatch/erigon-lib/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.uber.org/mock/gomock" +) + +func testRlps(num int) [][]byte { + rlps := make([][]byte, num) + for i := 0; i < num; i++ { + rlps[i] = []byte{1} + } + return rlps +} + +func toHashes(h ...byte) (out types2.Hashes) { + for i := range h { + hash := [32]byte{h[i]} + out = append(out, hash[:]...) 
+ } + return out +} + +func toPeerIDs(h ...byte) (out []types2.PeerID) { + for i := range h { + hash := [64]byte{h[i]} + out = append(out, gointerfaces.ConvertHashToH512(hash)) + } + return out +} + +func TestSendTxPropagate(t *testing.T) { + ctx, cancelFn := context.WithCancel(context.Background()) + defer cancelFn() + t.Run("few remote byHash", func(t *testing.T) { + ctrl := gomock.NewController(t) + sentryServer := sentry.NewMockSentryServer(ctrl) + requests := make([]*sentry.SendMessageToRandomPeersRequest, 0) + + sentryServer.EXPECT(). + SendMessageToRandomPeers(gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, r *sentry.SendMessageToRandomPeersRequest) (*sentry.SentPeers, error) { + requests = append(requests, r) + return nil, nil + }).Times(1) + + sentryServer.EXPECT().SendMessageToAll(gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, r *sentry.OutboundMessageData) (*sentry.SentPeers, error) { + return nil, nil + }).Times(1) + + m := txpool.NewMockSentry(ctx, sentryServer) + send := NewSend(ctx, []direct.SentryClient{direct.NewSentryClientDirect(direct.ETH68, m)}, nil) + send.BroadcastPooledTxs(testRlps(2)) + send.AnnouncePooledTxs([]byte{0, 1}, []uint32{10, 15}, toHashes(1, 42)) + + require.Equal(t, 1, len(requests)) + + txsMessage := requests[0].Data + assert.Equal(t, sentry.MessageId_TRANSACTIONS_66, txsMessage.Id) + require.True(t, len(txsMessage.Data) > 0) + }) + + t.Run("much remote byHash", func(t *testing.T) { + ctrl := gomock.NewController(t) + sentryServer := sentry.NewMockSentryServer(ctrl) + requests := make([]*sentry.SendMessageToRandomPeersRequest, 0) + + sentryServer.EXPECT(). + SendMessageToRandomPeers(gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, r *sentry.SendMessageToRandomPeersRequest) (*sentry.SentPeers, error) { + requests = append(requests, r) + return nil, nil + }).Times(1) + + m := txpool.NewMockSentry(ctx, sentryServer) + send := NewSend(ctx, []direct.SentryClient{direct.NewSentryClientDirect(direct.ETH68, m)}, nil) + list := make(types2.Hashes, p2pTxPacketLimit*3) + for i := 0; i < len(list); i += 32 { + b := []byte(fmt.Sprintf("%x", i)) + copy(list[i:i+32], b) + } + + sentryServer.EXPECT().SendMessageToAll(gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, r *sentry.OutboundMessageData) (*sentry.SentPeers, error) { + return nil, nil + }).Times(1) + + send.BroadcastPooledTxs(testRlps(len(list) / 32)) + send.AnnouncePooledTxs([]byte{0, 1, 2}, []uint32{10, 12, 14}, list) + + require.Equal(t, 1, len(requests)) + + txsMessage := requests[0].Data + require.Equal(t, sentry.MessageId_TRANSACTIONS_66, txsMessage.Id) + require.True(t, len(txsMessage.Data) > 0) + }) + + t.Run("few local byHash", func(t *testing.T) { + ctrl := gomock.NewController(t) + sentryServer := sentry.NewMockSentryServer(ctrl) + requests := make([]*sentry.SendMessageToRandomPeersRequest, 0) + + sentryServer.EXPECT(). + SendMessageToRandomPeers(gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, r *sentry.SendMessageToRandomPeersRequest) (*sentry.SentPeers, error) { + requests = append(requests, r) + return nil, nil + }).Times(1) + + sentryServer.EXPECT().SendMessageToAll(gomock.Any(), gomock.Any()). 
+ DoAndReturn(func(_ context.Context, r *sentry.OutboundMessageData) (*sentry.SentPeers, error) { + return nil, nil + }).Times(1) + + m := txpool.NewMockSentry(ctx, sentryServer) + send := NewSend(ctx, []direct.SentryClient{direct.NewSentryClientDirect(direct.ETH68, m)}, nil) + send.BroadcastPooledTxs(testRlps(2)) + send.AnnouncePooledTxs([]byte{0, 1}, []uint32{10, 15}, toHashes(1, 42)) + + require.Equal(t, 1, len(requests)) + + txsMessage := requests[0].Data + assert.Equal(t, sentry.MessageId_TRANSACTIONS_66, txsMessage.Id) + assert.True(t, len(txsMessage.Data) > 0) + }) + + t.Run("sync with new peer", func(t *testing.T) { + ctrl := gomock.NewController(t) + sentryServer := sentry.NewMockSentryServer(ctrl) + times := 3 + requests := make([]*sentry.SendMessageByIdRequest, 0, times) + + sentryServer.EXPECT(). + SendMessageById(gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, r *sentry.SendMessageByIdRequest) (*sentry.SentPeers, error) { + requests = append(requests, r) + return nil, nil + }). + Times(times) + + sentryServer.EXPECT().PeerById(gomock.Any(), gomock.Any()). + DoAndReturn( + func(_ context.Context, r *sentry.PeerByIdRequest) (*sentry.PeerByIdReply, error) { + return &sentry.PeerByIdReply{ + Peer: &types.PeerInfo{ + Id: r.PeerId.String(), + Caps: []string{"eth/68"}, + }}, nil + }).AnyTimes() + + m := txpool.NewMockSentry(ctx, sentryServer) + send := NewSend(ctx, []direct.SentryClient{direct.NewSentryClientDirect(direct.ETH68, m)}, nil) + expectPeers := toPeerIDs(1, 2, 42) + send.PropagatePooledTxsToPeersList(expectPeers, []byte{0, 1}, []uint32{10, 15}, toHashes(1, 42)) + + require.Equal(t, 3, len(requests)) + for i, req := range requests { + assert.Equal(t, expectPeers[i], types2.PeerID(req.PeerId)) + assert.Equal(t, sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_68, req.Data.Id) + assert.True(t, len(req.Data.Data) > 0) + } + }) +} From 29fd2da02238e4dd141950cf194c08d30bd5c54e Mon Sep 17 00:00:00 2001 From: Jerry Date: Wed, 13 Nov 2024 01:40:50 -0800 Subject: [PATCH 47/88] Remove redundant writes when a state object is reverted (#21) (#1450) * Remove redundant writes when a state object is reverted (#21) * Remove redundant writes when a state object is reverted * Change IsDirty to Transaction level We don't want a reverted transaction to show up in written trace because it was touched by a previous transaction. * Add storage read whenever there is a sstore This fixes an issue when a storage slot is * written but got reverted * never read by sLoad opcode When this happens, we still need to include the storage slot in the trace. 
* fix test --- core/state/intra_block_state.go | 5 +++++ core/vm/evmtypes/evmtypes.go | 1 + core/vm/instructions_zkevm_test.go | 2 ++ eth/tracers/native/zero.go | 3 ++- 4 files changed, 10 insertions(+), 1 deletion(-) diff --git a/core/state/intra_block_state.go b/core/state/intra_block_state.go index 5e3e5877269..b082b364351 100644 --- a/core/state/intra_block_state.go +++ b/core/state/intra_block_state.go @@ -430,6 +430,11 @@ func (sdb *IntraBlockState) SeenAccount(addr libcommon.Address) bool { return ok } +func (sdb *IntraBlockState) IsDirtyJournal(addr libcommon.Address) bool { + _, ok := sdb.journal.dirties[addr] + return ok +} + func (sdb *IntraBlockState) HasLiveState(addr libcommon.Address, key *libcommon.Hash) bool { if stateObject := sdb.stateObjects[addr]; stateObject != nil { if _, ok := stateObject.originStorage[*key]; ok { diff --git a/core/vm/evmtypes/evmtypes.go b/core/vm/evmtypes/evmtypes.go index 4f8570f84b0..dcde6ab4e86 100644 --- a/core/vm/evmtypes/evmtypes.go +++ b/core/vm/evmtypes/evmtypes.go @@ -84,6 +84,7 @@ type IntraBlockState interface { SetState(common.Address, *common.Hash, uint256.Int) HasLiveAccount(addr common.Address) bool SeenAccount(addr common.Address) bool + IsDirtyJournal(addr common.Address) bool HasLiveState(addr common.Address, key *common.Hash) bool GetTransientState(addr common.Address, key common.Hash) uint256.Int diff --git a/core/vm/instructions_zkevm_test.go b/core/vm/instructions_zkevm_test.go index c2398b68c95..a63038c321b 100644 --- a/core/vm/instructions_zkevm_test.go +++ b/core/vm/instructions_zkevm_test.go @@ -211,3 +211,5 @@ func (ibs TestIntraBlockState) Prepare(rules *chain.Rules, sender, coinbase comm func (ibs TestIntraBlockState) Selfdestruct6780(common.Address) {} func (ibs TestIntraBlockState) SetDisableBalanceInc(disable bool) {} + +func (ibs TestIntraBlockState) IsDirtyJournal(addr common.Address) bool { return false } diff --git a/eth/tracers/native/zero.go b/eth/tracers/native/zero.go index 65c32c30069..3593a38eea1 100644 --- a/eth/tracers/native/zero.go +++ b/eth/tracers/native/zero.go @@ -220,7 +220,7 @@ func (t *zeroTracer) CaptureTxEnd(restGas uint64) { trace.StorageRead = nil } - if len(trace.StorageWritten) == 0 || !hasLiveAccount { + if len(trace.StorageWritten) == 0 || !hasLiveAccount || !t.env.IntraBlockState().IsDirtyJournal(addr) { trace.StorageWritten = nil } else { // A slot write could be reverted if the transaction is reverted. We will need to read the value from the statedb again to get the correct value. 
@@ -379,6 +379,7 @@ func (t *zeroTracer) addSLOADToAccount(addr libcommon.Address, key libcommon.Has func (t *zeroTracer) addSSTOREToAccount(addr libcommon.Address, key libcommon.Hash, value *uint256.Int) { t.tx.Traces[addr].StorageWritten[key] = value + t.tx.Traces[addr].StorageReadMap[key] = struct{}{} t.addOpCodeToAccount(addr, vm.SSTORE) } From ab0d2787e0d3cc05ae04861684660ee9a83ddb42 Mon Sep 17 00:00:00 2001 From: Ji Hwan KIM <125336262+jhkimqd@users.noreply.github.com> Date: Wed, 13 Nov 2024 18:56:48 +0900 Subject: [PATCH 48/88] test: fix tests for zkevm rollup address + rollup manager address calls (#1452) Signed-off-by: Ji Hwan Co-authored-by: Valentin Staykov <79150443+V-Staykov@users.noreply.github.com> --- turbo/jsonrpc/zkevm_api_test.go | 88 +++++++++++++++++++++++---------- 1 file changed, 62 insertions(+), 26 deletions(-) diff --git a/turbo/jsonrpc/zkevm_api_test.go b/turbo/jsonrpc/zkevm_api_test.go index 73d215d8190..c9cda1e73f8 100644 --- a/turbo/jsonrpc/zkevm_api_test.go +++ b/turbo/jsonrpc/zkevm_api_test.go @@ -1452,37 +1452,73 @@ func TestGetForks(t *testing.T) { func TestGetRollupAddress(t *testing.T) { assert := assert.New(t) - // Init new ZkConfig - cfgZk := ethconfig.DefaultZkConfig - assert.NotNil(cfgZk) - - // Check rollup address of default ZkConfig - assert.Equal(cfgZk.AddressZkevm, common.HexToAddress("0x0")) - - // Modify ZkConfig - cfgZk.AddressZkevm = common.HexToAddress("0x1") - assert.Equal(cfgZk.AddressZkevm, common.HexToAddress("0x1")) - cfgZk.AddressZkevm = common.HexToAddress("0x9f77a1fB020Bf0980b75828e3fbdAB13A1D7824A") - assert.Equal(cfgZk.AddressZkevm, common.HexToAddress("0x9f77a1fB020Bf0980b75828e3fbdAB13A1D7824A")) - cfgZk.AddressZkevm = common.HexToAddress("0x5F5221e63CC430C00E65cb9D85066f710650faa9") - assert.Equal(cfgZk.AddressZkevm, common.HexToAddress("0x5F5221e63CC430C00E65cb9D85066f710650faa9")) + ////////////// + contractBackend := backends.NewTestSimulatedBackendWithConfig(t, gspec.Alloc, gspec.Config, gspec.GasLimit) + defer contractBackend.Close() + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + contractBackend.Commit() + /////////// + + db := contractBackend.DB() + agg := contractBackend.Agg() + + baseApi := NewBaseApi(nil, stateCache, contractBackend.BlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout, contractBackend.Engine(), datadir.New(t.TempDir())) + ethImpl := NewEthAPI(baseApi, db, nil, nil, nil, 5000000, 100_000, 100_000, ðconfig.Defaults, false, 100, 100, log.New()) + var l1Syncer *syncer.L1Syncer + zkEvmImpl := NewZkEvmAPI(ethImpl, db, 100_000, ðconfig.Defaults, l1Syncer, "", nil) + + // Call the GetRollupAddress method and check that the result matches the default value. + var result common.Address + rollupAddress, err := zkEvmImpl.GetRollupAddress(ctx) + assert.NoError(err) + + err = json.Unmarshal(rollupAddress, &result) + assert.NoError(err) + assert.Equal(result, common.HexToAddress("0x0")) + + // Modify the ZkConfig and retry calling the method. 
+ zkEvmImpl.config.AddressZkevm = common.HexToAddress("0x1") + rollupAddress, err = zkEvmImpl.GetRollupAddress(ctx) + assert.NoError(err) + + err = json.Unmarshal(rollupAddress, &result) + assert.NoError(err) + assert.Equal(result, common.HexToAddress("0x1")) } func TestGetRollupManagerAddress(t *testing.T) { assert := assert.New(t) - // Init new ZkConfig - cfgZk := ethconfig.DefaultZkConfig - assert.NotNil(cfgZk) + ////////////// + contractBackend := backends.NewTestSimulatedBackendWithConfig(t, gspec.Alloc, gspec.Config, gspec.GasLimit) + defer contractBackend.Close() + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + contractBackend.Commit() + /////////// - // Check rollup manager address of default ZkConfig - assert.Equal(cfgZk.AddressRollup, common.HexToAddress("0x0")) + db := contractBackend.DB() + agg := contractBackend.Agg() - // Modify ZkConfig - cfgZk.AddressRollup = common.HexToAddress("0x1") - assert.Equal(cfgZk.AddressRollup, common.HexToAddress("0x1")) - cfgZk.AddressRollup = common.HexToAddress("0x9f77a1fB020Bf0980b75828e3fbdAB13A1D7824A") - assert.Equal(cfgZk.AddressRollup, common.HexToAddress("0x9f77a1fB020Bf0980b75828e3fbdAB13A1D7824A")) - cfgZk.AddressRollup = common.HexToAddress("0x5F5221e63CC430C00E65cb9D85066f710650faa9") - assert.Equal(cfgZk.AddressRollup, common.HexToAddress("0x5F5221e63CC430C00E65cb9D85066f710650faa9")) + baseApi := NewBaseApi(nil, stateCache, contractBackend.BlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout, contractBackend.Engine(), datadir.New(t.TempDir())) + ethImpl := NewEthAPI(baseApi, db, nil, nil, nil, 5000000, 100_000, 100_000, ðconfig.Defaults, false, 100, 100, log.New()) + var l1Syncer *syncer.L1Syncer + zkEvmImpl := NewZkEvmAPI(ethImpl, db, 100_000, ðconfig.Defaults, l1Syncer, "", nil) + + // Call the GetRollupManagerAddress method and check that the result matches the default value. + var result common.Address + rollupManagerAddress, err := zkEvmImpl.GetRollupManagerAddress(ctx) + assert.NoError(err) + + err = json.Unmarshal(rollupManagerAddress, &result) + assert.NoError(err) + assert.Equal(result, common.HexToAddress("0x0")) + + // Modify the ZkConfig and retry calling the method. 
+ zkEvmImpl.config.AddressRollup = common.HexToAddress("0x1") + rollupManagerAddress, err = zkEvmImpl.GetRollupManagerAddress(ctx) + assert.NoError(err) + + err = json.Unmarshal(rollupManagerAddress, &result) + assert.NoError(err) + assert.Equal(result, common.HexToAddress("0x1")) } From 06e93d49704726977297cf1d5055f43185e313bd Mon Sep 17 00:00:00 2001 From: Valentin Staykov <79150443+V-Staykov@users.noreply.github.com> Date: Wed, 13 Nov 2024 12:07:08 +0200 Subject: [PATCH 49/88] fix: defer rollback on new tx (#1453) --- zk/stages/stage_batches.go | 1 + 1 file changed, 1 insertion(+) diff --git a/zk/stages/stage_batches.go b/zk/stages/stage_batches.go index 0fbb448c16b..e55ad91f707 100644 --- a/zk/stages/stage_batches.go +++ b/zk/stages/stage_batches.go @@ -315,6 +315,7 @@ func SpawnStageBatches( if tx, err = cfg.db.BeginRw(ctx); err != nil { return fmt.Errorf("failed to open tx, %w", err) } + defer tx.Rollback() hermezDb.SetNewTx(tx) eriDb.SetNewTx(tx) batchProcessor.SetNewTx(tx) From 912378d526d77148b709eefdbba325e65aba33d6 Mon Sep 17 00:00:00 2001 From: tclemos Date: Wed, 13 Nov 2024 09:06:55 -0300 Subject: [PATCH 50/88] add assertion to data stream catch up test --- zk/stages/stage_data_stream_catch_up_test.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/zk/stages/stage_data_stream_catch_up_test.go b/zk/stages/stage_data_stream_catch_up_test.go index 85f8b57b345..00b1fb880d6 100644 --- a/zk/stages/stage_data_stream_catch_up_test.go +++ b/zk/stages/stage_data_stream_catch_up_test.go @@ -18,6 +18,7 @@ import ( "github.com/ledgerwatch/erigon/zk/hermez_db" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" + "gotest.tools/v3/assert" ) func TestSpawnStageDataStreamCatchup(t *testing.T) { @@ -95,5 +96,8 @@ func TestSpawnStageDataStreamCatchup(t *testing.T) { require.NoError(t, err) // Assert - + // check SaveStageProgress + stageProgress, err := stages.GetStageProgress(tx1, stages.DataStream) + require.NoError(t, err) + assert.Equal(t, uint64(20), stageProgress) } From 70f9b98f2ba052696ac082b82459e60d306b3a62 Mon Sep 17 00:00:00 2001 From: Ji Hwan KIM <125336262+jhkimqd@users.noreply.github.com> Date: Wed, 13 Nov 2024 21:58:28 +0900 Subject: [PATCH 51/88] test: add test file for fetch.go (#1401) * test: add test file for fetch.go Signed-off-by: Ji Hwan * chore: lint Signed-off-by: Ji Hwan * chore: cleanup Signed-off-by: Ji Hwan * docs: add docs in test code Signed-off-by: Ji Hwan * test: generate mock for pool interface Signed-off-by: Ji Hwan * test: add test for TxPool's OnNewBlock function Signed-off-by: Ji Hwan --------- Signed-off-by: Ji Hwan Co-authored-by: Valentin Staykov <79150443+V-Staykov@users.noreply.github.com> --- zk/txpool/fetch_test.go | 96 ++++++++++++++++++++++++++ zk/txpool/pool_mock.go | 149 ++++++++++++++++++++++++++++++++++++++++ zk/txpool/pool_test.go | 75 ++++++++++++++++++++ 3 files changed, 320 insertions(+) create mode 100644 zk/txpool/fetch_test.go create mode 100644 zk/txpool/pool_mock.go diff --git a/zk/txpool/fetch_test.go b/zk/txpool/fetch_test.go new file mode 100644 index 00000000000..c912ec6d0b6 --- /dev/null +++ b/zk/txpool/fetch_test.go @@ -0,0 +1,96 @@ +package txpool + +import ( + "context" + "encoding/hex" + "fmt" + "sync" + "testing" + "time" + + "github.com/ledgerwatch/erigon-lib/common/datadir" + "github.com/ledgerwatch/erigon-lib/common/u256" + "github.com/ledgerwatch/erigon-lib/direct" + "github.com/ledgerwatch/erigon-lib/gointerfaces" + "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" + 
"github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" + "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon-lib/kv/temporal/temporaltest" + "github.com/ledgerwatch/erigon-lib/txpool" + "github.com/ledgerwatch/erigon-lib/txpool/txpoolcfg" + types "github.com/ledgerwatch/erigon-lib/types" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +var peerID types.PeerID = gointerfaces.ConvertHashToH512([64]byte{0x12, 0x34, 0x50}) // "12345" + +// DecodeHex converts a hex string to a byte array. +func decodeHex(in string) []byte { + payload, err := hex.DecodeString(in) + if err != nil { + panic(err) + } + return payload +} + +func TestFetch(t *testing.T) { + assert, require := assert.New(t), require.New(t) + ch := make(chan types.Announcements, 100) + _, coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + defer coreDB.Close() + db := memdb.NewTestPoolDB(t) + path := fmt.Sprintf("/tmp/db-test-%v", time.Now().UTC().Format(time.RFC3339Nano)) + txPoolDB := newTestTxPoolDB(t, path) + defer txPoolDB.Close() + aclsDB := newTestACLDB(t, path) + defer aclsDB.Close() + + // Check if the dbs are created. + require.NotNil(t, db) + require.NotNil(t, txPoolDB) + require.NotNil(t, aclsDB) + + cfg := txpoolcfg.DefaultConfig + ethCfg := ðconfig.Defaults + sendersCache := kvcache.New(kvcache.DefaultCoherentConfig) + pool, err := New(ch, coreDB, cfg, ethCfg, sendersCache, *u256.N1, nil, nil, aclsDB) + assert.NoError(err) + require.True(pool != nil) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ctrl := gomock.NewController(t) + remoteKvClient := remote.NewMockKVClient(ctrl) + sentryServer := sentry.NewMockSentryServer(ctrl) + + m := txpool.NewMockSentry(ctx, sentryServer) + sentryClient := direct.NewSentryClientDirect(direct.ETH66, m) + fetch := NewFetch(ctx, []direct.SentryClient{sentryClient}, pool, remoteKvClient, nil, nil, *u256.N1) + var wg sync.WaitGroup + fetch.SetWaitGroup(&wg) + // The corresponding WaitGroup.Done() will be called by the Sentry. + // First will be called by (txpool.MockSentry).Messages + // Second will be called by (txpool.MockSentry).PeerEvents + m.StreamWg.Add(2) + fetch.ConnectSentries() + m.StreamWg.Wait() + + // Send one transaction id with ETH66 protocol. + // The corresponding WaitGroup.Done() will be called by the fetch.receiveMessage() + wg.Add(1) + errs := m.Send(&sentry.InboundMessage{ + Id: sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66, + Data: decodeHex("e1a0595e27a835cd79729ff1eeacec3120eeb6ed1464a04ec727aaca734ead961328"), + PeerId: peerID, + }) + for i, err := range errs { + if err != nil { + t.Errorf("sending new pool txn hashes 66 (%d): %v", i, err) + } + } + wg.Wait() +} diff --git a/zk/txpool/pool_mock.go b/zk/txpool/pool_mock.go new file mode 100644 index 00000000000..28d9217a38d --- /dev/null +++ b/zk/txpool/pool_mock.go @@ -0,0 +1,149 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: pool.go + +// Package txpool is a generated GoMock package. +package txpool + +import ( + context "context" + reflect "reflect" + + gomock "go.uber.org/mock/gomock" + remote "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" + kv "github.com/ledgerwatch/erigon-lib/kv" + types "github.com/ledgerwatch/erigon-lib/types" +) + +// MockPool is a mock of Pool interface. 
+type MockPool struct { + ctrl *gomock.Controller + recorder *MockPoolMockRecorder +} + +// MockPoolMockRecorder is the mock recorder for MockPool. +type MockPoolMockRecorder struct { + mock *MockPool +} + +// NewMockPool creates a new mock instance. +func NewMockPool(ctrl *gomock.Controller) *MockPool { + mock := &MockPool{ctrl: ctrl} + mock.recorder = &MockPoolMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPool) EXPECT() *MockPoolMockRecorder { + return m.recorder +} + +// AddLocalTxs mocks base method. +func (m *MockPool) AddLocalTxs(ctx context.Context, newTxs types.TxSlots, tx kv.Tx) ([]DiscardReason, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddLocalTxs", ctx, newTxs, tx) + ret0, _ := ret[0].([]DiscardReason) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AddLocalTxs indicates an expected call of AddLocalTxs. +func (mr *MockPoolMockRecorder) AddLocalTxs(ctx, newTxs, tx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddLocalTxs", reflect.TypeOf((*MockPool)(nil).AddLocalTxs), ctx, newTxs, tx) +} + +// AddNewGoodPeer mocks base method. +func (m *MockPool) AddNewGoodPeer(peerID types.PeerID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddNewGoodPeer", peerID) +} + +// AddNewGoodPeer indicates an expected call of AddNewGoodPeer. +func (mr *MockPoolMockRecorder) AddNewGoodPeer(peerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddNewGoodPeer", reflect.TypeOf((*MockPool)(nil).AddNewGoodPeer), peerID) +} + +// AddRemoteTxs mocks base method. +func (m *MockPool) AddRemoteTxs(ctx context.Context, newTxs types.TxSlots) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddRemoteTxs", ctx, newTxs) +} + +// AddRemoteTxs indicates an expected call of AddRemoteTxs. +func (mr *MockPoolMockRecorder) AddRemoteTxs(ctx, newTxs interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRemoteTxs", reflect.TypeOf((*MockPool)(nil).AddRemoteTxs), ctx, newTxs) +} + +// GetRlp mocks base method. +func (m *MockPool) GetRlp(tx kv.Tx, hash []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRlp", tx, hash) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRlp indicates an expected call of GetRlp. +func (mr *MockPoolMockRecorder) GetRlp(tx, hash interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRlp", reflect.TypeOf((*MockPool)(nil).GetRlp), tx, hash) +} + +// IdHashKnown mocks base method. +func (m *MockPool) IdHashKnown(tx kv.Tx, hash []byte) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IdHashKnown", tx, hash) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IdHashKnown indicates an expected call of IdHashKnown. +func (mr *MockPoolMockRecorder) IdHashKnown(tx, hash interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IdHashKnown", reflect.TypeOf((*MockPool)(nil).IdHashKnown), tx, hash) +} + +// OnNewBlock mocks base method. 
+func (m *MockPool) OnNewBlock(ctx context.Context, stateChanges *remote.StateChangeBatch, unwindTxs, minedTxs types.TxSlots, tx kv.Tx) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "OnNewBlock", ctx, stateChanges, unwindTxs, minedTxs, tx) + ret0, _ := ret[0].(error) + return ret0 +} + +// OnNewBlock indicates an expected call of OnNewBlock. +func (mr *MockPoolMockRecorder) OnNewBlock(ctx, stateChanges, unwindTxs, minedTxs, tx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnNewBlock", reflect.TypeOf((*MockPool)(nil).OnNewBlock), ctx, stateChanges, unwindTxs, minedTxs, tx) +} + +// Started mocks base method. +func (m *MockPool) Started() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Started") + ret0, _ := ret[0].(bool) + return ret0 +} + +// Started indicates an expected call of Started. +func (mr *MockPoolMockRecorder) Started() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Started", reflect.TypeOf((*MockPool)(nil).Started)) +} + +// ValidateSerializedTxn mocks base method. +func (m *MockPool) ValidateSerializedTxn(serializedTxn []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ValidateSerializedTxn", serializedTxn) + ret0, _ := ret[0].(error) + return ret0 +} + +// ValidateSerializedTxn indicates an expected call of ValidateSerializedTxn. +func (mr *MockPoolMockRecorder) ValidateSerializedTxn(serializedTxn interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateSerializedTxn", reflect.TypeOf((*MockPool)(nil).ValidateSerializedTxn), serializedTxn) +} diff --git a/zk/txpool/pool_test.go b/zk/txpool/pool_test.go index ef80af15139..1df01735e3e 100644 --- a/zk/txpool/pool_test.go +++ b/zk/txpool/pool_test.go @@ -3,6 +3,7 @@ package txpool import ( "context" "fmt" + "io" "testing" "time" @@ -12,6 +13,7 @@ import ( "github.com/ledgerwatch/erigon-lib/common/u256" "github.com/ledgerwatch/erigon-lib/gointerfaces" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" + "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon-lib/kv/temporal/temporaltest" @@ -20,6 +22,8 @@ import ( "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + "google.golang.org/grpc" ) func TestNonceFromAddress(t *testing.T) { @@ -169,3 +173,74 @@ func TestNonceFromAddress(t *testing.T) { } } } + +func TestOnNewBlock(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + coreDB, db := memdb.NewTestDB(t), memdb.NewTestDB(t) + ctrl := gomock.NewController(t) + + stream := remote.NewMockKV_StateChangesClient(ctrl) + i := 0 + stream.EXPECT(). + Recv(). + DoAndReturn(func() (*remote.StateChangeBatch, error) { + if i > 0 { + return nil, io.EOF + } + i++ + return &remote.StateChangeBatch{ + StateVersionId: 1, + ChangeBatch: []*remote.StateChange{ + { + Txs: [][]byte{ + decodeHex(types.TxParseMainnetTests[0].PayloadStr), + decodeHex(types.TxParseMainnetTests[1].PayloadStr), + decodeHex(types.TxParseMainnetTests[2].PayloadStr), + }, + BlockHeight: 1, + BlockHash: gointerfaces.ConvertHashToH256([32]byte{}), + }, + }, + }, nil + }). + AnyTimes() + + stateChanges := remote.NewMockKVClient(ctrl) + stateChanges. + EXPECT(). + StateChanges(gomock.Any(), gomock.Any(), gomock.Any()). 
+ DoAndReturn(func(_ context.Context, _ *remote.StateChangeRequest, _ ...grpc.CallOption) (remote.KV_StateChangesClient, error) { + return stream, nil + }) + + pool := NewMockPool(ctrl) + pool.EXPECT(). + ValidateSerializedTxn(gomock.Any()). + DoAndReturn(func(_ []byte) error { + return nil + }). + Times(3) + + var minedTxs types.TxSlots + pool.EXPECT(). + OnNewBlock(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn( + func( + _ context.Context, + _ *remote.StateChangeBatch, + _ types.TxSlots, + minedTxsArg types.TxSlots, + _ kv.Tx, + ) error { + minedTxs = minedTxsArg + return nil + }, + ). + Times(1) + + fetch := NewFetch(ctx, nil, pool, stateChanges, coreDB, db, *u256.N1) + err := fetch.handleStateChanges(ctx, stateChanges) + assert.ErrorIs(t, io.EOF, err) + assert.Equal(t, 3, len(minedTxs.Txs)) +} From dc72b56146f507c3fa2f5236e1e11d82912d3dd3 Mon Sep 17 00:00:00 2001 From: Ji Hwan Date: Thu, 14 Nov 2024 10:11:23 +0900 Subject: [PATCH 52/88] fix: follow mock generate convention being used in makefile to avoid pool_mock.go from being deleted Signed-off-by: Ji Hwan --- zk/txpool/pool.go | 2 + zk/txpool/pool_mock.go | 261 ++++++++++++++++++++++++++++++++++++----- 2 files changed, 231 insertions(+), 32 deletions(-) diff --git a/zk/txpool/pool.go b/zk/txpool/pool.go index d7662eb980c..e019e13a139 100644 --- a/zk/txpool/pool.go +++ b/zk/txpool/pool.go @@ -76,6 +76,8 @@ var ( // Pool is interface for the transaction pool // This interface exists for the convenience of testing, and not yet because // there are multiple implementations +// +//go:generate mockgen -typed=true -destination=./pool_mock.go -package=txpool . Pool type Pool interface { ValidateSerializedTxn(serializedTxn []byte) error diff --git a/zk/txpool/pool_mock.go b/zk/txpool/pool_mock.go index 28d9217a38d..10d4aab2fc4 100644 --- a/zk/txpool/pool_mock.go +++ b/zk/txpool/pool_mock.go @@ -1,5 +1,10 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: pool.go +// Source: github.com/ledgerwatch/erigon/zk/txpool (interfaces: Pool) +// +// Generated by this command: +// +// mockgen -typed=true -destination=./pool_mock.go -package=txpool . Pool +// // Package txpool is a generated GoMock package. package txpool @@ -8,10 +13,10 @@ import ( context "context" reflect "reflect" - gomock "go.uber.org/mock/gomock" remote "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" kv "github.com/ledgerwatch/erigon-lib/kv" types "github.com/ledgerwatch/erigon-lib/types" + gomock "go.uber.org/mock/gomock" ) // MockPool is a mock of Pool interface. @@ -38,86 +43,230 @@ func (m *MockPool) EXPECT() *MockPoolMockRecorder { } // AddLocalTxs mocks base method. -func (m *MockPool) AddLocalTxs(ctx context.Context, newTxs types.TxSlots, tx kv.Tx) ([]DiscardReason, error) { +func (m *MockPool) AddLocalTxs(arg0 context.Context, arg1 types.TxSlots, arg2 kv.Tx) ([]DiscardReason, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddLocalTxs", ctx, newTxs, tx) + ret := m.ctrl.Call(m, "AddLocalTxs", arg0, arg1, arg2) ret0, _ := ret[0].([]DiscardReason) ret1, _ := ret[1].(error) return ret0, ret1 } // AddLocalTxs indicates an expected call of AddLocalTxs. 
-func (mr *MockPoolMockRecorder) AddLocalTxs(ctx, newTxs, tx interface{}) *gomock.Call { +func (mr *MockPoolMockRecorder) AddLocalTxs(arg0, arg1, arg2 any) *MockPoolAddLocalTxsCall { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddLocalTxs", reflect.TypeOf((*MockPool)(nil).AddLocalTxs), ctx, newTxs, tx) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddLocalTxs", reflect.TypeOf((*MockPool)(nil).AddLocalTxs), arg0, arg1, arg2) + return &MockPoolAddLocalTxsCall{Call: call} +} + +// MockPoolAddLocalTxsCall wrap *gomock.Call +type MockPoolAddLocalTxsCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockPoolAddLocalTxsCall) Return(arg0 []DiscardReason, arg1 error) *MockPoolAddLocalTxsCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockPoolAddLocalTxsCall) Do(f func(context.Context, types.TxSlots, kv.Tx) ([]DiscardReason, error)) *MockPoolAddLocalTxsCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockPoolAddLocalTxsCall) DoAndReturn(f func(context.Context, types.TxSlots, kv.Tx) ([]DiscardReason, error)) *MockPoolAddLocalTxsCall { + c.Call = c.Call.DoAndReturn(f) + return c } // AddNewGoodPeer mocks base method. -func (m *MockPool) AddNewGoodPeer(peerID types.PeerID) { +func (m *MockPool) AddNewGoodPeer(arg0 types.PeerID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "AddNewGoodPeer", peerID) + m.ctrl.Call(m, "AddNewGoodPeer", arg0) } // AddNewGoodPeer indicates an expected call of AddNewGoodPeer. -func (mr *MockPoolMockRecorder) AddNewGoodPeer(peerID interface{}) *gomock.Call { +func (mr *MockPoolMockRecorder) AddNewGoodPeer(arg0 any) *MockPoolAddNewGoodPeerCall { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddNewGoodPeer", reflect.TypeOf((*MockPool)(nil).AddNewGoodPeer), peerID) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddNewGoodPeer", reflect.TypeOf((*MockPool)(nil).AddNewGoodPeer), arg0) + return &MockPoolAddNewGoodPeerCall{Call: call} +} + +// MockPoolAddNewGoodPeerCall wrap *gomock.Call +type MockPoolAddNewGoodPeerCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockPoolAddNewGoodPeerCall) Return() *MockPoolAddNewGoodPeerCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockPoolAddNewGoodPeerCall) Do(f func(types.PeerID)) *MockPoolAddNewGoodPeerCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockPoolAddNewGoodPeerCall) DoAndReturn(f func(types.PeerID)) *MockPoolAddNewGoodPeerCall { + c.Call = c.Call.DoAndReturn(f) + return c } // AddRemoteTxs mocks base method. -func (m *MockPool) AddRemoteTxs(ctx context.Context, newTxs types.TxSlots) { +func (m *MockPool) AddRemoteTxs(arg0 context.Context, arg1 types.TxSlots) { m.ctrl.T.Helper() - m.ctrl.Call(m, "AddRemoteTxs", ctx, newTxs) + m.ctrl.Call(m, "AddRemoteTxs", arg0, arg1) } // AddRemoteTxs indicates an expected call of AddRemoteTxs. 
-func (mr *MockPoolMockRecorder) AddRemoteTxs(ctx, newTxs interface{}) *gomock.Call { +func (mr *MockPoolMockRecorder) AddRemoteTxs(arg0, arg1 any) *MockPoolAddRemoteTxsCall { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRemoteTxs", reflect.TypeOf((*MockPool)(nil).AddRemoteTxs), ctx, newTxs) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRemoteTxs", reflect.TypeOf((*MockPool)(nil).AddRemoteTxs), arg0, arg1) + return &MockPoolAddRemoteTxsCall{Call: call} +} + +// MockPoolAddRemoteTxsCall wrap *gomock.Call +type MockPoolAddRemoteTxsCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockPoolAddRemoteTxsCall) Return() *MockPoolAddRemoteTxsCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockPoolAddRemoteTxsCall) Do(f func(context.Context, types.TxSlots)) *MockPoolAddRemoteTxsCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockPoolAddRemoteTxsCall) DoAndReturn(f func(context.Context, types.TxSlots)) *MockPoolAddRemoteTxsCall { + c.Call = c.Call.DoAndReturn(f) + return c } // GetRlp mocks base method. -func (m *MockPool) GetRlp(tx kv.Tx, hash []byte) ([]byte, error) { +func (m *MockPool) GetRlp(arg0 kv.Tx, arg1 []byte) ([]byte, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetRlp", tx, hash) + ret := m.ctrl.Call(m, "GetRlp", arg0, arg1) ret0, _ := ret[0].([]byte) ret1, _ := ret[1].(error) return ret0, ret1 } // GetRlp indicates an expected call of GetRlp. -func (mr *MockPoolMockRecorder) GetRlp(tx, hash interface{}) *gomock.Call { +func (mr *MockPoolMockRecorder) GetRlp(arg0, arg1 any) *MockPoolGetRlpCall { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRlp", reflect.TypeOf((*MockPool)(nil).GetRlp), tx, hash) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRlp", reflect.TypeOf((*MockPool)(nil).GetRlp), arg0, arg1) + return &MockPoolGetRlpCall{Call: call} +} + +// MockPoolGetRlpCall wrap *gomock.Call +type MockPoolGetRlpCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockPoolGetRlpCall) Return(arg0 []byte, arg1 error) *MockPoolGetRlpCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockPoolGetRlpCall) Do(f func(kv.Tx, []byte) ([]byte, error)) *MockPoolGetRlpCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockPoolGetRlpCall) DoAndReturn(f func(kv.Tx, []byte) ([]byte, error)) *MockPoolGetRlpCall { + c.Call = c.Call.DoAndReturn(f) + return c } // IdHashKnown mocks base method. -func (m *MockPool) IdHashKnown(tx kv.Tx, hash []byte) (bool, error) { +func (m *MockPool) IdHashKnown(arg0 kv.Tx, arg1 []byte) (bool, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IdHashKnown", tx, hash) + ret := m.ctrl.Call(m, "IdHashKnown", arg0, arg1) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(error) return ret0, ret1 } // IdHashKnown indicates an expected call of IdHashKnown. 
-func (mr *MockPoolMockRecorder) IdHashKnown(tx, hash interface{}) *gomock.Call { +func (mr *MockPoolMockRecorder) IdHashKnown(arg0, arg1 any) *MockPoolIdHashKnownCall { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IdHashKnown", reflect.TypeOf((*MockPool)(nil).IdHashKnown), tx, hash) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IdHashKnown", reflect.TypeOf((*MockPool)(nil).IdHashKnown), arg0, arg1) + return &MockPoolIdHashKnownCall{Call: call} +} + +// MockPoolIdHashKnownCall wrap *gomock.Call +type MockPoolIdHashKnownCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockPoolIdHashKnownCall) Return(arg0 bool, arg1 error) *MockPoolIdHashKnownCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockPoolIdHashKnownCall) Do(f func(kv.Tx, []byte) (bool, error)) *MockPoolIdHashKnownCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockPoolIdHashKnownCall) DoAndReturn(f func(kv.Tx, []byte) (bool, error)) *MockPoolIdHashKnownCall { + c.Call = c.Call.DoAndReturn(f) + return c } // OnNewBlock mocks base method. -func (m *MockPool) OnNewBlock(ctx context.Context, stateChanges *remote.StateChangeBatch, unwindTxs, minedTxs types.TxSlots, tx kv.Tx) error { +func (m *MockPool) OnNewBlock(arg0 context.Context, arg1 *remote.StateChangeBatch, arg2, arg3 types.TxSlots, arg4 kv.Tx) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "OnNewBlock", ctx, stateChanges, unwindTxs, minedTxs, tx) + ret := m.ctrl.Call(m, "OnNewBlock", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].(error) return ret0 } // OnNewBlock indicates an expected call of OnNewBlock. -func (mr *MockPoolMockRecorder) OnNewBlock(ctx, stateChanges, unwindTxs, minedTxs, tx interface{}) *gomock.Call { +func (mr *MockPoolMockRecorder) OnNewBlock(arg0, arg1, arg2, arg3, arg4 any) *MockPoolOnNewBlockCall { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnNewBlock", reflect.TypeOf((*MockPool)(nil).OnNewBlock), ctx, stateChanges, unwindTxs, minedTxs, tx) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnNewBlock", reflect.TypeOf((*MockPool)(nil).OnNewBlock), arg0, arg1, arg2, arg3, arg4) + return &MockPoolOnNewBlockCall{Call: call} +} + +// MockPoolOnNewBlockCall wrap *gomock.Call +type MockPoolOnNewBlockCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockPoolOnNewBlockCall) Return(arg0 error) *MockPoolOnNewBlockCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockPoolOnNewBlockCall) Do(f func(context.Context, *remote.StateChangeBatch, types.TxSlots, types.TxSlots, kv.Tx) error) *MockPoolOnNewBlockCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockPoolOnNewBlockCall) DoAndReturn(f func(context.Context, *remote.StateChangeBatch, types.TxSlots, types.TxSlots, kv.Tx) error) *MockPoolOnNewBlockCall { + c.Call = c.Call.DoAndReturn(f) + return c } // Started mocks base method. @@ -129,21 +278,69 @@ func (m *MockPool) Started() bool { } // Started indicates an expected call of Started. 
-func (mr *MockPoolMockRecorder) Started() *gomock.Call { +func (mr *MockPoolMockRecorder) Started() *MockPoolStartedCall { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Started", reflect.TypeOf((*MockPool)(nil).Started)) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Started", reflect.TypeOf((*MockPool)(nil).Started)) + return &MockPoolStartedCall{Call: call} +} + +// MockPoolStartedCall wrap *gomock.Call +type MockPoolStartedCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockPoolStartedCall) Return(arg0 bool) *MockPoolStartedCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockPoolStartedCall) Do(f func() bool) *MockPoolStartedCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockPoolStartedCall) DoAndReturn(f func() bool) *MockPoolStartedCall { + c.Call = c.Call.DoAndReturn(f) + return c } // ValidateSerializedTxn mocks base method. -func (m *MockPool) ValidateSerializedTxn(serializedTxn []byte) error { +func (m *MockPool) ValidateSerializedTxn(arg0 []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidateSerializedTxn", serializedTxn) + ret := m.ctrl.Call(m, "ValidateSerializedTxn", arg0) ret0, _ := ret[0].(error) return ret0 } // ValidateSerializedTxn indicates an expected call of ValidateSerializedTxn. -func (mr *MockPoolMockRecorder) ValidateSerializedTxn(serializedTxn interface{}) *gomock.Call { +func (mr *MockPoolMockRecorder) ValidateSerializedTxn(arg0 any) *MockPoolValidateSerializedTxnCall { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateSerializedTxn", reflect.TypeOf((*MockPool)(nil).ValidateSerializedTxn), serializedTxn) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateSerializedTxn", reflect.TypeOf((*MockPool)(nil).ValidateSerializedTxn), arg0) + return &MockPoolValidateSerializedTxnCall{Call: call} +} + +// MockPoolValidateSerializedTxnCall wrap *gomock.Call +type MockPoolValidateSerializedTxnCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockPoolValidateSerializedTxnCall) Return(arg0 error) *MockPoolValidateSerializedTxnCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockPoolValidateSerializedTxnCall) Do(f func([]byte) error) *MockPoolValidateSerializedTxnCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockPoolValidateSerializedTxnCall) DoAndReturn(f func([]byte) error) *MockPoolValidateSerializedTxnCall { + c.Call = c.Call.DoAndReturn(f) + return c } From a82dcdf49656e622fa2f3df87b6a5132abb2cb8b Mon Sep 17 00:00:00 2001 From: Ji Hwan Date: Thu, 14 Nov 2024 15:19:01 +0900 Subject: [PATCH 53/88] test: add unit test for p256verifyzkevm Signed-off-by: Ji Hwan --- core/vm/contracts_zkevm_test.go | 49 ++++++++++++++++++++++++++++++++- 1 file changed, 48 insertions(+), 1 deletion(-) diff --git a/core/vm/contracts_zkevm_test.go b/core/vm/contracts_zkevm_test.go index e3a8d27d3c1..0d3b521ea6e 100644 --- a/core/vm/contracts_zkevm_test.go +++ b/core/vm/contracts_zkevm_test.go @@ -1,8 +1,13 @@ package vm import ( - "testing" + "bytes" + "fmt" "math/big" + "testing" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/common" ) var ( @@ -11,6 +16,12 @@ var ( big8194 = big.NewInt(0).Lsh(big.NewInt(1), 8194) ) +// allPrecompiles does not map to the actual set of 
precompiles, as it also contains +// repriced versions of precompiles at certain slots +var allPrecompilesZkevm = map[libcommon.Address]PrecompiledContract{ + libcommon.BytesToAddress([]byte{0x01, 0x00}): &p256Verify_zkevm{enabled: true}, +} + func Test_ModExpZkevm_Gas(t *testing.T) { modExp := bigModExp_zkevm{enabled: true, eip2565: true} @@ -67,3 +78,39 @@ func uint64ToDeterminedBytes(input *big.Int, length int) []byte { copy(result[length-len(bytes):], bytes) return result } + +func TestP256VerifyZkevm(t *testing.T) { + testJsonZkevm("p256Verify", "0x0000000000000000000000000000000000000100", t) +} + +func testJsonZkevm(name, addr string, t *testing.T) { + tests, err := loadJson(name) + if err != nil { + t.Fatal(err) + } + for _, test := range tests { + testPrecompiledZkevm(t, addr, test) + } +} + +func testPrecompiledZkevm(t *testing.T, addr string, test precompiledTest) { + p := allPrecompilesZkevm[libcommon.HexToAddress(addr)] + in := libcommon.Hex2Bytes(test.Input) + gas := p.RequiredGas(in) + t.Run(fmt.Sprintf("%s-Gas=%d", test.Name, gas), func(t *testing.T) { + t.Parallel() + if res, _, err := RunPrecompiledContract(p, in, gas); err != nil { + t.Error(err) + } else if common.Bytes2Hex(res) != test.Expected { + t.Errorf("Expected %v, got %v", test.Expected, common.Bytes2Hex(res)) + } + if expGas := test.Gas; expGas != gas { + t.Errorf("%v: gas wrong, expected %d, got %d", test.Name, expGas, gas) + } + // Verify that the precompile did not touch the input buffer + exp := libcommon.Hex2Bytes(test.Input) + if !bytes.Equal(in, exp) { + t.Errorf("Precompiled %v modified input data", addr) + } + }) +} From 55c9b565cc2504457c448b83ef326c7462483986 Mon Sep 17 00:00:00 2001 From: Ji Hwan Date: Thu, 14 Nov 2024 20:15:21 +0900 Subject: [PATCH 54/88] test: add test for bigmodexpzkevm Signed-off-by: Ji Hwan --- core/vm/contracts_zkevm_test.go | 98 ++++++++++++++++----------------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/core/vm/contracts_zkevm_test.go b/core/vm/contracts_zkevm_test.go index 0d3b521ea6e..dba8d446e4d 100644 --- a/core/vm/contracts_zkevm_test.go +++ b/core/vm/contracts_zkevm_test.go @@ -16,10 +16,52 @@ var ( big8194 = big.NewInt(0).Lsh(big.NewInt(1), 8194) ) -// allPrecompiles does not map to the actual set of precompiles, as it also contains -// repriced versions of precompiles at certain slots -var allPrecompilesZkevm = map[libcommon.Address]PrecompiledContract{ - libcommon.BytesToAddress([]byte{0x01, 0x00}): &p256Verify_zkevm{enabled: true}, +func uint64To32Bytes(input int) []byte { + bigInt := new(big.Int).SetUint64(uint64(input)) + bytes := bigInt.Bytes() + result := make([]byte, 32) + copy(result[32-len(bytes):], bytes) + return result +} + +func uint64ToDeterminedBytes(input *big.Int, length int) []byte { + bytes := input.Bytes() + result := make([]byte, length) + copy(result[length-len(bytes):], bytes) + return result +} + +// This relies on PrecompiledContractsForkID13Durian. 
+func testPrecompiledZkevm(t *testing.T, addr string, test precompiledTest) { + p := PrecompiledContractsForkID13Durian[libcommon.HexToAddress(addr)] + in := libcommon.Hex2Bytes(test.Input) + gas := p.RequiredGas(in) + t.Run(fmt.Sprintf("%s-Gas=%d", test.Name, gas), func(t *testing.T) { + t.Parallel() + if res, _, err := RunPrecompiledContract(p, in, gas); err != nil { + t.Error(err) + } else if common.Bytes2Hex(res) != test.Expected { + t.Errorf("Expected %v, got %v", test.Expected, common.Bytes2Hex(res)) + } + if expGas := test.Gas; expGas != gas { + t.Errorf("%v: gas wrong, expected %d, got %d", test.Name, expGas, gas) + } + // Verify that the precompile did not touch the input buffer + exp := libcommon.Hex2Bytes(test.Input) + if !bytes.Equal(in, exp) { + t.Errorf("Precompiled %v modified input data", addr) + } + }) +} + +func testJsonZkevm(name, addr string, t *testing.T) { + tests, err := loadJson(name) + if err != nil { + t.Fatal(err) + } + for _, test := range tests { + testPrecompiledZkevm(t, addr, test) + } } func Test_ModExpZkevm_Gas(t *testing.T) { @@ -64,53 +106,11 @@ func Test_ModExpZkevm_Gas(t *testing.T) { } } -func uint64To32Bytes(input int) []byte { - bigInt := new(big.Int).SetUint64(uint64(input)) - bytes := bigInt.Bytes() - result := make([]byte, 32) - copy(result[32-len(bytes):], bytes) - return result -} - -func uint64ToDeterminedBytes(input *big.Int, length int) []byte { - bytes := input.Bytes() - result := make([]byte, length) - copy(result[length-len(bytes):], bytes) - return result -} - func TestP256VerifyZkevm(t *testing.T) { testJsonZkevm("p256Verify", "0x0000000000000000000000000000000000000100", t) } -func testJsonZkevm(name, addr string, t *testing.T) { - tests, err := loadJson(name) - if err != nil { - t.Fatal(err) - } - for _, test := range tests { - testPrecompiledZkevm(t, addr, test) - } -} - -func testPrecompiledZkevm(t *testing.T, addr string, test precompiledTest) { - p := allPrecompilesZkevm[libcommon.HexToAddress(addr)] - in := libcommon.Hex2Bytes(test.Input) - gas := p.RequiredGas(in) - t.Run(fmt.Sprintf("%s-Gas=%d", test.Name, gas), func(t *testing.T) { - t.Parallel() - if res, _, err := RunPrecompiledContract(p, in, gas); err != nil { - t.Error(err) - } else if common.Bytes2Hex(res) != test.Expected { - t.Errorf("Expected %v, got %v", test.Expected, common.Bytes2Hex(res)) - } - if expGas := test.Gas; expGas != gas { - t.Errorf("%v: gas wrong, expected %d, got %d", test.Name, expGas, gas) - } - // Verify that the precompile did not touch the input buffer - exp := libcommon.Hex2Bytes(test.Input) - if !bytes.Equal(in, exp) { - t.Errorf("Precompiled %v modified input data", addr) - } - }) +// EIP2565 is enabled by default. 
+func TestBigModExpZkevm(t *testing.T) { + testJsonZkevm("modexp_eip2565", "0x0000000000000000000000000000000000000005", t) } From 205a4f41af1bd0946885086b09f1908981b366f7 Mon Sep 17 00:00:00 2001 From: Xavier Romero <47888584+xavier-romero@users.noreply.github.com> Date: Thu, 14 Nov 2024 15:38:06 +0100 Subject: [PATCH 55/88] Fix modexp issue returning wrong result (#1468) --- core/vm/contracts_zkevm.go | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/core/vm/contracts_zkevm.go b/core/vm/contracts_zkevm.go index 037f2bc7c42..895e1162820 100644 --- a/core/vm/contracts_zkevm.go +++ b/core/vm/contracts_zkevm.go @@ -395,18 +395,26 @@ func (c *bigModExp_zkevm) Run(input []byte) ([]byte, error) { baseLen = new(big.Int).SetBytes(getData(input, 0, 32)).Uint64() expLen = new(big.Int).SetBytes(getData(input, 32, 32)).Uint64() modLen = new(big.Int).SetBytes(getData(input, 64, 32)).Uint64() + base = big.NewInt(0) + exp = big.NewInt(0) + mod = big.NewInt(0) ) - if len(input) > 96 { - input = input[96:] - } else { - input = input[:0] + + if len(input) >= 96 + int(baseLen) { + base = new(big.Int).SetBytes(getData(input, 96, uint64(baseLen))) + } + if len(input) >= 96 + int(baseLen) + int(expLen) { + exp = new(big.Int).SetBytes(getData(input, 96 + uint64(baseLen), uint64(expLen))) + } + if len(input) >= 96 + int(baseLen) + int(expLen) + int(modLen) { + mod = new(big.Int).SetBytes(getData(input, 96 + uint64(baseLen) + uint64(expLen), uint64(modLen))) + } + if len(input) < 96 + int(baseLen) + int(expLen) + int(modLen) { + input = common.LeftPadBytes(input, 96 + int(baseLen) + int(expLen) + int(modLen)) } // Retrieve the operands and execute the exponentiation var ( - base = new(big.Int).SetBytes(getData(input, 0, baseLen)) - exp = new(big.Int).SetBytes(getData(input, baseLen, expLen)) - mod = new(big.Int).SetBytes(getData(input, baseLen+expLen, modLen)) v []byte baseBitLen = base.BitLen() expBitLen = exp.BitLen() From 1e2977c527c84240f7fe78f9be8e2785b3d4f11e Mon Sep 17 00:00:00 2001 From: Scott Fairclough <70711990+hexoscott@users.noreply.github.com> Date: Thu, 14 Nov 2024 15:28:07 +0000 Subject: [PATCH 56/88] witness forced storage inclusion (#1465) --- cmd/utils/flags.go | 5 ++++ core/state/trie_db.go | 12 ++++++++- eth/backend.go | 1 + eth/ethconfig/config_zkevm.go | 1 + smt/pkg/smt/witness_test.go | 3 ++- turbo/cli/default_flags.go | 1 + turbo/cli/flags_zkevm.go | 10 ++++++++ turbo/jsonrpc/zkevm_api.go | 29 ++++++++++++++++++++- zk/witness/witness.go | 48 +++++++++++++++++++++++------------ 9 files changed, 91 insertions(+), 19 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 59936ed948a..e9810955ab4 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -763,6 +763,11 @@ var ( Usage: "Mock the witness generation", Value: false, } + WitnessContractInclusion = cli.StringFlag{ + Name: "zkevm.witness-contract-inclusion", + Usage: "Contracts that will have all of their storage added to the witness every time", + Value: "", + } ACLPrintHistory = cli.IntFlag{ Name: "acl.print-history", Usage: "Number of entries to print from the ACL history on node start up", diff --git a/core/state/trie_db.go b/core/state/trie_db.go index fb23f799cf8..3a13013b83e 100644 --- a/core/state/trie_db.go +++ b/core/state/trie_db.go @@ -885,7 +885,7 @@ func (tds *TrieDbState) GetTrieHash() common.Hash { return tds.t.Hash() } -func (tds *TrieDbState) ResolveSMTRetainList() (*trie.RetainList, error) { +func (tds *TrieDbState) ResolveSMTRetainList(inclusion 
map[libcommon.Address][]libcommon.Hash) (*trie.RetainList, error) { // Aggregating the current buffer, if any if tds.currentBuffer != nil { if tds.aggregateBuffer == nil { @@ -967,6 +967,16 @@ func (tds *TrieDbState) ResolveSMTRetainList() (*trie.RetainList, error) { keys = append(keys, smtPath) } + for address, slots := range inclusion { + for _, slot := range slots { + smtPath, err := getSMTPath(address.String(), slot.String()) + if err != nil { + return nil, err + } + keys = append(keys, smtPath) + } + } + rl := trie.NewRetainList(0) for _, key := range keys { diff --git a/eth/backend.go b/eth/backend.go index 7299e75844f..87c944b7092 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -1110,6 +1110,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger backend.chainConfig, backend.config.Zk, backend.engine, + backend.config.WitnessContractInclusion, ) var legacyExecutors []*legacy_executor_verifier.Executor = make([]*legacy_executor_verifier.Executor, 0, len(cfg.ExecutorUrls)) diff --git a/eth/ethconfig/config_zkevm.go b/eth/ethconfig/config_zkevm.go index 375bbd10793..31b069531f0 100644 --- a/eth/ethconfig/config_zkevm.go +++ b/eth/ethconfig/config_zkevm.go @@ -94,6 +94,7 @@ type Zk struct { BadBatches []uint64 SealBatchImmediatelyOnOverflow bool MockWitnessGeneration bool + WitnessContractInclusion []common.Address } var DefaultZkConfig = &Zk{} diff --git a/smt/pkg/smt/witness_test.go b/smt/pkg/smt/witness_test.go index b055f375e78..87dae548915 100644 --- a/smt/pkg/smt/witness_test.go +++ b/smt/pkg/smt/witness_test.go @@ -52,7 +52,8 @@ func prepareSMT(t *testing.T) (*smt.SMT, *trie.RetainList) { err = intraBlockState.CommitBlock(&chain.Rules{}, w) require.NoError(t, err, "error committing block") - rl, err := tds.ResolveSMTRetainList() + inclusions := make(map[libcommon.Address][]libcommon.Hash) + rl, err := tds.ResolveSMTRetainList(inclusions) require.NoError(t, err, "error resolving state trie") memdb := db.NewMemDb() diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index ffc404728dd..7ea5b33a24f 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -289,4 +289,5 @@ var DefaultFlags = []cli.Flag{ &utils.InfoTreeUpdateInterval, &utils.SealBatchImmediatelyOnOverflow, &utils.MockWitnessGeneration, + &utils.WitnessContractInclusion, } diff --git a/turbo/cli/flags_zkevm.go b/turbo/cli/flags_zkevm.go index 5ad9ddec3d2..9aa4cecca5b 100644 --- a/turbo/cli/flags_zkevm.go +++ b/turbo/cli/flags_zkevm.go @@ -131,6 +131,15 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { badBatches = append(badBatches, val) } + var witnessInclusion []libcommon.Address + for _, s := range strings.Split(ctx.String(utils.WitnessContractInclusion.Name), ",") { + if s == "" { + // if there are no entries then we can just ignore it and move on + continue + } + witnessInclusion = append(witnessInclusion, libcommon.HexToAddress(s)) + } + cfg.Zk = ðconfig.Zk{ L2ChainId: ctx.Uint64(utils.L2ChainIdFlag.Name), L2RpcUrl: ctx.String(utils.L2RpcUrlFlag.Name), @@ -210,6 +219,7 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { InfoTreeUpdateInterval: ctx.Duration(utils.InfoTreeUpdateInterval.Name), SealBatchImmediatelyOnOverflow: ctx.Bool(utils.SealBatchImmediatelyOnOverflow.Name), MockWitnessGeneration: ctx.Bool(utils.MockWitnessGeneration.Name), + WitnessContractInclusion: witnessInclusion, } utils2.EnableTimer(cfg.DebugTimers) diff --git a/turbo/jsonrpc/zkevm_api.go b/turbo/jsonrpc/zkevm_api.go index 
2d08e56004e..90c077fdc7c 100644 --- a/turbo/jsonrpc/zkevm_api.go +++ b/turbo/jsonrpc/zkevm_api.go @@ -46,6 +46,8 @@ import ( "github.com/ledgerwatch/erigon/zk/witness" "github.com/ledgerwatch/erigon/zkevm/hex" "github.com/ledgerwatch/erigon/zkevm/jsonrpc/client" + "github.com/ledgerwatch/erigon/core/systemcontracts" + "math" ) var sha3UncleHash = common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347") @@ -1023,6 +1025,7 @@ func (api *ZkEvmAPIImpl) buildGenerator(ctx context.Context, tx kv.Tx, witnessMo chainConfig, api.config.Zk, api.ethApi._engine, + api.config.WitnessContractInclusion, ) fullWitness := false @@ -1709,7 +1712,31 @@ func (zkapi *ZkEvmAPIImpl) GetProof(ctx context.Context, address common.Address, ibs.GetState(address, &key, value) } - rl, err := tds.ResolveSMTRetainList() + blockNumber, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + if err != nil { + return nil, err + } + + chainCfg, err := api.chainConfig(ctx, tx) + if err != nil { + return nil, err + } + + plainState := state.NewPlainState(tx, blockNumber, systemcontracts.SystemContractCodeLookup[chainCfg.ChainName]) + defer plainState.Close() + + inclusion := make(map[libcommon.Address][]libcommon.Hash) + for _, contract := range zkapi.config.WitnessContractInclusion { + err = plainState.ForEachStorage(contract, libcommon.Hash{}, func(key, secKey libcommon.Hash, value uint256.Int) bool { + inclusion[contract] = append(inclusion[contract], key) + return false + }, math.MaxInt64) + if err != nil { + return nil, err + } + } + + rl, err := tds.ResolveSMTRetainList(inclusion) if err != nil { return nil, err } diff --git a/zk/witness/witness.go b/zk/witness/witness.go index 1c1bb3acf7c..5ae7ac04bcf 100644 --- a/zk/witness/witness.go +++ b/zk/witness/witness.go @@ -35,6 +35,8 @@ import ( "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon-lib/kv/membatchwithdb" + "github.com/holiman/uint256" + "math" ) var ( @@ -44,14 +46,15 @@ var ( ) type Generator struct { - tx kv.Tx - dirs datadir.Dirs - historyV3 bool - agg *libstate.Aggregator - blockReader services.FullBlockReader - chainCfg *chain.Config - zkConfig *ethconfig.Zk - engine consensus.EngineReader + tx kv.Tx + dirs datadir.Dirs + historyV3 bool + agg *libstate.Aggregator + blockReader services.FullBlockReader + chainCfg *chain.Config + zkConfig *ethconfig.Zk + engine consensus.EngineReader + forcedContracts []libcommon.Address } func NewGenerator( @@ -62,15 +65,17 @@ func NewGenerator( chainCfg *chain.Config, zkConfig *ethconfig.Zk, engine consensus.EngineReader, + forcedContracs []libcommon.Address, ) *Generator { return &Generator{ - dirs: dirs, - historyV3: historyV3, - agg: agg, - blockReader: blockReader, - chainCfg: chainCfg, - zkConfig: zkConfig, - engine: engine, + dirs: dirs, + historyV3: historyV3, + agg: agg, + blockReader: blockReader, + chainCfg: chainCfg, + zkConfig: zkConfig, + engine: engine, + forcedContracts: forcedContracs, } } @@ -337,12 +342,23 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, batchNum uint prevStateRoot = block.Root() } + inclusion := make(map[libcommon.Address][]libcommon.Hash) + for _, contract := range g.forcedContracts { + err = reader.ForEachStorage(contract, libcommon.Hash{}, func(key, secKey libcommon.Hash, value uint256.Int) bool { + inclusion[contract] = append(inclusion[contract], key) + return false + }, math.MaxInt64) + if err != nil { + return nil, err + } + } + var rl trie.RetainDecider // if full is true, we will send all the nodes to 
the witness rl = &trie.AlwaysTrueRetainDecider{} if !witnessFull { - rl, err = tds.ResolveSMTRetainList() + rl, err = tds.ResolveSMTRetainList(inclusion) if err != nil { return nil, err } From 981e3ede3fb304aba2f5ba828d7ac9e7e9e3bf34 Mon Sep 17 00:00:00 2001 From: Scott Fairclough <70711990+hexoscott@users.noreply.github.com> Date: Thu, 14 Nov 2024 17:33:56 +0000 Subject: [PATCH 57/88] fix for deadlock in db concurrency (#1472) --- erigon-lib/kv/mdbx/kv_mdbx.go | 71 +++++++++++++++++++++--------- erigon-lib/kv/mdbx/kv_mdbx_test.go | 39 ++++++++++++++++ 2 files changed, 88 insertions(+), 22 deletions(-) diff --git a/erigon-lib/kv/mdbx/kv_mdbx.go b/erigon-lib/kv/mdbx/kv_mdbx.go index 8d5bb64e7b8..9b26cf3182c 100644 --- a/erigon-lib/kv/mdbx/kv_mdbx.go +++ b/erigon-lib/kv/mdbx/kv_mdbx.go @@ -59,7 +59,8 @@ type MdbxOpts struct { // must be in the range from 12.5% (almost empty) to 50% (half empty) // which corresponds to the range from 8192 and to 32768 in units respectively log log.Logger - roTxsLimiter *semaphore.Weighted + readTxLimiter *semaphore.Weighted + writeTxLimiter *semaphore.Weighted bucketsCfg TableCfgFunc path string syncPeriod time.Duration @@ -109,7 +110,7 @@ func (opts MdbxOpts) DirtySpace(s uint64) MdbxOpts { } func (opts MdbxOpts) RoTxsLimiter(l *semaphore.Weighted) MdbxOpts { - opts.roTxsLimiter = l + opts.readTxLimiter = l return opts } @@ -386,20 +387,26 @@ func (opts MdbxOpts) Open(ctx context.Context) (kv.RwDB, error) { // return nil, err //} - if opts.roTxsLimiter == nil { + if opts.readTxLimiter == nil { targetSemCount := int64(runtime.GOMAXPROCS(-1) * 16) - opts.roTxsLimiter = semaphore.NewWeighted(targetSemCount) // 1 less than max to allow unlocking to happen + opts.readTxLimiter = semaphore.NewWeighted(targetSemCount) // 1 less than max to allow unlocking to happen + } + + if opts.writeTxLimiter == nil { + targetSemCount := int64(runtime.GOMAXPROCS(-1)) - 1 + opts.writeTxLimiter = semaphore.NewWeighted(targetSemCount) // 1 less than max to allow unlocking to happen } txsCountMutex := &sync.Mutex{} db := &MdbxKV{ - opts: opts, - env: env, - log: opts.log, - buckets: kv.TableCfg{}, - txSize: dirtyPagesLimit * opts.pageSize, - roTxsLimiter: opts.roTxsLimiter, + opts: opts, + env: env, + log: opts.log, + buckets: kv.TableCfg{}, + txSize: dirtyPagesLimit * opts.pageSize, + readTxLimiter: opts.readTxLimiter, + writeTxLimiter: opts.writeTxLimiter, txsCountMutex: txsCountMutex, txsAllDoneOnCloseCond: sync.NewCond(txsCountMutex), @@ -468,14 +475,15 @@ func (opts MdbxOpts) MustOpen() kv.RwDB { } type MdbxKV struct { - log log.Logger - env *mdbx.Env - buckets kv.TableCfg - roTxsLimiter *semaphore.Weighted // does limit amount of concurrent Ro transactions - in most casess runtime.NumCPU() is good value for this channel capacity - this channel can be shared with other components (like Decompressor) - opts MdbxOpts - txSize uint64 - closed atomic.Bool - path string + log log.Logger + env *mdbx.Env + buckets kv.TableCfg + readTxLimiter *semaphore.Weighted // does limit amount of concurrent Ro transactions - in most casess runtime.NumCPU() is good value for this channel capacity - this channel can be shared with other components (like Decompressor) + writeTxLimiter *semaphore.Weighted + opts MdbxOpts + txSize uint64 + closed atomic.Bool + path string txsCount uint txsCountMutex *sync.Mutex @@ -748,7 +756,7 @@ func (db *MdbxKV) BeginRo(ctx context.Context) (txn kv.Tx, err error) { } // will return nil err if context is cancelled (may appear to acquire the semaphore) - if semErr := 
db.roTxsLimiter.Acquire(ctx, 1); semErr != nil { + if semErr := db.readTxLimiter.Acquire(ctx, 1); semErr != nil { db.trackTxEnd() return nil, fmt.Errorf("mdbx.MdbxKV.BeginRo: roTxsLimiter error %w", semErr) } @@ -757,7 +765,7 @@ func (db *MdbxKV) BeginRo(ctx context.Context) (txn kv.Tx, err error) { if txn == nil { // on error, or if there is whatever reason that we don't return a tx, // we need to free up the limiter slot, otherwise it could lead to deadlocks - db.roTxsLimiter.Release(1) + db.readTxLimiter.Release(1) db.trackTxEnd() } }() @@ -784,17 +792,34 @@ func (db *MdbxKV) BeginRwNosync(ctx context.Context) (kv.RwTx, error) { } func (db *MdbxKV) beginRw(ctx context.Context, flags uint) (txn kv.RwTx, err error) { + if db.closed.Load() { + return nil, fmt.Errorf("db closed") + } + select { case <-ctx.Done(): return nil, ctx.Err() default: } + // will return nil err if context is cancelled (may appear to acquire the semaphore) + if semErr := db.writeTxLimiter.Acquire(ctx, 1); semErr != nil { + return nil, semErr + } + if !db.trackTxBegin() { return nil, fmt.Errorf("db closed") } runtime.LockOSThread() + defer func() { + if txn == nil { + // on error, or if there is whatever reason that we don't return a tx, + // we need to free up the limiter slot, otherwise it could lead to deadlocks + db.writeTxLimiter.Release(1) + runtime.UnlockOSThread() + } + }() tx, err := db.env.BeginTxn(nil, flags) if err != nil { runtime.UnlockOSThread() // unlock only in case of error. normal flow is "defer .Rollback()" @@ -1048,8 +1073,9 @@ func (tx *MdbxTx) Commit() error { tx.tx = nil tx.db.trackTxEnd() if tx.readOnly { - tx.db.roTxsLimiter.Release(1) + tx.db.readTxLimiter.Release(1) } else { + tx.db.writeTxLimiter.Release(1) runtime.UnlockOSThread() } tx.db.leakDetector.Del(tx.id) @@ -1099,8 +1125,9 @@ func (tx *MdbxTx) Rollback() { tx.tx = nil tx.db.trackTxEnd() if tx.readOnly { - tx.db.roTxsLimiter.Release(1) + tx.db.readTxLimiter.Release(1) } else { + tx.db.writeTxLimiter.Release(1) runtime.UnlockOSThread() } tx.db.leakDetector.Del(tx.id) diff --git a/erigon-lib/kv/mdbx/kv_mdbx_test.go b/erigon-lib/kv/mdbx/kv_mdbx_test.go index d30d8a5624d..c31a1379404 100644 --- a/erigon-lib/kv/mdbx/kv_mdbx_test.go +++ b/erigon-lib/kv/mdbx/kv_mdbx_test.go @@ -31,6 +31,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/order" + "sync" ) func BaseCaseDB(t *testing.T) kv.RwDB { @@ -1087,3 +1088,41 @@ func TestDB_BatchTime(t *testing.T) { t.Fatal(err) } } + +func TestDeadlock(t *testing.T) { + path := t.TempDir() + logger := log.New() + table := "Table" + db := NewMDBX(logger).InMem(path).WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg { + return kv.TableCfg{ + table: kv.TableCfgItem{Flags: kv.DupSort}, + kv.Sequence: kv.TableCfgItem{}, + } + }).MapSize(128 * datasize.MB).MustOpen() + t.Cleanup(db.Close) + + wg := sync.WaitGroup{} + for i := 0; i < 300_000; i++ { + wg.Add(1) + go func(idx int) { + ctx := context.Background() + // create a write transaction every X requests + if idx%5 == 0 { + tx, err := db.BeginRw(ctx) + if err != nil { + t.Fatal(err) + } + defer tx.Rollback() + } else { + tx, err := db.BeginRo(ctx) + if err != nil { + t.Fatal(err) + } + defer tx.Rollback() + } + wg.Done() + }(i) + } + + wg.Wait() +} From 65585253bcf98c42410c377f135094fb357ba297 Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Thu, 14 Nov 2024 18:25:16 +0000 Subject: [PATCH 58/88] fix(seq): latest block from execution for rpc (#1473) --- turbo/rpchelper/helper.go | 16 +++++++++++++--- 1 file 
changed, 13 insertions(+), 3 deletions(-) diff --git a/turbo/rpchelper/helper.go b/turbo/rpchelper/helper.go index 4c09d49d774..7cecd4826d5 100644 --- a/turbo/rpchelper/helper.go +++ b/turbo/rpchelper/helper.go @@ -17,6 +17,7 @@ import ( borfinality "github.com/ledgerwatch/erigon/polygon/bor/finality" "github.com/ledgerwatch/erigon/polygon/bor/finality/whitelist" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/zk/sequencer" ) // unable to decode supplied params, or an invalid number of parameters @@ -37,9 +38,18 @@ func GetCanonicalBlockNumber(blockNrOrHash rpc.BlockNumberOrHash, tx kv.Tx, filt } func _GetBlockNumber(requireCanonical bool, blockNrOrHash rpc.BlockNumberOrHash, tx kv.Tx, filters *Filters) (blockNumber uint64, hash libcommon.Hash, latest bool, err error) { - finishedBlockNumber, err := stages.GetStageProgress(tx, stages.Finish) - if err != nil { - return 0, libcommon.Hash{}, false, fmt.Errorf("getting finished block number: %w", err) + var finishedBlockNumber uint64 + + if !sequencer.IsSequencer() { + finishedBlockNumber, err = stages.GetStageProgress(tx, stages.Finish) + if err != nil { + return 0, libcommon.Hash{}, false, fmt.Errorf("getting finished block number: %w", err) + } + } else { + finishedBlockNumber, err = stages.GetStageProgress(tx, stages.Execution) + if err != nil { + return 0, libcommon.Hash{}, false, fmt.Errorf("getting finished block number: %w", err) + } } var ok bool From ca97992338857c04157bcc67e82194c8ee68fb34 Mon Sep 17 00:00:00 2001 From: Ji Hwan Date: Fri, 15 Nov 2024 17:46:46 +0900 Subject: [PATCH 59/88] fix: mod value not being padded + new test cases Signed-off-by: Ji Hwan --- core/vm/contracts_zkevm.go | 21 ++++++----- .../testdata/precompiles/modexp_eip2565.json | 35 +++++++++++++++++++ 2 files changed, 45 insertions(+), 11 deletions(-) diff --git a/core/vm/contracts_zkevm.go b/core/vm/contracts_zkevm.go index 895e1162820..60167e119c9 100644 --- a/core/vm/contracts_zkevm.go +++ b/core/vm/contracts_zkevm.go @@ -395,22 +395,21 @@ func (c *bigModExp_zkevm) Run(input []byte) ([]byte, error) { baseLen = new(big.Int).SetBytes(getData(input, 0, 32)).Uint64() expLen = new(big.Int).SetBytes(getData(input, 32, 32)).Uint64() modLen = new(big.Int).SetBytes(getData(input, 64, 32)).Uint64() - base = big.NewInt(0) - exp = big.NewInt(0) - mod = big.NewInt(0) + base = new(big.Int).SetBytes(getData(input, 0, baseLen)) + exp = new(big.Int).SetBytes(getData(input, baseLen, expLen)) + mod = new(big.Int).SetBytes(getData(input, baseLen+expLen, modLen)) ) - if len(input) >= 96 + int(baseLen) { + if len(input) >= 96+int(baseLen) { base = new(big.Int).SetBytes(getData(input, 96, uint64(baseLen))) } - if len(input) >= 96 + int(baseLen) + int(expLen) { - exp = new(big.Int).SetBytes(getData(input, 96 + uint64(baseLen), uint64(expLen))) + if len(input) >= 96+int(baseLen)+int(expLen) { + exp = new(big.Int).SetBytes(getData(input, 96+uint64(baseLen), uint64(expLen))) } - if len(input) >= 96 + int(baseLen) + int(expLen) + int(modLen) { - mod = new(big.Int).SetBytes(getData(input, 96 + uint64(baseLen) + uint64(expLen), uint64(modLen))) - } - if len(input) < 96 + int(baseLen) + int(expLen) + int(modLen) { - input = common.LeftPadBytes(input, 96 + int(baseLen) + int(expLen) + int(modLen)) + // Always pad mod value to size of modLen. The rest will be ignored. 
+ mod = new(big.Int).SetBytes(getData(input, 96+uint64(baseLen)+uint64(expLen), uint64(modLen))) + if len(input) < 96+int(baseLen)+int(expLen)+int(modLen) { + input = common.LeftPadBytes(input, 96+int(baseLen)+int(expLen)+int(modLen)) } // Retrieve the operands and execute the exponentiation diff --git a/core/vm/testdata/precompiles/modexp_eip2565.json b/core/vm/testdata/precompiles/modexp_eip2565.json index c55441439eb..cafc908d24f 100644 --- a/core/vm/testdata/precompiles/modexp_eip2565.json +++ b/core/vm/testdata/precompiles/modexp_eip2565.json @@ -13,6 +13,41 @@ "Gas": 1360, "NoBenchmark": false }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "Expected": "", + "Name": "return_empty_byte_slice", + "Gas": 200, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000002003ffff80", + "Expected": "3b01b01ac41f2d6e917c6d6a221ce793802469026d9ab7578fa2e79e4da6aaab", + "Name": "right_padding_mod", + "Gas": 200, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000002003ffff800000000000000000000000000000000000000000000000000000000000000007", + "Expected": "3b01b01ac41f2d6e917c6d6a221ce793802469026d9ab7578fa2e79e4da6aaab", + "Name": "right_padding_mod_with_excess_data", + "Gas": 200, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000002003ffff80", + "Expected": "0000000000000000000000000000000000000000000000000000000000000000", + "Name": "zero_base_right_pad", + "Gas": 200, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000002003ffff8000000000000000000000000000000000000000000000000000000000000000", + "Expected": "3b01b01ac41f2d6e917c6d6a221ce793802469026d9ab7578fa2e79e4da6aaab", + "Name": "eip_example_3", + "Gas": 200, + "NoBenchmark": false + }, { "Input": "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb502fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b", "Expected": "60008f1614cc01dcfb6bfb09c625cf90b47d4468db81b5f8b7a39d42f332eab9b2da8f2d95311648a8f243f4bb13cfb3d8f7f2a3c014122ebb3ed41b02783adc", From b1758e8beb85f2184132eab3a5bacdb5c8c79857 Mon Sep 17 00:00:00 2001 From: Scott Fairclough <70711990+hexoscott@users.noreply.github.com> Date: Fri, 15 Nov 2024 13:03:15 +0000 Subject: [PATCH 60/88] Fix/txpool pending (#1477) * no overflow when configured and mining logic change * remove mined transactions from the pool 
per block --- core/vm/zk_batch_counters.go | 4 +++ zk/stages/stage_sequence_execute.go | 13 +++++++- zk/stages/stage_sequence_execute_state.go | 15 ++++++++-- .../stage_sequence_execute_transactions.go | 18 ++++++----- zk/txpool/pool_zk.go | 30 +++++++++++++++++++ 5 files changed, 69 insertions(+), 11 deletions(-) diff --git a/core/vm/zk_batch_counters.go b/core/vm/zk_batch_counters.go index b40da0a02c1..83b2c36ed96 100644 --- a/core/vm/zk_batch_counters.go +++ b/core/vm/zk_batch_counters.go @@ -142,6 +142,10 @@ func (bcc *BatchCounterCollector) processBatchLevelData() error { // CheckForOverflow returns true in the case that any counter has less than 0 remaining func (bcc *BatchCounterCollector) CheckForOverflow(verifyMerkleProof bool) (bool, error) { + // unlimited counters shouldn't overflow + if bcc.unlimitedCounters { + return false, nil + } combined, err := bcc.CombineCollectors(verifyMerkleProof) if err != nil { return false, err diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index 3673d90f8cb..43883d8f82a 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -346,12 +346,21 @@ func sequencingBatchStep( return err } } else if !batchState.isL1Recovery() { + var allConditionsOK bool - batchState.blockState.transactionsForInclusion, allConditionsOK, err = getNextPoolTransactions(ctx, cfg, executionAt, batchState.forkId, batchState.yieldedTransactions) + var newTransactions []types.Transaction + var newIds []common.Hash + + newTransactions, newIds, allConditionsOK, err = getNextPoolTransactions(ctx, cfg, executionAt, batchState.forkId, batchState.yieldedTransactions) if err != nil { return err } + batchState.blockState.transactionsForInclusion = append(batchState.blockState.transactionsForInclusion, newTransactions...) 
+ for idx, tx := range newTransactions { + batchState.blockState.transactionHashesToSlots[tx.Hash()] = newIds[idx] + } + if len(batchState.blockState.transactionsForInclusion) == 0 { if allConditionsOK { time.Sleep(batchContext.cfg.zk.SequencerTimeoutOnEmptyTxPool) @@ -513,6 +522,8 @@ func sequencingBatchStep( return err } + cfg.txPool.RemoveMinedTransactions(batchState.blockState.builtBlockElements.txSlots) + if batchState.isLimboRecovery() { stateRoot := block.Root() cfg.txPool.UpdateLimboRootByTxHash(batchState.limboRecoveryData.limboTxHash, &stateRoot) diff --git a/zk/stages/stage_sequence_execute_state.go b/zk/stages/stage_sequence_execute_state.go index 4e74f6210a8..4ce99180806 100644 --- a/zk/stages/stage_sequence_execute_state.go +++ b/zk/stages/stage_sequence_execute_state.go @@ -165,7 +165,11 @@ func (bs *BatchState) getCoinbase(cfg *SequenceBlockCfg) common.Address { } func (bs *BatchState) onAddedTransaction(transaction types.Transaction, receipt *types.Receipt, execResult *core.ExecutionResult, effectiveGas uint8) { - bs.blockState.builtBlockElements.onFinishAddingTransaction(transaction, receipt, execResult, effectiveGas) + slotId, ok := bs.blockState.transactionHashesToSlots[transaction.Hash()] + if !ok { + log.Warn("[batchState] transaction hash not found in transaction hashes to slots map", "hash", transaction.Hash()) + } + bs.blockState.builtBlockElements.onFinishAddingTransaction(transaction, receipt, execResult, effectiveGas, slotId) bs.hasAnyTransactionsInThisBatch = true } @@ -250,12 +254,15 @@ func newLimboRecoveryData(limboHeaderTimestamp uint64, limboTxHash *common.Hash) // TYPE BLOCK STATE type BlockState struct { transactionsForInclusion []types.Transaction + transactionHashesToSlots map[common.Hash]common.Hash builtBlockElements BuiltBlockElements blockL1RecoveryData *zktx.DecodedBatchL2Data } func newBlockState() *BlockState { - return &BlockState{} + return &BlockState{ + transactionHashesToSlots: make(map[common.Hash]common.Hash), + } } func (bs *BlockState) hasAnyTransactionForInclusion() bool { @@ -294,6 +301,7 @@ type BuiltBlockElements struct { receipts types.Receipts effectiveGases []uint8 executionResults []*core.ExecutionResult + txSlots []common.Hash } func (bbe *BuiltBlockElements) resetBlockBuildingArrays() { @@ -303,11 +311,12 @@ func (bbe *BuiltBlockElements) resetBlockBuildingArrays() { bbe.executionResults = []*core.ExecutionResult{} } -func (bbe *BuiltBlockElements) onFinishAddingTransaction(transaction types.Transaction, receipt *types.Receipt, execResult *core.ExecutionResult, effectiveGas uint8) { +func (bbe *BuiltBlockElements) onFinishAddingTransaction(transaction types.Transaction, receipt *types.Receipt, execResult *core.ExecutionResult, effectiveGas uint8, slotId common.Hash) { bbe.transactions = append(bbe.transactions, transaction) bbe.receipts = append(bbe.receipts, receipt) bbe.executionResults = append(bbe.executionResults, execResult) bbe.effectiveGases = append(bbe.effectiveGases, effectiveGas) + bbe.txSlots = append(bbe.txSlots, slotId) } type resequenceTxMetadata struct { diff --git a/zk/stages/stage_sequence_execute_transactions.go b/zk/stages/stage_sequence_execute_transactions.go index 2b14891d3fb..713dc462d39 100644 --- a/zk/stages/stage_sequence_execute_transactions.go +++ b/zk/stages/stage_sequence_execute_transactions.go @@ -19,10 +19,11 @@ import ( "github.com/ledgerwatch/log/v3" ) -func getNextPoolTransactions(ctx context.Context, cfg SequenceBlockCfg, executionAt, forkId uint64, alreadyYielded mapset.Set[[32]byte]) 
([]types.Transaction, bool, error) { +func getNextPoolTransactions(ctx context.Context, cfg SequenceBlockCfg, executionAt, forkId uint64, alreadyYielded mapset.Set[[32]byte]) ([]types.Transaction, []common.Hash, bool, error) { cfg.txPool.LockFlusher() defer cfg.txPool.UnlockFlusher() + var ids []common.Hash var transactions []types.Transaction var allConditionsOk bool var err error @@ -37,7 +38,7 @@ func getNextPoolTransactions(ctx context.Context, cfg SequenceBlockCfg, executio if allConditionsOk, _, err = cfg.txPool.YieldBest(cfg.yieldSize, &slots, poolTx, executionAt, gasLimit, 0, alreadyYielded); err != nil { return err } - yieldedTxs, toRemove, err := extractTransactionsFromSlot(&slots) + yieldedTxs, yieldedIds, toRemove, err := extractTransactionsFromSlot(&slots) if err != nil { return err } @@ -45,12 +46,13 @@ func getNextPoolTransactions(ctx context.Context, cfg SequenceBlockCfg, executio cfg.txPool.MarkForDiscardFromPendingBest(txId) } transactions = append(transactions, yieldedTxs...) + ids = append(ids, yieldedIds...) return nil }); err != nil { - return nil, allConditionsOk, err + return nil, nil, allConditionsOk, err } - return transactions, allConditionsOk, err + return transactions, ids, allConditionsOk, err } func getLimboTransaction(ctx context.Context, cfg SequenceBlockCfg, txHash *common.Hash) ([]types.Transaction, error) { @@ -68,7 +70,7 @@ func getLimboTransaction(ctx context.Context, cfg SequenceBlockCfg, txHash *comm if slots != nil { // ignore the toRemove value here, we know the RLP will be sound as we had to read it from the pool // in the first place to get it into limbo - transactions, _, err = extractTransactionsFromSlot(slots) + transactions, _, _, err = extractTransactionsFromSlot(slots) if err != nil { return err } @@ -82,7 +84,8 @@ func getLimboTransaction(ctx context.Context, cfg SequenceBlockCfg, txHash *comm return transactions, nil } -func extractTransactionsFromSlot(slot *types2.TxsRlp) ([]types.Transaction, []common.Hash, error) { +func extractTransactionsFromSlot(slot *types2.TxsRlp) ([]types.Transaction, []common.Hash, []common.Hash, error) { + ids := make([]common.Hash, 0, len(slot.TxIds)) transactions := make([]types.Transaction, 0, len(slot.Txs)) toRemove := make([]common.Hash, 0) for idx, txBytes := range slot.Txs { @@ -101,8 +104,9 @@ func extractTransactionsFromSlot(slot *types2.TxsRlp) ([]types.Transaction, []co copy(sender[:], slot.Senders.At(idx)) transaction.SetSender(sender) transactions = append(transactions, transaction) + ids = append(ids, slot.TxIds[idx]) } - return transactions, toRemove, nil + return transactions, ids, toRemove, nil } type overflowType uint8 diff --git a/zk/txpool/pool_zk.go b/zk/txpool/pool_zk.go index d9c52bfc81d..950b1990a02 100644 --- a/zk/txpool/pool_zk.go +++ b/zk/txpool/pool_zk.go @@ -267,6 +267,36 @@ func (p *TxPool) MarkForDiscardFromPendingBest(txHash common.Hash) { } } +func (p *TxPool) RemoveMinedTransactions(ids []common.Hash) { + p.lock.Lock() + defer p.lock.Unlock() + + toDelete := make([]*metaTx, 0) + + p.all.ascendAll(func(mt *metaTx) bool { + for _, id := range ids { + if bytes.Equal(mt.Tx.IDHash[:], id[:]) { + toDelete = append(toDelete, mt) + switch mt.currentSubPool { + case PendingSubPool: + p.pending.Remove(mt) + case BaseFeeSubPool: + p.baseFee.Remove(mt) + case QueuedSubPool: + p.queued.Remove(mt) + default: + //already removed + } + } + } + return true + }) + + for _, mt := range toDelete { + p.discardLocked(mt, Mined) + } +} + // discards the transactions that are in overflowZkCoutners 
from pending // executes the discard function on them // deletes the tx from the sendersWithChangedState map From a8a794166026817d789b73790a10bee48479761f Mon Sep 17 00:00:00 2001 From: Xavier Romero Date: Fri, 15 Nov 2024 15:46:53 +0100 Subject: [PATCH 61/88] Fix modexp when partial base/exp/mod is set --- core/vm/contracts_zkevm.go | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/core/vm/contracts_zkevm.go b/core/vm/contracts_zkevm.go index 895e1162820..3e96dd6ad52 100644 --- a/core/vm/contracts_zkevm.go +++ b/core/vm/contracts_zkevm.go @@ -400,18 +400,24 @@ func (c *bigModExp_zkevm) Run(input []byte) ([]byte, error) { mod = big.NewInt(0) ) - if len(input) >= 96 + int(baseLen) { - base = new(big.Int).SetBytes(getData(input, 96, uint64(baseLen))) + // Extract `base`, `exp`, and `mod` with padding as needed + baseData := getData(input, 96, uint64(baseLen)) + if uint64(len(baseData)) < baseLen { + baseData = common.RightPadBytes(baseData, int(baseLen)) } - if len(input) >= 96 + int(baseLen) + int(expLen) { - exp = new(big.Int).SetBytes(getData(input, 96 + uint64(baseLen), uint64(expLen))) - } - if len(input) >= 96 + int(baseLen) + int(expLen) + int(modLen) { - mod = new(big.Int).SetBytes(getData(input, 96 + uint64(baseLen) + uint64(expLen), uint64(modLen))) + base.SetBytes(baseData) + + expData := getData(input, 96+uint64(baseLen), uint64(expLen)) + if uint64(len(expData)) < expLen { + expData = common.RightPadBytes(expData, int(expLen)) } - if len(input) < 96 + int(baseLen) + int(expLen) + int(modLen) { - input = common.LeftPadBytes(input, 96 + int(baseLen) + int(expLen) + int(modLen)) + exp.SetBytes(expData) + + modData := getData(input, 96+uint64(baseLen)+uint64(expLen), uint64(modLen)) + if uint64(len(modData)) < modLen { + modData = common.RightPadBytes(modData, int(modLen)) } + mod.SetBytes(modData) // Retrieve the operands and execute the exponentiation var ( @@ -422,7 +428,7 @@ func (c *bigModExp_zkevm) Run(input []byte) ([]byte, error) { ) if modBitLen == 0 { - return []byte{}, nil + return common.LeftPadBytes([]byte{}, int(modLen)), nil } if baseBitLen == 0 { From fe3e329a80a71401859f29d1e3f01f248e69d942 Mon Sep 17 00:00:00 2001 From: Valentin Staykov <79150443+V-Staykov@users.noreply.github.com> Date: Mon, 18 Nov 2024 16:19:46 +0200 Subject: [PATCH 62/88] refactor: some better error description and handling (#1454) * refactor: some better error description and handling * refactor: stage batches errors tracing * refactor: l1syncer error trace * refactor: l1syncer error traces * fix: move execute block err in the correct place --- core/blockchain_zkevm.go | 65 +++++----- core/rawdb/accessors_indexes_zkevm.go | 12 +- eth/stagedsync/stage_execute_zkevm.go | 163 ++++++++++++-------------- zk/l1infotree/updater.go | 26 ++-- zk/stages/stage_batches.go | 148 +++++++++++------------ zk/stages/stage_l1_info_tree.go | 8 +- zk/stages/stage_l1syncer.go | 43 +++---- zk/syncer/l1_syncer.go | 6 +- 8 files changed, 226 insertions(+), 245 deletions(-) diff --git a/core/blockchain_zkevm.go b/core/blockchain_zkevm.go index 10b61a2972a..0a2aecb7e44 100644 --- a/core/blockchain_zkevm.go +++ b/core/blockchain_zkevm.go @@ -82,7 +82,7 @@ func ExecuteBlockEphemerallyZk( blockContext, _, ger, l1Blockhash, err := PrepareBlockTxExecution(chainConfig, vmConfig, blockHashFunc, nil, engine, chainReader, block, ibs, roHermezDb, blockGasLimit) if err != nil { - return nil, err + return nil, fmt.Errorf("PrepareBlockTxExecution: %w", err) } blockNum := block.NumberU64() 
@@ -93,22 +93,28 @@ func ExecuteBlockEphemerallyZk( ibs.SetTxContext(tx.Hash(), block.Hash(), txIndex) writeTrace := false if vmConfig.Debug && vmConfig.Tracer == nil { - tracer, err := getTracer(txIndex, tx.Hash()) - if err != nil { - return nil, fmt.Errorf("could not obtain tracer: %w", err) + if vmConfig.Tracer, err = getTracer(txIndex, tx.Hash()); err != nil { + return nil, fmt.Errorf("getTracer: %w", err) } - vmConfig.Tracer = tracer writeTrace = true } txHash := tx.Hash() evm, effectiveGasPricePercentage, err := PrepareForTxExecution(chainConfig, vmConfig, blockContext, roHermezDb, ibs, block, &txHash, txIndex) if err != nil { - return nil, err + return nil, fmt.Errorf("PrepareForTxExecution: %w", err) } receipt, execResult, err := ApplyTransaction_zkevm(chainConfig, engine, evm, gp, ibs, state.NewNoopWriter(), header, tx, usedGas, effectiveGasPricePercentage, true) if err != nil { - return nil, err + if !vmConfig.StatelessExec { + return nil, fmt.Errorf("ApplyTransaction_zkevm tx %d from block %d [%v]: %w", txIndex, block.NumberU64(), tx.Hash().Hex(), err) + } + rejectedTxs = append(rejectedTxs, &RejectedTx{txIndex, err.Error()}) + } else { + includedTxs = append(includedTxs, tx) + if !vmConfig.NoReceipts { + receipts = append(receipts, receipt) + } } if writeTrace { if ftracer, ok := vmConfig.Tracer.(vm.FlushableTracer); ok { @@ -120,32 +126,20 @@ func ExecuteBlockEphemerallyZk( localReceipt := CreateReceiptForBlockInfoTree(receipt, chainConfig, blockNum, execResult) if err = ProcessReceiptForBlockExecution(receipt, roHermezDb, chainConfig, blockNum, header, tx); err != nil { - return nil, err + return nil, fmt.Errorf("ProcessReceiptForBlockExecution: %w", err) } - if err != nil { - if !vmConfig.StatelessExec { - return nil, fmt.Errorf("could not apply tx %d from block %d [%v]: %w", txIndex, block.NumberU64(), tx.Hash().Hex(), err) - } - rejectedTxs = append(rejectedTxs, &RejectedTx{txIndex, err.Error()}) - } else { - includedTxs = append(includedTxs, tx) - if !vmConfig.NoReceipts { - receipts = append(receipts, receipt) - } - } if !chainConfig.IsForkID7Etrog(block.NumberU64()) && !chainConfig.IsNormalcy(block.NumberU64()) { if err := ibs.ScalableSetSmtRootHash(roHermezDb); err != nil { - return nil, err + return nil, fmt.Errorf("ScalableSetSmtRootHash: %w", err) } } txSender, ok := tx.GetSender() if !ok { signer := types.MakeSigner(chainConfig, blockNum, block.Time()) - txSender, err = tx.Sender(*signer) - if err != nil { - return nil, err + if txSender, err = tx.Sender(*signer); err != nil { + return nil, fmt.Errorf("tx.Sender: %w", err) } } @@ -159,7 +153,7 @@ func ExecuteBlockEphemerallyZk( var l2InfoRoot *libcommon.Hash if chainConfig.IsForkID7Etrog(blockNum) { - l2InfoRoot, err = blockinfo.BuildBlockInfoTree( + if l2InfoRoot, err = blockinfo.BuildBlockInfoTree( &header.Coinbase, header.Number.Uint64(), header.Time, @@ -169,9 +163,8 @@ func ExecuteBlockEphemerallyZk( *l1Blockhash, *prevBlockRoot, &txInfos, - ) - if err != nil { - return nil, err + ); err != nil { + return nil, fmt.Errorf("BuildBlockInfoTree: %w", err) } } @@ -199,7 +192,7 @@ func ExecuteBlockEphemerallyZk( if !vmConfig.ReadOnly { txs := blockTransactions if _, _, _, err := FinalizeBlockExecution(engine, stateReader, block.Header(), txs, block.Uncles(), stateWriter, chainConfig, ibs, receipts, block.Withdrawals(), chainReader, false, log.New()); err != nil { - return nil, err + return nil, fmt.Errorf("FinalizeBlockExecution: %w", err) } } blockLogs := ibs.Logs() @@ -245,7 +238,7 @@ func PrepareBlockTxExecution( 
if !vmConfig.ReadOnly { if err := InitializeBlockExecution(engine, chainReader, block.Header(), chainConfig, ibs, log.Root()); err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, fmt.Errorf("InitializeBlockExecution: %w", err) } } @@ -259,36 +252,36 @@ func PrepareBlockTxExecution( //[zkevm] - get the last batch number so we can check for empty batches in between it and the new one lastBatchInserted, err := roHermezDb.GetBatchNoByL2Block(blockNum - 1) if err != nil && !errors.Is(err, hermez_db.ErrorNotStored) { - return nil, nil, nil, nil, fmt.Errorf("failed to get last batch inserted: %v", err) + return nil, nil, nil, nil, fmt.Errorf("GetBatchNoByL2Block: %w", err) } // write batches between last block and this if they exist currentBatch, err := roHermezDb.GetBatchNoByL2Block(blockNum) if err != nil && !errors.Is(err, hermez_db.ErrorNotStored) { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, fmt.Errorf("GetBatchNoByL2Block: %w", err) } //[zkevm] get batches between last block and this one // plus this blocks ger gersInBetween, err := roHermezDb.GetBatchGlobalExitRoots(lastBatchInserted, currentBatch) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, fmt.Errorf("GetBatchGlobalExitRoots: %w", err) } blockGer, err := roHermezDb.GetBlockGlobalExitRoot(blockNum) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, fmt.Errorf("GetBlockGlobalExitRoot: %w", err) } blockL1BlockHash, err := roHermezDb.GetBlockL1BlockHash(blockNum) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, fmt.Errorf("GetBlockL1BlockHash: %w", err) } blockTime := block.Time() prevBlockRoot := prevBlockheader.Root l1InfoTreeIndexReused, err := roHermezDb.GetReusedL1InfoTreeIndex(blockNum) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, fmt.Errorf("GetReusedL1InfoTreeIndex: %w", err) } ibs.SyncerPreExecuteStateSet(chainConfig, blockNum, blockTime, &prevBlockRoot, &blockGer, &blockL1BlockHash, gersInBetween, l1InfoTreeIndexReused) /////////////////////////////////////////// @@ -321,7 +314,7 @@ func ProcessReceiptForBlockExecution(receipt *types.Receipt, roHermezDb state.Re // receipt root holds the intermediate stateroot after the tx intermediateState, err := roHermezDb.GetIntermediateTxStateRoot(blockNum, tx.Hash()) if err != nil { - return err + return fmt.Errorf("GetIntermediateTxStateRoot: %w", err) } receipt.PostState = intermediateState.Bytes() } else { diff --git a/core/rawdb/accessors_indexes_zkevm.go b/core/rawdb/accessors_indexes_zkevm.go index d4a31721196..282e05906f4 100644 --- a/core/rawdb/accessors_indexes_zkevm.go +++ b/core/rawdb/accessors_indexes_zkevm.go @@ -30,23 +30,23 @@ func WriteTxLookupEntries_zkEvm(db kv.Putter, block *types.Block) error { for _, tx := range block.Transactions() { data := block.Number().Bytes() if err := db.Put(kv.TxLookup, tx.Hash().Bytes(), data); err != nil { - return fmt.Errorf("failed to store transaction lookup entry: %W", err) + return fmt.Errorf("db.Put %s: %W", kv.TxLookup, err) } } return nil } -func TruncateTxLookupEntries_zkEvm(db kv.RwTx, fromBlockNum, toBlockNum uint64) error { +func TruncateTxLookupEntries_zkEvm(db kv.RwTx, fromBlockNum, toBlockNum uint64) (err error) { + var block *types.Block for i := fromBlockNum; i <= toBlockNum; i++ { - block, err := ReadBlockByNumber(db, i) - if err != nil { - return err + if block, err = ReadBlockByNumber(db, i); err != nil { + return fmt.Errorf("ReadBlockByNumber %d: %W", i, err) } 
for _, tx := range block.Transactions() { if err := db.Delete(kv.TxLookup, tx.Hash().Bytes()); err != nil { - return fmt.Errorf("failed to store transaction lookup entry: %W", err) + return fmt.Errorf("db.Delete %s: %W", kv.TxLookup, err) } } } diff --git a/eth/stagedsync/stage_execute_zkevm.go b/eth/stagedsync/stage_execute_zkevm.go index 6debcc4ed88..25fc5d649d8 100644 --- a/eth/stagedsync/stage_execute_zkevm.go +++ b/eth/stagedsync/stage_execute_zkevm.go @@ -39,7 +39,7 @@ import ( func SpawnExecuteBlocksStageZk(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool) (err error) { if cfg.historyV3 { if err = ExecBlockV3(s, u, wrap.TxContainer{Tx: tx}, toBlock, ctx, cfg, initialCycle, log.New()); err != nil { - return err + return fmt.Errorf("ExecBlockV3: %w", err) } return nil } @@ -59,16 +59,15 @@ func SpawnExecuteBlocksStageZk(s *StageState, u Unwinder, tx kv.RwTx, toBlock ui quit := ctx.Done() useExternalTx := tx != nil if !useExternalTx { - tx, err = cfg.db.BeginRw(context.Background()) - if err != nil { - return err + if tx, err = cfg.db.BeginRw(context.Background()); err != nil { + return fmt.Errorf("beginRw: %w", err) } defer tx.Rollback() } nextStageProgress, err := stages.GetStageProgress(tx, stages.HashState) if err != nil { - return err + return fmt.Errorf("getStageProgress: %w", err) } nextStagesExpectData := nextStageProgress > 0 // Incremental move of next stages depend on fully written ChangeSets, Receipts, CallTraceSet @@ -87,19 +86,19 @@ func SpawnExecuteBlocksStageZk(s *StageState, u Unwinder, tx kv.RwTx, toBlock ui }() if err := utils.UpdateZkEVMBlockCfg(cfg.chainConfig, hermezDb, s.LogPrefix()); err != nil { - return err + return fmt.Errorf("UpdateZkEVMBlockCfg: %w", err) } eridb := erigon_db.NewErigonDb(tx) prevBlockRoot, prevBlockHash, err := getBlockHashValues(cfg, ctx, tx, s.BlockNumber) if err != nil { - return err + return fmt.Errorf("getBlockHashValues: %w", err) } to, total, err := getExecRange(cfg, tx, s.BlockNumber, toBlock, s.LogPrefix()) if err != nil { - return err + return fmt.Errorf("getExecRange: %w", err) } log.Info(fmt.Sprintf("[%s] Blocks execution", s.LogPrefix()), "from", s.BlockNumber, "to", to) @@ -126,7 +125,7 @@ Loop: //fetch values pre execute datastreamBlockHash, block, senders, err := getPreexecuteValues(cfg, ctx, tx, blockNum, prevBlockHash) if err != nil { - stoppedErr = err + stoppedErr = fmt.Errorf("getPreexecuteValues: %w", err) break } @@ -143,7 +142,7 @@ Loop: cfg.hd.ReportBadHeaderPoS(datastreamBlockHash, block.ParentHash()) } if cfg.badBlockHalt { - return err + return fmt.Errorf("executeBlockZk: %w", err) } } u.UnwindTo(blockNum-1, UnwindReason{Block: &datastreamBlockHash}) @@ -152,7 +151,7 @@ Loop: if execRs.BlockInfoTree != nil { if err = hermezDb.WriteBlockInfoRoot(blockNum, *execRs.BlockInfoTree); err != nil { - return err + return fmt.Errorf("WriteBlockInfoRoot: %w", err) } } @@ -174,20 +173,19 @@ Loop: log.Info("Committed State", "gas reached", currentStateGas, "gasTarget", gasState) currentStateGas = 0 if err = s.Update(batch, stageProgress); err != nil { - return err + return fmt.Errorf("s.Update: %w", err) } if err = batch.Flush(ctx, tx); err != nil { - return err + return fmt.Errorf("batch.Flush: %w", err) } if !useExternalTx { if err = tx.Commit(); err != nil { - return err + return fmt.Errorf("tx.Commit: %w", err) } tx, err = cfg.db.BeginRw(context.Background()) if err != nil { - return err + return fmt.Errorf("cfg.db.BeginRw: %w", err) } - // TODO: This 
creates stacked up deferrals defer tx.Rollback() eridb = erigon_db.NewErigonDb(tx) logger.SetTx(tx) @@ -198,41 +196,40 @@ Loop: //commit values post execute if err := postExecuteCommitValues(s.LogPrefix(), cfg, tx, eridb, batch, datastreamBlockHash, block, senders); err != nil { - return err + return fmt.Errorf("postExecuteCommitValues: %w", err) } } if err = s.Update(batch, stageProgress); err != nil { - return err + return fmt.Errorf("s.Update: %w", err) } // we need to artificially update the headers stage here as well to ensure that notifications // can fire at the end of the stage loop and inform RPC subscriptions of new blocks for example if err = stages.SaveStageProgress(tx, stages.Headers, stageProgress); err != nil { - return err + return fmt.Errorf("SaveStageProgress: %w", err) } if err = batch.Flush(ctx, tx); err != nil { - return fmt.Errorf("batch commit: %w", err) + return fmt.Errorf("batch.Flush: %w", err) } - _, err = rawdb.IncrementStateVersionByBlockNumberIfNeeded(tx, stageProgress) // stageProgress is latest processsed block number - if err != nil { - return fmt.Errorf("writing plain state version: %w", err) + // stageProgress is latest processsed block number + if _, err = rawdb.IncrementStateVersionByBlockNumberIfNeeded(tx, stageProgress); err != nil { + return fmt.Errorf("IncrementStateVersionByBlockNumberIfNeeded: %w", err) } if !useExternalTx { log.Info(fmt.Sprintf("[%s] Commiting DB transaction...", s.LogPrefix()), "block", stageProgress) if err = tx.Commit(); err != nil { - return err + return fmt.Errorf("tx.Commit: %w", err) } } log.Info(fmt.Sprintf("[%s] Completed on", s.LogPrefix()), "block", stageProgress) - err = stoppedErr - return err + return stoppedErr } // returns the block's blockHash and header stateroot @@ -254,7 +251,7 @@ func getExecRange(cfg ExecuteBlockCfg, tx kv.RwTx, stageProgress, toBlock uint64 if cfg.zk.DebugLimit > 0 { prevStageProgress, err := stages.GetStageProgress(tx, stages.Senders) if err != nil { - return 0, 0, err + return 0, 0, fmt.Errorf("getStageProgress: %w", err) } to := prevStageProgress if cfg.zk.DebugLimit < to { @@ -266,11 +263,11 @@ func getExecRange(cfg ExecuteBlockCfg, tx kv.RwTx, stageProgress, toBlock uint64 shouldShortCircuit, noProgressTo, err := utils.ShouldShortCircuitExecution(tx, logPrefix, cfg.zk.L2ShortCircuitToVerifiedBatch) if err != nil { - return 0, 0, err + return 0, 0, fmt.Errorf("ShouldShortCircuitExecution: %w", err) } prevStageProgress, err := stages.GetStageProgress(tx, stages.Senders) if err != nil { - return 0, 0, err + return 0, 0, fmt.Errorf("getStageProgress: %w", err) } // skip if no progress @@ -296,12 +293,12 @@ func getExecRange(cfg ExecuteBlockCfg, tx kv.RwTx, stageProgress, toBlock uint64 func getPreexecuteValues(cfg ExecuteBlockCfg, ctx context.Context, tx kv.RwTx, blockNum uint64, prevBlockHash common.Hash) (common.Hash, *types.Block, []common.Address, error) { preExecuteHeaderHash, err := rawdb.ReadCanonicalHash(tx, blockNum) if err != nil { - return common.Hash{}, nil, nil, err + return common.Hash{}, nil, nil, fmt.Errorf("ReadCanonicalHash: %w", err) } block, senders, err := cfg.blockReader.BlockWithSenders(ctx, tx, preExecuteHeaderHash, blockNum) if err != nil { - return common.Hash{}, nil, nil, err + return common.Hash{}, nil, nil, fmt.Errorf("BlockWithSenders: %w", err) } if block == nil { @@ -313,7 +310,7 @@ func getPreexecuteValues(cfg ExecuteBlockCfg, ctx context.Context, tx kv.RwTx, b if cfg.chainConfig.IsLondon(blockNum) { parentHeader, err := cfg.blockReader.Header(ctx, tx, 
prevBlockHash, blockNum-1) if err != nil { - return common.Hash{}, nil, nil, err + return common.Hash{}, nil, nil, fmt.Errorf("cfg.blockReader.Header: %w", err) } block.HeaderNoCopy().BaseFee = misc.CalcBaseFeeZk(cfg.chainConfig, parentHeader) } @@ -341,29 +338,29 @@ func postExecuteCommitValues( log.Warn(fmt.Sprintf("[%s] Blockhash mismatch", logPrefix), "blockNumber", blockNum, "datastreamBlockHash", datastreamBlockHash, "calculatedBlockHash", blockHash) } if err := rawdbZk.DeleteSenders(tx, datastreamBlockHash, blockNum); err != nil { - return fmt.Errorf("failed to delete senders: %v", err) + return fmt.Errorf("DeleteSenders: %w", err) } if err := rawdbZk.DeleteHeader(tx, datastreamBlockHash, blockNum); err != nil { - return fmt.Errorf("failed to delete header: %v", err) + return fmt.Errorf("DeleteHeader: %w", err) } bodyForStorage, err := rawdb.ReadBodyForStorageByKey(tx, dbutils.BlockBodyKey(blockNum, datastreamBlockHash)) if err != nil { - return err + return fmt.Errorf("ReadBodyForStorageByKey: %w", err) } if err := rawdb.DeleteBodyAndTransactions(tx, blockNum, datastreamBlockHash); err != nil { - return err + return fmt.Errorf("DeleteBodyAndTransactions: %w", err) } if err := rawdb.WriteBodyAndTransactions(tx, blockHash, blockNum, block.Transactions(), bodyForStorage); err != nil { - return err + return fmt.Errorf("WriteBodyAndTransactions: %w", err) } // [zkevm] senders were saved in stage_senders for headerHashes based on incomplete headers // in stage execute we complete the headers and senders should be moved to the correct headerHash // also we should delete other data based on the old hash, since it is unaccessable now if err := rawdb.WriteSenders(tx, blockHash, blockNum, senders); err != nil { - return fmt.Errorf("failed to write senders: %v", err) + return fmt.Errorf("failed to write senders: %w", err) } } @@ -388,13 +385,13 @@ func postExecuteCommitValues( later. 
*/ if err := rawdb.WriteHeader_zkEvm(tx, header); err != nil { - return fmt.Errorf("failed to write header: %v", err) + return fmt.Errorf("WriteHeader_zkEvm: %w", err) } if err := rawdb.WriteHeadHeaderHash(tx, blockHash); err != nil { - return err + return fmt.Errorf("WriteHeadHeaderHash: %w", err) } if err := rawdb.WriteCanonicalHash(tx, blockHash, blockNum); err != nil { - return fmt.Errorf("failed to write header: %v", err) + return fmt.Errorf("WriteCanonicalHash: %w", err) } // if err := eridb.WriteBody(block.Number(), blockHash, block.Transactions()); err != nil { // return fmt.Errorf("failed to write body: %v", err) @@ -402,7 +399,7 @@ func postExecuteCommitValues( // write the new block lookup entries if err := rawdb.WriteTxLookupEntries_zkEvm(tx, block); err != nil { - return fmt.Errorf("failed to write tx lookup entries: %v", err) + return fmt.Errorf("WriteTxLookupEntries_zkEvm: %w", err) } return nil @@ -421,12 +418,12 @@ func executeBlockZk( initialCycle bool, stateStream bool, roHermezDb state.ReadOnlyHermezDb, -) (*core.EphemeralExecResultZk, error) { +) (execRs *core.EphemeralExecResultZk, err error) { blockNum := block.NumberU64() stateReader, stateWriter, err := newStateReaderWriter(batch, tx, block, writeChangesets, cfg.accumulator, cfg.blockReader, stateStream) if err != nil { - return nil, err + return nil, fmt.Errorf("newStateReaderWriter: %w", err) } // where the magic happens @@ -445,20 +442,19 @@ func executeBlockZk( vmConfig.Tracer = callTracer getHashFn := core.GetHashFn(block.Header(), getHeader) - execRs, err := core.ExecuteBlockEphemerallyZk(cfg.chainConfig, &vmConfig, getHashFn, cfg.engine, block, stateReader, stateWriter, ChainReaderImpl{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, getTracer, roHermezDb, prevBlockRoot) - if err != nil { - return nil, err + if execRs, err = core.ExecuteBlockEphemerallyZk(cfg.chainConfig, &vmConfig, getHashFn, cfg.engine, block, stateReader, stateWriter, ChainReaderImpl{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, getTracer, roHermezDb, prevBlockRoot); err != nil { + return nil, fmt.Errorf("ExecuteBlockEphemerallyZk: %w", err) } if writeReceipts { if err := rawdb.AppendReceipts(tx, blockNum, execRs.Receipts); err != nil { - return nil, err + return nil, fmt.Errorf("AppendReceipts: %w", err) } stateSyncReceipt := execRs.StateSyncReceipt if stateSyncReceipt != nil && stateSyncReceipt.Status == types.ReceiptStatusSuccessful { if err := rawdb.WriteBorReceipt(tx, block.NumberU64(), stateSyncReceipt); err != nil { - return nil, err + return nil, fmt.Errorf("WriteBorReceipt: %w", err) } } } @@ -470,7 +466,7 @@ func executeBlockZk( } if writeCallTraces { if err := callTracer.WriteToDb(tx, block, *cfg.vmConfig); err != nil { - return nil, err + return nil, fmt.Errorf("WriteToDb: %w", err) } } return execRs, nil @@ -482,9 +478,8 @@ func UnwindExecutionStageZk(u *UnwindState, s *StageState, tx kv.RwTx, ctx conte } useExternalTx := tx != nil if !useExternalTx { - tx, err = cfg.db.BeginRw(context.Background()) - if err != nil { - return err + if tx, err = cfg.db.BeginRw(context.Background()); err != nil { + return fmt.Errorf("beginRw: %w", err) } defer tx.Rollback() } @@ -492,24 +487,24 @@ func UnwindExecutionStageZk(u *UnwindState, s *StageState, tx kv.RwTx, ctx conte logger := log.New() if err = unwindExecutionStage(u, s, wrap.TxContainer{Tx: tx}, ctx, cfg, initialCycle, logger); err != nil { - return err + return fmt.Errorf("unwindExecutionStage: %w", err) } if err = UnwindExecutionStageDbWrites(ctx, 
u, s, tx); err != nil { - return err + return fmt.Errorf("UnwindExecutionStageDbWrites: %w", err) } // update the headers stage as we mark progress there as part of execution if err = stages.SaveStageProgress(tx, stages.Headers, u.UnwindPoint); err != nil { - return err + return fmt.Errorf("SaveStageProgress: %w", err) } if err = u.Done(tx); err != nil { - return err + return fmt.Errorf("u.Done: %w", err) } if !useExternalTx { if err = tx.Commit(); err != nil { - return err + return fmt.Errorf("tx.Commit: %w", err) } } return nil @@ -522,9 +517,8 @@ func UnwindExecutionStageErigon(u *UnwindState, s *StageState, tx kv.RwTx, ctx c func PruneExecutionStageZk(s *PruneState, tx kv.RwTx, cfg ExecuteBlockCfg, ctx context.Context, initialCycle bool) (err error) { useExternalTx := tx != nil if !useExternalTx { - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err + if tx, err = cfg.db.BeginRw(ctx); err != nil { + return fmt.Errorf("beginRw: %w", err) } defer tx.Rollback() } @@ -536,48 +530,43 @@ func PruneExecutionStageZk(s *PruneState, tx kv.RwTx, cfg ExecuteBlockCfg, ctx c cfg.agg.SetTx(tx) if initialCycle { if err = cfg.agg.Prune(ctx, config3.HistoryV3AggregationStep/10); err != nil { // prune part of retired data, before commit - return err + return fmt.Errorf("cfg.agg.prune: %w", err) } } else { if err = cfg.agg.PruneWithTiemout(ctx, 1*time.Second); err != nil { // prune part of retired data, before commit - return err + return fmt.Errorf("cfg.agg.PruneWithTiemout: %w", err) } } } else { if cfg.prune.History.Enabled() { if err = rawdb.PruneTableDupSort(tx, kv.AccountChangeSet, s.LogPrefix(), cfg.prune.History.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil { - return err + return fmt.Errorf("PruneTableDupSort: %w", err) } if err = rawdb.PruneTableDupSort(tx, kv.StorageChangeSet, s.LogPrefix(), cfg.prune.History.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil { - return err + return fmt.Errorf("PruneTableDupSort: %w", err) } } if cfg.prune.Receipts.Enabled() { - if err = rawdb.PruneTable(tx, kv.Receipts, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxInt32); err != nil { - return err - } - if err = rawdb.PruneTable(tx, kv.BorReceipts, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxUint32); err != nil { - return err - } - // LogIndex.Prune will read everything what not pruned here - if err = rawdb.PruneTable(tx, kv.Log, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxInt32); err != nil { - return err + for _, table := range []string{kv.Receipts, kv.BorReceipts, kv.Log} { + if err = rawdb.PruneTable(tx, table, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxInt32); err != nil { + return fmt.Errorf("rawdb.PruneTable %s: %w", table, err) + } } } if cfg.prune.CallTraces.Enabled() { if err = rawdb.PruneTableDupSort(tx, kv.CallTraceSet, s.LogPrefix(), cfg.prune.CallTraces.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil { - return err + return fmt.Errorf("PruneTableDupSort: %w", err) } } } if err = s.Done(tx); err != nil { - return err + return fmt.Errorf("s.Done: %w", err) } if !useExternalTx { if err = tx.Commit(); err != nil { - return err + return fmt.Errorf("tx.Commit: %w", err) } } return nil @@ -588,9 +577,11 @@ func UnwindExecutionStageDbWrites(ctx context.Context, u *UnwindState, s *StageS // TODO: check for other missing value like - WriteHeader_zkEvm, WriteHeadHeaderHash, WriteCanonicalHash, WriteBody, WriteSenders, WriteTxLookupEntries_zkEvm hash, err := rawdb.ReadCanonicalHash(tx, u.UnwindPoint) if err != nil { 
- return err + return fmt.Errorf("ReadCanonicalHash: %w", err) + } + if err := rawdb.WriteHeadHeaderHash(tx, hash); err != nil { + return fmt.Errorf("WriteHeadHeaderHash: %w", err) } - rawdb.WriteHeadHeaderHash(tx, hash) /* unwind EffectiveGasPricePercentage here although it is written in stage batches (RPC) or stage execute (Sequencer) @@ -601,34 +592,34 @@ func UnwindExecutionStageDbWrites(ctx context.Context, u *UnwindState, s *StageS transactions, err := eriDb.GetBodyTransactions(u.UnwindPoint+1, s.BlockNumber) if err != nil { - return fmt.Errorf("get body transactions error: %v", err) + return fmt.Errorf("GetBodyTransactions: %w", err) } transactionHashes := make([]common.Hash, 0, len(*transactions)) for _, tx := range *transactions { transactionHashes = append(transactionHashes, tx.Hash()) } if err := hermezDb.DeleteEffectiveGasPricePercentages(&transactionHashes); err != nil { - return fmt.Errorf("delete effective gas price percentages error: %v", err) + return fmt.Errorf("DeleteEffectiveGasPricePercentages: %w", err) } if err = rawdbZk.TruncateSenders(tx, u.UnwindPoint+1, s.BlockNumber); err != nil { - return fmt.Errorf("delete senders: %w", err) + return fmt.Errorf("TruncateSenders: %w", err) } if err = rawdb.TruncateTxLookupEntries_zkEvm(tx, u.UnwindPoint+1, s.BlockNumber); err != nil { return fmt.Errorf("delete tx lookup entires: %w", err) } if err = rawdb.TruncateBlocks(ctx, tx, u.UnwindPoint+1); err != nil { - return fmt.Errorf("delete blocks: %w", err) + return fmt.Errorf("TruncateBlocks: %w", err) } if err = rawdb.TruncateCanonicalHash(tx, u.UnwindPoint+1, true); err != nil { - return fmt.Errorf("delete cannonical hash with headers: %w", err) + return fmt.Errorf("TruncateCanonicalHash: %w", err) } if err = rawdb.TruncateStateVersion(tx, u.UnwindPoint+1); err != nil { - return err + return fmt.Errorf("TruncateStateVersion: %w", err) } if err = hermezDb.DeleteBlockInfoRoots(u.UnwindPoint+1, s.BlockNumber); err != nil { - return fmt.Errorf("delete block info roots: %w", err) + return fmt.Errorf("DeleteBlockInfoRoots: %w", err) } return nil diff --git a/zk/l1infotree/updater.go b/zk/l1infotree/updater.go index 28a8e8176ff..d1b5ac4362c 100644 --- a/zk/l1infotree/updater.go +++ b/zk/l1infotree/updater.go @@ -140,7 +140,7 @@ LOOP: tree, err := InitialiseL1InfoTree(hermezDb) if err != nil { - return nil, err + return nil, fmt.Errorf("InitialiseL1InfoTree: %w", err) } // process the logs in chunks @@ -153,7 +153,7 @@ LOOP: headersMap, err := u.syncer.L1QueryHeaders(chunk) if err != nil { - return nil, err + return nil, fmt.Errorf("L1QueryHeaders: %w", err) } for _, l := range chunk { @@ -163,13 +163,13 @@ LOOP: if header == nil { header, err = u.syncer.GetHeader(l.BlockNumber) if err != nil { - return nil, err + return nil, fmt.Errorf("GetHeader: %w", err) } } tmpUpdate, err := createL1InfoTreeUpdate(l, header) if err != nil { - return nil, err + return nil, fmt.Errorf("createL1InfoTreeUpdate: %w", err) } leafHash := HashLeafData(tmpUpdate.GER, tmpUpdate.ParentHash, tmpUpdate.Timestamp) @@ -185,7 +185,7 @@ LOOP: newRoot, err := tree.AddLeaf(uint32(u.latestUpdate.Index), leafHash) if err != nil { - return nil, err + return nil, fmt.Errorf("tree.AddLeaf: %w", err) } log.Debug("New L1 Index", "index", u.latestUpdate.Index, @@ -197,13 +197,13 @@ LOOP: ) if err = handleL1InfoTreeUpdate(hermezDb, u.latestUpdate); err != nil { - return nil, err + return nil, fmt.Errorf("handleL1InfoTreeUpdate: %w", err) } if err = hermezDb.WriteL1InfoTreeLeaf(u.latestUpdate.Index, leafHash); err != nil { -
return nil, err + return nil, fmt.Errorf("WriteL1InfoTreeLeaf: %w", err) } if err = hermezDb.WriteL1InfoTreeRoot(common.BytesToHash(newRoot[:]), u.latestUpdate.Index); err != nil { - return nil, err + return nil, fmt.Errorf("WriteL1InfoTreeRoot: %w", err) } processed++ @@ -218,7 +218,7 @@ LOOP: u.progress = allLogs[len(allLogs)-1].BlockNumber + 1 } if err = stages.SaveStageProgress(tx, stages.L1InfoTree, u.progress); err != nil { - return nil, err + return nil, fmt.Errorf("SaveStageProgress: %w", err) } return allLogs, nil @@ -242,7 +242,7 @@ func chunkLogs(slice []types.Log, chunkSize int) [][]types.Log { func InitialiseL1InfoTree(hermezDb *hermez_db.HermezDb) (*L1InfoTree, error) { leaves, err := hermezDb.GetAllL1InfoTreeLeaves() if err != nil { - return nil, err + return nil, fmt.Errorf("GetAllL1InfoTreeLeaves: %w", err) } allLeaves := make([][32]byte, len(leaves)) @@ -252,7 +252,7 @@ func InitialiseL1InfoTree(hermezDb *hermez_db.HermezDb) (*L1InfoTree, error) { tree, err := NewL1InfoTree(32, allLeaves) if err != nil { - return nil, err + return nil, fmt.Errorf("NewL1InfoTree: %w", err) } return tree, nil @@ -289,10 +289,10 @@ func handleL1InfoTreeUpdate( ) error { var err error if err = hermezDb.WriteL1InfoTreeUpdate(update); err != nil { - return err + return fmt.Errorf("WriteL1InfoTreeUpdate: %w", err) } if err = hermezDb.WriteL1InfoTreeUpdateToGer(update); err != nil { - return err + return fmt.Errorf("WriteL1InfoTreeUpdateToGer: %w", err) } return nil } diff --git a/zk/stages/stage_batches.go b/zk/stages/stage_batches.go index e55ad91f707..31535c38d10 100644 --- a/zk/stages/stage_batches.go +++ b/zk/stages/stage_batches.go @@ -137,7 +137,7 @@ func SpawnStageBatches( var err error tx, err = cfg.db.BeginRw(ctx) if err != nil { - return fmt.Errorf("failed to open tx, %w", err) + return fmt.Errorf("cfg.db.BeginRw, %w", err) } defer tx.Rollback() } @@ -147,7 +147,7 @@ func SpawnStageBatches( stageProgressBlockNo, err := stages.GetStageProgress(tx, stages.Batches) if err != nil { - return fmt.Errorf("save stage progress error: %v", err) + return fmt.Errorf("GetStageProgress: %w", err) } //// BISECT //// @@ -167,20 +167,20 @@ func SpawnStageBatches( // get batch for batches progress stageProgressBatchNo, err := hermezDb.GetBatchNoByL2Block(stageProgressBlockNo) if err != nil && !errors.Is(err, hermez_db.ErrorNotStored) { - return fmt.Errorf("get batch no by l2 block error: %v", err) + return fmt.Errorf("GetBatchNoByL2Block: %w", err) } startSyncTime := time.Now() latestForkId, err := stages.GetStageProgress(tx, stages.ForkId) if err != nil { - return err + return fmt.Errorf("GetStageProgress: %w", err) } dsQueryClient, stopDsClient, err := newStreamClient(ctx, cfg, latestForkId) if err != nil { log.Warn(fmt.Sprintf("[%s] %s", logPrefix, err)) - return err + return fmt.Errorf("newStreamClient: %w", err) } defer stopDsClient() @@ -192,8 +192,7 @@ func SpawnStageBatches( return nil default: } - highestDSL2Block, err = dsQueryClient.GetLatestL2Block() - if err != nil { + if highestDSL2Block, err = dsQueryClient.GetLatestL2Block(); err != nil { // if we return error, stage will replay and block all other stages log.Warn(fmt.Sprintf("[%s] Failed to get latest l2 block from datastream: %v", logPrefix, err)) return nil @@ -219,7 +218,7 @@ func SpawnStageBatches( if highestDSL2Block.L2BlockNumber < stageProgressBlockNo { log.Info(fmt.Sprintf("[%s] Datastream behind, unwinding", logPrefix)) if _, err := unwindFn(highestDSL2Block.L2BlockNumber); err != nil { - return err + return 
fmt.Errorf("unwindFn: %w", err) } return nil } @@ -233,12 +232,12 @@ func SpawnStageBatches( _, highestL1InfoTreeIndex, err := hermezDb.GetLatestBlockL1InfoTreeIndexProgress() if err != nil { - return fmt.Errorf("failed to get highest used l1 info index, %w", err) + return fmt.Errorf("GetLatestBlockL1InfoTreeIndexProgress: %w", err) } stageExecProgress, err := stages.GetStageProgress(tx, stages.Execution) if err != nil { - return fmt.Errorf("failed to get stage exec progress, %w", err) + return fmt.Errorf("GetStageProgress: %w", err) } // just exit the stage early if there is more execution work to do @@ -251,12 +250,12 @@ func SpawnStageBatches( lastProcessedBlockHash, err := eriDb.ReadCanonicalHash(stageProgressBlockNo) if err != nil { - return fmt.Errorf("failed to read canonical hash for block %d: %w", stageProgressBlockNo, err) + return fmt.Errorf("ReadCanonicalHash %d: %w", stageProgressBlockNo, err) } batchProcessor, err := NewBatchesProcessor(ctx, logPrefix, tx, hermezDb, eriDb, cfg.zkCfg.SyncLimit, cfg.zkCfg.DebugLimit, cfg.zkCfg.DebugStepAfter, cfg.zkCfg.DebugStep, stageProgressBlockNo, stageProgressBatchNo, lastProcessedBlockHash, dsQueryClient, progressChan, cfg.chainConfig, cfg.miningConfig, unwindFn) if err != nil { - return err + return fmt.Errorf("NewBatchesProcessor: %w", err) } // start routine to download blocks and push them in a channel @@ -282,7 +281,7 @@ func SpawnStageBatches( if err == ErrorTriggeredUnwind { return nil } - return err + return fmt.Errorf("ProcessEntry: %w", err) } dsClientProgress.Store(batchProcessor.LastBlockHeight()) case <-ctx.Done(): @@ -301,10 +300,10 @@ func SpawnStageBatches( // commit progress from time to time if batchProcessor.TotalBlocksWritten() != prevAmountBlocksWritten && batchProcessor.TotalBlocksWritten()%STAGE_PROGRESS_SAVE == 0 { if err = saveStageProgress(tx, logPrefix, batchProcessor.HighestHashableL2BlockNo(), batchProcessor.HighestSeenBatchNumber(), batchProcessor.LastBlockHeight(), batchProcessor.LastForkId()); err != nil { - return err + return fmt.Errorf("saveStageProgress: %w", err) } if err := hermezDb.WriteBlockL1InfoTreeIndexProgress(batchProcessor.LastBlockHeight(), highestL1InfoTreeIndex); err != nil { - return err + return fmt.Errorf("WriteBlockL1InfoTreeIndexProgress: %w", err) } if freshTx { @@ -331,10 +330,10 @@ func SpawnStageBatches( } if err = saveStageProgress(tx, logPrefix, batchProcessor.HighestHashableL2BlockNo(), batchProcessor.HighestSeenBatchNumber(), batchProcessor.LastBlockHeight(), batchProcessor.LastForkId()); err != nil { - return err + return fmt.Errorf("saveStageProgress: %w", err) } if err := hermezDb.WriteBlockL1InfoTreeIndexProgress(batchProcessor.LastBlockHeight(), highestL1InfoTreeIndex); err != nil { - return err + return fmt.Errorf("WriteBlockL1InfoTreeIndexProgress: %w", err) } // stop printing blocks written progress routine @@ -343,7 +342,7 @@ func SpawnStageBatches( if freshTx { if err := tx.Commit(); err != nil { - return fmt.Errorf("failed to commit tx, %w", err) + return fmt.Errorf("tx.Commit: %w", err) } } @@ -354,27 +353,27 @@ func saveStageProgress(tx kv.RwTx, logPrefix string, highestHashableL2BlockNo, h var err error // store the highest hashable block number if err := stages.SaveStageProgress(tx, stages.HighestHashableL2BlockNo, highestHashableL2BlockNo); err != nil { - return fmt.Errorf("save stage progress error: %v", err) + return fmt.Errorf("SaveStageProgress: %w", err) } if err = stages.SaveStageProgress(tx, stages.HighestSeenBatchNumber, highestSeenBatchNo); err != nil { 
- return fmt.Errorf("save stage progress error: %v", err) + return fmt.Errorf("SaveStageProgress: %w", err) } // store the highest seen forkid if err := stages.SaveStageProgress(tx, stages.ForkId, lastForkId); err != nil { - return fmt.Errorf("save stage progress error: %v", err) + return fmt.Errorf("SaveStageProgress: %w", err) } // save the latest verified batch number as well just in case this node is upgraded // to a sequencer in the future if err := stages.SaveStageProgress(tx, stages.SequenceExecutorVerify, highestSeenBatchNo); err != nil { - return fmt.Errorf("save stage progress error: %w", err) + return fmt.Errorf("SaveStageProgress: %w", err) } log.Info(fmt.Sprintf("[%s] Saving stage progress", logPrefix), "lastBlockHeight", lastBlockHeight) if err := stages.SaveStageProgress(tx, stages.Batches, lastBlockHeight); err != nil { - return fmt.Errorf("save stage progress error: %v", err) + return fmt.Errorf("SaveStageProgress: %w", err) } return nil @@ -385,9 +384,8 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c useExternalTx := tx != nil if !useExternalTx { - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err + if tx, err = cfg.db.BeginRw(ctx); err != nil { + return fmt.Errorf("cfg.db.BeginRw: %w", err) } defer tx.Rollback() } @@ -404,20 +402,20 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c ////////////////////////////////// highestVerifiedBatch, err := stages.GetStageProgress(tx, stages.L1VerificationsBatchNo) if err != nil { - return errors.New("could not retrieve l1 verifications batch no progress") + return fmt.Errorf("GetStageProgress: %w", err) } fromBatchPrev, err := hermezDb.GetBatchNoByL2Block(fromBlock - 1) if err != nil && !errors.Is(err, hermez_db.ErrorNotStored) { - return fmt.Errorf("get batch no by l2 block error: %v", err) + return fmt.Errorf("GetBatchNoByL2Block: %w", err) } fromBatch, err := hermezDb.GetBatchNoByL2Block(fromBlock) if err != nil && !errors.Is(err, hermez_db.ErrorNotStored) { - return fmt.Errorf("get fromBatch no by l2 block error: %v", err) + return fmt.Errorf("GetBatchNoByL2Block: %w", err) } toBatch, err := hermezDb.GetBatchNoByL2Block(toBlock) if err != nil && !errors.Is(err, hermez_db.ErrorNotStored) { - return fmt.Errorf("get toBatch no by l2 block error: %v", err) + return fmt.Errorf("GetBatchNoByL2Block: %w", err) } // if previous block has different batch, delete the "fromBlock" one @@ -429,16 +427,16 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c if fromBatch <= toBatch { if err := hermezDb.DeleteForkIds(fromBatch, toBatch); err != nil { - return fmt.Errorf("delete fork ids error: %v", err) + return fmt.Errorf("DeleteForkIds: %w", err) } if err := hermezDb.DeleteBatchGlobalExitRoots(fromBatch); err != nil { - return fmt.Errorf("delete batch global exit roots error: %v", err) + return fmt.Errorf("DeleteBatchGlobalExitRoots: %w", err) } } if highestVerifiedBatch >= fromBatch { if err := rawdb.DeleteForkchoiceFinalized(tx); err != nil { - return fmt.Errorf("delete forkchoice finalized error: %v", err) + return fmt.Errorf("DeleteForkchoiceFinalized: %w", err) } } ///////////////////////////////////////// @@ -448,19 +446,19 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c // cannot unwind EffectiveGasPricePercentage here although it is written in stage batches, because we have already deleted the transactions if err := hermezDb.DeleteStateRoots(fromBlock, toBlock); err != nil { - return 
fmt.Errorf("delete state roots error: %v", err) + return fmt.Errorf("DeleteStateRoots: %w", err) } if err := hermezDb.DeleteIntermediateTxStateRoots(fromBlock, toBlock); err != nil { - return fmt.Errorf("delete intermediate tx state roots error: %v", err) + return fmt.Errorf("DeleteIntermediateTxStateRoots: %w", err) } if err = rawdb.TruncateBlocks(ctx, tx, fromBlock); err != nil { - return fmt.Errorf("delete blocks: %w", err) + return fmt.Errorf("TruncateBlocks: %w", err) } if err := hermezDb.DeleteBlockBatches(fromBlock, toBlock); err != nil { - return fmt.Errorf("delete block batches error: %v", err) + return fmt.Errorf("DeleteBlockBatches: %w", err) } if err := hermezDb.DeleteForkIdBlock(fromBlock, toBlock); err != nil { - return fmt.Errorf("delete fork id block error: %v", err) + return fmt.Errorf("DeleteForkIdBlock: %w", err) } ////////////////////////////////////////////////////// @@ -469,31 +467,31 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c ////////////////////////////////////////////////////// gers, err := hermezDb.GetBlockGlobalExitRoots(fromBlock, toBlock) if err != nil { - return fmt.Errorf("get block global exit roots error: %v", err) + return fmt.Errorf("GetBlockGlobalExitRoots: %w", err) } if err := hermezDb.DeleteGlobalExitRoots(&gers); err != nil { - return fmt.Errorf("delete global exit roots error: %v", err) + return fmt.Errorf("DeleteGlobalExitRoots: %w", err) } if err = hermezDb.DeleteLatestUsedGers(fromBlock, toBlock); err != nil { - return fmt.Errorf("delete latest used gers error: %v", err) + return fmt.Errorf("DeleteLatestUsedGers: %w", err) } if err := hermezDb.DeleteBlockGlobalExitRoots(fromBlock, toBlock); err != nil { - return fmt.Errorf("delete block global exit roots error: %v", err) + return fmt.Errorf("DeleteBlockGlobalExitRoots: %w", err) } if err := hermezDb.DeleteBlockL1BlockHashes(fromBlock, toBlock); err != nil { - return fmt.Errorf("delete block l1 block hashes error: %v", err) + return fmt.Errorf("DeleteBlockL1BlockHashes: %w", err) } if err = hermezDb.DeleteReusedL1InfoTreeIndexes(fromBlock, toBlock); err != nil { - return fmt.Errorf("write reused l1 info tree index error: %w", err) + return fmt.Errorf("DeleteReusedL1InfoTreeIndexes: %w", err) } if err = hermezDb.DeleteBatchEnds(fromBlock, toBlock); err != nil { - return fmt.Errorf("delete batch ends error: %v", err) + return fmt.Errorf("DeleteBatchEnds: %w", err) } /////////////////////////////////////////////////////// @@ -504,7 +502,7 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c stageprogress = fromBlock - 1 } if err := stages.SaveStageProgress(tx, stages.Batches, stageprogress); err != nil { - return fmt.Errorf("save stage progress error: %v", err) + return fmt.Errorf("SaveStageProgress: %w", err) } log.Info(fmt.Sprintf("[%s] Saving stage progress", logPrefix), "fromBlock", stageprogress) @@ -516,15 +514,15 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c // this is the last block of the previous batch and the highest hashable block for verifications lastBatchHighestBlock, _, err := hermezDb.GetHighestBlockInBatch(fromBatchPrev - 1) if err != nil { - return fmt.Errorf("get batch highest block error: %w", err) + return fmt.Errorf("GetHighestBlockInBatch: %w", err) } if err := stages.SaveStageProgress(tx, stages.HighestHashableL2BlockNo, lastBatchHighestBlock); err != nil { - return fmt.Errorf("save stage progress error: %v", err) + return fmt.Errorf("SaveStageProgress: %w", err) } if 
err = stages.SaveStageProgress(tx, stages.SequenceExecutorVerify, fromBatchPrev); err != nil { - return fmt.Errorf("save stage progress error: %v", err) + return fmt.Errorf("SaveStageProgress: %w", err) } ///////////////////////////////////////////////////// @@ -536,10 +534,10 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c ////////////////////////////////// forkId, err := hermezDb.GetForkId(fromBatchPrev) if err != nil { - return fmt.Errorf("get fork id error: %v", err) + return fmt.Errorf("GetForkId: %w", err) } if err := stages.SaveStageProgress(tx, stages.ForkId, forkId); err != nil { - return fmt.Errorf("save stage progress error: %v", err) + return fmt.Errorf("SaveStageProgress: %w", err) } ///////////////////////////////////////// // finish store the highest seen forkid// ///////////////////////////////////////// @@ -554,7 +552,7 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c } if err := hermezDb.DeleteBlockL1InfoTreeIndexes(fromBlock, toBlock); err != nil { - return fmt.Errorf("delete block l1 block hashes error: %v", err) + return fmt.Errorf("DeleteBlockL1InfoTreeIndexes: %w", err) } //////////////////////////////////////////////// @@ -562,15 +560,15 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c //////////////////////////////////////////////// if err = stages.SaveStageProgress(tx, stages.HighestSeenBatchNumber, fromBatchPrev); err != nil { - return fmt.Errorf("save stage progress error: %v", err) + return fmt.Errorf("SaveStageProgress: %w", err) } if err := u.Done(tx); err != nil { - return err + return fmt.Errorf("u.Done: %w", err) } if !useExternalTx { if err := tx.Commit(); err != nil { - return err + return fmt.Errorf("tx.Commit: %w", err) } } return nil @@ -582,7 +580,7 @@ func PruneBatchesStage(s *stagedsync.PruneState, tx kv.RwTx, cfg BatchesCfg, ctx if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) if err != nil { - return err + return fmt.Errorf("cfg.db.BeginRw: %w", err) } defer tx.Rollback() } @@ -594,26 +592,32 @@ func PruneBatchesStage(s *stagedsync.PruneState, tx kv.RwTx, cfg BatchesCfg, ctx toBlock, err := stages.GetStageProgress(tx, stages.Batches) if err != nil { - return fmt.Errorf("get stage datastream progress error: %v", err) + return fmt.Errorf("GetStageProgress: %w", err) } if err = rawdb.TruncateBlocks(ctx, tx, 1); err != nil { - return fmt.Errorf("delete blocks: %w", err) + return fmt.Errorf("TruncateBlocks: %w", err) } - hermezDb.DeleteForkIds(0, toBlock) - hermezDb.DeleteBlockBatches(0, toBlock) - hermezDb.DeleteBlockGlobalExitRoots(0, toBlock) + if err := hermezDb.DeleteForkIds(0, toBlock); err != nil { + return fmt.Errorf("DeleteForkIds: %w", err) + } + if err := hermezDb.DeleteBlockBatches(0, toBlock); err != nil { + return fmt.Errorf("DeleteBlockBatches: %w", err) + } + if err := hermezDb.DeleteBlockGlobalExitRoots(0, toBlock); err != nil { + return fmt.Errorf("DeleteBlockGlobalExitRoots: %w", err) + } log.Info(fmt.Sprintf("[%s] Deleted headers, bodies, forkIds and blockBatches.", logPrefix)) log.Info(fmt.Sprintf("[%s] Saving stage progress", logPrefix), "stageProgress", 0) if err := stages.SaveStageProgress(tx, stages.Batches, 0); err != nil { - return fmt.Errorf("save stage progress error: %v", err) + return fmt.Errorf("SaveStageProgress: %w", err) } if !useExternalTx { if err := tx.Commit(); err != nil { - return err + return fmt.Errorf("tx.Commit: %w", err) } } return nil @@ -635,17 +639,17 @@ func rollback( dsQueryClient.PrepUnwind() ancestorBlockNum, ancestorBlockHash, err :=
findCommonAncestor(eriDb, hermezDb, dsQueryClient, latestDSBlockNum) if err != nil { - return 0, err + return 0, fmt.Errorf("findCommonAncestor: %w", err) } log.Debug(fmt.Sprintf("[%s] The common ancestor for datastream and db is block %d (%s)", logPrefix, ancestorBlockNum, ancestorBlockHash)) unwindBlockNum, unwindBlockHash, batchNum, err := getUnwindPoint(eriDb, hermezDb, ancestorBlockNum, ancestorBlockHash) if err != nil { - return 0, err + return 0, fmt.Errorf("getUnwindPoint: %w", err) } if err = stages.SaveStageProgress(tx, stages.HighestSeenBatchNumber, batchNum-1); err != nil { - return 0, err + return 0, fmt.Errorf("SaveStageProgress: %w", err) } log.Warn(fmt.Sprintf("[%s] Unwinding to block %d (%s)", logPrefix, unwindBlockNum, unwindBlockHash)) @@ -681,17 +685,17 @@ func findCommonAncestor( if err != nil && // the required block might not be in the data stream, so ignore that error !errors.Is(err, types.ErrBadFromBookmark) { - return 0, emptyHash, fmt.Errorf("GetL2BlockByNumber: failed to get l2 block %d from datastream: %w", midBlockNum, err) + return 0, emptyHash, fmt.Errorf("GetL2BlockByNumber block %d: %w", midBlockNum, err) } midBlockDbHash, err := db.ReadCanonicalHash(midBlockNum) if err != nil { - return 0, emptyHash, fmt.Errorf("ReadCanonicalHash: failed to get canonical hash for block %d: %w", midBlockNum, err) + return 0, emptyHash, fmt.Errorf("ReadCanonicalHash block %d: %w", midBlockNum, err) } dbBatchNum, err := hermezDb.GetBatchNoByL2Block(midBlockNum) if err != nil { - return 0, emptyHash, fmt.Errorf("GetBatchNoByL2Block: failed to get batch number for block %d: %w", midBlockNum, err) + return 0, emptyHash, fmt.Errorf("GetBatchNoByL2Block block %d: %w", midBlockNum, err) } if midBlockDataStream != nil && @@ -717,7 +721,7 @@ func findCommonAncestor( func getUnwindPoint(eriDb erigon_db.ReadOnlyErigonDb, hermezDb state.ReadOnlyHermezDb, blockNum uint64, blockHash common.Hash) (uint64, common.Hash, uint64, error) { batchNum, err := hermezDb.GetBatchNoByL2Block(blockNum) if err != nil { - return 0, emptyHash, 0, err + return 0, emptyHash, 0, fmt.Errorf("GetBatchNoByL2Block: block %d (%s): %w", blockNum, blockHash, err) } if batchNum == 0 { @@ -727,12 +731,12 @@ func getUnwindPoint(eriDb erigon_db.ReadOnlyErigonDb, hermezDb state.ReadOnlyHer unwindBlockNum, _, err := hermezDb.GetHighestBlockInBatch(batchNum - 1) if err != nil { - return 0, emptyHash, 0, fmt.Errorf("GetHighestBlockInBatch: batch %d: %w", batchNum-1, err) + return 0, emptyHash, 0, fmt.Errorf("GetHighestBlockInBatch batch %d: %w", batchNum-1, err) } unwindBlockHash, err := eriDb.ReadCanonicalHash(unwindBlockNum) if err != nil { - return 0, emptyHash, 0, fmt.Errorf("ReadCanonicalHash: block %d: %w", unwindBlockNum, err) + return 0, emptyHash, 0, fmt.Errorf("ReadCanonicalHash block %d: %w", unwindBlockNum, err) } return unwindBlockNum, unwindBlockHash, batchNum, nil diff --git a/zk/stages/stage_l1_info_tree.go b/zk/stages/stage_l1_info_tree.go index 7547d240230..252a29be3f1 100644 --- a/zk/stages/stage_l1_info_tree.go +++ b/zk/stages/stage_l1_info_tree.go @@ -42,18 +42,18 @@ func SpawnL1InfoTreeStage( var err error tx, err = cfg.db.BeginRw(ctx) if err != nil { - return err + return fmt.Errorf("cfg.db.BeginRw: %w", err) } defer tx.Rollback() } if err := cfg.updater.WarmUp(tx); err != nil { - return err + return fmt.Errorf("cfg.updater.WarmUp: %w", err) } allLogs, err := cfg.updater.CheckForInfoTreeUpdates(logPrefix, tx) if err != nil { - return err + return fmt.Errorf("CheckForInfoTreeUpdates: %w", err) } var 
latestIndex uint64 @@ -65,7 +65,7 @@ func SpawnL1InfoTreeStage( if freshTx { if funcErr = tx.Commit(); funcErr != nil { - return funcErr + return fmt.Errorf("tx.Commit: %w", funcErr) } } diff --git a/zk/stages/stage_l1syncer.go b/zk/stages/stage_l1syncer.go index 39da1fa22f3..5c78f7209ec 100644 --- a/zk/stages/stage_l1syncer.go +++ b/zk/stages/stage_l1syncer.go @@ -95,7 +95,7 @@ func SpawnStageL1Syncer( var err error tx, err = cfg.db.BeginRw(ctx) if err != nil { - return fmt.Errorf("failed to open tx, %w", err) + return fmt.Errorf("cfg.db.BeginRw: %w", err) } defer tx.Rollback() } @@ -106,7 +106,7 @@ func SpawnStageL1Syncer( // get l1 block progress from this stage's progress l1BlockProgress, err := stages.GetStageProgress(tx, stages.L1Syncer) if err != nil { - return fmt.Errorf("failed to get l1 progress block, %w", err) + return fmt.Errorf("GetStageProgress, %w", err) } // start syncer if not started @@ -149,8 +149,7 @@ Loop: continue } if err := hermezDb.WriteSequence(info.L1BlockNo, info.BatchNo, info.L1TxHash, info.StateRoot, info.L1InfoRoot); err != nil { - funcErr = fmt.Errorf("failed to write batch info, %w", err) - return funcErr + return fmt.Errorf("WriteSequence: %w", err) } if info.L1BlockNo > highestWrittenL1BlockNo { highestWrittenL1BlockNo = info.L1BlockNo @@ -158,8 +157,7 @@ Loop: newSequencesCount++ case logRollbackBatches: if err := hermezDb.RollbackSequences(info.BatchNo); err != nil { - funcErr = fmt.Errorf("failed to write rollback sequence, %w", err) - return funcErr + return fmt.Errorf("RollbackSequences: %w", err) } if info.L1BlockNo > highestWrittenL1BlockNo { highestWrittenL1BlockNo = info.L1BlockNo @@ -175,8 +173,7 @@ Loop: highestVerification = info } if err := hermezDb.WriteVerification(info.L1BlockNo, info.BatchNo, info.L1TxHash, info.StateRoot); err != nil { - funcErr = fmt.Errorf("failed to write verification for block %d, %w", info.L1BlockNo, err) - return funcErr + return fmt.Errorf("WriteVerification for block %d: %w", info.L1BlockNo, err) } if info.L1BlockNo > highestWrittenL1BlockNo { highestWrittenL1BlockNo = info.L1BlockNo @@ -206,19 +203,17 @@ Loop: log.Info(fmt.Sprintf("[%s] Saving L1 syncer progress", logPrefix), "latestCheckedBlock", latestCheckedBlock, "newVerificationsCount", newVerificationsCount, "newSequencesCount", newSequencesCount, "highestWrittenL1BlockNo", highestWrittenL1BlockNo) if err := stages.SaveStageProgress(tx, stages.L1Syncer, highestWrittenL1BlockNo); err != nil { - funcErr = fmt.Errorf("failed to save stage progress, %w", err) - return funcErr + return fmt.Errorf("SaveStageProgress: %w", err) } if highestVerification.BatchNo > 0 { log.Info(fmt.Sprintf("[%s]", logPrefix), "highestVerificationBatchNo", highestVerification.BatchNo) if err := stages.SaveStageProgress(tx, stages.L1VerificationsBatchNo, highestVerification.BatchNo); err != nil { - return fmt.Errorf("failed to save stage progress, %w", err) + return fmt.Errorf("SaveStageProgress: %w", err) } } // State Root Verifications Check - err = verifyAgainstLocalBlocks(tx, hermezDb, logPrefix) - if err != nil { + if err = verifyAgainstLocalBlocks(tx, hermezDb, logPrefix); err != nil { if errors.Is(err, ErrStateRootMismatch) { panic(err) } @@ -231,8 +226,7 @@ Loop: if internalTxOpened { log.Debug("l1 sync: first cycle, committing tx") if err := tx.Commit(); err != nil { - funcErr = fmt.Errorf("failed to commit tx, %w", err) - return funcErr + return fmt.Errorf("tx.Commit: %w", err) } } @@ -325,7 +319,7 @@ func verifyAgainstLocalBlocks(tx kv.RwTx, hermezDb
*hermez_db.HermezDb, logPrefi // get the highest hashed block hashedBlockNo, err := stages.GetStageProgress(tx, stages.IntermediateHashes) if err != nil { - return fmt.Errorf("failed to get highest hashed block, %w", err) + return fmt.Errorf("GetStageProgress: %w", err) } // no need to check - interhashes has not yet run @@ -336,7 +330,7 @@ func verifyAgainstLocalBlocks(tx kv.RwTx, hermezDb *hermez_db.HermezDb, logPrefi // get the highest verified block verifiedBlockNo, err := hermezDb.GetHighestVerifiedBlockNo() if err != nil { - return fmt.Errorf("failed to get highest verified block no, %w", err) + return fmt.Errorf("GetHighestVerifiedBlockNo: %w", err) } // no verifications on l1 @@ -356,7 +350,7 @@ func verifyAgainstLocalBlocks(tx kv.RwTx, hermezDb *hermez_db.HermezDb, logPrefi // get the batch of the last hashed block hashedBatch, err := hermezDb.GetBatchNoByL2Block(hashedBlockNo) if err != nil && !errors.Is(err, hermez_db.ErrorNotStored) { - return err + return fmt.Errorf("GetBatchNoByL2Block: %w", err) } if hashedBatch == 0 { @@ -368,7 +362,7 @@ func verifyAgainstLocalBlocks(tx kv.RwTx, hermezDb *hermez_db.HermezDb, logPrefi // find the higher blocknum for previous batch blockNumbers, err := hermezDb.GetL2BlockNosByBatch(hashedBatch) if err != nil { - return err + return fmt.Errorf("GetL2BlockNosByBatch: %w", err) } if len(blockNumbers) == 0 { @@ -386,18 +380,17 @@ func verifyAgainstLocalBlocks(tx kv.RwTx, hermezDb *hermez_db.HermezDb, logPrefi // already checked highestChecked, err := stages.GetStageProgress(tx, stages.VerificationsStateRootCheck) if err != nil { - return fmt.Errorf("failed to get highest checked block, %w", err) + return fmt.Errorf("GetStageProgress: %w", err) } if highestChecked >= blockToCheck { return nil } if !sequencer.IsSequencer() { - err = blockComparison(tx, hermezDb, blockToCheck, logPrefix) - if err == nil { + if err = blockComparison(tx, hermezDb, blockToCheck, logPrefix); err == nil { log.Info(fmt.Sprintf("[%s] State root verified in block %d", logPrefix, blockToCheck)) if err := stages.SaveStageProgress(tx, stages.VerificationsStateRootCheck, verifiedBlockNo); err != nil { - return fmt.Errorf("failed to save stage progress, %w", err) + return fmt.Errorf("SaveStageProgress: %w", err) } } } @@ -408,12 +401,12 @@ func verifyAgainstLocalBlocks(tx kv.RwTx, hermezDb *hermez_db.HermezDb, logPrefi func blockComparison(tx kv.RwTx, hermezDb *hermez_db.HermezDb, blockNo uint64, logPrefix string) error { v, err := hermezDb.GetVerificationByL2BlockNo(blockNo) if err != nil { - return fmt.Errorf("failed to get verification by l2 block no, %w", err) + return fmt.Errorf("GetVerificationByL2BlockNo: %w", err) } block, err := rawdb.ReadBlockByNumber(tx, blockNo) if err != nil { - return fmt.Errorf("failed to read block by number, %w", err) + return fmt.Errorf("ReadBlockByNumber: %w", err) } if v == nil || block == nil { diff --git a/zk/syncer/l1_syncer.go b/zk/syncer/l1_syncer.go index 2d5f3984917..92247149eb6 100644 --- a/zk/syncer/l1_syncer.go +++ b/zk/syncer/l1_syncer.go @@ -205,17 +205,17 @@ func (s *L1Syncer) RunQueryBlocks(lastCheckedBlock uint64) { func (s *L1Syncer) GetHeader(number uint64) (*ethTypes.Header, error) { em := s.getNextEtherman() - return em.HeaderByNumber(context.Background(), new(big.Int).SetUint64(number)) + return em.HeaderByNumber(s.ctx, new(big.Int).SetUint64(number)) } func (s *L1Syncer) GetBlock(number uint64) (*ethTypes.Block, error) { em := s.getNextEtherman() - return em.BlockByNumber(context.Background(), 
new(big.Int).SetUint64(number)) + return em.BlockByNumber(s.ctx, new(big.Int).SetUint64(number)) } func (s *L1Syncer) GetTransaction(hash common.Hash) (ethTypes.Transaction, bool, error) { em := s.getNextEtherman() - return em.TransactionByHash(context.Background(), hash) + return em.TransactionByHash(s.ctx, hash) } func (s *L1Syncer) GetPreElderberryAccInputHash(ctx context.Context, addr *common.Address, batchNum uint64) (common.Hash, error) { From bbb4c5b845d4007523d8c722b5a02a343a0111b3 Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Tue, 19 Nov 2024 11:30:38 +0000 Subject: [PATCH 63/88] fix(syncer-utils): elderberry validium decoder (#1470) --- zk/syncer/utils.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/zk/syncer/utils.go b/zk/syncer/utils.go index a6ad885af74..a8e63273c0a 100644 --- a/zk/syncer/utils.go +++ b/zk/syncer/utils.go @@ -108,7 +108,9 @@ func DecodeSequenceBatchesCalldata(data []byte) (calldata interface{}, err error case contracts.SequenceBatchesIdv6_6: if method.Name == sequenceBatchesMethodName { return decodeElderberryBatchesCallData(unpackedCalldata), nil - } else { + } + case contracts.SequenceBatchesValidiumElderBerry: + if method.Name == sequenceBatchesValidiumMethodName { return decodeElderberryBatchesValidiumCallData(unpackedCalldata), nil } case contracts.SequenceBatchesBanana: @@ -120,6 +122,8 @@ func DecodeSequenceBatchesCalldata(data []byte) (calldata interface{}, err error default: return nil, fmt.Errorf("no decoder found for method signature: %s", methodSig) } + + return nil, fmt.Errorf("no decoder found for method signature: %s", methodSig) } type SequencedBatchBanana struct { From e55e3c53c7de10d86d04bdcef79d2d4468faf0e4 Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Tue, 19 Nov 2024 11:32:05 +0000 Subject: [PATCH 64/88] tweak(txpool): trace logs for best (#1466) --- zk/txpool/pool_zk.go | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/zk/txpool/pool_zk.go b/zk/txpool/pool_zk.go index 950b1990a02..c7a939abc71 100644 --- a/zk/txpool/pool_zk.go +++ b/zk/txpool/pool_zk.go @@ -154,12 +154,14 @@ func (p *TxPool) best(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableG defer p.lock.Unlock() if p.isDeniedYieldingTransactions() { + log.Trace("Denied yielding transactions, cannot proceed") return false, 0, nil } // First wait for the corresponding block to arrive if p.lastSeenBlock.Load() < onTopOf { - return false, 0, nil // Too early + log.Trace("Block not yet arrived, too early to process", "lastSeenBlock", p.lastSeenBlock.Load(), "requiredBlock", onTopOf) + return false, 0, nil } isShanghai := p.isShanghai() @@ -180,8 +182,10 @@ func (p *TxPool) best(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableG } mt := best.ms[i] + log.Trace("Processing transaction", "txID", mt.Tx.IDHash) if toSkip.Contains(mt.Tx.IDHash) { + log.Trace("Skipping transaction, already in toSkip", "txID", mt.Tx.IDHash) continue } @@ -189,26 +193,31 @@ func (p *TxPool) best(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableG // remove ldn txs when not in london toRemove = append(toRemove, mt) toSkip.Add(mt.Tx.IDHash) + log.Trace("Removing London transaction in non-London environment", "txID", mt.Tx.IDHash) continue } if mt.Tx.Gas > transactionGasLimit { // Skip transactions with very large gas limit, these shouldn't enter the pool at all log.Debug("found a transaction in the pending pool with too high gas for tx - clear the tx pool") + log.Trace("Skipping transaction with too high gas", "txID", mt.Tx.IDHash, 
"gas", mt.Tx.Gas) continue } rlpTx, sender, isLocal, err := p.getRlpLocked(tx, mt.Tx.IDHash[:]) if err != nil { + log.Trace("Error getting RLP of transaction", "txID", mt.Tx.IDHash, "error", err) return false, count, err } if len(rlpTx) == 0 { toRemove = append(toRemove, mt) + log.Trace("Removing transaction with empty RLP", "txID", mt.Tx.IDHash) continue } // Skip transactions that require more blob gas than is available blobCount := uint64(len(mt.Tx.BlobHashes)) if blobCount*fixedgas.BlobGasPerBlob > availableBlobGas { + log.Trace("Skipping transaction due to insufficient blob gas", "txID", mt.Tx.IDHash, "requiredBlobGas", blobCount*fixedgas.BlobGasPerBlob, "availableBlobGas", availableBlobGas) continue } availableBlobGas -= blobCount * fixedgas.BlobGasPerBlob @@ -219,6 +228,7 @@ func (p *TxPool) best(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableG intrinsicGas, _ := CalcIntrinsicGas(uint64(mt.Tx.DataLen), uint64(mt.Tx.DataNonZeroLen), nil, mt.Tx.Creation, true, true, isShanghai) if intrinsicGas > availableGas { // we might find another TX with a low enough intrinsic gas to include so carry on + log.Trace("Skipping transaction due to insufficient gas", "txID", mt.Tx.IDHash, "intrinsicGas", intrinsicGas, "availableGas", availableGas) continue } @@ -226,6 +236,7 @@ func (p *TxPool) best(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableG availableGas -= intrinsicGas } + log.Trace("Including transaction", "txID", mt.Tx.IDHash) txs.Txs[count] = rlpTx txs.TxIds[count] = mt.Tx.IDHash copy(txs.Senders.At(count), sender.Bytes()) @@ -238,6 +249,7 @@ func (p *TxPool) best(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableG if len(toRemove) > 0 { for _, mt := range toRemove { p.pending.Remove(mt) + log.Trace("Removed transaction from pending pool", "txID", mt.Tx.IDHash) } } return true, count, nil From ebe2d7dadb7366fecbe69d70cdf570910e082ce3 Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Tue, 19 Nov 2024 11:33:26 +0000 Subject: [PATCH 65/88] tweak(zkevm_api): accinput batch 0/1 (#1464) * tweak(zkevm_api): accinput batch 0/1 * tweak(zkevm_api): accinput batch 0/1 --- turbo/jsonrpc/zkevm_api.go | 20 ++++++-------------- turbo/jsonrpc/zkevm_api_test.go | 2 +- 2 files changed, 7 insertions(+), 15 deletions(-) diff --git a/turbo/jsonrpc/zkevm_api.go b/turbo/jsonrpc/zkevm_api.go index f6f7a2911bb..8abe80b8c9b 100644 --- a/turbo/jsonrpc/zkevm_api.go +++ b/turbo/jsonrpc/zkevm_api.go @@ -731,6 +731,11 @@ func (api *ZkEvmAPIImpl) getAccInputHash(ctx context.Context, db SequenceReader, return nil, fmt.Errorf("failed to get sequence range data for batch %d: %w", batchNum, err) } + // if we are asking for genesis return 0x0..0 + if batchNum == 0 && prevSequence.BatchNo == 0 { + return &common.Hash{}, nil + } + if prevSequence == nil || batchSequence == nil { var missing string if prevSequence == nil && batchSequence == nil { @@ -743,16 +748,6 @@ func (api *ZkEvmAPIImpl) getAccInputHash(ctx context.Context, db SequenceReader, return nil, fmt.Errorf("failed to get %s for batch %d", missing, batchNum) } - // if we are asking for the injected batch or genesis return 0x0..0 - if (batchNum == 0 || batchNum == 1) && prevSequence.BatchNo == 0 { - return &common.Hash{}, nil - } - - // if prev is 0, set to 1 (injected batch) - if prevSequence.BatchNo == 0 { - prevSequence.BatchNo = 1 - } - // get batch range for sequence prevSequenceBatch, currentSequenceBatch := prevSequence.BatchNo, batchSequence.BatchNo // get call data for tx @@ -789,11 +784,8 @@ func (api *ZkEvmAPIImpl) getAccInputHash(ctx 
context.Context, db SequenceReader, return nil, fmt.Errorf("batch %d is out of range of sequence calldata", batchNum) } - accInputHash = &prevSequenceAccinputHash - if prevSequenceBatch == 0 { - return - } // calculate acc input hash + accInputHash = &prevSequenceAccinputHash for i := 0; i < int(batchNum-prevSequenceBatch); i++ { accInputHash = accInputHashCalcFn(prevSequenceAccinputHash, i) prevSequenceAccinputHash = *accInputHash diff --git a/turbo/jsonrpc/zkevm_api_test.go b/turbo/jsonrpc/zkevm_api_test.go index c9cda1e73f8..c812cd53230 100644 --- a/turbo/jsonrpc/zkevm_api_test.go +++ b/turbo/jsonrpc/zkevm_api_test.go @@ -480,7 +480,7 @@ func TestGetBatchByNumber(t *testing.T) { assert.Equal(gers[len(gers)-1], batch.GlobalExitRoot) assert.Equal(mainnetExitRoots[len(mainnetExitRoots)-1], batch.MainnetExitRoot) assert.Equal(rollupExitRoots[len(rollupExitRoots)-1], batch.RollupExitRoot) - assert.Equal(common.HexToHash(common.Hash{}.String()), batch.AccInputHash) + assert.Equal(common.HexToHash("0x97d1524156ccb46723e5c3c87951da9a390499ba288161d879df1dbc03d49afc"), batch.AccInputHash) assert.Equal(common.HexToHash("0x22ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba97"), *batch.SendSequencesTxHash) assert.Equal(rpctypes.ArgUint64(1714427009), batch.Timestamp) assert.Equal(true, batch.Closed) From b5d8201827411dd9db909f91dbf389d2ae61377b Mon Sep 17 00:00:00 2001 From: Laia Soler Date: Tue, 19 Nov 2024 18:14:57 +0100 Subject: [PATCH 66/88] Fix modexp check bytes length --- core/vm/contracts_zkevm.go | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/core/vm/contracts_zkevm.go b/core/vm/contracts_zkevm.go index 4c80848f898..76263456c79 100644 --- a/core/vm/contracts_zkevm.go +++ b/core/vm/contracts_zkevm.go @@ -303,10 +303,8 @@ func (c *bigModExp_zkevm) RequiredGas(input []byte) uint64 { // Retrieve the operands and execute the exponentiation var ( base = new(big.Int).SetBytes(getData(input, 0, baseLen.Uint64())) - exp = new(big.Int).SetBytes(getData(input, baseLen.Uint64(), expLen.Uint64())) mod = new(big.Int).SetBytes(getData(input, baseLen.Uint64()+expLen.Uint64(), modLen.Uint64())) baseBitLen = base.BitLen() - expBitLen = exp.BitLen() modBitLen = mod.BitLen() ) @@ -314,17 +312,16 @@ func (c *bigModExp_zkevm) RequiredGas(input []byte) uint64 { // - if mod = 0 we consume gas as normal // - if base is 0 and mod < 8192 we consume gas as normal // - if neither of the above are true we check for reverts and return 0 gas fee - - if modBitLen == 0 { + if baseLen.Uint64() > 1024 || expLen.Uint64() > 1024 || modLen.Uint64() > 1024 { + return 0 + } else if modBitLen == 0 { // consume as normal - will return 0 } else if baseBitLen == 0 { - if modBitLen > 8192 { + if modLen.Uint64() > 1024 { return 0 } else { // consume as normal - will return 0 } - } else if baseBitLen > 8192 || expBitLen > 8192 || modBitLen > 8192 { - return 0 } // Retrieve the head 32 bytes of exp for the adjusted exponent length @@ -423,27 +420,26 @@ func (c *bigModExp_zkevm) Run(input []byte) ([]byte, error) { var ( v []byte baseBitLen = base.BitLen() - expBitLen = exp.BitLen() modBitLen = mod.BitLen() ) + // limit to 8192 bits for base, exp, and mod in ZK + if baseLen > 1024 || expLen > 1024 || modLen > 1024 { + return nil, ErrExecutionReverted + } + if modBitLen == 0 { return common.LeftPadBytes([]byte{}, int(modLen)), nil } if baseBitLen == 0 { - if modBitLen > 8192 { + if modLen > 1024 { return nil, ErrExecutionReverted } else { return common.LeftPadBytes([]byte{}, 
int(modLen)), nil } } - // limit to 8192 bits for base, exp, and mod in ZK - if baseBitLen > 8192 || expBitLen > 8192 || modBitLen > 8192 { - return nil, ErrExecutionReverted - } - switch { case base.Cmp(libcommon.Big1) == 0: //If base == 1, then we can just return base % mod (if mod >= 1, which it is) From b8d3c799b361c84f1b39832566d3965e07b6f83a Mon Sep 17 00:00:00 2001 From: Scott Fairclough <70711990+hexoscott@users.noreply.github.com> Date: Wed, 20 Nov 2024 09:34:54 +0000 Subject: [PATCH 67/88] preload sender and check for errors (#1480) --- zk/stages/stage_sequence_execute.go | 2 +- zk/stages/stage_sequence_execute_blocks.go | 8 ++++++ .../stage_sequence_execute_transactions.go | 28 ++++++++++++++----- 3 files changed, 30 insertions(+), 8 deletions(-) diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index 43883d8f82a..9c82e0f4b9d 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -336,7 +336,7 @@ func sequencingBatchStep( log.Info(fmt.Sprintf("[%s] Info tree updates", logPrefix), "count", len(newLogs), "latestIndex", latestIndex) default: if batchState.isLimboRecovery() { - batchState.blockState.transactionsForInclusion, err = getLimboTransaction(ctx, cfg, batchState.limboRecoveryData.limboTxHash) + batchState.blockState.transactionsForInclusion, err = getLimboTransaction(ctx, cfg, batchState.limboRecoveryData.limboTxHash, executionAt) if err != nil { return err } diff --git a/zk/stages/stage_sequence_execute_blocks.go b/zk/stages/stage_sequence_execute_blocks.go index 282adc7c41b..d801fecf7fe 100644 --- a/zk/stages/stage_sequence_execute_blocks.go +++ b/zk/stages/stage_sequence_execute_blocks.go @@ -300,6 +300,14 @@ func addSenders( cryptoContext := secp256k1.ContextForThread(1) senders := make([]common.Address, 0, len(finalTransactions)) for _, transaction := range finalTransactions { + from, ok := transaction.GetSender() + if ok { + senders = append(senders, from) + continue + } + + // shouldn't be hit as we preload this value before processing the transaction + // to look for errors in handling it. 
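
A condensed sketch of the preload-then-fallback pattern this patch introduces, not part of the diff itself: extractTransactionsFromSlot recovers the sender once and caches it with SetSender, so the path below only pays for full signature recovery when the preload was skipped. The helper name, package placement and import paths are illustrative assumptions based on the calls used elsewhere in this patch.

package stages // hypothetical placement alongside the sequencer stages

import (
	"github.com/ledgerwatch/erigon-lib/common"
	"github.com/ledgerwatch/erigon/core/types"
	"github.com/ledgerwatch/secp256k1"
)

// resolveSender prefers the sender already cached on the transaction and only
// falls back to signature recovery when nothing was preloaded.
func resolveSender(signer *types.Signer, cryptoCtx *secp256k1.Context, txn types.Transaction) (common.Address, error) {
	if from, ok := txn.GetSender(); ok {
		return from, nil // cached when the tx was pulled from the pool
	}
	from, err := signer.SenderWithContext(cryptoCtx, txn)
	if err != nil {
		return common.Address{}, err
	}
	txn.SetSender(from) // cache so later consumers skip recovery
	return from, nil
}

Keeping the fallback in place means a missing preload degrades to the previous behaviour rather than failing the block outright.
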
from, err := signer.SenderWithContext(cryptoContext, transaction) if err != nil { return err diff --git a/zk/stages/stage_sequence_execute_transactions.go b/zk/stages/stage_sequence_execute_transactions.go index 713dc462d39..97f395be3e1 100644 --- a/zk/stages/stage_sequence_execute_transactions.go +++ b/zk/stages/stage_sequence_execute_transactions.go @@ -17,6 +17,7 @@ import ( "github.com/ledgerwatch/erigon/core/vm/evmtypes" "github.com/ledgerwatch/erigon/zk/utils" "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/secp256k1" ) func getNextPoolTransactions(ctx context.Context, cfg SequenceBlockCfg, executionAt, forkId uint64, alreadyYielded mapset.Set[[32]byte]) ([]types.Transaction, []common.Hash, bool, error) { @@ -38,7 +39,7 @@ func getNextPoolTransactions(ctx context.Context, cfg SequenceBlockCfg, executio if allConditionsOk, _, err = cfg.txPool.YieldBest(cfg.yieldSize, &slots, poolTx, executionAt, gasLimit, 0, alreadyYielded); err != nil { return err } - yieldedTxs, yieldedIds, toRemove, err := extractTransactionsFromSlot(&slots) + yieldedTxs, yieldedIds, toRemove, err := extractTransactionsFromSlot(&slots, executionAt, cfg) if err != nil { return err } @@ -55,7 +56,7 @@ func getNextPoolTransactions(ctx context.Context, cfg SequenceBlockCfg, executio return transactions, ids, allConditionsOk, err } -func getLimboTransaction(ctx context.Context, cfg SequenceBlockCfg, txHash *common.Hash) ([]types.Transaction, error) { +func getLimboTransaction(ctx context.Context, cfg SequenceBlockCfg, txHash *common.Hash, executionAt uint64) ([]types.Transaction, error) { cfg.txPool.LockFlusher() defer cfg.txPool.UnlockFlusher() @@ -70,7 +71,7 @@ func getLimboTransaction(ctx context.Context, cfg SequenceBlockCfg, txHash *comm if slots != nil { // ignore the toRemove value here, we know the RLP will be sound as we had to read it from the pool // in the first place to get it into limbo - transactions, _, _, err = extractTransactionsFromSlot(slots) + transactions, _, _, err = extractTransactionsFromSlot(slots, executionAt, cfg) if err != nil { return err } @@ -84,10 +85,12 @@ func getLimboTransaction(ctx context.Context, cfg SequenceBlockCfg, txHash *comm return transactions, nil } -func extractTransactionsFromSlot(slot *types2.TxsRlp) ([]types.Transaction, []common.Hash, []common.Hash, error) { +func extractTransactionsFromSlot(slot *types2.TxsRlp, currentHeight uint64, cfg SequenceBlockCfg) ([]types.Transaction, []common.Hash, []common.Hash, error) { ids := make([]common.Hash, 0, len(slot.TxIds)) transactions := make([]types.Transaction, 0, len(slot.Txs)) toRemove := make([]common.Hash, 0) + signer := types.MakeSigner(cfg.chainConfig, currentHeight, 0) + cryptoContext := secp256k1.ContextForThread(1) for idx, txBytes := range slot.Txs { transaction, err := types.DecodeTransaction(txBytes) if err == io.EOF { @@ -96,12 +99,23 @@ func extractTransactionsFromSlot(slot *types2.TxsRlp) ([]types.Transaction, []co if err != nil { // we have a transaction that cannot be decoded or a similar issue. 
We don't want to handle // this tx so just WARN about it and remove it from the pool and continue - log.Warn("Failed to decode transaction from pool, skipping and removing from pool", "error", err) + log.Warn("[extractTransaction] Failed to decode transaction from pool, skipping and removing from pool", + "error", err, + "id", slot.TxIds[idx]) toRemove = append(toRemove, slot.TxIds[idx]) continue } - var sender common.Address - copy(sender[:], slot.Senders.At(idx)) + + // now attempt to recover the sender + sender, err := signer.SenderWithContext(cryptoContext, transaction) + if err != nil { + log.Warn("[extractTransaction] Failed to recover sender from transaction, skipping and removing from pool", + "error", err, + "hash", transaction.Hash()) + toRemove = append(toRemove, slot.TxIds[idx]) + continue + } + transaction.SetSender(sender) transactions = append(transactions, transaction) ids = append(ids, slot.TxIds[idx]) From fb15f1f164a4538dc37b78566dec0866fd6ea23f Mon Sep 17 00:00:00 2001 From: Scott Fairclough <70711990+hexoscott@users.noreply.github.com> Date: Wed, 20 Nov 2024 10:10:22 +0000 Subject: [PATCH 68/88] discard transactions from the pool that error during execution (#1490) --- zk/stages/stage_sequence_execute.go | 17 ++++++++++++++--- zk/stages/stage_sequence_execute_state.go | 1 + 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index 9c82e0f4b9d..e1cca259113 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -378,6 +378,7 @@ func sequencingBatchStep( log.Trace(fmt.Sprintf("[%s] Yielded transactions from the pool", logPrefix), "txCount", len(batchState.blockState.transactionsForInclusion)) } + badTxIndexes := make([]int, 0) for i, transaction := range batchState.blockState.transactionsForInclusion { txHash := transaction.Hash() effectiveGas := batchState.blockState.getL1EffectiveGases(cfg, i) @@ -412,9 +413,12 @@ func sequencingBatchStep( continue } - // if running in normal operation mode and error != nil then just allow the code to continue - // It is safe because this approach ensures that the problematic transaction (the one that caused err != nil to be returned) is kept in yielded - // Each transaction in yielded will be reevaluated at the end of each batch + // if we have an error at this point something has gone wrong, either in the pool or otherwise + // to stop the pool growing and hampering further processing of good transactions here + // we mark it for being discarded + log.Warn(fmt.Sprintf("[%s] error adding transaction to batch, discarding from pool", logPrefix), "hash", txHash, "err", err) + badTxIndexes = append(badTxIndexes, i) + batchState.blockState.transactionsToDiscard = append(batchState.blockState.transactionsToDiscard, batchState.blockState.transactionHashesToSlots[txHash]) } switch anyOverflow { @@ -501,6 +505,12 @@ func sequencingBatchStep( } } + // remove transactions that have been marked for removal + for i := len(badTxIndexes) - 1; i >= 0; i-- { + idx := badTxIndexes[i] + batchState.blockState.transactionsForInclusion = append(batchState.blockState.transactionsForInclusion[:idx], batchState.blockState.transactionsForInclusion[idx+1:]...) 
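
The descending iteration above is what keeps the removal correct: badTxIndexes is collected in ascending order while walking transactionsForInclusion, so splicing from the back means every position still to be removed stays valid. A standalone sketch of the same splice-in-reverse idiom, with illustrative names that are not part of the diff:

package main

import "fmt"

// removeIndexesDescending splices the given positions out of a slice.
// The positions are assumed to be sorted ascending, so they are visited from
// highest to lowest: deleting a low index first would shift every element
// behind it and invalidate the remaining positions.
func removeIndexesDescending(items []string, ascIndexes []int) []string {
	for i := len(ascIndexes) - 1; i >= 0; i-- {
		idx := ascIndexes[i]
		items = append(items[:idx], items[idx+1:]...)
	}
	return items
}

func main() {
	fmt.Println(removeIndexesDescending([]string{"a", "b", "c", "d"}, []int{1, 3}))
	// prints [a c]
}
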
+ } + if batchState.isL1Recovery() { // just go into the normal loop waiting for new transactions to signal that the recovery // has finished as far as it can go @@ -523,6 +533,7 @@ func sequencingBatchStep( } cfg.txPool.RemoveMinedTransactions(batchState.blockState.builtBlockElements.txSlots) + cfg.txPool.RemoveMinedTransactions(batchState.blockState.transactionsToDiscard) if batchState.isLimboRecovery() { stateRoot := block.Root() diff --git a/zk/stages/stage_sequence_execute_state.go b/zk/stages/stage_sequence_execute_state.go index 4ce99180806..d5af44c9df1 100644 --- a/zk/stages/stage_sequence_execute_state.go +++ b/zk/stages/stage_sequence_execute_state.go @@ -257,6 +257,7 @@ type BlockState struct { transactionHashesToSlots map[common.Hash]common.Hash builtBlockElements BuiltBlockElements blockL1RecoveryData *zktx.DecodedBatchL2Data + transactionsToDiscard []common.Hash } func newBlockState() *BlockState { From 2da9cf21c88b2abe20da6eb2130b0997940a1142 Mon Sep 17 00:00:00 2001 From: Scott Fairclough Date: Wed, 20 Nov 2024 11:55:14 +0000 Subject: [PATCH 69/88] adding more unit tests around modexp --- core/vm/contracts_zkevm_test.go | 171 ++++++++++++++++++++++++++++---- 1 file changed, 149 insertions(+), 22 deletions(-) diff --git a/core/vm/contracts_zkevm_test.go b/core/vm/contracts_zkevm_test.go index dba8d446e4d..39feedc1640 100644 --- a/core/vm/contracts_zkevm_test.go +++ b/core/vm/contracts_zkevm_test.go @@ -8,12 +8,13 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/smt/pkg/utils" ) var ( - big0 = big.NewInt(0) - big10 = big.NewInt(10) - big8194 = big.NewInt(0).Lsh(big.NewInt(1), 8194) + big0 = "0x0" + big10 = "0xA" + big8194 = "0x4362992542477663717851936978935131449793056647984930769727331144758885327396477180865056938100807948895829167184628101092439483280708736254443919060310219196315625195368771958154439300912192820638787406454366552787087546170437298240485162215607545204071601010143199668800040318400106143347203621191223523809402006520781902615644021249458240059389704141174204983375715675011074785117376352222470062777399781626711300563259602464423681025754018312053305974263344188969629529771248980526071030076659596905462974889729109472300110511532180826007171046803782796673989031518735406948199987603844482062620200462245085965970061368422995866518188131145286022923313720886659881297584554541006505638064672021710493743985235686765784724749625581242661619542957739331509712669629981483974047475025439881560468087294466997834483879428025054448330826861632628266300549124108089243710259641107036642083513218529644197458275019683869291929836739053709535161089793752074107911059764290862446321738763309837359967845656970965642396469704242225935055025258110445450634513533474484631974552083514150702181347159662776937735822665260280348854141881022681248016522901983338033429758615315744308323914202315651871629411120219742486244363183380691816463891709919510110954240032816474235720019110994911047415254041975362327446394608846423841233425623767284758856151474904877925994910414614465195424699289652133941755141296099005677668732049124312838917214149491218297488380914814491105455781163479225033689420594030284158477549798519467232755078651263112612317572716372574593363046954327278252011977691163019820245155273233720318594772928716635063672142260864628461611968481104622431492431749909867366113451950834797800608124927450376812342775355778628245384946816936107284411835819804788348306184746491185178145806483026037
404075624095095286158131104834707914358927865321235573218660677744740313402566273347778120205749965245195337469060954381619617093823714895798100908738469617471419018441897508079072102306423524152309082830871768887908361541754343376381968464399410155615898622815775892344363722387853443070119859753658392742823855018245989423311254495333155625236017152069285699234655850685334112036931033403492238456474895125688405840794462989547420387584356756721765358234099291470164452850555174702270601361451880127720093591313861274188952976928112060758758641675291904003261750442609017080654382603501735404588492856909066421614327125876363226305875802350647988746022661902863171584" ) func uint64To32Bytes(input int) []byte { @@ -68,39 +69,165 @@ func Test_ModExpZkevm_Gas(t *testing.T) { modExp := bigModExp_zkevm{enabled: true, eip2565: true} cases := map[string]struct { - base *big.Int - exp *big.Int - mod *big.Int - expected uint64 + base string + exp string + mod string + nonZeroGas bool + revert bool }{ - "simple test": {big10, big10, big10, 200}, - "0 mod - normal gas": {big10, big10, big0, 200}, - "base 0 - mod < 8192 - normal gas": {big0, big10, big10, 200}, - "base 0 - mod > 8192 - 0 gas": {big0, big10, big8194, 0}, - "base over 8192 - 0 gas": {big8194, big10, big10, 0}, - "exp over 8192 - 0 gas": {big10, big8194, big10, 0}, - "mod over 8192 - 0 gas": {big10, big10, big8194, 0}, + "simple test": { + big10, + big10, + big10, + true, + false, + }, + "0 mod - normal gas": { + big10, + big10, + big0, + true, + false, + }, + "base 0 - mod < 8192 - normal gas": { + big0, + big10, + big10, + true, + false, + }, + "base 0 - mod > 8192 - 0 gas": { + big0, + big10, + big8194, + false, + true, + }, + "base over 8192 - 0 gas": { + big8194, + big10, + big10, + false, + true, + }, + "exp over 8192 - 0 gas": { + big10, + big8194, + big10, + false, + true, + }, + "mod over 8192 - 0 gas": { + big10, + big10, + big8194, + false, + true, + }, + // tests beyond here are taken from the test vectors here https://github.com/0xPolygonHermez/zkevm-testvectors/blob/2b70027e11a427c15994713b41ef9b6794c2f3bb/tools-inputs/data/calldata/pre-modexp.json#L787 + "pre-modexp-test-case_0": { + "0x1", + "0x1", + "0x1111111111000000000000000000000000000000000000000000000000000000", + true, + false, + }, + "pre-modexp-test-case_1": { + "0x7", + "0x8", + "0x9", + true, + false, + }, + "pre-modexp_0": { + "0x00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000", + "0x1", + "0x9", + true, + false, + }, + "pre-modexp_1": { + "0x00000000000000000000000000000000000000000000000000000000000001110000000000000000000000000000000000000000000000000000000000000000", + "0x1000", + "0x0000000000000000000000000000000000000000000000000000000000ffffff0000000000000000000000000000000000000000000000000000000000000000", + true, + false, + }, + "pre-modexp_10": { + "0x3", + "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e", + "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + true, + false, + }, + "pre-modexp_12": { + "0x10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "0x1", + "0x9", + true, + false, + }, + "pre-modexp_5": { + "0xf000000000000000000000000000000000000000000000000000000000000000", + "0xf000000000000000000000000000000000000000000000000000000000000010", + "0xf000000000000000000000000000000000000000000000000000000000000055", + true, + false, + }, + 
"pre-modexp_6": { + "0x20", + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "0xf000000000000000000000000000000000000000000000000000000000000055", + true, + false, + }, + "pre-modexp_7": { + "0x000000000000000000000000000000000000000000000000000000000000006400000000000000000000000000000000000000000000000000000000002b32af000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004339f6e1061a", + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001eb07e0ea000000000000000000000000000000000000000000000000000000056101669d", + true, + false, + }, + "pre-modexp_8": { + "0x10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "0x1", + "0x00000000000000000000000000000000000000000000000000000000000000090000000000000000000000000000000000000000000000000000000000000000", + true, + false, + }, } for name, test := range cases { t.Run(name, func(t *testing.T) { input := make([]byte, 0) - base := len(test.base.Bytes()) - exp := len(test.exp.Bytes()) - mod := len(test.mod.Bytes()) + bigBase := utils.ConvertHexToBigInt(test.base) + bigExp := utils.ConvertHexToBigInt(test.exp) + bigMod := utils.ConvertHexToBigInt(test.mod) + + base := len(bigBase.Bytes()) + exp := len(bigExp.Bytes()) + mod := len(bigMod.Bytes()) input = append(input, uint64To32Bytes(base)...) input = append(input, uint64To32Bytes(exp)...) input = append(input, uint64To32Bytes(mod)...) - input = append(input, uint64ToDeterminedBytes(test.base, base)...) - input = append(input, uint64ToDeterminedBytes(test.exp, exp)...) - input = append(input, uint64ToDeterminedBytes(test.mod, mod)...) + input = append(input, uint64ToDeterminedBytes(bigBase, base)...) + input = append(input, uint64ToDeterminedBytes(bigExp, exp)...) + input = append(input, uint64ToDeterminedBytes(bigMod, mod)...) 
gas := modExp.RequiredGas(input) + if test.nonZeroGas && gas == 0 { + t.Errorf("Expected non-zero gas") + } else if !test.nonZeroGas && gas != 0 { + t.Errorf("Expected zero gas") + } - if gas != test.expected { - t.Errorf("Expected %d, got %d", test.expected, gas) + _, err := modExp.Run(input) + if test.revert && err == nil { + t.Errorf("Expected revert") + } else if !test.revert && err != nil { + t.Errorf("Unexpected revert: %s", err) } }) } From 6e6b5bbe1b7aa688d4d6e506d359f49ba3dab210 Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Thu, 21 Nov 2024 09:40:36 +0000 Subject: [PATCH 70/88] fix(ds): client - defer set streaming (#1492) --- zk/datastream/client/stream_client.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/zk/datastream/client/stream_client.go b/zk/datastream/client/stream_client.go index 1c536b87095..3cb49c56924 100644 --- a/zk/datastream/client/stream_client.go +++ b/zk/datastream/client/stream_client.go @@ -509,6 +509,10 @@ func (c *StreamClient) handleSocketError(socketErr error) bool { // reads entries to the end of the stream // at end will wait for new entries to arrive func (c *StreamClient) readAllEntriesToChannel() (err error) { + defer func() { + c.setStreaming(false) + }() + c.setStreaming(true) c.stopReadingToChannel.Store(false) From 358d70305396308e717df0b0623b8c27c709d405 Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Thu, 21 Nov 2024 12:10:09 +0000 Subject: [PATCH 71/88] AccInputHash - L1 Empty (#1455) * feat(zkevm_api): accinputhash node local calculation * fix(zkevm_api): tests * fix(zkevm_api): tests --- turbo/jsonrpc/zkevm_api.go | 276 +++++++++++++++++++++++++------- turbo/jsonrpc/zkevm_api_test.go | 4 +- zk/hermez_db/db.go | 2 +- zk/utils/acc_input_hash.go | 98 ++++++++++++ 4 files changed, 323 insertions(+), 57 deletions(-) diff --git a/turbo/jsonrpc/zkevm_api.go b/turbo/jsonrpc/zkevm_api.go index 8abe80b8c9b..88284e09793 100644 --- a/turbo/jsonrpc/zkevm_api.go +++ b/turbo/jsonrpc/zkevm_api.go @@ -16,12 +16,15 @@ import ( zktypes "github.com/ledgerwatch/erigon/zk/types" + "math" + "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/common/hexutil" "github.com/ledgerwatch/erigon-lib/kv/membatchwithdb" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/systemcontracts" eritypes "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/eth/ethconfig" @@ -45,8 +48,6 @@ import ( "github.com/ledgerwatch/erigon/zk/witness" "github.com/ledgerwatch/erigon/zkevm/hex" "github.com/ledgerwatch/erigon/zkevm/jsonrpc/client" - "github.com/ledgerwatch/erigon/core/systemcontracts" - "math" ) var sha3UncleHash = common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347") @@ -625,7 +626,7 @@ func (api *ZkEvmAPIImpl) GetBatchByNumber(ctx context.Context, rpcBatchNumber rp batch.BatchL2Data = batchL2Data if api.l1Syncer != nil { - accInputHash, err := api.getAccInputHash(ctx, hermezDb, batchNo) + accInputHash, err := api.getAccInputHash(ctx, tx, hermezDb, batchNo) if err != nil { log.Error(fmt.Sprintf("failed to get acc input hash for batch %d: %v", batchNo, err)) } @@ -719,12 +720,7 @@ func (api *ZkEvmAPIImpl) fullTxBlockData(ctx context.Context, tx kv.Tx, hermezDb return batchBlocksJson, batchTransactionsJson, nil } -type SequenceReader interface { - GetRangeSequencesByBatch(batchNo uint64) (*zktypes.L1BatchInfo, *zktypes.L1BatchInfo, error) - 
GetForkId(batchNo uint64) (uint64, error) -} - -func (api *ZkEvmAPIImpl) getAccInputHash(ctx context.Context, db SequenceReader, batchNum uint64) (accInputHash *common.Hash, err error) { +func (api *ZkEvmAPIImpl) getAccInputHash(ctx context.Context, tx kv.Tx, db *hermez_db.HermezDbReader, batchNum uint64) (accInputHash *common.Hash, err error) { // get batch sequence prevSequence, batchSequence, err := db.GetRangeSequencesByBatch(batchNum) if err != nil { @@ -732,69 +728,209 @@ func (api *ZkEvmAPIImpl) getAccInputHash(ctx context.Context, db SequenceReader, } // if we are asking for genesis return 0x0..0 - if batchNum == 0 && prevSequence.BatchNo == 0 { + if (batchNum == 0) && prevSequence.BatchNo == 0 { return &common.Hash{}, nil } - if prevSequence == nil || batchSequence == nil { - var missing string - if prevSequence == nil && batchSequence == nil { - missing = "previous and current batch sequences" - } else if prevSequence == nil { - missing = "previous batch sequence" - } else { - missing = "current batch sequence" + /* + when both are nil (i.e. no data in the L1, we must calculate + the entire set of accInputHashes sequentially ourselves + */ + l1Empty := false + if prevSequence.BatchNo == 0 && batchSequence == nil { + prevSequence = &zktypes.L1BatchInfo{ + BatchNo: 0, } - return nil, fmt.Errorf("failed to get %s for batch %d", missing, batchNum) + + batchSequence = &zktypes.L1BatchInfo{ + BatchNo: batchNum, + } + l1Empty = true } // get batch range for sequence prevSequenceBatch, currentSequenceBatch := prevSequence.BatchNo, batchSequence.BatchNo - // get call data for tx - l1Transaction, _, err := api.l1Syncer.GetTransaction(batchSequence.L1TxHash) - if err != nil { - return nil, fmt.Errorf("failed to get transaction data for tx %s: %w", batchSequence.L1TxHash, err) - } - sequenceBatchesCalldata := l1Transaction.GetData() - if len(sequenceBatchesCalldata) < 10 { - return nil, fmt.Errorf("calldata for tx %s is too short", batchSequence.L1TxHash) - } - currentBatchForkId, err := db.GetForkId(currentSequenceBatch) - if err != nil { - return nil, fmt.Errorf("failed to get fork id for batch %d: %w", currentSequenceBatch, err) - } + if !l1Empty { + // get call data for tx + l1Transaction, _, err := api.l1Syncer.GetTransaction(batchSequence.L1TxHash) + if err != nil { + return nil, fmt.Errorf("failed to get transaction data for tx %s: %w", batchSequence.L1TxHash, err) + } + sequenceBatchesCalldata := l1Transaction.GetData() + if len(sequenceBatchesCalldata) < 10 { + return nil, fmt.Errorf("calldata for tx %s is too short", batchSequence.L1TxHash) + } - prevSequenceAccinputHash, err := api.GetccInputHash(ctx, currentBatchForkId, prevSequenceBatch) - if err != nil { - return nil, fmt.Errorf("failed to get old acc input hash for batch %d: %w", prevSequenceBatch, err) - } + currentBatchForkId, err := db.GetForkId(currentSequenceBatch) + if err != nil { + return nil, fmt.Errorf("failed to get fork id for batch %d: %w", currentSequenceBatch, err) + } - decodedSequenceInterface, err := syncer.DecodeSequenceBatchesCalldata(sequenceBatchesCalldata) - if err != nil { - return nil, fmt.Errorf("failed to decode calldata for tx %s: %w", batchSequence.L1TxHash, err) - } + // injected batch input hash + var prevSequenceAccInputHash common.Hash + if prevSequenceBatch == 0 { + injectedBatchForkId, err := db.GetForkId(1) + if err != nil { + return nil, fmt.Errorf("failed to get fork id for batch 1: %w", err) + } + prevSequenceAccInputHash, err = api.GetAccInputHash(ctx, injectedBatchForkId, 1) + if err != 
nil { + return nil, fmt.Errorf("failed to get acc input hash for batch 1: %w", err) + } + } else { + prevSequenceAccInputHash, err = api.GetAccInputHash(ctx, currentBatchForkId, prevSequenceBatch) + if err != nil { + return nil, fmt.Errorf("failed to get old acc input hash for batch %d: %w", prevSequenceBatch, err) + } + } - accInputHashCalcFn, totalSequenceBatches, err := syncer.GetAccInputDataCalcFunction(batchSequence.L1InfoRoot, decodedSequenceInterface) - if err != nil { - return nil, fmt.Errorf("failed to get accInputHash calculation func: %w", err) - } + // move along to the injected batch + if prevSequenceBatch == 0 { + prevSequenceBatch = 1 + } - if totalSequenceBatches == 0 || batchNum-prevSequenceBatch > uint64(totalSequenceBatches) { - return nil, fmt.Errorf("batch %d is out of range of sequence calldata", batchNum) - } + decodedSequenceInterface, err := syncer.DecodeSequenceBatchesCalldata(sequenceBatchesCalldata) + if err != nil { + return nil, fmt.Errorf("failed to decode calldata for tx %s: %w", batchSequence.L1TxHash, err) + } + + accInputHashCalcFn, totalSequenceBatches, err := syncer.GetAccInputDataCalcFunction(batchSequence.L1InfoRoot, decodedSequenceInterface) + if err != nil { + return nil, fmt.Errorf("failed to get accInputHash calculation func: %w", err) + } + + if totalSequenceBatches == 0 || batchNum-prevSequenceBatch > uint64(totalSequenceBatches) { + return nil, fmt.Errorf("batch %d is out of range of sequence calldata", batchNum) + } + + accInputHash = &prevSequenceAccInputHash + // calculate acc input hash + for i := 0; i < int(batchNum-prevSequenceBatch); i++ { + accInputHash = accInputHashCalcFn(prevSequenceAccInputHash, i) + prevSequenceAccInputHash = *accInputHash + } + } else { + // l1 is empty + + /* + Step 1: accInputHash of genesis is 0x00..00 + Step 2: get the accInputHash of the injected batch from the sequencer + Step 3: profit + */ + + // acc input hash of batch 0 is 0x00...00 + if batchNum == 0 { + return &common.Hash{}, nil + } - // calculate acc input hash - accInputHash = &prevSequenceAccinputHash - for i := 0; i < int(batchNum-prevSequenceBatch); i++ { - accInputHash = accInputHashCalcFn(prevSequenceAccinputHash, i) - prevSequenceAccinputHash = *accInputHash + // get the accInputHash of the injected batch + prevSequenceAccInputHash, err := api.getInjectedBatchAccInputHashFromSequencer(api.config.Zk.L2RpcUrl) + if err != nil { + return nil, fmt.Errorf("failed to get acc input hash for injected batch: %w", err) + } + + if batchNum == 1 { + return prevSequenceAccInputHash, nil + } + + // pre-retrieve all info tree indexes + infoTreeIndexes, err := db.GetL1InfoTreeIndexToRoots() + if err != nil { + return nil, fmt.Errorf("failed to get l1 info tree indexes: %w", err) + } + if len(infoTreeIndexes) == 0 { + return nil, fmt.Errorf("no l1 info tree indexes found") + } + + // loop from batch 1 -> batch n (accInputHash to batch 1 is 0x0...0) + for i := 2; i <= int(batchNum); i++ { + currentForkId, err := db.GetForkId(uint64(i)) + if err != nil { + return nil, fmt.Errorf("failed to get fork id for batch %d: %w", i, err) + } + + /* + required data: + - sequencer addr - get current batch, get a block in it and use the coinbase + - batch data - construct the batchl2data from the db (think there's already a func to do this somewhere!) + - l1info root - from the DB + - limit timestamp - from the DB? + - forced block hash - nil afaik + - batch hash data - how to calculate for validium? 
+ - global exit root - get from DB + - timestamp - from the DB + - batch transaction data - how to calculate for validium? + */ + + batchBlockNos, err := db.GetL2BlockNosByBatch(uint64(i)) + if err != nil { + return nil, fmt.Errorf("failed to get batch blocks for batch %d: %w", i, err) + } + batchBlocks := []*eritypes.Block{} + var batchTxs []eritypes.Transaction + var coinbase common.Address + for in, blockNo := range batchBlockNos { + block, err := api.ethApi.BaseAPI.blockByNumberWithSenders(ctx, tx, blockNo) + if err != nil { + return nil, fmt.Errorf("failed to get block %d: %w", blockNo, err) + } + if in == 0 { + coinbase = block.Coinbase() + } + batchBlocks = append(batchBlocks, block) + batchTxs = append(batchTxs, block.Transactions()...) + } + batchL2Data, err := utils.GenerateBatchDataFromDb(tx, db, batchBlocks, currentForkId) + if err != nil { + return nil, fmt.Errorf("failed to generate batch data for batch %d: %w", i, err) + } + + // pre-etrog data + ger, err := db.GetBlockGlobalExitRoot(batchBlockNos[len(batchBlockNos)-1]) + if err != nil { + return nil, fmt.Errorf("failed to get global exit root for batch %d: %w", i, err) + } + + // etrog data + l1InfoTreeUpdate, err := db.GetL1InfoTreeUpdateByGer(ger) + if err != nil { + return nil, fmt.Errorf("failed to get l1 info root for batch %d: %w", i, err) + } + l1InfoRoot := infoTreeIndexes[0] + timeStamp := uint64(0) + if l1InfoTreeUpdate != nil { + l1InfoRoot = infoTreeIndexes[l1InfoTreeUpdate.Index] + timeStamp = l1InfoTreeUpdate.Timestamp + } + + limitTs := batchBlocks[len(batchBlocks)-1].Time() + + inputs := zkUtils.AccHashInputs{ + OldAccInputHash: prevSequenceAccInputHash, + Sequencer: coinbase, + BatchData: batchL2Data, + L1InfoRoot: &l1InfoRoot, + LimitTimestamp: limitTs, + ForcedBlockHash: &common.Hash{}, + GlobalExitRoot: &ger, + Timestamp: timeStamp, + BatchTransactionData: nil, + IsValidium: len(api.config.Zk.DAUrl) > 0, + } + + accInputHash, err = zkUtils.CalculateAccInputHashByForkId(inputs, currentForkId) + if err != nil { + return nil, fmt.Errorf("failed to calculate accInputHash for batch %d: %w", i, err) + } + prevSequenceAccInputHash = accInputHash + } } return } -func (api *ZkEvmAPIImpl) GetccInputHash(ctx context.Context, currentBatchForkId, lastSequenceBatchNumber uint64) (accInputHash common.Hash, err error) { +func (api *ZkEvmAPIImpl) GetAccInputHash(ctx context.Context, currentBatchForkId, lastSequenceBatchNumber uint64) (accInputHash common.Hash, err error) { if currentBatchForkId < uint64(chain.ForkID8Elderberry) { accInputHash, err = api.l1Syncer.GetPreElderberryAccInputHash(ctx, &api.config.AddressRollup, lastSequenceBatchNumber) } else { @@ -1160,7 +1296,7 @@ func (api *ZkEvmAPIImpl) GetProverInput(ctx context.Context, batchNumber uint64, var oldAccInputHash common.Hash if batchNumber > 0 { - oaih, err := api.getAccInputHash(ctx, hDb, batchNumber-1) + oaih, err := api.getAccInputHash(ctx, tx, hDb, batchNumber-1) if err != nil { return nil, err } @@ -1918,3 +2054,35 @@ func (api *ZkEvmAPIImpl) GetRollupManagerAddress(ctx context.Context) (res json. 
return rollupManagerAddressJson, err } + +func (api *ZkEvmAPIImpl) getInjectedBatchAccInputHashFromSequencer(rpcUrl string) (*libcommon.Hash, error) { + res, err := client.JSONRPCCall(rpcUrl, "zkevm_getBatchByNumber", 1) + if err != nil { + return nil, err + } + + if res.Error != nil { + return nil, fmt.Errorf("RPC error response: %s", res.Error.Message) + } + + var resultMap map[string]interface{} + + err = json.Unmarshal(res.Result, &resultMap) + if err != nil { + return nil, err + } + + hashValue, ok := resultMap["accInputHash"] + if !ok { + return nil, fmt.Errorf("accInputHash not found in response") + } + + hash, ok := hashValue.(string) + if !ok { + return nil, fmt.Errorf("accInputHash is not a string") + } + + decoded := libcommon.HexToHash(hash) + + return &decoded, nil +} diff --git a/turbo/jsonrpc/zkevm_api_test.go b/turbo/jsonrpc/zkevm_api_test.go index c812cd53230..715563f0909 100644 --- a/turbo/jsonrpc/zkevm_api_test.go +++ b/turbo/jsonrpc/zkevm_api_test.go @@ -457,7 +457,7 @@ func TestGetBatchByNumber(t *testing.T) { EthermanMock.EXPECT().TransactionByHash(ctx, common.HexToHash("0x22ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba97")).Return(txByHashResponse, true, nil).AnyTimes() storageAtResponse := []byte{} - EthermanMock.EXPECT().StorageAt(ctx, common.HexToAddress("0x000"), common.HexToHash("0xb5ad54240dc61c51d3a3e8d3f925722e010966ae263d67344c5fb60bddebddae"), nil).Return(storageAtResponse, nil).AnyTimes() + EthermanMock.EXPECT().StorageAt(ctx, common.HexToAddress("0x000"), common.HexToHash("0x5317d76ba28a4ffb21ed890613e0cdfc6847329136ad56bef014d23f3b6b63b2"), nil).Return(storageAtResponse, nil).AnyTimes() var response2 []byte response2 = append(response2, accInputHash.Bytes()...) @@ -480,7 +480,7 @@ func TestGetBatchByNumber(t *testing.T) { assert.Equal(gers[len(gers)-1], batch.GlobalExitRoot) assert.Equal(mainnetExitRoots[len(mainnetExitRoots)-1], batch.MainnetExitRoot) assert.Equal(rollupExitRoots[len(rollupExitRoots)-1], batch.RollupExitRoot) - assert.Equal(common.HexToHash("0x97d1524156ccb46723e5c3c87951da9a390499ba288161d879df1dbc03d49afc"), batch.AccInputHash) + assert.Equal(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), batch.AccInputHash) assert.Equal(common.HexToHash("0x22ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba97"), *batch.SendSequencesTxHash) assert.Equal(rpctypes.ArgUint64(1714427009), batch.Timestamp) assert.Equal(true, batch.Closed) diff --git a/zk/hermez_db/db.go b/zk/hermez_db/db.go index 01e8bdfe3e4..1607e875d89 100644 --- a/zk/hermez_db/db.go +++ b/zk/hermez_db/db.go @@ -929,7 +929,7 @@ func (db *HermezDbReader) GetBatchGlobalExitRootsProto(fromBatchNum, toBatchNum return gersProto, nil } -// GetBatchGlobalExitRoot deprecated: post etrog this will not work +// Deprecated: GetBatchGlobalExitRoot will not work post etrog func (db *HermezDbReader) GetBatchGlobalExitRoot(batchNum uint64) (*dstypes.GerUpdate, error) { gerUpdateBytes, err := db.tx.GetOne(GLOBAL_EXIT_ROOTS_BATCHES, Uint64ToBytes(batchNum)) if err != nil { diff --git a/zk/utils/acc_input_hash.go b/zk/utils/acc_input_hash.go index 7a66b899ee9..29c140d2018 100644 --- a/zk/utils/acc_input_hash.go +++ b/zk/utils/acc_input_hash.go @@ -6,6 +6,8 @@ import ( "github.com/iden3/go-iden3-crypto/keccak256" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon-lib/chain" + "errors" ) func CalculateBananaAccInputHash( @@ -195,3 +197,99 @@ func calculatePreEtrogAccInputHash( 
func CalculateBatchHashData(transactions []byte) []byte { return crypto.Keccak256(transactions) } + +type AccHashInputs struct { + // common + OldAccInputHash *common.Hash + Sequencer common.Address + BatchData []byte + + // etrog + L1InfoRoot *common.Hash + LimitTimestamp uint64 + ForcedBlockHash *common.Hash + + // pre etrog + GlobalExitRoot *common.Hash + Timestamp uint64 + + // validium + IsValidium bool + BatchTransactionData *common.Hash +} + +func CalculateAccInputHashByForkId(input AccHashInputs, forkId uint64) (*common.Hash, error) { + var newAccInputHash *common.Hash + + if forkId >= uint64(chain.ForkID7Etrog) { + // etrog + if !input.IsValidium { + // rollup + if input.BatchData == nil || len(input.BatchData) == 0 { + return nil, errors.New("batchData is required for etrog rollup") + } + if input.L1InfoRoot == nil { + return nil, errors.New("l1InfoRoot is required for etrog rollup") + } + if input.ForcedBlockHash == nil { + return nil, errors.New("forcedBlockHash is required for etrog rollup") + } + newAccInputHash = CalculateEtrogAccInputHash( + *input.OldAccInputHash, + input.BatchData, + *input.L1InfoRoot, + input.LimitTimestamp, + input.Sequencer, + *input.ForcedBlockHash, + ) + } else { + // validium + if input.L1InfoRoot == nil { + return nil, errors.New("l1InfoRoot is required for etrog validium") + } + if input.ForcedBlockHash == nil { + return nil, errors.New("forcedBlockHash is required for etrog validium") + } + newAccInputHash = CalculateEtrogValidiumAccInputHash( + *input.OldAccInputHash, + *input.BatchTransactionData, + *input.L1InfoRoot, + input.LimitTimestamp, + input.Sequencer, + *input.ForcedBlockHash, + ) + } + } else { + // pre-etrog + if !input.IsValidium { + // rollup + if input.BatchData == nil || len(input.BatchData) == 0 { + return nil, errors.New("batchData is required for pre-etrog rollup") + } + if input.GlobalExitRoot == nil { + return nil, errors.New("globalExitRoot is required for pre-etrog rollup") + } + newAccInputHash = CalculatePreEtrogAccInputHash( + *input.OldAccInputHash, + input.BatchData, + *input.GlobalExitRoot, + input.Timestamp, + input.Sequencer, + ) + } else { + // validium + if input.GlobalExitRoot == nil { + return nil, errors.New("globalExitRoot is required for pre-etrog validium") + } + newAccInputHash = CalculatePreEtrogValidiumAccInputHash( + *input.OldAccInputHash, + *input.BatchTransactionData, + *input.GlobalExitRoot, + input.Timestamp, + input.Sequencer, + ) + } + } + + return newAccInputHash, nil +} From d537d5f7d9dc6d6cd0d7b22ba9b8234432701776 Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Mon, 25 Nov 2024 16:32:15 +0000 Subject: [PATCH 72/88] Unwind Test (#1500) * Fix/2.60 rpc sync (#1496) * simplify datastream error handling logic in stage batches # Conflicts: # zk/datastream/client/stream_client.go * interface missing on test stream client # Conflicts: # zk/datastream/client/stream_client.go * fix for nonce issue in RPC (#1501) * add zkevm_getLatestDataStreamBlock rpc endpoint # Conflicts: # turbo/jsonrpc/zkevm_api.go * use rpc in place of stream for fork 12 unwind process # Conflicts: # zk/stages/stage_batches.go * making common ancestor search testable * fix(.gitignore): changing gitignore to debug * chore(unwind-test): small changes to be able to fix this thing here. 
* chore(unwind-tests): changing script to run locally * fix: mod value not being padded + new test cases Signed-off-by: Ji Hwan * chore(unwind-test): changing timeout times to try and make it run without failure * chore(unwind-tests): adding a debugging log a change to the script and a test fix * chore(unwind-tests): fixing file that went wrong on rebase. * chore(unwind-tests): undoing change on test * chore(unwind-tests): comment on a part of the code + changes on unwind to test on PR * removing comments on action * tweak(makefile): add test-unwind to makefile * test(unwind): unwind test optimisations * fix(kurtosis): batch monitor specify enclave (env var) * fix(kurtosis): batch monitor specify enclave (env var) * Revert "AccInputHash - L1 Empty (#1455)" This reverts commit 358d70305396308e717df0b0623b8c27c709d405. * fix(docs): doc gen/check --------- Signed-off-by: Ji Hwan Co-authored-by: Scott Fairclough <70711990+hexoscott@users.noreply.github.com> Co-authored-by: Scott Fairclough Co-authored-by: Arthur Abeilice Co-authored-by: Ji Hwan --- .github/workflows/ci_zkevm.yml | 7 +- .github/workflows/test-unwinds.yml | 2 +- .gitignore | 5 +- Makefile | 7 + docs/endpoints/Makefile | 6 +- docs/endpoints/endpoints.md | 1 + go.mod | 1 - go.sum | 12 -- turbo/jsonrpc/zkevm_api.go | 255 +++++++------------------- turbo/jsonrpc/zkevm_api_test.go | 4 +- turbo/rpchelper/helper.go | 4 +- zk/datastream/client/stream_client.go | 173 ++++++++--------- zk/hermez_db/db.go | 2 +- zk/stages/stage_batches.go | 148 +++++++++++---- zk/stages/stage_batches_datastream.go | 5 +- zk/stages/stage_batches_test.go | 38 +++- zk/stages/stage_interhashes.go | 4 +- zk/stages/test_utils.go | 4 + zk/stages/utils.go | 64 +++++++ zk/tests/unwinds/unwind.sh | 148 +++++++++++---- zk/utils/acc_input_hash.go | 98 ---------- 21 files changed, 503 insertions(+), 485 deletions(-) diff --git a/.github/workflows/ci_zkevm.yml b/.github/workflows/ci_zkevm.yml index 9b657266115..21447af3ebb 100644 --- a/.github/workflows/ci_zkevm.yml +++ b/.github/workflows/ci_zkevm.yml @@ -83,12 +83,9 @@ jobs: ref: v0.2.12 path: kurtosis-cdk - - name: Install Kurtosis CDK tools + - name: Install Kurtosis CDK tools (Kurtosis, yq, Foundry, disable analytics) uses: ./kurtosis-cdk/.github/actions/setup-kurtosis-cdk - - name: Install Foundry - uses: foundry-rs/foundry-toolchain@v1 - - name: Install yq run: | sudo curl -L https://github.com/mikefarah/yq/releases/download/v4.44.2/yq_linux_amd64 -o /usr/local/bin/yq @@ -137,6 +134,8 @@ jobs: - name: Monitor verified batches working-directory: ./kurtosis-cdk shell: bash + env: + ENCLAVE_NAME: cdk-v1 run: timeout 900s .github/scripts/monitor-verified-batches.sh --rpc-url $(kurtosis port print cdk-v1 cdk-erigon-node-001 rpc) --target 20 --timeout 900 - name: Set up Docker Buildx diff --git a/.github/workflows/test-unwinds.yml b/.github/workflows/test-unwinds.yml index d809f5cedd4..de1808d4421 100644 --- a/.github/workflows/test-unwinds.yml +++ b/.github/workflows/test-unwinds.yml @@ -22,7 +22,7 @@ jobs: - name: Install dependencies on Linux if: runner.os == 'Linux' - run: sudo apt update && sudo apt install build-essential + run: sudo apt update && sudo apt install -y build-essential - name: Build run: | diff --git a/.gitignore b/.gitignore index 5c4d0b47c8e..f0ca1b20fbf 100644 --- a/.gitignore +++ b/.gitignore @@ -112,4 +112,7 @@ node_modules vendor **/cover.out -**/cover.html \ No newline at end of file +**/cover.html + +datadir +zk/tests/unwinds/datastream/hermez-dynamic-integration8-datastream \ No newline at 
end of file diff --git a/Makefile b/Makefile index e6d40c3c252..6dbde38c866 100644 --- a/Makefile +++ b/Makefile @@ -159,6 +159,13 @@ db-tools: rm -rf vendor @echo "Run \"$(GOBIN)/mdbx_stat -h\" to get info about mdbx db file." + +## test-unwind: run the unwind tests +test-unwind: + make cdk-erigon + ./zk/tests/unwinds/unwind.sh + + test-erigon-lib: @cd erigon-lib && $(MAKE) test diff --git a/docs/endpoints/Makefile b/docs/endpoints/Makefile index d9af1597ee3..271a7c96569 100644 --- a/docs/endpoints/Makefile +++ b/docs/endpoints/Makefile @@ -1,11 +1,11 @@ -DOC_NAME:="endpoints.md" +DOC_NAME:=endpoints.md .PHONY: gen-doc -gen-doc: +gen-doc: go run main.go $(DOC_NAME) .PHONY: check-doc -check-doc: +check-doc: go run main.go tmp$(DOC_NAME) cmp -s ./$(DOC_NAME) ./tmp$(DOC_NAME); \ RETVAL=$$?; \ diff --git a/docs/endpoints/endpoints.md b/docs/endpoints/endpoints.md index e8756c070a7..4faadfb0971 100644 --- a/docs/endpoints/endpoints.md +++ b/docs/endpoints/endpoints.md @@ -198,6 +198,7 @@ If the endpoint is not in the list below, it means this specific endpoint is not - zkevm_getFullBlockByHash - zkevm_getFullBlockByNumber - zkevm_getL2BlockInfoTree +- zkevm_getLatestDataStreamBlock - zkevm_getLatestGlobalExitRoot - zkevm_getProverInput - zkevm_getRollupAddress diff --git a/go.mod b/go.mod index 8e2906a27bb..4f733dbfcf0 100644 --- a/go.mod +++ b/go.mod @@ -174,7 +174,6 @@ require ( github.com/francoispqt/gojay v1.2.13 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/garslo/gogen v0.0.0-20170307003452-d6ebae628c7c // indirect - github.com/go-delve/delve v1.21.2 // indirect github.com/go-llsqlite/adapter v0.0.0-20230927005056-7f5ce7f0c916 // indirect github.com/go-llsqlite/crawshaw v0.4.0 // indirect github.com/go-logr/logr v1.2.4 // indirect diff --git a/go.sum b/go.sum index 2dc4ef179ba..92381b5806a 100644 --- a/go.sum +++ b/go.sum @@ -49,10 +49,6 @@ filippo.io/edwards25519 v1.0.0-rc.1/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7 gfx.cafe/util/go/generic v0.0.0-20230721185457-c559e86c829c h1:alCfDKmPC0EC0KGlZWrNF0hilVWBkzMz+aAYTJ/2hY4= gfx.cafe/util/go/generic v0.0.0-20230721185457-c559e86c829c/go.mod h1:WvSX4JsCRBuIXj0FRBFX9YLg+2SoL3w8Ww19uZO9yNE= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -github.com/0xPolygonHermez/zkevm-data-streamer v0.2.5 h1:p0epAhai44c34G+nzX0CZ67q3vkJtOXlO07lbhAEe9g= -github.com/0xPolygonHermez/zkevm-data-streamer v0.2.5/go.mod h1:RC6ouyNsUtJrv5aGPcM6Dm5xhXN209tRSzcsJsaOtZI= -github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 h1:BSO1uu6dmLQ5kKb3uyDvsUxbnIoyumKvlwr0OtpTYMo= -github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6/go.mod h1:RC6ouyNsUtJrv5aGPcM6Dm5xhXN209tRSzcsJsaOtZI= github.com/0xPolygonHermez/zkevm-data-streamer v0.2.7 h1:73sYxRQ9cOmtYBEyHePgEwrVULR+YruSQxVXCt/SmzU= github.com/0xPolygonHermez/zkevm-data-streamer v0.2.7/go.mod h1:7nM7Ihk+fTG1TQPwdZoGOYd3wprqqyIyjtS514uHzWE= github.com/99designs/gqlgen v0.17.40 h1:/l8JcEVQ93wqIfmH9VS1jsAkwm6eAF1NwQn3N+SDqBY= @@ -331,8 +327,6 @@ github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s= github.com/go-chi/chi/v5 v5.0.12/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= github.com/go-chi/cors v1.2.1 h1:xEC8UT3Rlp2QuWNEr4Fs/c2EAGVKBwy/1vHx3bppil4= github.com/go-chi/cors v1.2.1/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58= -github.com/go-delve/delve v1.21.2 h1:eaS+ziJo+660mi3D2q/VP8RxW5GcF4Y1zyKSi82alsU= -github.com/go-delve/delve v1.21.2/go.mod 
h1:FgTAiRUe43RS5EexL06RPyMtP8AMZVL/t9Qqgy3qUe4= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -494,8 +488,6 @@ github.com/hermeznetwork/tracerr v0.3.2/go.mod h1:nsWC1+tc4qUEbUGRv4DcPJJTjLsedl github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= -github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= -github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/holiman/uint256 v1.3.1 h1:JfTzmih28bittyHM8z360dCjIA9dbPIBlcTI6lmctQs= github.com/holiman/uint256 v1.3.1/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -1339,8 +1331,6 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -1555,8 +1545,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= diff --git a/turbo/jsonrpc/zkevm_api.go b/turbo/jsonrpc/zkevm_api.go index 88284e09793..14cf4baaea5 100644 --- a/turbo/jsonrpc/zkevm_api.go +++ b/turbo/jsonrpc/zkevm_api.go @@ -82,6 +82,7 @@ type ZkEvmAPI interface { GetForks(ctx context.Context) (res json.RawMessage, err error) GetRollupAddress(ctx context.Context) (res json.RawMessage, err error) GetRollupManagerAddress(ctx context.Context) (res json.RawMessage, err error) + GetLatestDataStreamBlock(ctx context.Context) (hexutil.Uint64, error) } const getBatchWitness = "getBatchWitness" @@ -626,7 +627,7 @@ func (api *ZkEvmAPIImpl) GetBatchByNumber(ctx context.Context, rpcBatchNumber rp batch.BatchL2Data = batchL2Data if api.l1Syncer != nil { - accInputHash, err := api.getAccInputHash(ctx, tx, hermezDb, batchNo) + accInputHash, err := 
api.getAccInputHash(ctx, hermezDb, batchNo) if err != nil { log.Error(fmt.Sprintf("failed to get acc input hash for batch %d: %v", batchNo, err)) } @@ -720,7 +721,12 @@ func (api *ZkEvmAPIImpl) fullTxBlockData(ctx context.Context, tx kv.Tx, hermezDb return batchBlocksJson, batchTransactionsJson, nil } -func (api *ZkEvmAPIImpl) getAccInputHash(ctx context.Context, tx kv.Tx, db *hermez_db.HermezDbReader, batchNum uint64) (accInputHash *common.Hash, err error) { +type SequenceReader interface { + GetRangeSequencesByBatch(batchNo uint64) (*zktypes.L1BatchInfo, *zktypes.L1BatchInfo, error) + GetForkId(batchNo uint64) (uint64, error) +} + +func (api *ZkEvmAPIImpl) getAccInputHash(ctx context.Context, db SequenceReader, batchNum uint64) (accInputHash *common.Hash, err error) { // get batch sequence prevSequence, batchSequence, err := db.GetRangeSequencesByBatch(batchNum) if err != nil { @@ -728,209 +734,69 @@ func (api *ZkEvmAPIImpl) getAccInputHash(ctx context.Context, tx kv.Tx, db *herm } // if we are asking for genesis return 0x0..0 - if (batchNum == 0) && prevSequence.BatchNo == 0 { + if batchNum == 0 && prevSequence.BatchNo == 0 { return &common.Hash{}, nil } - /* - when both are nil (i.e. no data in the L1, we must calculate - the entire set of accInputHashes sequentially ourselves - */ - l1Empty := false - if prevSequence.BatchNo == 0 && batchSequence == nil { - prevSequence = &zktypes.L1BatchInfo{ - BatchNo: 0, - } - - batchSequence = &zktypes.L1BatchInfo{ - BatchNo: batchNum, + if prevSequence == nil || batchSequence == nil { + var missing string + if prevSequence == nil && batchSequence == nil { + missing = "previous and current batch sequences" + } else if prevSequence == nil { + missing = "previous batch sequence" + } else { + missing = "current batch sequence" } - l1Empty = true + return nil, fmt.Errorf("failed to get %s for batch %d", missing, batchNum) } // get batch range for sequence prevSequenceBatch, currentSequenceBatch := prevSequence.BatchNo, batchSequence.BatchNo + // get call data for tx + l1Transaction, _, err := api.l1Syncer.GetTransaction(batchSequence.L1TxHash) + if err != nil { + return nil, fmt.Errorf("failed to get transaction data for tx %s: %w", batchSequence.L1TxHash, err) + } + sequenceBatchesCalldata := l1Transaction.GetData() + if len(sequenceBatchesCalldata) < 10 { + return nil, fmt.Errorf("calldata for tx %s is too short", batchSequence.L1TxHash) + } - if !l1Empty { - // get call data for tx - l1Transaction, _, err := api.l1Syncer.GetTransaction(batchSequence.L1TxHash) - if err != nil { - return nil, fmt.Errorf("failed to get transaction data for tx %s: %w", batchSequence.L1TxHash, err) - } - sequenceBatchesCalldata := l1Transaction.GetData() - if len(sequenceBatchesCalldata) < 10 { - return nil, fmt.Errorf("calldata for tx %s is too short", batchSequence.L1TxHash) - } - - currentBatchForkId, err := db.GetForkId(currentSequenceBatch) - if err != nil { - return nil, fmt.Errorf("failed to get fork id for batch %d: %w", currentSequenceBatch, err) - } - - // injected batch input hash - var prevSequenceAccInputHash common.Hash - if prevSequenceBatch == 0 { - injectedBatchForkId, err := db.GetForkId(1) - if err != nil { - return nil, fmt.Errorf("failed to get fork id for batch 1: %w", err) - } - prevSequenceAccInputHash, err = api.GetAccInputHash(ctx, injectedBatchForkId, 1) - if err != nil { - return nil, fmt.Errorf("failed to get acc input hash for batch 1: %w", err) - } - } else { - prevSequenceAccInputHash, err = api.GetAccInputHash(ctx, currentBatchForkId, 
prevSequenceBatch) - if err != nil { - return nil, fmt.Errorf("failed to get old acc input hash for batch %d: %w", prevSequenceBatch, err) - } - } - - // move along to the injected batch - if prevSequenceBatch == 0 { - prevSequenceBatch = 1 - } - - decodedSequenceInterface, err := syncer.DecodeSequenceBatchesCalldata(sequenceBatchesCalldata) - if err != nil { - return nil, fmt.Errorf("failed to decode calldata for tx %s: %w", batchSequence.L1TxHash, err) - } - - accInputHashCalcFn, totalSequenceBatches, err := syncer.GetAccInputDataCalcFunction(batchSequence.L1InfoRoot, decodedSequenceInterface) - if err != nil { - return nil, fmt.Errorf("failed to get accInputHash calculation func: %w", err) - } - - if totalSequenceBatches == 0 || batchNum-prevSequenceBatch > uint64(totalSequenceBatches) { - return nil, fmt.Errorf("batch %d is out of range of sequence calldata", batchNum) - } - - accInputHash = &prevSequenceAccInputHash - // calculate acc input hash - for i := 0; i < int(batchNum-prevSequenceBatch); i++ { - accInputHash = accInputHashCalcFn(prevSequenceAccInputHash, i) - prevSequenceAccInputHash = *accInputHash - } - } else { - // l1 is empty - - /* - Step 1: accInputHash of genesis is 0x00..00 - Step 2: get the accInputHash of the injected batch from the sequencer - Step 3: profit - */ - - // acc input hash of batch 0 is 0x00...00 - if batchNum == 0 { - return &common.Hash{}, nil - } - - // get the accInputHash of the injected batch - prevSequenceAccInputHash, err := api.getInjectedBatchAccInputHashFromSequencer(api.config.Zk.L2RpcUrl) - if err != nil { - return nil, fmt.Errorf("failed to get acc input hash for injected batch: %w", err) - } - - if batchNum == 1 { - return prevSequenceAccInputHash, nil - } - - // pre-retrieve all info tree indexes - infoTreeIndexes, err := db.GetL1InfoTreeIndexToRoots() - if err != nil { - return nil, fmt.Errorf("failed to get l1 info tree indexes: %w", err) - } - if len(infoTreeIndexes) == 0 { - return nil, fmt.Errorf("no l1 info tree indexes found") - } - - // loop from batch 1 -> batch n (accInputHash to batch 1 is 0x0...0) - for i := 2; i <= int(batchNum); i++ { - currentForkId, err := db.GetForkId(uint64(i)) - if err != nil { - return nil, fmt.Errorf("failed to get fork id for batch %d: %w", i, err) - } + currentBatchForkId, err := db.GetForkId(currentSequenceBatch) + if err != nil { + return nil, fmt.Errorf("failed to get fork id for batch %d: %w", currentSequenceBatch, err) + } - /* - required data: - - sequencer addr - get current batch, get a block in it and use the coinbase - - batch data - construct the batchl2data from the db (think there's already a func to do this somewhere!) - - l1info root - from the DB - - limit timestamp - from the DB? - - forced block hash - nil afaik - - batch hash data - how to calculate for validium? - - global exit root - get from DB - - timestamp - from the DB - - batch transaction data - how to calculate for validium? 
- */ - - batchBlockNos, err := db.GetL2BlockNosByBatch(uint64(i)) - if err != nil { - return nil, fmt.Errorf("failed to get batch blocks for batch %d: %w", i, err) - } - batchBlocks := []*eritypes.Block{} - var batchTxs []eritypes.Transaction - var coinbase common.Address - for in, blockNo := range batchBlockNos { - block, err := api.ethApi.BaseAPI.blockByNumberWithSenders(ctx, tx, blockNo) - if err != nil { - return nil, fmt.Errorf("failed to get block %d: %w", blockNo, err) - } - if in == 0 { - coinbase = block.Coinbase() - } - batchBlocks = append(batchBlocks, block) - batchTxs = append(batchTxs, block.Transactions()...) - } - batchL2Data, err := utils.GenerateBatchDataFromDb(tx, db, batchBlocks, currentForkId) - if err != nil { - return nil, fmt.Errorf("failed to generate batch data for batch %d: %w", i, err) - } + prevSequenceAccinputHash, err := api.GetccInputHash(ctx, currentBatchForkId, prevSequenceBatch) + if err != nil { + return nil, fmt.Errorf("failed to get old acc input hash for batch %d: %w", prevSequenceBatch, err) + } - // pre-etrog data - ger, err := db.GetBlockGlobalExitRoot(batchBlockNos[len(batchBlockNos)-1]) - if err != nil { - return nil, fmt.Errorf("failed to get global exit root for batch %d: %w", i, err) - } + decodedSequenceInterface, err := syncer.DecodeSequenceBatchesCalldata(sequenceBatchesCalldata) + if err != nil { + return nil, fmt.Errorf("failed to decode calldata for tx %s: %w", batchSequence.L1TxHash, err) + } - // etrog data - l1InfoTreeUpdate, err := db.GetL1InfoTreeUpdateByGer(ger) - if err != nil { - return nil, fmt.Errorf("failed to get l1 info root for batch %d: %w", i, err) - } - l1InfoRoot := infoTreeIndexes[0] - timeStamp := uint64(0) - if l1InfoTreeUpdate != nil { - l1InfoRoot = infoTreeIndexes[l1InfoTreeUpdate.Index] - timeStamp = l1InfoTreeUpdate.Timestamp - } + accInputHashCalcFn, totalSequenceBatches, err := syncer.GetAccInputDataCalcFunction(batchSequence.L1InfoRoot, decodedSequenceInterface) + if err != nil { + return nil, fmt.Errorf("failed to get accInputHash calculation func: %w", err) + } - limitTs := batchBlocks[len(batchBlocks)-1].Time() - - inputs := zkUtils.AccHashInputs{ - OldAccInputHash: prevSequenceAccInputHash, - Sequencer: coinbase, - BatchData: batchL2Data, - L1InfoRoot: &l1InfoRoot, - LimitTimestamp: limitTs, - ForcedBlockHash: &common.Hash{}, - GlobalExitRoot: &ger, - Timestamp: timeStamp, - BatchTransactionData: nil, - IsValidium: len(api.config.Zk.DAUrl) > 0, - } + if totalSequenceBatches == 0 || batchNum-prevSequenceBatch > uint64(totalSequenceBatches) { + return nil, fmt.Errorf("batch %d is out of range of sequence calldata", batchNum) + } - accInputHash, err = zkUtils.CalculateAccInputHashByForkId(inputs, currentForkId) - if err != nil { - return nil, fmt.Errorf("failed to calculate accInputHash for batch %d: %w", i, err) - } - prevSequenceAccInputHash = accInputHash - } + // calculate acc input hash + accInputHash = &prevSequenceAccinputHash + for i := 0; i < int(batchNum-prevSequenceBatch); i++ { + accInputHash = accInputHashCalcFn(prevSequenceAccinputHash, i) + prevSequenceAccinputHash = *accInputHash } return } -func (api *ZkEvmAPIImpl) GetAccInputHash(ctx context.Context, currentBatchForkId, lastSequenceBatchNumber uint64) (accInputHash common.Hash, err error) { +func (api *ZkEvmAPIImpl) GetccInputHash(ctx context.Context, currentBatchForkId, lastSequenceBatchNumber uint64) (accInputHash common.Hash, err error) { if currentBatchForkId < uint64(chain.ForkID8Elderberry) { accInputHash, err = 
api.l1Syncer.GetPreElderberryAccInputHash(ctx, &api.config.AddressRollup, lastSequenceBatchNumber) } else { @@ -1296,7 +1162,7 @@ func (api *ZkEvmAPIImpl) GetProverInput(ctx context.Context, batchNumber uint64, var oldAccInputHash common.Hash if batchNumber > 0 { - oaih, err := api.getAccInputHash(ctx, tx, hDb, batchNumber-1) + oaih, err := api.getAccInputHash(ctx, hDb, batchNumber-1) if err != nil { return nil, err } @@ -2086,3 +1952,18 @@ func (api *ZkEvmAPIImpl) getInjectedBatchAccInputHashFromSequencer(rpcUrl string return &decoded, nil } + +func (api *ZkEvmAPIImpl) GetLatestDataStreamBlock(ctx context.Context) (hexutil.Uint64, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return 0, err + } + defer tx.Rollback() + + latestBlock, err := stages.GetStageProgress(tx, stages.DataStream) + if err != nil { + return 0, err + } + + return hexutil.Uint64(latestBlock), nil +} diff --git a/turbo/jsonrpc/zkevm_api_test.go b/turbo/jsonrpc/zkevm_api_test.go index 715563f0909..c812cd53230 100644 --- a/turbo/jsonrpc/zkevm_api_test.go +++ b/turbo/jsonrpc/zkevm_api_test.go @@ -457,7 +457,7 @@ func TestGetBatchByNumber(t *testing.T) { EthermanMock.EXPECT().TransactionByHash(ctx, common.HexToHash("0x22ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba97")).Return(txByHashResponse, true, nil).AnyTimes() storageAtResponse := []byte{} - EthermanMock.EXPECT().StorageAt(ctx, common.HexToAddress("0x000"), common.HexToHash("0x5317d76ba28a4ffb21ed890613e0cdfc6847329136ad56bef014d23f3b6b63b2"), nil).Return(storageAtResponse, nil).AnyTimes() + EthermanMock.EXPECT().StorageAt(ctx, common.HexToAddress("0x000"), common.HexToHash("0xb5ad54240dc61c51d3a3e8d3f925722e010966ae263d67344c5fb60bddebddae"), nil).Return(storageAtResponse, nil).AnyTimes() var response2 []byte response2 = append(response2, accInputHash.Bytes()...) 
@@ -480,7 +480,7 @@ func TestGetBatchByNumber(t *testing.T) { assert.Equal(gers[len(gers)-1], batch.GlobalExitRoot) assert.Equal(mainnetExitRoots[len(mainnetExitRoots)-1], batch.MainnetExitRoot) assert.Equal(rollupExitRoots[len(rollupExitRoots)-1], batch.RollupExitRoot) - assert.Equal(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), batch.AccInputHash) + assert.Equal(common.HexToHash("0x97d1524156ccb46723e5c3c87951da9a390499ba288161d879df1dbc03d49afc"), batch.AccInputHash) assert.Equal(common.HexToHash("0x22ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba97"), *batch.SendSequencesTxHash) assert.Equal(rpctypes.ArgUint64(1714427009), batch.Timestamp) assert.Equal(true, batch.Closed) diff --git a/turbo/rpchelper/helper.go b/turbo/rpchelper/helper.go index 7cecd4826d5..8b0b5d76155 100644 --- a/turbo/rpchelper/helper.go +++ b/turbo/rpchelper/helper.go @@ -58,9 +58,7 @@ func _GetBlockNumber(requireCanonical bool, blockNrOrHash rpc.BlockNumberOrHash, number := *blockNrOrHash.BlockNumber switch number { case rpc.LatestBlockNumber: - if blockNumber, err = GetLatestFinishedBlockNumber(tx); err != nil { - return 0, libcommon.Hash{}, false, err - } + blockNumber = finishedBlockNumber case rpc.EarliestBlockNumber: blockNumber = 0 case rpc.FinalizedBlockNumber: diff --git a/zk/datastream/client/stream_client.go b/zk/datastream/client/stream_client.go index 3cb49c56924..23987f7ea31 100644 --- a/zk/datastream/client/stream_client.go +++ b/zk/datastream/client/stream_client.go @@ -65,6 +65,9 @@ type StreamClient struct { // which makes sense for an active server listening for these things but in unit tests // this makes behaviour very unpredictable and hard to test allowStops bool + + lastError error + started bool } const ( @@ -116,46 +119,23 @@ var ( // and streams the changes for that block (including the transactions). // Note that this function is intended for on demand querying and it disposes the connection after it ends. 
func (c *StreamClient) GetL2BlockByNumber(blockNum uint64) (fullBLock *types.FullL2Block, err error) { - var ( - connected bool = c.conn != nil - ) - count := 0 - for { - select { - case <-c.ctx.Done(): - return nil, fmt.Errorf("context done - stopping") - - default: - } - if count > 5 { - return nil, ErrFailedAttempts - } - if connected { - if err := c.stopStreamingIfStarted(); err != nil { - return nil, fmt.Errorf("stopStreamingIfStarted: %w", err) - } + select { + case <-c.ctx.Done(): + return nil, fmt.Errorf("context done - stopping") - if fullBLock, err = c.getL2BlockByNumber(blockNum); err == nil { - break - } - - if errors.Is(err, types.ErrAlreadyStarted) { - // if the client is already started, we can stop the client and try again - if errStop := c.Stop(); errStop != nil { - log.Warn("failed to send stop command", "error", errStop) - } - } else if !errors.Is(err, ErrSocket) { - return nil, fmt.Errorf("getL2BlockByNumber: %w", err) - } + default: + } + if err := c.stopStreamingIfStarted(); err != nil { + return nil, fmt.Errorf("stopStreamingIfStarted: %w", err) + } - } - time.Sleep(1 * time.Second) - connected = c.handleSocketError(err) - count++ - err = nil + fullBlock, err := c.getL2BlockByNumber(blockNum) + if err != nil { + c.lastError = err + return nil, err } - return fullBLock, nil + return fullBlock, nil } func (c *StreamClient) getL2BlockByNumber(blockNum uint64) (l2Block *types.FullL2Block, err error) { @@ -204,46 +184,22 @@ func (c *StreamClient) getL2BlockByNumber(blockNum uint64) (l2Block *types.FullL // it retrieves the latest File entry that is of EntryTypeL2Block type. // Note that this function is intended for on demand querying and it disposes the connection after it ends. func (c *StreamClient) GetLatestL2Block() (l2Block *types.FullL2Block, err error) { - var ( - connected bool = c.conn != nil - ) - count := 0 - for { - select { - case <-c.ctx.Done(): - return nil, errors.New("context done - stopping") - default: - } - if count > 5 { - return nil, ErrFailedAttempts - } - if connected { - if err = c.stopStreamingIfStarted(); err != nil { - err = fmt.Errorf("stopStreamingIfStarted: %w", err) - } - if err == nil { - if l2Block, err = c.getLatestL2Block(); err == nil { - break - } - err = fmt.Errorf("getLatestL2Block: %w", err) - } - - if err != nil && !errors.Is(err, ErrSocket) { - return nil, err - } else if errors.Is(err, types.ErrAlreadyStarted) { - // if the client is already started, we can stop the client and try again - if errStop := c.Stop(); errStop != nil { - log.Warn("failed to send stop command", "error", errStop) - } - } - err = nil - } + select { + case <-c.ctx.Done(): + return nil, errors.New("context done - stopping") + default: + } + if err = c.stopStreamingIfStarted(); err != nil { + err = fmt.Errorf("stopStreamingIfStarted: %w", err) + } - time.Sleep(1 * time.Second) - connected = c.handleSocketError(err) - count++ + fullBlock, err := c.getLatestL2Block() + if err != nil { + c.lastError = err + return nil, err } - return l2Block, nil + + return fullBlock, nil } func (c *StreamClient) getStreaming() bool { @@ -461,32 +417,28 @@ func (c *StreamClient) RenewEntryChannel() { } func (c *StreamClient) ReadAllEntriesToChannel() (err error) { - var ( - connected bool = c.conn != nil - ) - count := 0 - for { - select { - case <-c.ctx.Done(): - return fmt.Errorf("context done - stopping") - default: + defer func() { + if err != nil { + c.lastError = err } - if connected { - if err := c.stopStreamingIfStarted(); err != nil { - return 
fmt.Errorf("stopStreamingIfStarted: %w", err) - } + }() + select { + case <-c.ctx.Done(): + return fmt.Errorf("context done - stopping") + default: + } + if err := c.stopStreamingIfStarted(); err != nil { + return fmt.Errorf("stopStreamingIfStarted: %w", err) + } - if err = c.readAllEntriesToChannel(); err == nil { - break - } - if !errors.Is(err, ErrSocket) { - return fmt.Errorf("readAllEntriesToChannel: %w", err) - } - } + // first load up the header of the stream + if _, err := c.GetHeader(); err != nil { + return fmt.Errorf("GetHeader: %w", err) + } - time.Sleep(1 * time.Second) - connected = c.handleSocketError(err) - count++ + if err = c.readAllEntriesToChannel(); err != nil { + c.lastError = err + return err } return nil @@ -510,7 +462,10 @@ func (c *StreamClient) handleSocketError(socketErr error) bool { // at end will wait for new entries to arrive func (c *StreamClient) readAllEntriesToChannel() (err error) { defer func() { - c.setStreaming(false) + if err != nil { + c.setStreaming(false) + c.lastError = err + } }() c.setStreaming(true) @@ -645,6 +600,28 @@ LOOP: return nil } +func (c *StreamClient) HandleStart() error { + if !c.started { + log.Info("[Datastream client] Starting datastream client from cold") + // never been started - so kick things off + if err := c.Start(); err != nil { + return err + } + c.started = true + } + + if c.lastError != nil { + log.Info("[Datastream client] Last error detected, trying to reconnect") + // we had an error last time, so try to reconnect + if err := c.tryReConnect(); err != nil { + return err + } + c.lastError = nil + } + + return nil +} + func (c *StreamClient) tryReConnect() (err error) { if c.conn != nil { if err := c.conn.Close(); err != nil { diff --git a/zk/hermez_db/db.go b/zk/hermez_db/db.go index 1607e875d89..01e8bdfe3e4 100644 --- a/zk/hermez_db/db.go +++ b/zk/hermez_db/db.go @@ -929,7 +929,7 @@ func (db *HermezDbReader) GetBatchGlobalExitRootsProto(fromBatchNum, toBatchNum return gersProto, nil } -// Deprecated: GetBatchGlobalExitRoot will not work post etrog +// GetBatchGlobalExitRoot deprecated: post etrog this will not work func (db *HermezDbReader) GetBatchGlobalExitRoot(batchNum uint64) (*dstypes.GerUpdate, error) { gerUpdateBytes, err := db.tx.GetOne(GLOBAL_EXIT_ROOTS_BATCHES, Uint64ToBytes(batchNum)) if err != nil { diff --git a/zk/stages/stage_batches.go b/zk/stages/stage_batches.go index 31535c38d10..12b3f45aac7 100644 --- a/zk/stages/stage_batches.go +++ b/zk/stages/stage_batches.go @@ -7,10 +7,11 @@ import ( "math/big" "sync/atomic" "time" + "os" + "syscall" "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon-lib/kv" ethTypes "github.com/ledgerwatch/erigon/core/types" @@ -22,15 +23,15 @@ import ( "github.com/ledgerwatch/erigon/zk/erigon_db" "github.com/ledgerwatch/erigon/zk/hermez_db" "github.com/ledgerwatch/erigon/zk/sequencer" - "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/zk/datastream/client" "github.com/ledgerwatch/log/v3" ) const ( - STAGE_PROGRESS_SAVE = 3000000 + STAGE_PROGRESS_SAVE = 100_000 NEW_BLOCKS_ON_DS_LIMIT = 10000 ) @@ -68,6 +69,7 @@ type DatastreamClient interface { Start() error Stop() error PrepUnwind() + HandleStart() error } type DatastreamReadRunner interface { @@ -151,10 +153,19 @@ func SpawnStageBatches( } //// BISECT //// - if cfg.zkCfg.DebugLimit > 0 && stageProgressBlockNo > cfg.zkCfg.DebugLimit { - 
log.Info(fmt.Sprintf("[%s] Debug limit reached", logPrefix), "stageProgressBlockNo", stageProgressBlockNo, "debugLimit", cfg.zkCfg.DebugLimit) - time.Sleep(2 * time.Second) - return nil + if cfg.zkCfg.DebugLimit > 0 { + finishProg, err := stages.GetStageProgress(tx, stages.Finish) + if err != nil { + } + if finishProg >= cfg.zkCfg.DebugLimit { + log.Info(fmt.Sprintf("[%s] Debug limit reached", logPrefix), "finishProg", finishProg, "debugLimit", cfg.zkCfg.DebugLimit) + syscall.Kill(os.Getpid(), syscall.SIGINT) + } + + if stageProgressBlockNo >= cfg.zkCfg.DebugLimit { + log.Info(fmt.Sprintf("[%s] Debug limit reached", logPrefix), "stageProgressBlockNo", stageProgressBlockNo, "debugLimit", cfg.zkCfg.DebugLimit) + return nil + } } // this limit is blocknumber not included, so up to limit-1 @@ -184,7 +195,11 @@ func SpawnStageBatches( } defer stopDsClient() - var highestDSL2Block *types.FullL2Block + if err := dsQueryClient.HandleStart(); err != nil { + return err + } + + var highestDSL2Block uint64 newBlockCheckStartTIme := time.Now() for { select { @@ -192,33 +207,38 @@ func SpawnStageBatches( return nil default: } - if highestDSL2Block, err = dsQueryClient.GetLatestL2Block(); err != nil { + + highestDSL2Block, err = getHighestDSL2Block(ctx, cfg, uint16(latestForkId)) + if err != nil { // if we return error, stage will replay and block all other stages log.Warn(fmt.Sprintf("[%s] Failed to get latest l2 block from datastream: %v", logPrefix, err)) + // because this is likely something network related lets put a pause here for just a couple of + // seconds to save the node going into a crazy loop + time.Sleep(2 * time.Second) return nil } // a lower block should also break the loop because that means the datastream was unwound // thus we should unwind as well and continue from there - if highestDSL2Block.L2BlockNumber != stageProgressBlockNo { - log.Info(fmt.Sprintf("[%s] Highest block in datastream", logPrefix), "datastreamBlock", highestDSL2Block.L2BlockNumber, "stageProgressBlockNo", stageProgressBlockNo) + if highestDSL2Block != stageProgressBlockNo { + log.Info(fmt.Sprintf("[%s] Highest block in datastream", logPrefix), "datastreamBlock", highestDSL2Block, "stageProgressBlockNo", stageProgressBlockNo) break } if time.Since(newBlockCheckStartTIme) > 10*time.Second { - log.Info(fmt.Sprintf("[%s] Waiting for at least one new block in datastream", logPrefix), "datastreamBlock", highestDSL2Block.L2BlockNumber, "last processed block", stageProgressBlockNo) + log.Info(fmt.Sprintf("[%s] Waiting for at least one new block in datastream", logPrefix), "datastreamBlock", highestDSL2Block, "last processed block", stageProgressBlockNo) newBlockCheckStartTIme = time.Now() } time.Sleep(50 * time.Millisecond) } - log.Debug(fmt.Sprintf("[%s] Highest block in db and datastream", logPrefix), "datastreamBlock", highestDSL2Block.L2BlockNumber, "dbBlock", stageProgressBlockNo) + log.Debug(fmt.Sprintf("[%s] Highest block in db and datastream", logPrefix), "datastreamBlock", highestDSL2Block, "dbBlock", stageProgressBlockNo) unwindFn := func(unwindBlock uint64) (uint64, error) { - return rollback(logPrefix, eriDb, hermezDb, dsQueryClient, unwindBlock, tx, u) + return rollback(ctx, cfg, logPrefix, eriDb, hermezDb, unwindBlock, uint16(latestForkId), tx, u) } - if highestDSL2Block.L2BlockNumber < stageProgressBlockNo { + if highestDSL2Block < stageProgressBlockNo { log.Info(fmt.Sprintf("[%s] Datastream behind, unwinding", logPrefix)) - if _, err := unwindFn(highestDSL2Block.L2BlockNumber); err != nil { - return 
fmt.Errorf("unwindFn: %w", err) + if _, err := unwindFn(highestDSL2Block); err != nil { + return err } return nil } @@ -259,8 +279,9 @@ func SpawnStageBatches( } // start routine to download blocks and push them in a channel + errorChan := make(chan struct{}) dsClientRunner := NewDatastreamClientRunner(dsQueryClient, logPrefix) - dsClientRunner.StartRead() + dsClientRunner.StartRead(errorChan) defer dsClientRunner.StopRead() entryChan := dsQueryClient.GetEntryChan() @@ -275,7 +296,15 @@ func SpawnStageBatches( // if download routine finished, should continue to read from channel until it's empty // if both download routine stopped and channel empty - stop loop select { + case <-errorChan: + log.Warn("Error in datastream client, stopping consumption") + endLoop = true case entry := <-*entryChan: + // DEBUG LIMIT - don't write more than we need to + if cfg.zkCfg.DebugLimit > 0 && batchProcessor.LastBlockHeight() >= cfg.zkCfg.DebugLimit { + endLoop = true + break + } if endLoop, err = batchProcessor.ProcessEntry(entry); err != nil { // if we triggered an unwind somewhere we need to return from the stage if err == ErrorTriggeredUnwind { @@ -628,16 +657,26 @@ func PruneBatchesStage(s *stagedsync.PruneState, tx kv.RwTx, cfg BatchesCfg, ctx // 2. resolves the unwind block (as the latest block in the previous batch, comparing to the found ancestor block) // 3. triggers the unwinding func rollback( + ctx context.Context, + cfg BatchesCfg, logPrefix string, eriDb *erigon_db.ErigonDb, hermezDb *hermez_db.HermezDb, - dsQueryClient DatastreamClient, latestDSBlockNum uint64, + latestFork uint16, tx kv.RwTx, u stagedsync.Unwinder, ) (uint64, error) { - dsQueryClient.PrepUnwind() - ancestorBlockNum, ancestorBlockHash, err := findCommonAncestor(eriDb, hermezDb, dsQueryClient, latestDSBlockNum) + dsClient := buildNewStreamClient(ctx, cfg, latestFork) + if err := dsClient.Start(); err != nil { + return 0, err + } + defer func() { + if err := dsClient.Stop(); err != nil { + log.Error(fmt.Sprintf("[%s] Failed to stop datastream client whilst rolling back", logPrefix), "error", err) + } + }() + ancestorBlockNum, ancestorBlockHash, err := findCommonAncestor(cfg, eriDb, hermezDb, l2BlockReaderRpc{}, latestDSBlockNum) if err != nil { return 0, fmt.Errorf("findCommonAncestor: %w", err) } @@ -657,13 +696,20 @@ func rollback( return unwindBlockNum, nil } +type L2BlockReaderRpc interface { + GetZKBlockByNumberHash(url string, blockNum uint64) (common.Hash, error) + GetBatchNumberByBlockNumber(url string, blockNum uint64) (uint64, error) +} + // findCommonAncestor searches the latest common ancestor block number and hash between the data stream and the local db. // The common ancestor block is the one that matches both l2 block hash and batch number. 
func findCommonAncestor( + cfg BatchesCfg, db erigon_db.ReadOnlyErigonDb, hermezDb state.ReadOnlyHermezDb, - dsClient DatastreamClient, - latestBlockNum uint64) (uint64, common.Hash, error) { + blockReaderRpc L2BlockReaderRpc, + latestBlockNum uint64, +) (uint64, common.Hash, error) { var ( startBlockNum = uint64(0) endBlockNum = latestBlockNum @@ -681,11 +727,14 @@ func findCommonAncestor( } midBlockNum := (startBlockNum + endBlockNum) / 2 - midBlockDataStream, err := dsClient.GetL2BlockByNumber(midBlockNum) - if err != nil && - // the required block might not be in the data stream, so ignore that error - !errors.Is(err, types.ErrBadFromBookmark) { - return 0, emptyHash, fmt.Errorf("GetL2BlockByNumber block %d: %w", midBlockNum, err) + headerHash, err := blockReaderRpc.GetZKBlockByNumberHash(cfg.zkCfg.L2RpcUrl, midBlockNum) + if err != nil { + return 0, emptyHash, fmt.Errorf("ZkBlockHash: failed to get header for block %d: %w", midBlockNum, err) + } + + blockBatch, err := blockReaderRpc.GetBatchNumberByBlockNumber(cfg.zkCfg.L2RpcUrl, midBlockNum) + if err != nil { + return 0, emptyHash, fmt.Errorf("GetBatchNumberByBlockNumber: failed to get batch number for block %d: %w", midBlockNum, err) } midBlockDbHash, err := db.ReadCanonicalHash(midBlockNum) @@ -698,9 +747,9 @@ func findCommonAncestor( return 0, emptyHash, fmt.Errorf("GetBatchNoByL2Block block %d: %w", midBlockNum, err) } - if midBlockDataStream != nil && - midBlockDataStream.L2Blockhash == midBlockDbHash && - midBlockDataStream.BatchNumber == dbBatchNum { + if headerHash != (common.Hash{}) && + headerHash == midBlockDbHash && + blockBatch == dbBatchNum { startBlockNum = midBlockNum + 1 blockNumber = &midBlockNum @@ -764,3 +813,40 @@ func newStreamClient(ctx context.Context, cfg BatchesCfg, latestForkId uint64) ( return dsClient, stopFn, nil } + +func getHighestDSL2Block(ctx context.Context, batchCfg BatchesCfg, latestFork uint16) (uint64, error) { + cfg := batchCfg.zkCfg + + // first try the sequencer rpc endpoint, it might not have been upgraded to the + // latest version yet so if we get an error back from this call we can try the older + // method of calling the datastream directly + highestBlock, err := GetSequencerHighestDataStreamBlock(cfg.L2RpcUrl) + if err == nil { + return highestBlock, nil + } + + // so something went wrong with the rpc call, let's try the older method, + // but we're going to open a new connection rather than use the one for syncing blocks. 
+ // This is so we can keep the logic simple and just dispose of the connection when we're done + // greatly simplifying state juggling of the connection if it errors + dsClient := buildNewStreamClient(ctx, batchCfg, latestFork) + if err = dsClient.Start(); err != nil { + return 0, err + } + defer func() { + if err := dsClient.Stop(); err != nil { + log.Error("problem stopping datastream client looking up latest ds l2 block", "err", err) + } + }() + fullBlock, err := dsClient.GetLatestL2Block() + if err != nil { + return 0, err + } + + return fullBlock.L2BlockNumber, nil +} + +func buildNewStreamClient(ctx context.Context, batchesCfg BatchesCfg, latestFork uint16) *client.StreamClient { + cfg := batchesCfg.zkCfg + return client.NewClient(ctx, cfg.L2DataStreamerUrl, cfg.DatastreamVersion, cfg.L2DataStreamerTimeout, latestFork) +} diff --git a/zk/stages/stage_batches_datastream.go b/zk/stages/stage_batches_datastream.go index a1f9926e067..0721ca7511b 100644 --- a/zk/stages/stage_batches_datastream.go +++ b/zk/stages/stage_batches_datastream.go @@ -6,6 +6,7 @@ import ( "sync/atomic" "github.com/ledgerwatch/log/v3" + "time" ) type DatastreamClientRunner struct { @@ -22,7 +23,7 @@ func NewDatastreamClientRunner(dsClient DatastreamClient, logPrefix string) *Dat } } -func (r *DatastreamClientRunner) StartRead() error { +func (r *DatastreamClientRunner) StartRead(errorChan chan struct{}) error { r.dsClient.RenewEntryChannel() if r.isReading.Load() { return fmt.Errorf("tried starting datastream client runner thread while another is running") @@ -40,6 +41,8 @@ func (r *DatastreamClientRunner) StartRead() error { defer r.isReading.Store(false) if err := r.dsClient.ReadAllEntriesToChannel(); err != nil { + time.Sleep(1 * time.Second) + errorChan <- struct{}{} log.Warn(fmt.Sprintf("[%s] Error downloading blocks from datastream", r.logPrefix), "error", err) } }() diff --git a/zk/stages/stage_batches_test.go b/zk/stages/stage_batches_test.go index 6299f75cc39..037c4215571 100644 --- a/zk/stages/stage_batches_test.go +++ b/zk/stages/stage_batches_test.go @@ -178,20 +178,27 @@ func TestFindCommonAncestor(t *testing.T) { hermezDb := hermez_db.NewHermezDb(tx) erigonDb := erigon_db.NewErigonDb(tx) - dsBlocks := l2Blocks[:tc.dsBlocksCount] dbBlocks := l2Blocks[:tc.dbBlocksCount] if tc.divergentBlockHistory { dbBlocks = l2Blocks[tc.dsBlocksCount : tc.dbBlocksCount+tc.dsBlocksCount] } - dsClient := NewTestDatastreamClient(dsBlocks, nil) + reader := newMockL2BlockReaderRpc() + for _, l2Block := range dbBlocks { require.NoError(t, hermezDb.WriteBlockBatch(l2Block.L2BlockNumber, l2Block.BatchNumber)) require.NoError(t, rawdb.WriteCanonicalHash(tx, l2Block.L2Blockhash, l2Block.L2BlockNumber)) + reader.addBlockDetail(l2Block.L2BlockNumber, l2Block.BatchNumber, l2Block.L2Blockhash) + } + + cfg := BatchesCfg{ + zkCfg: ðconfig.Zk{ + L2RpcUrl: "test", + }, } // ACT - ancestorNum, ancestorHash, err := findCommonAncestor(erigonDb, hermezDb, dsClient, tc.latestBlockNum) + ancestorNum, ancestorHash, err := findCommonAncestor(cfg, erigonDb, hermezDb, reader, tc.latestBlockNum) // ASSERT if tc.expectedError != nil { @@ -241,3 +248,28 @@ func createTestL2Blocks(t *testing.T, blocksCount int) []types.FullL2Block { return l2Blocks } + +type mockL2BlockReaderRpc struct { + blockHashes map[uint64]common.Hash + blockBatches map[uint64]uint64 +} + +func newMockL2BlockReaderRpc() mockL2BlockReaderRpc { + return mockL2BlockReaderRpc{ + blockHashes: make(map[uint64]common.Hash), + blockBatches: make(map[uint64]uint64), + } +} + +func (m 
mockL2BlockReaderRpc) addBlockDetail(number, batch uint64, hash common.Hash) { + m.blockHashes[number] = hash + m.blockBatches[number] = batch +} + +func (m mockL2BlockReaderRpc) GetZKBlockByNumberHash(url string, blockNum uint64) (common.Hash, error) { + return m.blockHashes[blockNum], nil +} + +func (m mockL2BlockReaderRpc) GetBatchNumberByBlockNumber(url string, blockNum uint64) (uint64, error) { + return m.blockBatches[blockNum], nil +} diff --git a/zk/stages/stage_interhashes.go b/zk/stages/stage_interhashes.go index 381c8fdee08..b4cd61c10d3 100644 --- a/zk/stages/stage_interhashes.go +++ b/zk/stages/stage_interhashes.go @@ -105,12 +105,10 @@ func SpawnZkIntermediateHashesStage(s *stagedsync.StageState, u stagedsync.Unwin ///// DEBUG BISECT ///// defer func() { if cfg.zk.DebugLimit > 0 { + log.Info(fmt.Sprintf("[%s] Debug limits", logPrefix), "Limit", cfg.zk.DebugLimit, "TO", to, "Err is nil ?", err == nil) if err != nil { log.Error("Hashing Failed", "block", to, "err", err) os.Exit(1) - } else if to >= cfg.zk.DebugLimit { - tx.Commit() - os.Exit(0) } } }() diff --git a/zk/stages/test_utils.go b/zk/stages/test_utils.go index 221ccc1734b..e4cf0dd80d7 100644 --- a/zk/stages/test_utils.go +++ b/zk/stages/test_utils.go @@ -108,3 +108,7 @@ func (c *TestDatastreamClient) Stop() error { func (c *TestDatastreamClient) PrepUnwind() { // do nothing } + +func (c *TestDatastreamClient) HandleStart() error { + return nil +} diff --git a/zk/stages/utils.go b/zk/stages/utils.go index eff75671035..9c22212ea01 100644 --- a/zk/stages/utils.go +++ b/zk/stages/utils.go @@ -15,6 +15,15 @@ import ( "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/core/types" db2 "github.com/ledgerwatch/erigon/smt/pkg/db" + jsonClient "github.com/ledgerwatch/erigon/zkevm/jsonrpc/client" + jsonTypes "github.com/ledgerwatch/erigon/zkevm/jsonrpc/types" + "github.com/ledgerwatch/erigon-lib/common/hexutil" +) + +const ( + SEQUENCER_DATASTREAM_RPC_CALL = "zkevm_getLatestDataStreamBlock" + BATCH_NUMBER_BY_BLOCK_NUMBER = "zkevm_batchNumberByBlockNumber" + ZK_BLOCK_BY_NUMBER = "zkevm_getFullBlockByNumber" ) func TrimHexString(s string) string { @@ -151,3 +160,58 @@ func DeriveEffectiveGasPrice(cfg SequenceBlockCfg, tx types.Transaction) uint8 { return cfg.zk.EffectiveGasPriceForEthTransfer } + +func GetSequencerHighestDataStreamBlock(endpoint string) (uint64, error) { + res, err := jsonClient.JSONRPCCall(endpoint, SEQUENCER_DATASTREAM_RPC_CALL) + if err != nil { + return 0, err + } + + return trimHexAndHandleUint64Result(res) +} + +type l2BlockReaderRpc struct { +} + +func (l2BlockReaderRpc) GetZKBlockByNumberHash(endpoint string, blockNo uint64) (common.Hash, error) { + asHex := fmt.Sprintf("0x%x", blockNo) + res, err := jsonClient.JSONRPCCall(endpoint, ZK_BLOCK_BY_NUMBER, asHex, false) + if err != nil { + return common.Hash{}, err + } + + type ZkBlock struct { + Hash common.Hash `json:"hash"` + } + + var zkBlock ZkBlock + if err := json.Unmarshal(res.Result, &zkBlock); err != nil { + return common.Hash{}, err + } + + return zkBlock.Hash, nil +} + +func (l2BlockReaderRpc) GetBatchNumberByBlockNumber(endpoint string, blockNo uint64) (uint64, error) { + asHex := fmt.Sprintf("0x%x", blockNo) + res, err := jsonClient.JSONRPCCall(endpoint, BATCH_NUMBER_BY_BLOCK_NUMBER, asHex) + if err != nil { + return 0, err + } + + return trimHexAndHandleUint64Result(res) +} + +func trimHexAndHandleUint64Result(res jsonTypes.Response) (uint64, error) { + // hash comes in escaped quotes, so we trim them here + // \"0x1234\" -> 
0x1234
+    hashHex := strings.Trim(string(res.Result), "\"")
+
+    // now convert to a uint
+    decoded, err := hexutil.DecodeUint64(hashHex)
+    if err != nil {
+        return 0, err
+    }
+
+    return decoded, nil
+}
diff --git a/zk/tests/unwinds/unwind.sh b/zk/tests/unwinds/unwind.sh
index b48f1c15c55..d1a738becb6 100755
--- a/zk/tests/unwinds/unwind.sh
+++ b/zk/tests/unwinds/unwind.sh
@@ -11,103 +11,179 @@
 # 8. dump the data
 # 9. compare the dumps at the unwind level and tip level
 
+SECONDS=0
+
+dspid=$(lsof -i :6900 | awk 'NR==2 {print $2}')
+kill -9 "$dspid"
+
+cleanup() {
+    echo "killing datastream server"
+    if [[ -n "$dspid" ]]; then
+        echo "killing process with PID $dspid on port 6900"
+        kill -9 "$dspid"
+    fi
+
+    echo "cleaning data directories"
+    rm -rf "$dataPath/rpc-datadir"
+    rm -rf "$dataPath/phase1-dump1"
+    rm -rf "$dataPath/phase1-dump2"
+
+    rm -rf "$dataPath/phase2-dump1"
+    rm -rf "$dataPath/phase2-dump2"
+
+    echo "Total execution time: $SECONDS seconds"
+}
+
+trap cleanup EXIT
+
 dataPath="./datadir"
-firstStop=11204
+datastreamPath="zk/tests/unwinds/datastream"
+datastreamZipFileName="./datastream-net8-upto-11318-101.zip"
+firstStop=11203
 stopBlock=11315
 unwindBatch=70
-firstTimeout=300s
-secondTimeout=300s
+
+pushd "$datastreamPath" || exit
+    tar -xzf "$datastreamZipFileName"
+popd || exit
 
 rm -rf "$dataPath/rpc-datadir"
 rm -rf "$dataPath/phase1-dump1"
 rm -rf "$dataPath/phase1-dump2"
+
 rm -rf "$dataPath/phase2-dump1"
 rm -rf "$dataPath/phase2-dump2"
-rm -rf "$dataPath/phase1-diffs"
-rm -rf "$dataPath/phase2-diffs"
+
+# rm -rf "$dataPath/phase1-diffs"
+# rm -rf "$dataPath/phase2-diffs"
 
 # run datastream server
+echo -e '\nStarting datastream server \n'
 go run ./zk/debug_tools/datastream-host --file="$(pwd)/zk/tests/unwinds/datastream/hermez-dynamic-integration8-datastream/data-stream.bin" &
-# in order to start the datastream server
-sleep 10
+dspid=$! # get the id of the DS process
+
+echo "Waiting for datastream server to become available on port 6900..."
+while ! bash -c "</dev/tcp/localhost/6900" 2>/dev/null; do
+    sleep 1
+done
+echo "Datastream server is now available."
-# run erigon for a while to sync to the unwind point to capture the dump -timeout $firstTimeout ./build/bin/cdk-erigon \ +# try with 1 and check on time +echo -e '\nRun Erigon to BlockHeight: ' "${firstStop}" '\n' +./build/bin/cdk-erigon \ --datadir="$dataPath/rpc-datadir" \ - --config=./dynamic-integration8.yaml \ - --zkevm.sync-limit=${firstStop} + --config="zk/tests/unwinds/config/dynamic-integration8.yaml" \ + --debug.limit=1 \ + --debug.limit="${firstStop}" + +echo -e '\nDumping data \n' # now get a dump of the datadir at this point go run ./cmd/hack --action=dumpAll --chaindata="$dataPath/rpc-datadir/chaindata" --output="$dataPath/phase1-dump1" +echo -e '\nRun Erigon to Block Height: ' "${stopBlock}" '\n' # now run to the final stop block -timeout $secondTimeout ./build/bin/cdk-erigon \ +./build/bin/cdk-erigon \ --datadir="$dataPath/rpc-datadir" \ - --config=./dynamic-integration8.yaml \ - --zkevm.sync-limit=${stopBlock} + --config="zk/tests/unwinds/config/dynamic-integration8.yaml" \ + --debug.limit="${stopBlock}" +echo -e '\nDumping data phase 2 \n' # now get a dump of the datadir at this point go run ./cmd/hack --action=dumpAll --chaindata="$dataPath/rpc-datadir/chaindata" --output="$dataPath/phase2-dump1" # now run the unwind +echo -e '\nUnwinding to batch: ' "${unwindBatch}" '\n' go run ./cmd/integration state_stages_zkevm \ --datadir="$dataPath/rpc-datadir" \ - --config=./dynamic-integration8.yaml \ + --config="zk/tests/unwinds/config/dynamic-integration8.yaml" \ --chain=dynamic-integration \ - --unwind-batch-no=${unwindBatch} + --unwind-batch-no="${unwindBatch}" +echo -e '\nDumping data after unwind \n' # now get a dump of the datadir at this point go run ./cmd/hack --action=dumpAll --chaindata="$dataPath/rpc-datadir/chaindata" --output="$dataPath/phase1-dump2" - -mkdir -p "$dataPath/phase1-diffs/pre" -mkdir -p "$dataPath/phase1-diffs/post" +# mkdir -p "$dataPath/phase1-diffs/pre" +# mkdir -p "$dataPath/phase1-diffs/post" + +different_files=( + "Code.txt" + "HashedCodeHash.txt" + "hermez_l1Sequences.txt" + "hermez_l1Verifications.txt" + "HermezSmt.txt" + "PlainCodeHash.txt" + "SyncStage.txt" + "BadHeaderNumber.txt" + "CallToIndex.txt" +) + +is_in_array() { + local element + for element in "${different_files[@]}"; do + if [[ "$element" == "$filename" ]]; then + return 0 + fi + done + return 1 +} # iterate over the files in the pre-dump folder -for file in $(ls $dataPath/phase1-dump1); do +# we are going to check if unwind worked +for file in "$dataPath/phase1-dump1"/*; do # get the filename - filename=$(basename $file) + filename=$(basename "$file") # diff the files and if there is a difference found copy the pre and post files into the diffs folder - if cmp -s $dataPath/phase1-dump1/$filename $dataPath/phase1-dump2/$filename; then + if cmp -s "$dataPath/phase1-dump1/$filename" "$dataPath/phase1-dump2/$filename"; then echo "No difference found in $filename" else - if [ "$filename" = "Code.txt" ] || [ "$filename" = "HashedCodeHash.txt" ] || [ "$filename" = "hermez_l1Sequences.txt" ] || [ "$filename" = "hermez_l1Verifications.txt" ] || [ "$filename" = "HermezSmt.txt" ] || [ "$filename" = "PlainCodeHash.txt" ] || [ "$filename" = "SyncStage.txt" ] || [ "$filename" = "BadHeaderNumber.txt" ]; then - echo "Phase 1 Expected differences in $filename" + # this is a list of files where we expect differences. 
+ if is_in_array; then + echo "Phase 1 - Expected differences in $filename" else - echo "Phase 1 Unexpected differences in $filename" + # unwind tests failed + echo "Phase 1 - Error unexpected differences in $filename" + echo "Unwind failed" exit 1 fi fi done # now sync again -timeout $secondTimeout ./build/bin/cdk-erigon \ +# the data must match, if it doesn't match something is wrong, because if we unwinded returning to it should be the same. +echo -e '\nRunning erigon to the same stopBlock again \n' +./build/bin/cdk-erigon \ --datadir="$dataPath/rpc-datadir" \ - --config=./dynamic-integration8.yaml \ - --zkevm.sync-limit=${stopBlock} + --config="zk/tests/unwinds/config/dynamic-integration8.yaml" \ + --debug.limit="${stopBlock}" +echo -e '\nDumping data after unwind \n' # dump the data again into the post folder go run ./cmd/hack --action=dumpAll --chaindata="$dataPath/rpc-datadir/chaindata" --output="$dataPath/phase2-dump2" -mkdir -p "$dataPath/phase2-diffs/pre" -mkdir -p "$dataPath/phase2-diffs/post" +# mkdir -p "$dataPath/phase2-diffs/pre" +# mkdir -p "$dataPath/phase2-diffs/post" # iterate over the files in the pre-dump folder -for file in $(ls $dataPath/phase2-dump1); do +for file in "$dataPath/phase2-dump1"/*; do # get the filename - filename=$(basename $file) + filename=$(basename "$file") # diff the files and if there is a difference found copy the pre and post files into the diffs folder - if cmp -s $dataPath/phase2-dump1/$filename $dataPath/phase2-dump2/$filename; then + if cmp -s "$dataPath/phase2-dump1/$filename" "$dataPath/phase2-dump2/$filename"; then echo "Phase 2 No difference found in $filename" else - if [ "$filename" = "BadHeaderNumber.txt" ]; then - echo "Phase 2 Expected differences in $filename" + # file where it should be different + if [ "$filename" = "BadHeaderNumber.txt" ]; then + echo "Phase 2 - Expected differences in $filename" else - echo "Phase 2 Unexpected differences in $filename" + echo "Phase 2 - Error unexpected differences in $filename" exit 2 fi fi done + +echo "No error" diff --git a/zk/utils/acc_input_hash.go b/zk/utils/acc_input_hash.go index 29c140d2018..7a66b899ee9 100644 --- a/zk/utils/acc_input_hash.go +++ b/zk/utils/acc_input_hash.go @@ -6,8 +6,6 @@ import ( "github.com/iden3/go-iden3-crypto/keccak256" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon-lib/chain" - "errors" ) func CalculateBananaAccInputHash( @@ -197,99 +195,3 @@ func calculatePreEtrogAccInputHash( func CalculateBatchHashData(transactions []byte) []byte { return crypto.Keccak256(transactions) } - -type AccHashInputs struct { - // common - OldAccInputHash *common.Hash - Sequencer common.Address - BatchData []byte - - // etrog - L1InfoRoot *common.Hash - LimitTimestamp uint64 - ForcedBlockHash *common.Hash - - // pre etrog - GlobalExitRoot *common.Hash - Timestamp uint64 - - // validium - IsValidium bool - BatchTransactionData *common.Hash -} - -func CalculateAccInputHashByForkId(input AccHashInputs, forkId uint64) (*common.Hash, error) { - var newAccInputHash *common.Hash - - if forkId >= uint64(chain.ForkID7Etrog) { - // etrog - if !input.IsValidium { - // rollup - if input.BatchData == nil || len(input.BatchData) == 0 { - return nil, errors.New("batchData is required for etrog rollup") - } - if input.L1InfoRoot == nil { - return nil, errors.New("l1InfoRoot is required for etrog rollup") - } - if input.ForcedBlockHash == nil { - return nil, errors.New("forcedBlockHash is required for etrog rollup") - } - 
newAccInputHash = CalculateEtrogAccInputHash( - *input.OldAccInputHash, - input.BatchData, - *input.L1InfoRoot, - input.LimitTimestamp, - input.Sequencer, - *input.ForcedBlockHash, - ) - } else { - // validium - if input.L1InfoRoot == nil { - return nil, errors.New("l1InfoRoot is required for etrog validium") - } - if input.ForcedBlockHash == nil { - return nil, errors.New("forcedBlockHash is required for etrog validium") - } - newAccInputHash = CalculateEtrogValidiumAccInputHash( - *input.OldAccInputHash, - *input.BatchTransactionData, - *input.L1InfoRoot, - input.LimitTimestamp, - input.Sequencer, - *input.ForcedBlockHash, - ) - } - } else { - // pre-etrog - if !input.IsValidium { - // rollup - if input.BatchData == nil || len(input.BatchData) == 0 { - return nil, errors.New("batchData is required for pre-etrog rollup") - } - if input.GlobalExitRoot == nil { - return nil, errors.New("globalExitRoot is required for pre-etrog rollup") - } - newAccInputHash = CalculatePreEtrogAccInputHash( - *input.OldAccInputHash, - input.BatchData, - *input.GlobalExitRoot, - input.Timestamp, - input.Sequencer, - ) - } else { - // validium - if input.GlobalExitRoot == nil { - return nil, errors.New("globalExitRoot is required for pre-etrog validium") - } - newAccInputHash = CalculatePreEtrogValidiumAccInputHash( - *input.OldAccInputHash, - *input.BatchTransactionData, - *input.GlobalExitRoot, - input.Timestamp, - input.Sequencer, - ) - } - } - - return newAccInputHash, nil -} From 1a7e13bfcab4622e4fb4e06651e205fc60cecb1e Mon Sep 17 00:00:00 2001 From: Arthur Abeilice Date: Tue, 26 Nov 2024 16:12:58 +0700 Subject: [PATCH 73/88] feat(txpool): log nonce issues during transaction addition --- zk/txpool/pool.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/zk/txpool/pool.go b/zk/txpool/pool.go index e019e13a139..7fe340a0ebd 100644 --- a/zk/txpool/pool.go +++ b/zk/txpool/pool.go @@ -1199,6 +1199,14 @@ func (p *TxPool) addLocked(mt *metaTx, announcements *types.Announcements) Disca return NotReplaced } + // Log nonce issue + log.Warn("Nonce issue detected", + "account", mt.Tx.SenderID, + "oldTxHash", hex.EncodeToString(found.Tx.IDHash[:]), + "newTxHash", hex.EncodeToString(mt.Tx.IDHash[:]), + "nonce", mt.Tx.Nonce, + ) + switch found.currentSubPool { case PendingSubPool: p.pending.Remove(found) From 9f8ee177e6472be601d45fd7f18d0d485d02e8f4 Mon Sep 17 00:00:00 2001 From: Scott Fairclough <70711990+hexoscott@users.noreply.github.com> Date: Tue, 26 Nov 2024 09:49:25 +0000 Subject: [PATCH 74/88] mined transactions removed from inclusion list (#1504) same approach we use for removing bad transactions from this same list --- zk/stages/stage_sequence_execute.go | 34 ++++++++++++++++++++++++----- 1 file changed, 28 insertions(+), 6 deletions(-) diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index e1cca259113..72718589b88 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -378,7 +378,8 @@ func sequencingBatchStep( log.Trace(fmt.Sprintf("[%s] Yielded transactions from the pool", logPrefix), "txCount", len(batchState.blockState.transactionsForInclusion)) } - badTxIndexes := make([]int, 0) + badTxHashes := make([]common.Hash, 0) + minedTxHashes := make([]common.Hash, 0) for i, transaction := range batchState.blockState.transactionsForInclusion { txHash := transaction.Hash() effectiveGas := batchState.blockState.getL1EffectiveGases(cfg, i) @@ -417,7 +418,7 @@ func sequencingBatchStep( // to stop the pool growing and hampering further 
processing of good transactions here // we mark it for being discarded log.Warn(fmt.Sprintf("[%s] error adding transaction to batch, discarding from pool", logPrefix), "hash", txHash, "err", err) - badTxIndexes = append(badTxIndexes, i) + badTxHashes = append(badTxHashes, txHash) batchState.blockState.transactionsToDiscard = append(batchState.blockState.transactionsToDiscard, batchState.blockState.transactionHashesToSlots[txHash]) } @@ -480,6 +481,7 @@ func sequencingBatchStep( if err == nil { blockDataSizeChecker = &backupDataSizeChecker batchState.onAddedTransaction(transaction, receipt, execResult, effectiveGas) + minedTxHashes = append(minedTxHashes, txHash) } // We will only update the processed index in resequence job if there isn't overflow @@ -505,10 +507,23 @@ func sequencingBatchStep( } } - // remove transactions that have been marked for removal - for i := len(badTxIndexes) - 1; i >= 0; i-- { - idx := badTxIndexes[i] - batchState.blockState.transactionsForInclusion = append(batchState.blockState.transactionsForInclusion[:idx], batchState.blockState.transactionsForInclusion[idx+1:]...) + // remove bad and mined transactions from the list for inclusion + for i := len(batchState.blockState.transactionsForInclusion) - 1; i >= 0; i-- { + tx := batchState.blockState.transactionsForInclusion[i] + hash := tx.Hash() + for _, badHash := range badTxHashes { + if badHash == hash { + batchState.blockState.transactionsForInclusion = removeInclusionTransaction(batchState.blockState.transactionsForInclusion, i) + break + } + } + + for _, minedHash := range minedTxHashes { + if minedHash == hash { + batchState.blockState.transactionsForInclusion = removeInclusionTransaction(batchState.blockState.transactionsForInclusion, i) + break + } + } } if batchState.isL1Recovery() { @@ -610,3 +625,10 @@ func sequencingBatchStep( return sdb.tx.Commit() } + +func removeInclusionTransaction(orig []types.Transaction, index int) []types.Transaction { + if index < 0 || index >= len(orig) { + return orig + } + return append(orig[:index], orig[index+1:]...) 
+} From 9b78a486de7db3a992b25d1c86dbb1e8cba9467d Mon Sep 17 00:00:00 2001 From: Scott Fairclough <70711990+hexoscott@users.noreply.github.com> Date: Tue, 26 Nov 2024 14:41:19 +0000 Subject: [PATCH 75/88] check for contract code ending in an empty push pre fork 10 (#1159) * check for contract code ending in an empty push pre fork 10 * fix: wrong case usage * fixing panic on len == 0, going to -1 * remove hard coded 0x60 for push1 check --------- Co-authored-by: Arthur Abeilice --- core/vm/evm_zkevm.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/core/vm/evm_zkevm.go b/core/vm/evm_zkevm.go index 5e36ae007be..6c1fd87ce16 100644 --- a/core/vm/evm_zkevm.go +++ b/core/vm/evm_zkevm.go @@ -271,6 +271,14 @@ func (evm *EVM) call_zkevm(typ OpCode, caller ContractRef, addr libcommon.Addres var code []byte if !isPrecompile { code = evm.intraBlockState.GetCode(addr) + + // zk - up to fork 10 we cannot handle a contract code that ends with just a push and nothing to push to the stack + // so check for this scenario + if !evm.chainConfig.IsForkID10(evm.Context.BlockNumber) { + if len(code) > 0 && code[len(code)-1] == byte(PUSH1) { + return nil, gas, ErrInvalidCode + } + } } snapshot := evm.intraBlockState.Snapshot() From 4982e6f40e26a263f30063142fd4923067698c33 Mon Sep 17 00:00:00 2001 From: Scott Fairclough <70711990+hexoscott@users.noreply.github.com> Date: Tue, 26 Nov 2024 14:42:36 +0000 Subject: [PATCH 76/88] purge routine for txpool (#1471) --- cmd/txpool/main.go | 8 +++- cmd/utils/flags.go | 16 +++++++ erigon-lib/txpool/txpoolcfg/txpoolcfg.go | 5 ++ eth/ethconfig/tx_pool.go | 2 + turbo/cli/default_flags.go | 2 + zk/txpool/pool.go | 59 +++++++++++++++++++++++- 6 files changed, 90 insertions(+), 2 deletions(-) diff --git a/cmd/txpool/main.go b/cmd/txpool/main.go index 385842cc89f..c373c85d520 100644 --- a/cmd/txpool/main.go +++ b/cmd/txpool/main.go @@ -59,7 +59,9 @@ var ( noTxGossip bool - commitEvery time.Duration + commitEvery time.Duration + purgeEvery time.Duration + purgeDistance time.Duration ) func init() { @@ -85,6 +87,8 @@ func init() { rootCmd.PersistentFlags().Uint64Var(&priceBump, "txpool.pricebump", txpoolcfg.DefaultConfig.PriceBump, "Price bump percentage to replace an already existing transaction") rootCmd.PersistentFlags().Uint64Var(&blobPriceBump, "txpool.blobpricebump", txpoolcfg.DefaultConfig.BlobPriceBump, "Price bump percentage to replace an existing blob (type-3) transaction") rootCmd.PersistentFlags().DurationVar(&commitEvery, utils.TxPoolCommitEveryFlag.Name, utils.TxPoolCommitEveryFlag.Value, utils.TxPoolCommitEveryFlag.Usage) + rootCmd.PersistentFlags().DurationVar(&purgeEvery, utils.TxpoolPurgeEveryFlag.Name, utils.TxpoolPurgeEveryFlag.Value, utils.TxpoolPurgeEveryFlag.Usage) + rootCmd.PersistentFlags().DurationVar(&purgeDistance, utils.TxpoolPurgeDistanceFlag.Name, utils.TxpoolPurgeDistanceFlag.Value, utils.TxpoolPurgeDistanceFlag.Usage) rootCmd.PersistentFlags().BoolVar(&noTxGossip, utils.TxPoolGossipDisableFlag.Name, utils.TxPoolGossipDisableFlag.Value, utils.TxPoolGossipDisableFlag.Usage) rootCmd.Flags().StringSliceVar(&traceSenders, utils.TxPoolTraceSendersFlag.Name, []string{}, utils.TxPoolTraceSendersFlag.Usage) } @@ -144,6 +148,8 @@ func doTxpool(ctx context.Context, logger log.Logger) error { cfg.DBDir = dirs.TxPool cfg.CommitEvery = common2.RandomizeDuration(commitEvery) + cfg.PurgeEvery = common2.RandomizeDuration(purgeEvery) + cfg.PurgeDistance = purgeDistance cfg.PendingSubPoolLimit = pendingPoolLimit cfg.BaseFeeSubPoolLimit = baseFeePoolLimit 
cfg.QueuedSubPoolLimit = queuedPoolLimit diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index e9810955ab4..319b3437fbf 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -237,6 +237,16 @@ var ( Usage: "How often transactions should be committed to the storage", Value: txpoolcfg.DefaultConfig.CommitEvery, } + TxpoolPurgeEveryFlag = cli.DurationFlag{ + Name: "txpool.purge.every", + Usage: "How often transactions should be purged from the storage", + Value: txpoolcfg.DefaultConfig.PurgeEvery, + } + TxpoolPurgeDistanceFlag = cli.DurationFlag{ + Name: "txpool.purge.distance", + Usage: "Transactions older than this distance will be purged", + Value: txpoolcfg.DefaultConfig.PurgeDistance, + } // Miner settings MiningEnabledFlag = cli.BoolFlag{ Name: "mine", @@ -1919,6 +1929,12 @@ func setTxPool(ctx *cli.Context, fullCfg *ethconfig.Config) { fullCfg.TxPool.BlobPriceBump = ctx.Uint64(TxPoolBlobPriceBumpFlag.Name) } cfg.CommitEvery = common2.RandomizeDuration(ctx.Duration(TxPoolCommitEveryFlag.Name)) + + purgeEvery := ctx.Duration(TxpoolPurgeEveryFlag.Name) + purgeDistance := ctx.Duration(TxpoolPurgeDistanceFlag.Name) + + fullCfg.TxPool.PurgeEvery = common2.RandomizeDuration(purgeEvery) + fullCfg.TxPool.PurgeDistance = purgeDistance } func setEthash(ctx *cli.Context, datadir string, cfg *ethconfig.Config) { diff --git a/erigon-lib/txpool/txpoolcfg/txpoolcfg.go b/erigon-lib/txpool/txpoolcfg/txpoolcfg.go index 54f5e0dfa9a..0ee6d97bed5 100644 --- a/erigon-lib/txpool/txpoolcfg/txpoolcfg.go +++ b/erigon-lib/txpool/txpoolcfg/txpoolcfg.go @@ -52,6 +52,7 @@ type Config struct { ProcessRemoteTxsEvery time.Duration CommitEvery time.Duration LogEvery time.Duration + PurgeEvery time.Duration //txpool db MdbxPageSize datasize.ByteSize @@ -59,6 +60,8 @@ type Config struct { MdbxGrowthStep datasize.ByteSize NoGossip bool // this mode doesn't broadcast any txs, and if receive remote-txn - skip it + + PurgeDistance time.Duration } var DefaultConfig = Config{ @@ -66,6 +69,8 @@ var DefaultConfig = Config{ ProcessRemoteTxsEvery: 100 * time.Millisecond, CommitEvery: 15 * time.Second, LogEvery: 30 * time.Second, + PurgeEvery: 1 * time.Minute, + PurgeDistance: 24 * time.Hour, PendingSubPoolLimit: 10_000, BaseFeeSubPoolLimit: 10_000, diff --git a/eth/ethconfig/tx_pool.go b/eth/ethconfig/tx_pool.go index 8909339f822..c2813475aab 100644 --- a/eth/ethconfig/tx_pool.go +++ b/eth/ethconfig/tx_pool.go @@ -76,6 +76,8 @@ var DefaultTxPool2Config = func(fullCfg *Config) txpoolcfg.Config { cfg.CommitEvery = 5 * time.Minute cfg.TracedSenders = pool1Cfg.TracedSenders cfg.CommitEvery = pool1Cfg.CommitEvery + cfg.PurgeEvery = fullCfg.TxPool.PurgeEvery + cfg.PurgeDistance = fullCfg.TxPool.PurgeDistance return cfg } diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index 7ea5b33a24f..618a6c5bdc1 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -29,6 +29,8 @@ var DefaultFlags = []cli.Flag{ &utils.TxPoolLifetimeFlag, &utils.TxPoolTraceSendersFlag, &utils.TxPoolCommitEveryFlag, + &utils.TxpoolPurgeEveryFlag, + &utils.TxpoolPurgeDistanceFlag, &PruneFlag, &PruneHistoryFlag, &PruneReceiptFlag, diff --git a/zk/txpool/pool.go b/zk/txpool/pool.go index e019e13a139..78b5ca988dc 100644 --- a/zk/txpool/pool.go +++ b/zk/txpool/pool.go @@ -149,6 +149,7 @@ const ( DiscardByLimbo DiscardReason = 27 SmartContractDeploymentDisabled DiscardReason = 28 // to == null not allowed, config set to block smart contract deployment GasLimitTooHigh DiscardReason = 29 // gas limit is too high + Expired 
DiscardReason = 30 // used when a transaction is purged from the pool ) func (r DiscardReason) String() string { @@ -228,13 +229,14 @@ type metaTx struct { bestIndex int worstIndex int timestamp uint64 // when it was added to pool + created uint64 // unix timestamp of creation subPool SubPoolMarker currentSubPool SubPoolType alreadyYielded bool } func newMetaTx(slot *types.TxSlot, isLocal bool, timestmap uint64) *metaTx { - mt := &metaTx{Tx: slot, worstIndex: -1, bestIndex: -1, timestamp: timestmap} + mt := &metaTx{Tx: slot, worstIndex: -1, bestIndex: -1, timestamp: timestmap, created: uint64(time.Now().Unix())} if isLocal { mt.subPool = IsLocal } @@ -1393,6 +1395,8 @@ func MainLoop(ctx context.Context, db kv.RwDB, coreDB kv.RoDB, p *TxPool, newTxs defer commitEvery.Stop() logEvery := time.NewTicker(p.cfg.LogEvery) defer logEvery.Stop() + purgeEvery := time.NewTicker(p.cfg.PurgeEvery) + defer purgeEvery.Stop() for { select { @@ -1522,6 +1526,8 @@ func MainLoop(ctx context.Context, db kv.RwDB, coreDB kv.RoDB, p *TxPool, newTxs types, sizes, hashes = p.AppendAllAnnouncements(types, sizes, hashes[:0]) go send.PropagatePooledTxsToPeersList(newPeers, types, sizes, hashes) propagateToNewPeerTimer.UpdateDuration(t) + case <-purgeEvery.C: + p.purge() } } } @@ -1835,6 +1841,57 @@ func (p *TxPool) deprecatedForEach(_ context.Context, f func(rlp []byte, sender }) } +func (p *TxPool) purge() { + p.lock.Lock() + defer p.lock.Unlock() + + // go through all transactions and remove the ones that have a timestamp older than the purge time in config + cutOff := uint64(time.Now().Add(-p.cfg.PurgeDistance).Unix()) + log.Debug("[txpool] purging", "cutOff", cutOff) + + toDelete := make([]*metaTx, 0) + + p.all.ascendAll(func(mt *metaTx) bool { + // don't purge from pending + if mt.currentSubPool == PendingSubPool { + return true + } + if mt.created < cutOff { + toDelete = append(toDelete, mt) + } + return true + }) + + for _, mt := range toDelete { + switch mt.currentSubPool { + case PendingSubPool: + p.pending.Remove(mt) + case BaseFeeSubPool: + p.baseFee.Remove(mt) + case QueuedSubPool: + p.queued.Remove(mt) + default: + //already removed + } + + p.discardLocked(mt, Expired) + + // do not hold on to the discard reason as we're purging it completely from the pool and an end user + // may wish to resubmit it and we should allow this + p.discardReasonsLRU.Remove(string(mt.Tx.IDHash[:])) + + // get the address of the sender + addr := common.Address{} + if checkAddr, ok := p.senders.senderID2Addr[mt.Tx.SenderID]; ok { + addr = checkAddr + } + log.Debug("[txpool] purge", + "sender", addr, + "hash", hex.EncodeToString(mt.Tx.IDHash[:]), + "ts", mt.created) + } +} + // CalcIntrinsicGas computes the 'intrinsic gas' for a message with the given data. 
func CalcIntrinsicGas(dataLen, dataNonZeroLen uint64, accessList types.AccessList, isContractCreation, isHomestead, isEIP2028, isShanghai bool) (uint64, DiscardReason) { // Set the starting gas for the raw transaction From 7deae3bf75c2bd6c029253366c82767daf5598a3 Mon Sep 17 00:00:00 2001 From: Thiago Coimbra Lemos Date: Tue, 26 Nov 2024 12:15:13 -0300 Subject: [PATCH 77/88] test: add unit test for Stage: L1 syncer (#1474) --- .../commands/mocks/l1_syncer_mock.go | 151 --------- erigon-lib/direct/sentry_client_mock.go | 16 +- turbo/jsonrpc/zkevm_api_test.go | 2 +- zk/stages/stage_l1_info_tree_test.go | 2 +- zk/stages/stage_l1_sequencer_sync_test.go | 2 +- .../{stage_l1syncer.go => stage_l1_syncer.go} | 16 +- zk/stages/stage_l1_syncer_test.go | 316 +++++++++++++++++ zk/syncer/l1_syncer.go | 2 + zk/syncer/mocks/etherman_mock.go | 318 ++++++++++++++++++ 9 files changed, 651 insertions(+), 174 deletions(-) delete mode 100644 cmd/rpcdaemon/commands/mocks/l1_syncer_mock.go rename zk/stages/{stage_l1syncer.go => stage_l1_syncer.go} (96%) create mode 100644 zk/stages/stage_l1_syncer_test.go create mode 100644 zk/syncer/mocks/etherman_mock.go diff --git a/cmd/rpcdaemon/commands/mocks/l1_syncer_mock.go b/cmd/rpcdaemon/commands/mocks/l1_syncer_mock.go deleted file mode 100644 index 86825d378f3..00000000000 --- a/cmd/rpcdaemon/commands/mocks/l1_syncer_mock.go +++ /dev/null @@ -1,151 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: /home/rr/Documentos/Iden3/cdk-erigon/zk/syncer/l1_syncer.go -// -// Generated by this command: -// -// mockgen -source /home/rr/Documentos/Iden3/cdk-erigon/zk/syncer/l1_syncer.go -destination /home/rr/Documentos/Iden3/cdk-erigon/cmd/rpcdaemon/commands/mock/l1_syncer_mock.go -package=mocks -// - -// Package mocks is a generated GoMock package. -package mocks - -import ( - context "context" - big "math/big" - reflect "reflect" - - common "github.com/ledgerwatch/erigon-lib/common" - ethereum "github.com/ledgerwatch/erigon" - types "github.com/ledgerwatch/erigon/core/types" - gomock "go.uber.org/mock/gomock" -) - -// MockIEtherman is a mock of IEtherman interface. -type MockIEtherman struct { - ctrl *gomock.Controller - recorder *MockIEthermanMockRecorder -} - -// MockIEthermanMockRecorder is the mock recorder for MockIEtherman. -type MockIEthermanMockRecorder struct { - mock *MockIEtherman -} - -// NewMockIEtherman creates a new mock instance. -func NewMockIEtherman(ctrl *gomock.Controller) *MockIEtherman { - mock := &MockIEtherman{ctrl: ctrl} - mock.recorder = &MockIEthermanMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockIEtherman) EXPECT() *MockIEthermanMockRecorder { - return m.recorder -} - -// BlockByNumber mocks base method. -func (m *MockIEtherman) BlockByNumber(ctx context.Context, blockNumber *big.Int) (*types.Block, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BlockByNumber", ctx, blockNumber) - ret0, _ := ret[0].(*types.Block) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// BlockByNumber indicates an expected call of BlockByNumber. -func (mr *MockIEthermanMockRecorder) BlockByNumber(ctx, blockNumber any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockByNumber", reflect.TypeOf((*MockIEtherman)(nil).BlockByNumber), ctx, blockNumber) -} - -// CallContract mocks base method. 
-func (m *MockIEtherman) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CallContract", ctx, msg, blockNumber) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CallContract indicates an expected call of CallContract. -func (mr *MockIEthermanMockRecorder) CallContract(ctx, msg, blockNumber any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CallContract", reflect.TypeOf((*MockIEtherman)(nil).CallContract), ctx, msg, blockNumber) -} - -// CallContract indicates an expected call of CallContract. -func (m *MockIEtherman) StorageAt(ctx context.Context, contract common.Address, key common.Hash, blockNumber *big.Int) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StorageAt", ctx, contract, key, blockNumber) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CallContract indicates an expected call of CallContract. -func (mr *MockIEthermanMockRecorder) StorageAt(ctx, contract, key, blockNumber any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageAt", reflect.TypeOf((*MockIEtherman)(nil).StorageAt), ctx, contract, key, blockNumber) -} - - -// FilterLogs mocks base method. -func (m *MockIEtherman) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FilterLogs", ctx, query) - ret0, _ := ret[0].([]types.Log) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FilterLogs indicates an expected call of FilterLogs. -func (mr *MockIEthermanMockRecorder) FilterLogs(ctx, query any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FilterLogs", reflect.TypeOf((*MockIEtherman)(nil).FilterLogs), ctx, query) -} - -// HeaderByNumber mocks base method. -func (m *MockIEtherman) HeaderByNumber(ctx context.Context, blockNumber *big.Int) (*types.Header, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HeaderByNumber", ctx, blockNumber) - ret0, _ := ret[0].(*types.Header) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// HeaderByNumber indicates an expected call of HeaderByNumber. -func (mr *MockIEthermanMockRecorder) HeaderByNumber(ctx, blockNumber any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeaderByNumber", reflect.TypeOf((*MockIEtherman)(nil).HeaderByNumber), ctx, blockNumber) -} - -// TransactionByHash mocks base method. -func (m *MockIEtherman) TransactionByHash(ctx context.Context, hash common.Hash) (types.Transaction, bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TransactionByHash", ctx, hash) - ret0, _ := ret[0].(types.Transaction) - ret1, _ := ret[1].(bool) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// TransactionByHash indicates an expected call of TransactionByHash. -func (mr *MockIEthermanMockRecorder) TransactionByHash(ctx, hash any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TransactionByHash", reflect.TypeOf((*MockIEtherman)(nil).TransactionByHash), ctx, hash) -} - -// TransactionReceipt mocks base method. 
-func (m *MockIEtherman) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TransactionReceipt", ctx, txHash) - ret0, _ := ret[0].(*types.Receipt) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// TransactionReceipt indicates an expected call of TransactionReceipt. -func (mr *MockIEthermanMockRecorder) TransactionReceipt(ctx, txHash any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TransactionReceipt", reflect.TypeOf((*MockIEtherman)(nil).TransactionReceipt), ctx, txHash) -} diff --git a/erigon-lib/direct/sentry_client_mock.go b/erigon-lib/direct/sentry_client_mock.go index 48074023d41..3cf18f11298 100644 --- a/erigon-lib/direct/sentry_client_mock.go +++ b/erigon-lib/direct/sentry_client_mock.go @@ -10,14 +10,14 @@ package direct import ( - "context" - "reflect" - - "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" - "github.com/ledgerwatch/erigon-lib/gointerfaces/types" - "go.uber.org/mock/gomock" - "google.golang.org/grpc" - "google.golang.org/protobuf/types/known/emptypb" + context "context" + reflect "reflect" + + sentry "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" + types "github.com/ledgerwatch/erigon-lib/gointerfaces/types" + gomock "go.uber.org/mock/gomock" + grpc "google.golang.org/grpc" + emptypb "google.golang.org/protobuf/types/known/emptypb" ) // MockSentryClient is a mock of SentryClient interface. diff --git a/turbo/jsonrpc/zkevm_api_test.go b/turbo/jsonrpc/zkevm_api_test.go index c812cd53230..8746a0cecd5 100644 --- a/turbo/jsonrpc/zkevm_api_test.go +++ b/turbo/jsonrpc/zkevm_api_test.go @@ -17,7 +17,6 @@ import ( "github.com/ledgerwatch/erigon-lib/common/hexutil" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon/accounts/abi/bind/backends" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/commands/mocks" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" @@ -30,6 +29,7 @@ import ( "github.com/ledgerwatch/erigon/zk/hermez_db" rpctypes "github.com/ledgerwatch/erigon/zk/rpcdaemon" "github.com/ledgerwatch/erigon/zk/syncer" + "github.com/ledgerwatch/erigon/zk/syncer/mocks" zktypes "github.com/ledgerwatch/erigon/zk/types" "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/assert" diff --git a/zk/stages/stage_l1_info_tree_test.go b/zk/stages/stage_l1_info_tree_test.go index c2e3a93d511..585a1ad299a 100644 --- a/zk/stages/stage_l1_info_tree_test.go +++ b/zk/stages/stage_l1_info_tree_test.go @@ -10,7 +10,6 @@ import ( ethereum "github.com/ledgerwatch/erigon" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv/memdb" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/commands/mocks" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync" @@ -20,6 +19,7 @@ import ( "github.com/ledgerwatch/erigon/zk/hermez_db" "github.com/ledgerwatch/erigon/zk/l1infotree" "github.com/ledgerwatch/erigon/zk/syncer" + "github.com/ledgerwatch/erigon/zk/syncer/mocks" "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/zk/stages/stage_l1_sequencer_sync_test.go b/zk/stages/stage_l1_sequencer_sync_test.go index 5dc1f836dbb..e69d5eedf13 100644 --- a/zk/stages/stage_l1_sequencer_sync_test.go +++ b/zk/stages/stage_l1_sequencer_sync_test.go @@ -9,7 +9,6 @@ import ( ethereum 
"github.com/ledgerwatch/erigon" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv/memdb" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/commands/mocks" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync" @@ -19,6 +18,7 @@ import ( "github.com/ledgerwatch/erigon/zk/contracts" "github.com/ledgerwatch/erigon/zk/hermez_db" "github.com/ledgerwatch/erigon/zk/syncer" + "github.com/ledgerwatch/erigon/zk/syncer/mocks" "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/zk/stages/stage_l1syncer.go b/zk/stages/stage_l1_syncer.go similarity index 96% rename from zk/stages/stage_l1syncer.go rename to zk/stages/stage_l1_syncer.go index 5c78f7209ec..40edbc1a082 100644 --- a/zk/stages/stage_l1syncer.go +++ b/zk/stages/stage_l1_syncer.go @@ -25,7 +25,6 @@ import ( ) type IL1Syncer interface { - // atomic IsSyncStarted() bool IsDownloading() bool @@ -46,8 +45,7 @@ type IL1Syncer interface { } var ( - ErrStateRootMismatch = errors.New("state root mismatch") - + ErrStateRootMismatch = errors.New("state root mismatch") lastCheckedL1BlockCounter = metrics.GetOrCreateGauge(`last_checked_l1_block`) ) @@ -266,15 +264,9 @@ func parseLogType(l1RollupId uint64, log *ethTypes.Log) (l1BatchInfo types.L1Bat batchNum = new(big.Int).SetBytes(log.Topics[1].Bytes()).Uint64() stateRoot = common.BytesToHash(log.Data[:32]) case contracts.VerificationValidiumTopicEtrog: - bigRollupId := new(big.Int).SetUint64(l1RollupId) - isRollupIdMatching := log.Topics[1] == common.BigToHash(bigRollupId) - if isRollupIdMatching { - batchLogType = logVerifyEtrog - batchNum = new(big.Int).SetBytes(log.Topics[1].Bytes()).Uint64() - stateRoot = common.BytesToHash(log.Data[:32]) - } else { - batchLogType = logIncompatible - } + batchLogType = logVerifyEtrog + batchNum = new(big.Int).SetBytes(log.Topics[1].Bytes()).Uint64() + stateRoot = common.BytesToHash(log.Data[:32]) case contracts.VerificationTopicEtrog: bigRollupId := new(big.Int).SetUint64(l1RollupId) isRollupIdMatching := log.Topics[1] == common.BigToHash(bigRollupId) diff --git a/zk/stages/stage_l1_syncer_test.go b/zk/stages/stage_l1_syncer_test.go new file mode 100644 index 00000000000..dd7bb76e71f --- /dev/null +++ b/zk/stages/stage_l1_syncer_test.go @@ -0,0 +1,316 @@ +package stages + +import ( + "context" + "math/big" + "testing" + "time" + + ethereum "github.com/ledgerwatch/erigon" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/stagedsync" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/smt/pkg/db" + "github.com/ledgerwatch/erigon/zk/contracts" + "github.com/ledgerwatch/erigon/zk/hermez_db" + "github.com/ledgerwatch/erigon/zk/syncer" + "github.com/ledgerwatch/erigon/zk/syncer/mocks" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +func TestSpawnStageL1Syncer(t *testing.T) { + // Arrange + ctx, db1 := context.Background(), memdb.NewTestDB(t) + tx := memdb.BeginRw(t, db1) + err := hermez_db.CreateHermezBuckets(tx) + require.NoError(t, err) + err = db.CreateEriDbBuckets(tx) + require.NoError(t, err) + + l1FirstBlock := big.NewInt(20) + l2BlockNumber := uint64(10) + verifiedBatchNumber := uint64(2) + + hDB := 
hermez_db.NewHermezDb(tx) + err = hDB.WriteBlockBatch(0, 0) + require.NoError(t, err) + err = hDB.WriteBlockBatch(l2BlockNumber-1, verifiedBatchNumber-1) + require.NoError(t, err) + err = hDB.WriteBlockBatch(l2BlockNumber, verifiedBatchNumber) + require.NoError(t, err) + err = stages.SaveStageProgress(tx, stages.L1Syncer, 0) + require.NoError(t, err) + err = stages.SaveStageProgress(tx, stages.IntermediateHashes, l2BlockNumber-1) + require.NoError(t, err) + + err = hDB.WriteVerification(l1FirstBlock.Uint64(), verifiedBatchNumber-1, common.HexToHash("0x1"), common.HexToHash("0x99990")) + require.NoError(t, err) + err = hDB.WriteVerification(l1FirstBlock.Uint64(), verifiedBatchNumber, common.HexToHash("0x2"), common.HexToHash("0x99999")) + require.NoError(t, err) + + genesisHeader := &types.Header{ + Number: big.NewInt(0).SetUint64(l2BlockNumber - 1), + Time: 0, + Difficulty: big.NewInt(1), + GasLimit: 8000000, + GasUsed: 0, + ParentHash: common.HexToHash("0x1"), + TxHash: common.HexToHash("0x2"), + ReceiptHash: common.HexToHash("0x3"), + Root: common.HexToHash("0x99990"), + } + + txs := []types.Transaction{} + uncles := []*types.Header{} + receipts := []*types.Receipt{} + withdrawals := []*types.Withdrawal{} + + genesisBlock := types.NewBlock(genesisHeader, txs, uncles, receipts, withdrawals) + + err = rawdb.WriteBlock(tx, genesisBlock) + require.NoError(t, err) + err = rawdb.WriteCanonicalHash(tx, genesisBlock.Hash(), genesisBlock.NumberU64()) + require.NoError(t, err) + + s := &stagedsync.StageState{ID: stages.L1Syncer, BlockNumber: 0} + u := &stagedsync.Sync{} + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + EthermanMock := mocks.NewMockIEtherman(mockCtrl) + + l1ContractAddresses := []common.Address{ + common.HexToAddress("0x1"), + common.HexToAddress("0x2"), + common.HexToAddress("0x3"), + } + l1ContractTopics := [][]common.Hash{ + []common.Hash{common.HexToHash("0x1")}, + []common.Hash{common.HexToHash("0x2")}, + []common.Hash{common.HexToHash("0x3")}, + } + + latestBlockParentHash := common.HexToHash("0x123456789") + latestBlockTime := uint64(time.Now().Unix()) + latestBlockNumber := big.NewInt(21) + latestBlockHeader := &types.Header{ParentHash: latestBlockParentHash, Number: latestBlockNumber, Time: latestBlockTime} + latestBlock := types.NewBlockWithHeader(latestBlockHeader) + + EthermanMock.EXPECT().BlockByNumber(gomock.Any(), nil).Return(latestBlock, nil).AnyTimes() + + filterQuery := ethereum.FilterQuery{ + FromBlock: l1FirstBlock, + ToBlock: latestBlockNumber, + Addresses: l1ContractAddresses, + Topics: l1ContractTopics, + } + + const rollupID = uint64(1) + + type testCase struct { + name string + getLog func(hDB *hermez_db.HermezDb) (types.Log, error) + assert func(t *testing.T, hDB *hermez_db.HermezDb) + } + + testCases := []testCase{ + { + name: "SequencedBatchTopicPreEtrog", + getLog: func(hDB *hermez_db.HermezDb) (types.Log, error) { + batchNum := uint64(1) + batchNumHash := common.BytesToHash(big.NewInt(0).SetUint64(batchNum).Bytes()) + txHash := common.HexToHash("0x1") + return types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.SequencedBatchTopicPreEtrog, batchNumHash}, + TxHash: txHash, + Data: []byte{}, + }, nil + }, + assert: func(t *testing.T, hDB *hermez_db.HermezDb) { + l1BatchInfo, err := hDB.GetSequenceByBatchNo(1) + require.NoError(t, err) + + require.Equal(t, l1BatchInfo.BatchNo, uint64(1)) + require.Equal(t, l1BatchInfo.L1BlockNo, latestBlockNumber.Uint64()) + 
require.Equal(t, l1BatchInfo.L1TxHash.String(), common.HexToHash("0x1").String()) + require.Equal(t, l1BatchInfo.StateRoot.String(), common.Hash{}.String()) + require.Equal(t, l1BatchInfo.L1InfoRoot.String(), common.Hash{}.String()) + }, + }, + { + name: "SequencedBatchTopicEtrog", + getLog: func(hDB *hermez_db.HermezDb) (types.Log, error) { + batchNum := uint64(2) + batchNumHash := common.BytesToHash(big.NewInt(0).SetUint64(batchNum).Bytes()) + txHash := common.HexToHash("0x2") + l1InfoRoot := common.HexToHash("0x3") + return types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.SequencedBatchTopicEtrog, batchNumHash}, + Data: l1InfoRoot.Bytes(), + TxHash: txHash, + }, nil + }, + assert: func(t *testing.T, hDB *hermez_db.HermezDb) { + l1BatchInfo, err := hDB.GetSequenceByBatchNo(2) + require.NoError(t, err) + + require.Equal(t, l1BatchInfo.BatchNo, uint64(2)) + require.Equal(t, l1BatchInfo.L1BlockNo, latestBlockNumber.Uint64()) + require.Equal(t, l1BatchInfo.L1TxHash.String(), common.HexToHash("0x2").String()) + require.Equal(t, l1BatchInfo.StateRoot.String(), common.Hash{}.String()) + require.Equal(t, l1BatchInfo.L1InfoRoot.String(), common.HexToHash("0x3").String()) + }, + }, + { + name: "VerificationTopicPreEtrog", + getLog: func(hDB *hermez_db.HermezDb) (types.Log, error) { + batchNum := uint64(3) + batchNumHash := common.BytesToHash(big.NewInt(0).SetUint64(batchNum).Bytes()) + txHash := common.HexToHash("0x4") + stateRoot := common.HexToHash("0x5") + return types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.VerificationTopicPreEtrog, batchNumHash}, + Data: stateRoot.Bytes(), + TxHash: txHash, + }, nil + }, + assert: func(t *testing.T, hDB *hermez_db.HermezDb) { + l1BatchInfo, err := hDB.GetVerificationByBatchNo(3) + require.NoError(t, err) + + require.Equal(t, l1BatchInfo.BatchNo, uint64(3)) + require.Equal(t, l1BatchInfo.L1BlockNo, latestBlockNumber.Uint64()) + require.Equal(t, l1BatchInfo.L1TxHash.String(), common.HexToHash("0x4").String()) + require.Equal(t, l1BatchInfo.StateRoot.String(), common.HexToHash("0x5").String()) + require.Equal(t, l1BatchInfo.L1InfoRoot.String(), common.Hash{}.String()) + }, + }, + { + name: "VerificationValidiumTopicEtrog", + getLog: func(hDB *hermez_db.HermezDb) (types.Log, error) { + batchNum := uint64(4) + batchNumHash := common.BytesToHash(big.NewInt(0).SetUint64(batchNum).Bytes()) + txHash := common.HexToHash("0x4") + stateRoot := common.HexToHash("0x5") + return types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.VerificationValidiumTopicEtrog, batchNumHash}, + Data: stateRoot.Bytes(), + TxHash: txHash, + }, nil + }, + assert: func(t *testing.T, hDB *hermez_db.HermezDb) { + l1BatchInfo, err := hDB.GetVerificationByBatchNo(4) + require.NoError(t, err) + + require.Equal(t, l1BatchInfo.BatchNo, uint64(4)) + require.Equal(t, l1BatchInfo.L1BlockNo, latestBlockNumber.Uint64()) + require.Equal(t, l1BatchInfo.L1TxHash.String(), common.HexToHash("0x4").String()) + require.Equal(t, l1BatchInfo.StateRoot.String(), common.HexToHash("0x5").String()) + require.Equal(t, l1BatchInfo.L1InfoRoot.String(), common.Hash{}.String()) + }, + }, + { + name: "VerificationTopicEtrog", + getLog: func(hDB *hermez_db.HermezDb) (types.Log, error) { + rollupIDHash := common.BytesToHash(big.NewInt(0).SetUint64(rollupID).Bytes()) + batchNum := uint64(5) + batchNumHash := 
common.BytesToHash(big.NewInt(0).SetUint64(batchNum).Bytes()) + txHash := common.HexToHash("0x6") + stateRoot := common.HexToHash("0x7") + data := append(batchNumHash.Bytes(), stateRoot.Bytes()...) + return types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.VerificationTopicEtrog, rollupIDHash}, + Data: data, + TxHash: txHash, + }, nil + }, + assert: func(t *testing.T, hDB *hermez_db.HermezDb) { + l1BatchInfo, err := hDB.GetVerificationByBatchNo(5) + require.NoError(t, err) + + require.Equal(t, l1BatchInfo.BatchNo, uint64(5)) + require.Equal(t, l1BatchInfo.L1BlockNo, latestBlockNumber.Uint64()) + require.Equal(t, l1BatchInfo.L1TxHash.String(), common.HexToHash("0x6").String()) + require.Equal(t, l1BatchInfo.StateRoot.String(), common.HexToHash("0x7").String()) + require.Equal(t, l1BatchInfo.L1InfoRoot.String(), common.Hash{}.String()) + }, + }, + { + name: "RollbackBatchesTopic", + getLog: func(hDB *hermez_db.HermezDb) (types.Log, error) { + blockNum := uint64(10) + batchNum := uint64(20) + batchNumHash := common.BytesToHash(big.NewInt(0).SetUint64(batchNum).Bytes()) + txHash := common.HexToHash("0x888") + stateRoot := common.HexToHash("0x999") + l1InfoRoot := common.HexToHash("0x101010") + + for i := uint64(15); i <= uint64(25); i++ { + err := hDB.WriteSequence(blockNum, i, txHash, stateRoot, l1InfoRoot) + require.NoError(t, err) + } + + return types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.RollbackBatchesTopic, batchNumHash}, + TxHash: txHash, + }, nil + }, + assert: func(t *testing.T, hDB *hermez_db.HermezDb) { + for i := uint64(15); i <= uint64(20); i++ { + l1BatchInfo, err := hDB.GetSequenceByBatchNo(i) + require.NotNil(t, l1BatchInfo) + require.NoError(t, err) + } + for i := uint64(21); i <= uint64(25); i++ { + l1BatchInfo, err := hDB.GetSequenceByBatchNo(i) + require.Nil(t, l1BatchInfo) + require.NoError(t, err) + } + }, + }, + } + + filteredLogs := []types.Log{} + for _, tc := range testCases { + ll, err := tc.getLog(hDB) + require.NoError(t, err) + filteredLogs = append(filteredLogs, ll) + } + + EthermanMock.EXPECT().FilterLogs(gomock.Any(), filterQuery).Return(filteredLogs, nil).AnyTimes() + + l1Syncer := syncer.NewL1Syncer(ctx, []syncer.IEtherman{EthermanMock}, l1ContractAddresses, l1ContractTopics, 10, 0, "latest") + + zkCfg := ðconfig.Zk{ + L1RollupId: rollupID, + L1FirstBlock: l1FirstBlock.Uint64(), + } + cfg := StageL1SyncerCfg(db1, l1Syncer, zkCfg) + quiet := false + + // Act + err = SpawnStageL1Syncer(s, u, ctx, tx, cfg, quiet) + require.NoError(t, err) + + // Assert + for _, tc := range testCases { + tc.assert(t, hDB) + } +} diff --git a/zk/syncer/l1_syncer.go b/zk/syncer/l1_syncer.go index 92247149eb6..0ee815d9e7a 100644 --- a/zk/syncer/l1_syncer.go +++ b/zk/syncer/l1_syncer.go @@ -35,6 +35,8 @@ const ( sequencedBatchesMapSignature = "0xb4d63f58" ) +//go:generate mockgen -typed=true -destination=./mocks/etherman_mock.go -package=mocks . IEtherman + type IEtherman interface { HeaderByNumber(ctx context.Context, blockNumber *big.Int) (*ethTypes.Header, error) BlockByNumber(ctx context.Context, blockNumber *big.Int) (*ethTypes.Block, error) diff --git a/zk/syncer/mocks/etherman_mock.go b/zk/syncer/mocks/etherman_mock.go new file mode 100644 index 00000000000..0d55810bb51 --- /dev/null +++ b/zk/syncer/mocks/etherman_mock.go @@ -0,0 +1,318 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: github.com/ledgerwatch/erigon/zk/syncer (interfaces: IEtherman) +// +// Generated by this command: +// +// mockgen -typed=true -destination=./mocks/etherman_mock.go -package=mocks . IEtherman +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + big "math/big" + reflect "reflect" + + ethereum "github.com/ledgerwatch/erigon" + common "github.com/ledgerwatch/erigon-lib/common" + types "github.com/ledgerwatch/erigon/core/types" + gomock "go.uber.org/mock/gomock" +) + +// MockIEtherman is a mock of IEtherman interface. +type MockIEtherman struct { + ctrl *gomock.Controller + recorder *MockIEthermanMockRecorder +} + +// MockIEthermanMockRecorder is the mock recorder for MockIEtherman. +type MockIEthermanMockRecorder struct { + mock *MockIEtherman +} + +// NewMockIEtherman creates a new mock instance. +func NewMockIEtherman(ctrl *gomock.Controller) *MockIEtherman { + mock := &MockIEtherman{ctrl: ctrl} + mock.recorder = &MockIEthermanMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIEtherman) EXPECT() *MockIEthermanMockRecorder { + return m.recorder +} + +// BlockByNumber mocks base method. +func (m *MockIEtherman) BlockByNumber(arg0 context.Context, arg1 *big.Int) (*types.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BlockByNumber", arg0, arg1) + ret0, _ := ret[0].(*types.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BlockByNumber indicates an expected call of BlockByNumber. +func (mr *MockIEthermanMockRecorder) BlockByNumber(arg0, arg1 any) *MockIEthermanBlockByNumberCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockByNumber", reflect.TypeOf((*MockIEtherman)(nil).BlockByNumber), arg0, arg1) + return &MockIEthermanBlockByNumberCall{Call: call} +} + +// MockIEthermanBlockByNumberCall wrap *gomock.Call +type MockIEthermanBlockByNumberCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockIEthermanBlockByNumberCall) Return(arg0 *types.Block, arg1 error) *MockIEthermanBlockByNumberCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockIEthermanBlockByNumberCall) Do(f func(context.Context, *big.Int) (*types.Block, error)) *MockIEthermanBlockByNumberCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockIEthermanBlockByNumberCall) DoAndReturn(f func(context.Context, *big.Int) (*types.Block, error)) *MockIEthermanBlockByNumberCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// CallContract mocks base method. +func (m *MockIEtherman) CallContract(arg0 context.Context, arg1 ethereum.CallMsg, arg2 *big.Int) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CallContract", arg0, arg1, arg2) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CallContract indicates an expected call of CallContract. 
+func (mr *MockIEthermanMockRecorder) CallContract(arg0, arg1, arg2 any) *MockIEthermanCallContractCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CallContract", reflect.TypeOf((*MockIEtherman)(nil).CallContract), arg0, arg1, arg2) + return &MockIEthermanCallContractCall{Call: call} +} + +// MockIEthermanCallContractCall wrap *gomock.Call +type MockIEthermanCallContractCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockIEthermanCallContractCall) Return(arg0 []byte, arg1 error) *MockIEthermanCallContractCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockIEthermanCallContractCall) Do(f func(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error)) *MockIEthermanCallContractCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockIEthermanCallContractCall) DoAndReturn(f func(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error)) *MockIEthermanCallContractCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// FilterLogs mocks base method. +func (m *MockIEtherman) FilterLogs(arg0 context.Context, arg1 ethereum.FilterQuery) ([]types.Log, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FilterLogs", arg0, arg1) + ret0, _ := ret[0].([]types.Log) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FilterLogs indicates an expected call of FilterLogs. +func (mr *MockIEthermanMockRecorder) FilterLogs(arg0, arg1 any) *MockIEthermanFilterLogsCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FilterLogs", reflect.TypeOf((*MockIEtherman)(nil).FilterLogs), arg0, arg1) + return &MockIEthermanFilterLogsCall{Call: call} +} + +// MockIEthermanFilterLogsCall wrap *gomock.Call +type MockIEthermanFilterLogsCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockIEthermanFilterLogsCall) Return(arg0 []types.Log, arg1 error) *MockIEthermanFilterLogsCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockIEthermanFilterLogsCall) Do(f func(context.Context, ethereum.FilterQuery) ([]types.Log, error)) *MockIEthermanFilterLogsCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockIEthermanFilterLogsCall) DoAndReturn(f func(context.Context, ethereum.FilterQuery) ([]types.Log, error)) *MockIEthermanFilterLogsCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// HeaderByNumber mocks base method. +func (m *MockIEtherman) HeaderByNumber(arg0 context.Context, arg1 *big.Int) (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HeaderByNumber", arg0, arg1) + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HeaderByNumber indicates an expected call of HeaderByNumber. 
+func (mr *MockIEthermanMockRecorder) HeaderByNumber(arg0, arg1 any) *MockIEthermanHeaderByNumberCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeaderByNumber", reflect.TypeOf((*MockIEtherman)(nil).HeaderByNumber), arg0, arg1) + return &MockIEthermanHeaderByNumberCall{Call: call} +} + +// MockIEthermanHeaderByNumberCall wrap *gomock.Call +type MockIEthermanHeaderByNumberCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockIEthermanHeaderByNumberCall) Return(arg0 *types.Header, arg1 error) *MockIEthermanHeaderByNumberCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockIEthermanHeaderByNumberCall) Do(f func(context.Context, *big.Int) (*types.Header, error)) *MockIEthermanHeaderByNumberCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockIEthermanHeaderByNumberCall) DoAndReturn(f func(context.Context, *big.Int) (*types.Header, error)) *MockIEthermanHeaderByNumberCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// StorageAt mocks base method. +func (m *MockIEtherman) StorageAt(arg0 context.Context, arg1 common.Address, arg2 common.Hash, arg3 *big.Int) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageAt", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StorageAt indicates an expected call of StorageAt. +func (mr *MockIEthermanMockRecorder) StorageAt(arg0, arg1, arg2, arg3 any) *MockIEthermanStorageAtCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageAt", reflect.TypeOf((*MockIEtherman)(nil).StorageAt), arg0, arg1, arg2, arg3) + return &MockIEthermanStorageAtCall{Call: call} +} + +// MockIEthermanStorageAtCall wrap *gomock.Call +type MockIEthermanStorageAtCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockIEthermanStorageAtCall) Return(arg0 []byte, arg1 error) *MockIEthermanStorageAtCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockIEthermanStorageAtCall) Do(f func(context.Context, common.Address, common.Hash, *big.Int) ([]byte, error)) *MockIEthermanStorageAtCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockIEthermanStorageAtCall) DoAndReturn(f func(context.Context, common.Address, common.Hash, *big.Int) ([]byte, error)) *MockIEthermanStorageAtCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// TransactionByHash mocks base method. +func (m *MockIEtherman) TransactionByHash(arg0 context.Context, arg1 common.Hash) (types.Transaction, bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TransactionByHash", arg0, arg1) + ret0, _ := ret[0].(types.Transaction) + ret1, _ := ret[1].(bool) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// TransactionByHash indicates an expected call of TransactionByHash. 
+func (mr *MockIEthermanMockRecorder) TransactionByHash(arg0, arg1 any) *MockIEthermanTransactionByHashCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TransactionByHash", reflect.TypeOf((*MockIEtherman)(nil).TransactionByHash), arg0, arg1) + return &MockIEthermanTransactionByHashCall{Call: call} +} + +// MockIEthermanTransactionByHashCall wrap *gomock.Call +type MockIEthermanTransactionByHashCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockIEthermanTransactionByHashCall) Return(arg0 types.Transaction, arg1 bool, arg2 error) *MockIEthermanTransactionByHashCall { + c.Call = c.Call.Return(arg0, arg1, arg2) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockIEthermanTransactionByHashCall) Do(f func(context.Context, common.Hash) (types.Transaction, bool, error)) *MockIEthermanTransactionByHashCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockIEthermanTransactionByHashCall) DoAndReturn(f func(context.Context, common.Hash) (types.Transaction, bool, error)) *MockIEthermanTransactionByHashCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// TransactionReceipt mocks base method. +func (m *MockIEtherman) TransactionReceipt(arg0 context.Context, arg1 common.Hash) (*types.Receipt, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TransactionReceipt", arg0, arg1) + ret0, _ := ret[0].(*types.Receipt) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// TransactionReceipt indicates an expected call of TransactionReceipt. +func (mr *MockIEthermanMockRecorder) TransactionReceipt(arg0, arg1 any) *MockIEthermanTransactionReceiptCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TransactionReceipt", reflect.TypeOf((*MockIEtherman)(nil).TransactionReceipt), arg0, arg1) + return &MockIEthermanTransactionReceiptCall{Call: call} +} + +// MockIEthermanTransactionReceiptCall wrap *gomock.Call +type MockIEthermanTransactionReceiptCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockIEthermanTransactionReceiptCall) Return(arg0 *types.Receipt, arg1 error) *MockIEthermanTransactionReceiptCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockIEthermanTransactionReceiptCall) Do(f func(context.Context, common.Hash) (*types.Receipt, error)) *MockIEthermanTransactionReceiptCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockIEthermanTransactionReceiptCall) DoAndReturn(f func(context.Context, common.Hash) (*types.Receipt, error)) *MockIEthermanTransactionReceiptCall { + c.Call = c.Call.DoAndReturn(f) + return c +} From 5b84192bd5df147ba043c34ab5dcd82968dc785c Mon Sep 17 00:00:00 2001 From: Arthur Abeilice Date: Tue, 26 Nov 2024 22:54:44 +0700 Subject: [PATCH 78/88] feat(txpool): changes requested on PR --- zk/txpool/pool.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/zk/txpool/pool.go b/zk/txpool/pool.go index 7fe340a0ebd..d407779b89c 100644 --- a/zk/txpool/pool.go +++ b/zk/txpool/pool.go @@ -1196,12 +1196,13 @@ func (p *TxPool) addLocked(mt *metaTx, announcements *types.Announcements) Disca if bytes.Equal(found.Tx.IDHash[:], mt.Tx.IDHash[:]) { return NotSet } + log.Info(fmt.Sprintf("Transaction %s was attempted")) return NotReplaced } // Log nonce issue - log.Warn("Nonce issue detected", - "account", mt.Tx.SenderID, + log.Info("Transaction is to be 
replaced", + "account", p.senders.senderID2Addr[mt.Tx.SenderID], "oldTxHash", hex.EncodeToString(found.Tx.IDHash[:]), "newTxHash", hex.EncodeToString(mt.Tx.IDHash[:]), "nonce", mt.Tx.Nonce, From fbdcb04687ab566fd077ba75e768442b589c3918 Mon Sep 17 00:00:00 2001 From: Arthur Abeilice Date: Wed, 27 Nov 2024 17:08:17 +0700 Subject: [PATCH 79/88] feat(txpool): enhance logging for transaction replacement attempts --- zk/txpool/pool.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zk/txpool/pool.go b/zk/txpool/pool.go index d407779b89c..9c31835af7a 100644 --- a/zk/txpool/pool.go +++ b/zk/txpool/pool.go @@ -1196,7 +1196,7 @@ func (p *TxPool) addLocked(mt *metaTx, announcements *types.Announcements) Disca if bytes.Equal(found.Tx.IDHash[:], mt.Tx.IDHash[:]) { return NotSet } - log.Info(fmt.Sprintf("Transaction %s was attempted")) + log.Info(fmt.Sprintf("Transaction %s was attempted to be replaced.", hex.EncodeToString(mt.Tx.IDHash[:]))) return NotReplaced } From cb614c8b066d5b043e4338f279599e4b29e6206b Mon Sep 17 00:00:00 2001 From: laisolizq <37299818+laisolizq@users.noreply.github.com> Date: Wed, 27 Nov 2024 12:38:00 +0100 Subject: [PATCH 80/88] Fix modexp checks (#1494) * Fix modexp checks * remove small comment for modexp * update modexp tests --------- Co-authored-by: Scott Fairclough <70711990+hexoscott@users.noreply.github.com> Co-authored-by: Scott Fairclough --- core/vm/contracts_zkevm.go | 95 +++++++++++-------- core/vm/contracts_zkevm_test.go | 156 +++++++++++++++++++++++++------- 2 files changed, 183 insertions(+), 68 deletions(-) diff --git a/core/vm/contracts_zkevm.go b/core/vm/contracts_zkevm.go index 76263456c79..e2ab0d1c5f3 100644 --- a/core/vm/contracts_zkevm.go +++ b/core/vm/contracts_zkevm.go @@ -300,30 +300,6 @@ func (c *bigModExp_zkevm) RequiredGas(input []byte) uint64 { input = input[:0] } - // Retrieve the operands and execute the exponentiation - var ( - base = new(big.Int).SetBytes(getData(input, 0, baseLen.Uint64())) - mod = new(big.Int).SetBytes(getData(input, baseLen.Uint64()+expLen.Uint64(), modLen.Uint64())) - baseBitLen = base.BitLen() - modBitLen = mod.BitLen() - ) - - // zk special cases - // - if mod = 0 we consume gas as normal - // - if base is 0 and mod < 8192 we consume gas as normal - // - if neither of the above are true we check for reverts and return 0 gas fee - if baseLen.Uint64() > 1024 || expLen.Uint64() > 1024 || modLen.Uint64() > 1024 { - return 0 - } else if modBitLen == 0 { - // consume as normal - will return 0 - } else if baseBitLen == 0 { - if modLen.Uint64() > 1024 { - return 0 - } else { - // consume as normal - will return 0 - } - } - // Retrieve the head 32 bytes of exp for the adjusted exponent length var expHead *big.Int if big.NewInt(int64(len(input))).Cmp(baseLen) <= 0 { @@ -359,7 +335,16 @@ func (c *bigModExp_zkevm) RequiredGas(input []byte) uint64 { //where is x is max(length_of_MODULUS, length_of_BASE) gas = gas.Add(gas, big7) gas = gas.Div(gas, big8) + // word = ceiling(x/8) + // if gas(word) > MAX_GAS_WORD_MODEXP --> out of gas + if gas.Uint64() > 9487 { + return math.MaxUint64 + } gas.Mul(gas, gas) + // if adjExpLen > MAX_GAS_IT_MODEXP --> out of gas + if adjExpLen.Uint64() > 90000000 { + return math.MaxUint64 + } gas.Mul(gas, math.BigMax(adjExpLen, big1)) // 2. 
Different divisor (`GQUADDIVISOR`) (3) @@ -371,6 +356,21 @@ func (c *bigModExp_zkevm) RequiredGas(input []byte) uint64 { if gas.Uint64() < 200 { return 200 } + // zk special cases + // - if mod = 0 we consume gas as normal + // - if base is 0 and mod < 8192 we consume gas as normal + // - if neither of the above are true we check for reverts and return 0 gas fee + if modLen.Uint64() == 0 { + // consume as normal - will return 0 + } else if baseLen.Uint64() == 0 { + if modLen.Uint64() > 1024 { + return 0 + } else { + // consume as normal - will return 0 + } + } else if baseLen.Uint64() > 1024 || expLen.Uint64() > 1024 || modLen.Uint64() > 1024 { + return 0 + } return gas.Uint64() } gas = modexpMultComplexity(gas) @@ -380,6 +380,21 @@ func (c *bigModExp_zkevm) RequiredGas(input []byte) uint64 { if gas.BitLen() > 64 { return math.MaxUint64 } + // zk special cases + // - if mod = 0 we consume gas as normal + // - if base is 0 and mod < 8192 we consume gas as normal + // - if neither of the above are true we check for reverts and return 0 gas fee + if modLen.Uint64() == 0 { + // consume as normal - will return 0 + } else if baseLen.Uint64() == 0 { + if modLen.Uint64() > 1024 { + return 0 + } else { + // consume as normal - will return 0 + } + } else if baseLen.Uint64() > 1024 || expLen.Uint64() > 1024 || modLen.Uint64() > 1024 { + return 0 + } return gas.Uint64() } @@ -392,9 +407,24 @@ func (c *bigModExp_zkevm) Run(input []byte) ([]byte, error) { baseLen = new(big.Int).SetBytes(getData(input, 0, 32)).Uint64() expLen = new(big.Int).SetBytes(getData(input, 32, 32)).Uint64() modLen = new(big.Int).SetBytes(getData(input, 64, 32)).Uint64() - base = new(big.Int).SetBytes(getData(input, 0, baseLen)) - exp = new(big.Int).SetBytes(getData(input, baseLen, expLen)) - mod = new(big.Int).SetBytes(getData(input, baseLen+expLen, modLen)) + ) + + if modLen == 0 { + // normal execution + } else if baseLen == 0 { + if modLen > 1024 { + return nil, ErrExecutionReverted + } else { + // normal execution + } + } else if baseLen > 1024 || expLen > 1024 || modLen > 1024 { + return nil, ErrExecutionReverted + } + + var ( + base = new(big.Int).SetBytes(getData(input, 0, baseLen)) + exp = new(big.Int).SetBytes(getData(input, baseLen, expLen)) + mod = new(big.Int).SetBytes(getData(input, baseLen+expLen, modLen)) ) // Extract `base`, `exp`, and `mod` with padding as needed @@ -423,21 +453,12 @@ func (c *bigModExp_zkevm) Run(input []byte) ([]byte, error) { modBitLen = mod.BitLen() ) - // limit to 8192 bits for base, exp, and mod in ZK - if baseLen > 1024 || expLen > 1024 || modLen > 1024 { - return nil, ErrExecutionReverted - } - if modBitLen == 0 { return common.LeftPadBytes([]byte{}, int(modLen)), nil } if baseBitLen == 0 { - if modLen > 1024 { - return nil, ErrExecutionReverted - } else { - return common.LeftPadBytes([]byte{}, int(modLen)), nil - } + return common.LeftPadBytes([]byte{}, int(modLen)), nil } switch { diff --git a/core/vm/contracts_zkevm_test.go b/core/vm/contracts_zkevm_test.go index 39feedc1640..890dda4f27e 100644 --- a/core/vm/contracts_zkevm_test.go +++ b/core/vm/contracts_zkevm_test.go @@ -69,6 +69,9 @@ func Test_ModExpZkevm_Gas(t *testing.T) { modExp := bigModExp_zkevm{enabled: true, eip2565: true} cases := map[string]struct { + lenBase int + lenExp int + lenMod int base string exp string mod string @@ -76,6 +79,9 @@ func Test_ModExpZkevm_Gas(t *testing.T) { revert bool }{ "simple test": { + 1, + 1, + 1, big10, big10, big10, @@ -83,6 +89,9 @@ func Test_ModExpZkevm_Gas(t *testing.T) { false, }, "0 mod 
- normal gas": { + 1, + 1, + 1, big10, big10, big0, @@ -90,6 +99,9 @@ func Test_ModExpZkevm_Gas(t *testing.T) { false, }, "base 0 - mod < 8192 - normal gas": { + 1, + 1, + 1, big0, big10, big10, @@ -97,6 +109,9 @@ func Test_ModExpZkevm_Gas(t *testing.T) { false, }, "base 0 - mod > 8192 - 0 gas": { + 1, + 1, + 1234, big0, big10, big8194, @@ -104,6 +119,9 @@ func Test_ModExpZkevm_Gas(t *testing.T) { true, }, "base over 8192 - 0 gas": { + 1234, + 1, + 1, big8194, big10, big10, @@ -111,6 +129,9 @@ func Test_ModExpZkevm_Gas(t *testing.T) { true, }, "exp over 8192 - 0 gas": { + 1, + 1234, + 1, big10, big8194, big10, @@ -118,6 +139,9 @@ func Test_ModExpZkevm_Gas(t *testing.T) { true, }, "mod over 8192 - 0 gas": { + 1, + 1, + 1234, big10, big10, big8194, @@ -126,72 +150,142 @@ func Test_ModExpZkevm_Gas(t *testing.T) { }, // tests beyond here are taken from the test vectors here https://github.com/0xPolygonHermez/zkevm-testvectors/blob/2b70027e11a427c15994713b41ef9b6794c2f3bb/tools-inputs/data/calldata/pre-modexp.json#L787 "pre-modexp-test-case_0": { - "0x1", - "0x1", + 64, + 32, + 32, + "0x00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000001", "0x1111111111000000000000000000000000000000000000000000000000000000", true, false, }, - "pre-modexp-test-case_1": { - "0x7", - "0x8", - "0x9", + "pre-modexp_0": { + 32, + 32, + 32, + "0x0000000000000000000000000000000000000000000000000000000000000007", + "0x0000000000000000000000000000000000000000000000000000000000000008", + "0x0000000000000000000000000000000000000000000000000000000000000009", true, false, }, - "pre-modexp_0": { + "pre-modexp_1": { + 64, + 32, + 32, "0x00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000", - "0x1", - "0x9", + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000009", true, false, }, - "pre-modexp_1": { + "pre-modexp_2": { + 64, + 32, + 34, "0x00000000000000000000000000000000000000000000000000000000000001110000000000000000000000000000000000000000000000000000000000000000", - "0x1000", - "0x0000000000000000000000000000000000000000000000000000000000ffffff0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000001000", + "0x00000000000000000000000000000000000000000000000000000000000000ffffff", true, false, }, - "pre-modexp_10": { - "0x3", - "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e", - "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - true, + "pre-modexp_3": { + 1025, + 32, + 32, + big8194, + "0x1000", + "0x0000000000000000000000000000000000000000000000000000000000ffffff0000000000000000000000000000000000000000000000000000000000000000", false, - }, - "pre-modexp_12": { - "0x10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "0x1", - "0x9", true, + }, + "pre-modexp_4": { + 32, + 1025, + 32, + "0x0000000000000000000000000000000000000000000000000000000000000001", + big8194, + "0x0000000000000000000000000000000000000000000000000000000000000001", false, + true, }, "pre-modexp_5": { + 32, + 32, + 1025, + "0xf000000000000000000000000000000000000000000000000000000000000000", + 
"0xf000000000000000000000000000000000000000000000000000000000000010", + big8194, + false, + true, + }, + "pre-modexp_6": { + 32, + 32, + 32, "0xf000000000000000000000000000000000000000000000000000000000000000", "0xf000000000000000000000000000000000000000000000000000000000000010", "0xf000000000000000000000000000000000000000000000000000000000000055", true, false, }, - "pre-modexp_6": { - "0x20", + "pre-modexp_7": { + 32, + 32, + 32, + "0x0000000000000000000000000000000000000000000000000000000000000020", "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0xf000000000000000000000000000000000000000000000000000000000000055", true, false, }, - "pre-modexp_7": { + "pre-modexp_8": { + 128, + 32, + 128, "0x000000000000000000000000000000000000000000000000000000000000006400000000000000000000000000000000000000000000000000000000002b32af000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004339f6e1061a", "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001eb07e0ea000000000000000000000000000000000000000000000000000000056101669d", true, false, }, - "pre-modexp_8": { + "pre-modexp_9": { + 64, + 32, + 34, "0x10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "0x1", - "0x00000000000000000000000000000000000000000000000000000000000000090000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x00000000000000000000000000000000000000000000000000000000000000000009", + true, + false, + }, + "pre-modexp_10": { + 64, + 32, + 34, + "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111", + "0x0000000000000000000000000000000000000000000000000000000000001000", + "0x00000000000000000000000000000000000000000000000000000000000000ffffff", + true, + false, + }, + "pre-modexp_11": { + 1, + 32, + 32, + "0x3", + "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e", + "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + true, + false, + }, + "pre-modexp_12": { + 0, + 32, + 32, + "0x0", + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000009", true, false, }, @@ -209,9 +303,9 @@ func Test_ModExpZkevm_Gas(t *testing.T) { exp := len(bigExp.Bytes()) mod := len(bigMod.Bytes()) - input = append(input, uint64To32Bytes(base)...) - input = append(input, uint64To32Bytes(exp)...) - input = append(input, uint64To32Bytes(mod)...) + input = append(input, uint64To32Bytes(test.lenBase)...) + input = append(input, uint64To32Bytes(test.lenExp)...) + input = append(input, uint64To32Bytes(test.lenMod)...) input = append(input, uint64ToDeterminedBytes(bigBase, base)...) input = append(input, uint64ToDeterminedBytes(bigExp, exp)...) input = append(input, uint64ToDeterminedBytes(bigMod, mod)...) 
From ba0a89e58fd013e637fc3532492c56b6531a37ac Mon Sep 17 00:00:00 2001 From: Valentin Staykov <79150443+V-Staykov@users.noreply.github.com> Date: Thu, 28 Nov 2024 11:32:04 +0200 Subject: [PATCH 81/88] Refactor(smt): code quality (#1509) * fix(smt): handle errors in updateDepth and SetDepth methods * refactor(smt_batch): separate functions and repeating code * fix: progress chan size * refactor: function params new lines * refactor: function params on new lines --- smt/pkg/smt/smt.go | 11 +- smt/pkg/smt/smt_batch.go | 366 ++++++++++++++++++++++++--------------- 2 files changed, 231 insertions(+), 146 deletions(-) diff --git a/smt/pkg/smt/smt.go b/smt/pkg/smt/smt.go index 50d0221916d..08c9b682250 100644 --- a/smt/pkg/smt/smt.go +++ b/smt/pkg/smt/smt.go @@ -503,7 +503,9 @@ func (s *SMT) insert(k utils.NodeKey, v utils.NodeValue8, newValH [4]uint64, old utils.RemoveOver(siblings, level+1) - s.updateDepth(len(siblings)) + if err := s.updateDepth(len(siblings)); err != nil { + return nil, fmt.Errorf("updateDepth: %w", err) + } for level >= 0 { hashValueIn, err := utils.NodeValue8FromBigIntArray(siblings[level][0:8]) @@ -639,7 +641,7 @@ func (s *SMT) CheckOrphanedNodes(ctx context.Context) int { return len(orphanedNodes) } -func (s *SMT) updateDepth(newDepth int) { +func (s *SMT) updateDepth(newDepth int) error { oldDepth, err := s.Db.GetDepth() if err != nil { oldDepth = 0 @@ -652,8 +654,11 @@ func (s *SMT) updateDepth(newDepth int) { newDepthAsByte := byte(newDepth & 0xFF) if oldDepth < newDepthAsByte { - _ = s.Db.SetDepth(newDepthAsByte) + if err := s.Db.SetDepth(newDepthAsByte); err != nil { + return fmt.Errorf("s.Db.SetDepth: %w", err) + } } + return nil } /* diff --git a/smt/pkg/smt/smt_batch.go b/smt/pkg/smt/smt_batch.go index e7676665918..85b33030dd3 100644 --- a/smt/pkg/smt/smt_batch.go +++ b/smt/pkg/smt/smt_batch.go @@ -24,77 +24,64 @@ func NewInsertBatchConfig(ctx context.Context, logPrefix string, shouldPrintProg } } -func (s *SMT) InsertBatch(cfg InsertBatchConfig, nodeKeys []*utils.NodeKey, nodeValues []*utils.NodeValue8, nodeValuesHashes []*[4]uint64, rootNodeHash *utils.NodeKey) (*SMTResponse, error) { - s.clearUpMutex.Lock() - defer s.clearUpMutex.Unlock() - - var maxInsertingNodePathLevel = 0 - var size int = len(nodeKeys) - var err error - var smtBatchNodeRoot *smtBatchNode - nodeHashesForDelete := make(map[uint64]map[uint64]map[uint64]map[uint64]*utils.NodeKey) - - var progressChanPre chan uint64 - var stopProgressPrinterPre func() - if cfg.shouldPrintProgress { - progressChanPre, stopProgressPrinterPre = zk.ProgressPrinter(fmt.Sprintf("[%s] SMT incremental progress (pre-process)", cfg.logPrefix), uint64(4), false) +func getProgressPrinterPre(logPrefix string, progressType string, size uint64, shouldPrintProgress bool) (progressChanPre *chan uint64, stopProgressPrinterPre func()) { + var newChan chan uint64 + if shouldPrintProgress { + newChan, stopProgressPrinterPre = zk.ProgressPrinter(fmt.Sprintf("[%s] SMT incremental progress (%s)", logPrefix, progressType), size, false) } else { - progressChanPre = make(chan uint64, 100) + newChan = make(chan uint64, size) var once sync.Once stopProgressPrinterPre = func() { - once.Do(func() { close(progressChanPre) }) + once.Do(func() { close(newChan) }) } } - defer stopProgressPrinterPre() - if err = validateDataLengths(nodeKeys, nodeValues, &nodeValuesHashes); err != nil { - return nil, err - } - progressChanPre <- uint64(1) - - if err = removeDuplicateEntriesByKeys(&size, &nodeKeys, &nodeValues, &nodeValuesHashes); err != nil { - 
return nil, err - } - progressChanPre <- uint64(1) - - if err = calculateNodeValueHashesIfMissing(nodeValues, &nodeValuesHashes); err != nil { - return nil, err - } - progressChanPre <- uint64(1) + return &newChan, stopProgressPrinterPre +} - if err = calculateRootNodeHashIfNil(s, &rootNodeHash); err != nil { - return nil, err - } - progressChanPre <- uint64(1) - stopProgressPrinterPre() - var progressChan chan uint64 - var stopProgressPrinter func() - if cfg.shouldPrintProgress { - progressChan, stopProgressPrinter = zk.ProgressPrinter(fmt.Sprintf("[%s] SMT incremental progress (process)", cfg.logPrefix), uint64(size), false) - } else { - progressChan = make(chan uint64) - var once sync.Once +func (s *SMT) InsertBatch(cfg InsertBatchConfig, nodeKeys []*utils.NodeKey, nodeValues []*utils.NodeValue8, nodeValuesHashes []*[4]uint64, rootNodeHash *utils.NodeKey) (r *SMTResponse, err error) { + s.clearUpMutex.Lock() + defer s.clearUpMutex.Unlock() - stopProgressPrinter = func() { - once.Do(func() { close(progressChan) }) - } - } + var ( + maxInsertingNodePathLevel = 0 + size = len(nodeKeys) + smtBatchNodeRoot *smtBatchNode + nodeHashesForDelete = make(map[uint64]map[uint64]map[uint64]map[uint64]*utils.NodeKey) + ) + + //BE CAREFUL: modifies the arrays + if err := s.preprocessBatchedNodeValues( + cfg.logPrefix, + cfg.shouldPrintProgress, + &nodeKeys, + &nodeValues, + &nodeValuesHashes, + &rootNodeHash, + ); err != nil { + return nil, fmt.Errorf("preprocessBatchedNodeValues: %w", err) + } + + //DO NOT MOVE ABOVE PREPROCESS + size = len(nodeKeys) + + progressChan, stopProgressPrinter := getProgressPrinterPre(cfg.logPrefix, "process", uint64(size), cfg.shouldPrintProgress) defer stopProgressPrinter() for i := 0; i < size; i++ { select { case <-cfg.ctx.Done(): - return nil, fmt.Errorf(fmt.Sprintf("[%s] Context done", cfg.logPrefix)) - case progressChan <- uint64(1): + return nil, fmt.Errorf("context done") + case *progressChan <- uint64(1): default: } insertingNodeKey := nodeKeys[i] insertingNodeValue := nodeValues[i] insertingNodeValueHash := nodeValuesHashes[i] - - insertingNodePathLevel, insertingNodePath, insertingPointerToSmtBatchNode, visitedNodeHashes, err := findInsertingPoint(s, insertingNodeKey, rootNodeHash, &smtBatchNodeRoot, insertingNodeValue.IsZero()) + insertingNodePath := insertingNodeKey.GetPath() + insertingNodePathLevel, insertingPointerToSmtBatchNode, visitedNodeHashes, err := s.findInsertingPoint(insertingNodePath, rootNodeHash, &smtBatchNodeRoot, insertingNodeValue.IsZero()) if err != nil { return nil, err } @@ -182,68 +169,22 @@ func (s *SMT) InsertBatch(cfg InsertBatchConfig, nodeKeys []*utils.NodeKey, node } } select { - case progressChan <- uint64(1): + case *progressChan <- uint64(1): default: } stopProgressPrinter() - s.updateDepth(maxInsertingNodePathLevel) - - totalDeleteOps := len(nodeHashesForDelete) - - var progressChanDel chan uint64 - var stopProgressPrinterDel func() - if cfg.shouldPrintProgress { - progressChanDel, stopProgressPrinterDel = zk.ProgressPrinter(fmt.Sprintf("[%s] SMT incremental progress (deletes)", cfg.logPrefix), uint64(totalDeleteOps), false) - } else { - progressChanDel = make(chan uint64, 100) - var once sync.Once - - stopProgressPrinterDel = func() { - once.Do(func() { close(progressChanDel) }) - } - } - defer stopProgressPrinterDel() - for _, mapLevel0 := range nodeHashesForDelete { - progressChanDel <- uint64(1) - for _, mapLevel1 := range mapLevel0 { - for _, mapLevel2 := range mapLevel1 { - for _, nodeHash := range mapLevel2 { - 
s.Db.DeleteByNodeKey(*nodeHash) - s.Db.DeleteHashKey(*nodeHash) - } - } - } + if err := s.updateDepth(maxInsertingNodePathLevel); err != nil { + return nil, fmt.Errorf("updateDepth: %w", err) } - stopProgressPrinterDel() - - totalFinalizeOps := len(nodeValues) - var progressChanFin chan uint64 - var stopProgressPrinterFin func() - if cfg.shouldPrintProgress { - progressChanFin, stopProgressPrinterFin = zk.ProgressPrinter(fmt.Sprintf("[%s] SMT incremental progress (finalize)", cfg.logPrefix), uint64(totalFinalizeOps), false) - } else { - progressChanFin = make(chan uint64, 100) - var once sync.Once - stopProgressPrinterFin = func() { - once.Do(func() { close(progressChanFin) }) - } + if err := s.deleteBatchedNodeValues(cfg.logPrefix, nodeHashesForDelete); err != nil { + return nil, fmt.Errorf("deleteBatchedNodeValues: %w", err) } - defer stopProgressPrinterFin() - for i, nodeValue := range nodeValues { - select { - case progressChanFin <- uint64(1): - default: - } - if !nodeValue.IsZero() { - err = s.hashSaveByPointers(nodeValue.ToUintArrayByPointer(), &utils.BranchCapacity, nodeValuesHashes[i]) - if err != nil { - return nil, err - } - } + + if err := s.saveBatchedNodeValues(cfg.logPrefix, nodeValues, nodeValuesHashes); err != nil { + return nil, fmt.Errorf("saveBatchedNodeValues: %w", err) } - stopProgressPrinterFin() if smtBatchNodeRoot == nil { rootNodeHash = &utils.NodeKey{0, 0, 0, 0} @@ -274,7 +215,97 @@ func (s *SMT) InsertBatch(cfg InsertBatchConfig, nodeKeys []*utils.NodeKey, node }, nil } -func validateDataLengths(nodeKeys []*utils.NodeKey, nodeValues []*utils.NodeValue8, nodeValuesHashes *[]*[4]uint64) error { +// returns the new size of the values batch after removing duplicate entries +func (s *SMT) preprocessBatchedNodeValues( + logPrefix string, + shouldPrintProgress bool, + nodeKeys *[]*utils.NodeKey, + nodeValues *[]*utils.NodeValue8, + nodeValuesHashes *[]*[4]uint64, + rootNodeHash **utils.NodeKey, +) error { + progressChanPre, stopProgressPrinterPre := getProgressPrinterPre(logPrefix, "pre-process", 4, shouldPrintProgress) + defer stopProgressPrinterPre() + + if err := validateDataLengths(*nodeKeys, *nodeValues, nodeValuesHashes); err != nil { + return fmt.Errorf("validateDataLengths: %w", err) + } + *progressChanPre <- uint64(1) + + if err := removeDuplicateEntriesByKeys(nodeKeys, nodeValues, nodeValuesHashes); err != nil { + return fmt.Errorf("removeDuplicateEntriesByKeys: %w", err) + } + *progressChanPre <- uint64(1) + + if err := calculateNodeValueHashesIfMissing(*nodeValues, nodeValuesHashes); err != nil { + return fmt.Errorf("calculateNodeValueHashesIfMissing: %w", err) + } + *progressChanPre <- uint64(1) + + if err := calculateRootNodeHashIfNil(s, rootNodeHash); err != nil { + return fmt.Errorf("calculateRootNodeHashIfNil: %w", err) + } + *progressChanPre <- uint64(1) + stopProgressPrinterPre() + + return nil +} + +func (s *SMT) deleteBatchedNodeValues( + logPrefix string, + nodeHashesForDelete map[uint64]map[uint64]map[uint64]map[uint64]*utils.NodeKey, +) error { + progressChanDel, stopProgressPrinterDel := getProgressPrinterPre(logPrefix, "deletes", uint64(len(nodeHashesForDelete)), false) + defer stopProgressPrinterDel() + + for _, mapLevel0 := range nodeHashesForDelete { + *progressChanDel <- uint64(1) + for _, mapLevel1 := range mapLevel0 { + for _, mapLevel2 := range mapLevel1 { + for _, nodeHash := range mapLevel2 { + if err := s.Db.DeleteByNodeKey(*nodeHash); err != nil { + return fmt.Errorf("DeleteByNodeKey: %w", err) + } + if err := 
s.Db.DeleteHashKey(*nodeHash); err != nil { + return fmt.Errorf("DeleteHashKey: %w", err) + } + } + } + } + } + stopProgressPrinterDel() + + return nil +} + +func (s *SMT) saveBatchedNodeValues( + logPrefix string, + nodeValues []*utils.NodeValue8, + nodeValuesHashes []*[4]uint64, +) error { + progressChanFin, stopProgressPrinterFin := getProgressPrinterPre(logPrefix, "finalize", uint64(len(nodeValues)), false) + defer stopProgressPrinterFin() + + for i, nodeValue := range nodeValues { + select { + case *progressChanFin <- uint64(1): + default: + } + if !nodeValue.IsZero() { + if err := s.hashSaveByPointers(nodeValue.ToUintArrayByPointer(), &utils.BranchCapacity, nodeValuesHashes[i]); err != nil { + return err + } + } + } + stopProgressPrinterFin() + return nil +} + +func validateDataLengths( + nodeKeys []*utils.NodeKey, + nodeValues []*utils.NodeValue8, + nodeValuesHashes *[]*[4]uint64, +) error { var size int = len(nodeKeys) if len(nodeValues) != size { @@ -291,12 +322,17 @@ func validateDataLengths(nodeKeys []*utils.NodeKey, nodeValues []*utils.NodeValu return nil } -func removeDuplicateEntriesByKeys(size *int, nodeKeys *[]*utils.NodeKey, nodeValues *[]*utils.NodeValue8, nodeValuesHashes *[]*[4]uint64) error { +func removeDuplicateEntriesByKeys( + nodeKeys *[]*utils.NodeKey, + nodeValues *[]*utils.NodeValue8, + nodeValuesHashes *[]*[4]uint64, +) error { + size := len(*nodeKeys) storage := make(map[uint64]map[uint64]map[uint64]map[uint64]int) - resultNodeKeys := make([]*utils.NodeKey, 0, *size) - resultNodeValues := make([]*utils.NodeValue8, 0, *size) - resultNodeValuesHashes := make([]*[4]uint64, 0, *size) + resultNodeKeys := make([]*utils.NodeKey, 0, size) + resultNodeValues := make([]*utils.NodeValue8, 0, size) + resultNodeValuesHashes := make([]*[4]uint64, 0, size) for i, nodeKey := range *nodeKeys { setNodeKeyMapValue(storage, nodeKey, i) @@ -319,12 +355,13 @@ func removeDuplicateEntriesByKeys(size *int, nodeKeys *[]*utils.NodeKey, nodeVal *nodeValues = resultNodeValues *nodeValuesHashes = resultNodeValuesHashes - *size = len(*nodeKeys) - return nil } -func calculateNodeValueHashesIfMissing(nodeValues []*utils.NodeValue8, nodeValuesHashes *[]*[4]uint64) error { +func calculateNodeValueHashesIfMissing( + nodeValues []*utils.NodeValue8, + nodeValuesHashes *[]*[4]uint64, +) error { var globalError error size := len(nodeValues) cpuNum := parallel.DefaultNumGoroutines() @@ -358,7 +395,12 @@ func calculateNodeValueHashesIfMissing(nodeValues []*utils.NodeValue8, nodeValue return globalError } -func calculateNodeValueHashesIfMissingInInterval(nodeValues []*utils.NodeValue8, nodeValuesHashes *[]*[4]uint64, startIndex, endIndex int) error { +func calculateNodeValueHashesIfMissingInInterval( + nodeValues []*utils.NodeValue8, + nodeValuesHashes *[]*[4]uint64, + startIndex, + endIndex int, +) error { for i := startIndex; i < endIndex; i++ { if (*nodeValuesHashes)[i] != nil { continue @@ -382,36 +424,43 @@ func calculateRootNodeHashIfNil(s *SMT, root **utils.NodeKey) error { return nil } -func findInsertingPoint(s *SMT, insertingNodeKey, insertingPointerNodeHash *utils.NodeKey, insertingPointerToSmtBatchNode **smtBatchNode, fetchDirectSiblings bool) (int, []int, **smtBatchNode, []*utils.NodeKey, error) { - var err error - var insertingNodePathLevel int = -1 - var insertingPointerToSmtBatchNodeParent *smtBatchNode - - var visitedNodeHashes = make([]*utils.NodeKey, 0, 256) - - var nextInsertingPointerNodeHash *utils.NodeKey - var nextInsertingPointerToSmtBatchNode **smtBatchNode - - 
insertingNodePath := insertingNodeKey.GetPath() +func (s *SMT) findInsertingPoint( + insertingNodePath []int, + insertingPointerNodeHash *utils.NodeKey, + insertingPointerToSmtBatchNode **smtBatchNode, + fetchDirectSiblings bool, +) ( + insertingNodePathLevel int, + nextInsertingPointerToSmtBatchNode **smtBatchNode, + visitedNodeHashes []*utils.NodeKey, + err error, +) { + insertingNodePathLevel = -1 + visitedNodeHashes = make([]*utils.NodeKey, 0, 256) + + var ( + insertingPointerToSmtBatchNodeParent *smtBatchNode + nextInsertingPointerNodeHash *utils.NodeKey + ) for { if (*insertingPointerToSmtBatchNode) == nil { // update in-memory structure from db if !insertingPointerNodeHash.IsZero() { - *insertingPointerToSmtBatchNode, err = fetchNodeDataFromDb(s, insertingPointerNodeHash, insertingPointerToSmtBatchNodeParent) + *insertingPointerToSmtBatchNode, err = s.fetchNodeDataFromDb(insertingPointerNodeHash, insertingPointerToSmtBatchNodeParent) if err != nil { - return -2, []int{}, insertingPointerToSmtBatchNode, visitedNodeHashes, err + return -2, insertingPointerToSmtBatchNode, visitedNodeHashes, err } visitedNodeHashes = append(visitedNodeHashes, insertingPointerNodeHash) } else { if insertingNodePathLevel != -1 { - return -2, []int{}, insertingPointerToSmtBatchNode, visitedNodeHashes, fmt.Errorf("nodekey is zero at non-root level") + return -2, insertingPointerToSmtBatchNode, visitedNodeHashes, fmt.Errorf("nodekey is zero at non-root level") } } } if (*insertingPointerToSmtBatchNode) == nil { if insertingNodePathLevel != -1 { - return -2, []int{}, insertingPointerToSmtBatchNode, visitedNodeHashes, fmt.Errorf("working smt pointer is nil at non-root level") + return -2, insertingPointerToSmtBatchNode, visitedNodeHashes, fmt.Errorf("working smt pointer is nil at non-root level") } break } @@ -425,16 +474,16 @@ func findInsertingPoint(s *SMT, insertingNodeKey, insertingPointerNodeHash *util if fetchDirectSiblings { // load direct siblings of a non-leaf from the DB if (*insertingPointerToSmtBatchNode).leftNode == nil { - (*insertingPointerToSmtBatchNode).leftNode, err = fetchNodeDataFromDb(s, (*insertingPointerToSmtBatchNode).nodeLeftHashOrRemainingKey, (*insertingPointerToSmtBatchNode)) + (*insertingPointerToSmtBatchNode).leftNode, err = s.fetchNodeDataFromDb((*insertingPointerToSmtBatchNode).nodeLeftHashOrRemainingKey, (*insertingPointerToSmtBatchNode)) if err != nil { - return -2, []int{}, insertingPointerToSmtBatchNode, visitedNodeHashes, err + return -2, insertingPointerToSmtBatchNode, visitedNodeHashes, err } visitedNodeHashes = append(visitedNodeHashes, (*insertingPointerToSmtBatchNode).nodeLeftHashOrRemainingKey) } if (*insertingPointerToSmtBatchNode).rightNode == nil { - (*insertingPointerToSmtBatchNode).rightNode, err = fetchNodeDataFromDb(s, (*insertingPointerToSmtBatchNode).nodeRightHashOrValueHash, (*insertingPointerToSmtBatchNode)) + (*insertingPointerToSmtBatchNode).rightNode, err = s.fetchNodeDataFromDb((*insertingPointerToSmtBatchNode).nodeRightHashOrValueHash, (*insertingPointerToSmtBatchNode)) if err != nil { - return -2, []int{}, insertingPointerToSmtBatchNode, visitedNodeHashes, err + return -2, insertingPointerToSmtBatchNode, visitedNodeHashes, err } visitedNodeHashes = append(visitedNodeHashes, (*insertingPointerToSmtBatchNode).nodeRightHashOrValueHash) } @@ -452,10 +501,13 @@ func findInsertingPoint(s *SMT, insertingNodeKey, insertingPointerNodeHash *util insertingPointerToSmtBatchNode = nextInsertingPointerToSmtBatchNode } - return insertingNodePathLevel, 
insertingNodePath, insertingPointerToSmtBatchNode, visitedNodeHashes, nil + return insertingNodePathLevel, insertingPointerToSmtBatchNode, visitedNodeHashes, nil } -func updateNodeHashesForDelete(nodeHashesForDelete map[uint64]map[uint64]map[uint64]map[uint64]*utils.NodeKey, visitedNodeHashes []*utils.NodeKey) { +func updateNodeHashesForDelete( + nodeHashesForDelete map[uint64]map[uint64]map[uint64]map[uint64]*utils.NodeKey, + visitedNodeHashes []*utils.NodeKey, +) { for _, visitedNodeHash := range visitedNodeHashes { if visitedNodeHash == nil { continue @@ -466,7 +518,12 @@ func updateNodeHashesForDelete(nodeHashesForDelete map[uint64]map[uint64]map[uin } // no point to parallelize this function because db consumer is slower than this producer -func calculateAndSaveHashesDfs(sdh *smtDfsHelper, smtBatchNode *smtBatchNode, path []int, level int) { +func calculateAndSaveHashesDfs( + sdh *smtDfsHelper, + smtBatchNode *smtBatchNode, + path []int, + level int, +) { if smtBatchNode.isLeaf() { hashObj, hashValue := utils.HashKeyAndValueByPointers(utils.ConcatArrays4ByPointers(smtBatchNode.nodeLeftHashOrRemainingKey.AsUint64Pointer(), smtBatchNode.nodeRightHashOrValueHash.AsUint64Pointer()), &utils.LeafCapacity) smtBatchNode.hash = hashObj @@ -515,7 +572,11 @@ type smtBatchNode struct { hash *[4]uint64 } -func newSmtBatchNodeLeaf(nodeLeftHashOrRemainingKey, nodeRightHashOrValueHash *utils.NodeKey, parentNode *smtBatchNode) *smtBatchNode { +func newSmtBatchNodeLeaf( + nodeLeftHashOrRemainingKey, + nodeRightHashOrValueHash *utils.NodeKey, + parentNode *smtBatchNode, +) *smtBatchNode { return &smtBatchNode{ nodeLeftHashOrRemainingKey: nodeLeftHashOrRemainingKey, nodeRightHashOrValueHash: nodeRightHashOrValueHash, @@ -527,7 +588,7 @@ func newSmtBatchNodeLeaf(nodeLeftHashOrRemainingKey, nodeRightHashOrValueHash *u } } -func fetchNodeDataFromDb(s *SMT, nodeHash *utils.NodeKey, parentNode *smtBatchNode) (*smtBatchNode, error) { +func (s *SMT) fetchNodeDataFromDb(nodeHash *utils.NodeKey, parentNode *smtBatchNode) (*smtBatchNode, error) { if nodeHash.IsZero() { return nil, nil } @@ -586,7 +647,11 @@ func (sbn *smtBatchNode) updateHashesAfterDelete() { } } -func (sbn *smtBatchNode) createALeafInEmptyDirection(insertingNodePath []int, insertingNodePathLevel int, insertingNodeKey *utils.NodeKey) (**smtBatchNode, error) { +func (sbn *smtBatchNode) createALeafInEmptyDirection( + insertingNodePath []int, + insertingNodePathLevel int, + insertingNodeKey *utils.NodeKey, +) (**smtBatchNode, error) { direction := insertingNodePath[insertingNodePathLevel] childPointer := sbn.getChildInDirection(direction) if (*childPointer) != nil { @@ -597,7 +662,10 @@ func (sbn *smtBatchNode) createALeafInEmptyDirection(insertingNodePath []int, in return childPointer, nil } -func (sbn *smtBatchNode) expandLeafByAddingALeafInDirection(insertingNodeKey []int, insertingNodeKeyLevel int) **smtBatchNode { +func (sbn *smtBatchNode) expandLeafByAddingALeafInDirection( + insertingNodeKey []int, + insertingNodeKeyLevel int, +) **smtBatchNode { direction := insertingNodeKey[insertingNodeKeyLevel] insertingNodeKeyUpToLevel := insertingNodeKey[:insertingNodeKeyLevel] @@ -614,7 +682,12 @@ func (sbn *smtBatchNode) expandLeafByAddingALeafInDirection(insertingNodeKey []i return childPointer } -func (sbn *smtBatchNode) collapseLeafByRemovingTheSingleLeaf(insertingNodeKey []int, insertingNodeKeyLevel int, theSingleLeaf *smtBatchNode, theSingleNodeLeafDirection int) **smtBatchNode { +func (sbn *smtBatchNode) collapseLeafByRemovingTheSingleLeaf( + 
insertingNodeKey []int, + insertingNodeKeyLevel int, + theSingleLeaf *smtBatchNode, + theSingleNodeLeafDirection int, +) **smtBatchNode { insertingNodeKeyUpToLevel := insertingNodeKey[:insertingNodeKeyLevel+1] insertingNodeKeyUpToLevel[insertingNodeKeyLevel] = theSingleNodeLeafDirection nodeKey := utils.JoinKey(insertingNodeKeyUpToLevel, *theSingleLeaf.nodeLeftHashOrRemainingKey) @@ -688,7 +761,11 @@ func (sdh *smtDfsHelper) startConsumersLoop(s *SMT) error { } } -func setNodeKeyMapValue[T int | *utils.NodeKey](nodeKeyMap map[uint64]map[uint64]map[uint64]map[uint64]T, nodeKey *utils.NodeKey, value T) { +func setNodeKeyMapValue[T int | *utils.NodeKey]( + nodeKeyMap map[uint64]map[uint64]map[uint64]map[uint64]T, + nodeKey *utils.NodeKey, + value T, +) { mapLevel0, found := nodeKeyMap[nodeKey[0]] if !found { mapLevel0 = make(map[uint64]map[uint64]map[uint64]T) @@ -710,7 +787,10 @@ func setNodeKeyMapValue[T int | *utils.NodeKey](nodeKeyMap map[uint64]map[uint64 mapLevel2[nodeKey[3]] = value } -func getNodeKeyMapValue[T int | *utils.NodeKey](nodeKeyMap map[uint64]map[uint64]map[uint64]map[uint64]T, nodeKey *utils.NodeKey) (T, bool) { +func getNodeKeyMapValue[T int | *utils.NodeKey]( + nodeKeyMap map[uint64]map[uint64]map[uint64]map[uint64]T, + nodeKey *utils.NodeKey, +) (T, bool) { var notExistingValue T mapLevel0, found := nodeKeyMap[nodeKey[0]] From fbef6080d282ff81c2841a717671bec4addc8328 Mon Sep 17 00:00:00 2001 From: Valentin Staykov <79150443+V-Staykov@users.noreply.github.com> Date: Mon, 2 Dec 2024 11:05:25 +0200 Subject: [PATCH 82/88] feat: add witness cache stage (#1469) * feat: add witness cache stage * fix: merge problems * fix: merge problems * fix: tests * fix: unwinds * refactor: remove unused function * fix: add witness cache table in erigon-lib * feat: add merge witnesses and tests * feat: batch witness get from db * fix: minor refactoring and flag fixes * fix: unwindzksmt argument * refactor: prints and add test * fix: tests * fix: merge build * fix: add tests and fix witness parsing bugs * feat: implement witness utility function and improve error handling * feat: add SMT node retrieval and utility functions * feat: restore L1 Info tree stage functionality with sync, unwind, and prune methods * fix: correct db table for delete caches * fix: do not unwind if stage witness flag not set * fix: quit channel in unwindsmt * refactor: separate changes getter in a file --- cmd/utils/flags.go | 10 + core/rawdb/accessors_chain_zkevm.go | 27 +++ core/state/trie_db.go | 2 +- erigon-lib/kv/tables.go | 2 + eth/ethconfig/config_zkevm.go | 2 + eth/stagedsync/stages/stages_zk.go | 1 + smt/pkg/db/mdbx.go | 2 +- smt/pkg/db/mem-db.go | 10 +- smt/pkg/smt/smt.go | 1 - smt/pkg/smt/smt_utils.go | 49 ++++ smt/pkg/smt/smt_utils_test.go | 92 ++++++++ smt/pkg/smt/witness.go | 100 ++++---- smt/pkg/smt/witness_test.go | 46 +++- smt/pkg/smt/witness_test_data.go | 6 + smt/pkg/smt/witness_utils.go | 11 + turbo/cli/default_flags.go | 2 + turbo/cli/flags_zkevm.go | 8 + turbo/jsonrpc/zkevm_api.go | 160 ++++++------- turbo/stages/zk_stages.go | 1 + turbo/trie/witness.go | 117 +++++---- zk/hermez_db/db.go | 21 +- zk/l1_data/l1_decoder.go | 8 +- zk/smt/changes_getter.go | 200 ++++++++++++++++ zk/smt/unwind_smt.go | 91 +++++++ zk/stages/stage_interhashes.go | 207 +--------------- zk/stages/stage_witness.go | 327 ++++++++++++++++++++++++++ zk/stages/stages.go | 15 ++ zk/witness/witness.go | 234 +++++------------- zk/witness/witness_merge_test_data.go | 8 + zk/witness/witness_utils.go | 199 ++++++++++++++++ 
zk/witness/witness_utils_test.go | 203 ++++++++++++++++ 31 files changed, 1590 insertions(+), 572 deletions(-) create mode 100644 smt/pkg/smt/smt_utils.go create mode 100644 smt/pkg/smt/smt_utils_test.go create mode 100644 smt/pkg/smt/witness_test_data.go create mode 100644 smt/pkg/smt/witness_utils.go create mode 100644 zk/smt/changes_getter.go create mode 100644 zk/smt/unwind_smt.go create mode 100644 zk/stages/stage_witness.go create mode 100644 zk/witness/witness_merge_test_data.go create mode 100644 zk/witness/witness_utils.go create mode 100644 zk/witness/witness_utils_test.go diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 319b3437fbf..c670d212209 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -773,6 +773,16 @@ var ( Usage: "Mock the witness generation", Value: false, } + WitnessCacheEnable = cli.BoolFlag{ + Name: "zkevm.witness-cache-enable", + Usage: "Enable witness cache", + Value: false, + } + WitnessCacheLimit = cli.UintFlag{ + Name: "zkevm.witness-cache-limit", + Usage: "Amount of blocks behind the last executed one to keep witnesses for. Needs a lot of HDD space. Default value 10 000.", + Value: 10000, + } WitnessContractInclusion = cli.StringFlag{ Name: "zkevm.witness-contract-inclusion", Usage: "Contracts that will have all of their storage added to the witness every time", diff --git a/core/rawdb/accessors_chain_zkevm.go b/core/rawdb/accessors_chain_zkevm.go index f50d073eb64..e6bfe2787d0 100644 --- a/core/rawdb/accessors_chain_zkevm.go +++ b/core/rawdb/accessors_chain_zkevm.go @@ -6,6 +6,7 @@ import ( "fmt" "math" + "github.com/ledgerwatch/erigon-lib/common" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/common/hexutility" @@ -252,3 +253,29 @@ func ReadReceipts_zkEvm(db kv.Tx, block *types.Block, senders []libcommon.Addres } return receipts } + +func ReadHeaderByNumber_zkevm(db kv.Getter, number uint64) (header *types.Header, err error) { + hash, err := ReadCanonicalHash(db, number) + if err != nil { + return nil, fmt.Errorf("ReadCanonicalHash: %w", err) + } + if hash == (common.Hash{}) { + return nil, nil + } + + return ReadHeader_zkevm(db, hash, number) +} + +// ReadHeader retrieves the block header corresponding to the hash. 
+func ReadHeader_zkevm(db kv.Getter, hash common.Hash, number uint64) (header *types.Header, err error) { + data := ReadHeaderRLP(db, hash, number) + if len(data) == 0 { + return nil, nil + } + + header = new(types.Header) + if err := rlp.Decode(bytes.NewReader(data), header); err != nil { + return nil, fmt.Errorf("invalid block header RLP hash: %v, err: %w", hash, err) + } + return header, nil +} diff --git a/core/state/trie_db.go b/core/state/trie_db.go index 3a13013b83e..965562315d0 100644 --- a/core/state/trie_db.go +++ b/core/state/trie_db.go @@ -740,7 +740,7 @@ type TrieStateWriter struct { tds *TrieDbState } -func (tds *TrieDbState) TrieStateWriter() *TrieStateWriter { +func (tds *TrieDbState) NewTrieStateWriter() *TrieStateWriter { return &TrieStateWriter{tds: tds} } diff --git a/erigon-lib/kv/tables.go b/erigon-lib/kv/tables.go index ce8baaa5b8b..e9aebf625fe 100644 --- a/erigon-lib/kv/tables.go +++ b/erigon-lib/kv/tables.go @@ -547,6 +547,7 @@ const ( TableHashKey = "HermezSmtHashKey" TablePoolLimbo = "PoolLimbo" BATCH_ENDS = "batch_ends" + WITNESS_CACHE = "witness_cache" //Diagnostics tables DiagSystemInfo = "DiagSystemInfo" DiagSyncStages = "DiagSyncStages" @@ -791,6 +792,7 @@ var ChaindataTables = []string{ TableHashKey, TablePoolLimbo, BATCH_ENDS, + WITNESS_CACHE, } const ( diff --git a/eth/ethconfig/config_zkevm.go b/eth/ethconfig/config_zkevm.go index 31b069531f0..3142a368e57 100644 --- a/eth/ethconfig/config_zkevm.go +++ b/eth/ethconfig/config_zkevm.go @@ -94,6 +94,8 @@ type Zk struct { BadBatches []uint64 SealBatchImmediatelyOnOverflow bool MockWitnessGeneration bool + WitnessCacheEnabled bool + WitnessCacheLimit uint64 WitnessContractInclusion []common.Address } diff --git a/eth/stagedsync/stages/stages_zk.go b/eth/stagedsync/stages/stages_zk.go index 4ac4583fa82..42936bdb615 100644 --- a/eth/stagedsync/stages/stages_zk.go +++ b/eth/stagedsync/stages/stages_zk.go @@ -31,4 +31,5 @@ var ( // HighestUsedL1InfoIndex SyncStage = "HighestUsedL1InfoTree" SequenceExecutorVerify SyncStage = "SequenceExecutorVerify" L1BlockSync SyncStage = "L1BlockSync" + Witness SyncStage = "Witness" ) diff --git a/smt/pkg/db/mdbx.go b/smt/pkg/db/mdbx.go index adca963eaac..c3c642a1037 100644 --- a/smt/pkg/db/mdbx.go +++ b/smt/pkg/db/mdbx.go @@ -252,7 +252,7 @@ func (m *EriRoDb) GetKeySource(key utils.NodeKey) ([]byte, error) { } if data == nil { - return nil, fmt.Errorf("key %x not found", keyConc.Bytes()) + return nil, ErrNotFound } return data, nil diff --git a/smt/pkg/db/mem-db.go b/smt/pkg/db/mem-db.go index 949f267b402..bd45994628a 100644 --- a/smt/pkg/db/mem-db.go +++ b/smt/pkg/db/mem-db.go @@ -9,6 +9,10 @@ import ( "github.com/ledgerwatch/erigon/smt/pkg/utils" ) +var ( + ErrNotFound = fmt.Errorf("key not found") +) + type MemDb struct { Db map[string][]string DbAccVal map[string][]string @@ -184,7 +188,7 @@ func (m *MemDb) GetKeySource(key utils.NodeKey) ([]byte, error) { s, ok := m.DbKeySource[keyConc.String()] if !ok { - return nil, fmt.Errorf("key not found") + return nil, ErrNotFound } return s, nil @@ -224,7 +228,7 @@ func (m *MemDb) GetHashKey(key utils.NodeKey) (utils.NodeKey, error) { s, ok := m.DbHashKey[k] if !ok { - return utils.NodeKey{}, fmt.Errorf("key not found") + return utils.NodeKey{}, ErrNotFound } nv := big.NewInt(0).SetBytes(s) @@ -243,7 +247,7 @@ func (m *MemDb) GetCode(codeHash []byte) ([]byte, error) { s, ok := m.DbCode["0x"+hex.EncodeToString(codeHash)] if !ok { - return nil, fmt.Errorf("key not found") + return nil, ErrNotFound } return s, nil diff --git 
a/smt/pkg/smt/smt.go b/smt/pkg/smt/smt.go index 08c9b682250..6d541cc8914 100644 --- a/smt/pkg/smt/smt.go +++ b/smt/pkg/smt/smt.go @@ -718,7 +718,6 @@ func (s *RoSMT) traverse(ctx context.Context, node *big.Int, action TraverseActi childPrefix[len(prefix)] = byte(i) err := s.traverse(ctx, child.ToBigInt(), action, childPrefix) if err != nil { - fmt.Println(err) return err } } diff --git a/smt/pkg/smt/smt_utils.go b/smt/pkg/smt/smt_utils.go new file mode 100644 index 00000000000..aed504d1643 --- /dev/null +++ b/smt/pkg/smt/smt_utils.go @@ -0,0 +1,49 @@ +package smt + +import ( + "fmt" + + "github.com/ledgerwatch/erigon/smt/pkg/utils" +) + +var ( + ErrEmptySearchPath = fmt.Errorf("search path is empty") +) + +func (s *SMT) GetNodeAtPath(path []int) (nodeV *utils.NodeValue12, err error) { + pathLen := len(path) + if pathLen == 0 { + return nil, ErrEmptySearchPath + } + + var sl utils.NodeValue12 + + oldRoot, err := s.getLastRoot() + if err != nil { + return nil, fmt.Errorf("getLastRoot: %w", err) + } + + for level, pathByte := range path { + sl, err = s.Db.Get(oldRoot) + if err != nil { + return nil, err + } + + if sl.IsFinalNode() { + foundRKey := utils.NodeKeyFromBigIntArray(sl[0:4]) + if level < pathLen-1 || + foundRKey.GetPath()[0] != pathByte { + return nil, nil + } + + break + } else { + oldRoot = utils.NodeKeyFromBigIntArray(sl[pathByte*4 : pathByte*4+4]) + if oldRoot.IsZero() { + return nil, nil + } + } + } + + return &sl, nil +} diff --git a/smt/pkg/smt/smt_utils_test.go b/smt/pkg/smt/smt_utils_test.go new file mode 100644 index 00000000000..f30d1646bd5 --- /dev/null +++ b/smt/pkg/smt/smt_utils_test.go @@ -0,0 +1,92 @@ +package smt + +import ( + "math/big" + "testing" + + "github.com/ledgerwatch/erigon/smt/pkg/utils" + "github.com/stretchr/testify/assert" +) + +func Test_DoesNodeExist(t *testing.T) { + tests := []struct { + name string + insertPaths [][]int + searchPath []int + expectedResult bool + expectedError error + }{ + { + name: "empty tree", + insertPaths: [][]int{}, + searchPath: []int{1}, + expectedResult: false, + expectedError: nil, + }, + { + name: "Search for empty path", + insertPaths: [][]int{{1}}, + searchPath: []int{}, + expectedResult: false, + expectedError: ErrEmptySearchPath, + }, + { + name: "Insert 1 node and search for it", + insertPaths: [][]int{{1}}, + searchPath: []int{1}, + expectedResult: true, + expectedError: nil, + }, + { + name: "Insert 1 node and search for the one next to it", + insertPaths: [][]int{{1}}, + searchPath: []int{0}, + expectedResult: false, + expectedError: nil, + }, + { + name: "Insert 2 nodes and search for the first one", + insertPaths: [][]int{{1}, {1, 1}}, + searchPath: []int{1}, + expectedResult: true, + expectedError: nil, + }, + { + name: "Insert 2 nodes and search for the second one", + insertPaths: [][]int{{1}, {1, 1}}, + searchPath: []int{1, 1}, + expectedResult: true, + expectedError: nil, + }, + { + name: "Search for node with longer path than the depth", + insertPaths: [][]int{{1}}, + searchPath: []int{1, 1}, + expectedResult: false, + expectedError: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := NewSMT(nil, false) + for _, insertPath := range tt.insertPaths { + fullPath := make([]int, 256) + copy(fullPath, insertPath) + nodeKey, err := utils.NodeKeyFromPath(fullPath) + assert.NoError(t, err, tt.name+": Failed to create node key from path ") + _, err = s.InsertKA(nodeKey, new(big.Int).SetUint64(1) /*arbitrary, not used in test*/) + assert.NoError(t, err, tt.name+": Failed to 
insert node") + } + + result, err := s.GetNodeAtPath(tt.searchPath) + if tt.expectedError != nil { + assert.Error(t, err, tt.name) + assert.Equal(t, tt.expectedError, err, tt.name) + } else { + assert.NoError(t, err, tt.name) + } + assert.Equal(t, tt.expectedResult, result != nil, tt.name) + }) + } +} diff --git a/smt/pkg/smt/witness.go b/smt/pkg/smt/witness.go index 5fc7d64e336..ef80f6ab3ed 100644 --- a/smt/pkg/smt/witness.go +++ b/smt/pkg/smt/witness.go @@ -5,17 +5,18 @@ import ( "fmt" "math/big" - libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/smt/pkg/db" "github.com/ledgerwatch/erigon/smt/pkg/utils" "github.com/ledgerwatch/erigon/turbo/trie" "github.com/status-im/keycard-go/hexutils" ) // BuildWitness creates a witness from the SMT -func BuildWitness(s *SMT, rd trie.RetainDecider, ctx context.Context) (*trie.Witness, error) { +func (s *RoSMT) BuildWitness(rd trie.RetainDecider, ctx context.Context) (*trie.Witness, error) { operands := make([]trie.WitnessOperator, 0) - root, err := s.Db.GetLastRoot() + root, err := s.DbRo.GetLastRoot() if err != nil { return nil, err } @@ -47,7 +48,7 @@ func BuildWitness(s *SMT, rd trie.RetainDecider, ctx context.Context) (*trie.Wit } if !retain { - h := libcommon.BigToHash(k.ToBigInt()) + h := common.BigToHash(k.ToBigInt()) hNode := trie.OperatorHash{Hash: h} operands = append(operands, &hNode) return false, nil @@ -55,12 +56,17 @@ func BuildWitness(s *SMT, rd trie.RetainDecider, ctx context.Context) (*trie.Wit } if v.IsFinalNode() { - actualK, err := s.Db.GetHashKey(k) - if err != nil { + actualK, err := s.DbRo.GetHashKey(k) + if err == db.ErrNotFound { + h := common.BigToHash(k.ToBigInt()) + hNode := trie.OperatorHash{Hash: h} + operands = append(operands, &hNode) + return false, nil + } else if err != nil { return false, err } - keySource, err := s.Db.GetKeySource(actualK) + keySource, err := s.DbRo.GetKeySource(actualK) if err != nil { return false, err } @@ -71,14 +77,14 @@ func BuildWitness(s *SMT, rd trie.RetainDecider, ctx context.Context) (*trie.Wit } valHash := v.Get4to8() - v, err := s.Db.Get(*valHash) + v, err := s.DbRo.Get(*valHash) if err != nil { return false, err } vInBytes := utils.ArrayBigToScalar(utils.BigIntArrayFromNodeValue8(v.GetNodeValue8())).Bytes() if t == utils.SC_CODE { - code, err := s.Db.GetCode(vInBytes) + code, err := s.DbRo.GetCode(vInBytes) if err != nil { return false, err } @@ -86,11 +92,15 @@ func BuildWitness(s *SMT, rd trie.RetainDecider, ctx context.Context) (*trie.Wit operands = append(operands, &trie.OperatorCode{Code: code}) } + storageKeyBytes := storage.Bytes() + if t != utils.SC_STORAGE { + storageKeyBytes = []byte{} + } // fmt.Printf("Node hash: %s, Node type: %d, address %x, storage %x, value %x\n", utils.ConvertBigIntToHex(k.ToBigInt()), t, addr, storage, utils.ArrayBigToScalar(value8).Bytes()) operands = append(operands, &trie.OperatorSMTLeafValue{ NodeType: uint8(t), Address: addr.Bytes(), - StorageKey: storage.Bytes(), + StorageKey: storageKeyBytes, Value: vInBytes, }) return false, nil @@ -118,10 +128,18 @@ func BuildWitness(s *SMT, rd trie.RetainDecider, ctx context.Context) (*trie.Wit } // BuildSMTfromWitness builds SMT from witness -func BuildSMTfromWitness(w *trie.Witness) (*SMT, error) { +func BuildSMTFromWitness(w *trie.Witness) (*SMT, error) { // using memdb s := NewSMT(nil, false) + if err := AddWitnessToSMT(s, w); err != nil { + return nil, fmt.Errorf("AddWitnessToSMT: %w", err) + } + + return s, nil +} + 
+func AddWitnessToSMT(s *SMT, w *trie.Witness) error { balanceMap := make(map[string]*big.Int) nonceMap := make(map[string]*big.Int) contractMap := make(map[string]string) @@ -135,7 +153,7 @@ func BuildSMTfromWitness(w *trie.Witness) (*SMT, error) { type nodeHash struct { path []int - hash libcommon.Hash + hash common.Hash } nodeHashes := make([]nodeHash, 0) @@ -144,8 +162,7 @@ func BuildSMTfromWitness(w *trie.Witness) (*SMT, error) { switch op := operator.(type) { case *trie.OperatorSMTLeafValue: valScaler := big.NewInt(0).SetBytes(op.Value) - addr := libcommon.BytesToAddress(op.Address) - + addr := common.BytesToAddress(op.Address) switch op.NodeType { case utils.KEY_BALANCE: balanceMap[addr.String()] = valScaler @@ -165,7 +182,6 @@ func BuildSMTfromWitness(w *trie.Witness) (*SMT, error) { storageMap[addr.String()][stKey] = valScaler.String() } - path = path[:len(path)-1] NodeChildCountMap[intArrayToString(path)] += 1 @@ -177,12 +193,12 @@ func BuildSMTfromWitness(w *trie.Witness) (*SMT, error) { } case *trie.OperatorCode: - addr := libcommon.BytesToAddress(w.Operators[i+1].(*trie.OperatorSMTLeafValue).Address) + addr := common.BytesToAddress(w.Operators[i+1].(*trie.OperatorSMTLeafValue).Address) code := hexutils.BytesToHex(op.Code) if len(code) > 0 { if err := s.Db.AddCode(hexutils.HexToBytes(code)); err != nil { - return nil, err + return err } code = fmt.Sprintf("0x%s", code) } @@ -212,7 +228,6 @@ func BuildSMTfromWitness(w *trie.Witness) (*SMT, error) { pathCopy := make([]int, len(path)) copy(pathCopy, path) nodeHashes = append(nodeHashes, nodeHash{path: pathCopy, hash: op.Hash}) - path = path[:len(path)-1] NodeChildCountMap[intArrayToString(path)] += 1 @@ -225,57 +240,52 @@ func BuildSMTfromWitness(w *trie.Witness) (*SMT, error) { default: // Unsupported operator type - return nil, fmt.Errorf("unsupported operator type: %T", op) + return fmt.Errorf("unsupported operator type: %T", op) } } for _, nodeHash := range nodeHashes { - _, err := s.InsertHashNode(nodeHash.path, nodeHash.hash.Big()) + // should not replace with hash node if there are nodes under it on the current smt + // we would lose needed data i we replace it with a hash node + node, err := s.GetNodeAtPath(nodeHash.path) if err != nil { - return nil, err + return fmt.Errorf("GetNodeAtPath: %w", err) + } + if node != nil { + continue + } + if _, err := s.InsertHashNode(nodeHash.path, nodeHash.hash.Big()); err != nil { + return fmt.Errorf("InsertHashNode: %w", err) } - _, err = s.Db.GetLastRoot() - if err != nil { - return nil, err + if _, err = s.Db.GetLastRoot(); err != nil { + return fmt.Errorf("GetLastRoot: %w", err) } } for addr, balance := range balanceMap { - _, err := s.SetAccountBalance(addr, balance) - if err != nil { - return nil, err + if _, err := s.SetAccountBalance(addr, balance); err != nil { + return fmt.Errorf("SetAccountBalance: %w", err) } } for addr, nonce := range nonceMap { - _, err := s.SetAccountNonce(addr, nonce) - if err != nil { - return nil, err + if _, err := s.SetAccountNonce(addr, nonce); err != nil { + return fmt.Errorf("SetAccountNonce: %w", err) } } for addr, code := range contractMap { - err := s.SetContractBytecode(addr, code) - if err != nil { - return nil, err + if err := s.SetContractBytecode(addr, code); err != nil { + return fmt.Errorf("SetContractBytecode: %w", err) } } for addr, storage := range storageMap { - _, err := s.SetContractStorage(addr, storage, nil) - if err != nil { - fmt.Println("error : unable to set contract storage", err) + if _, err := s.SetContractStorage(addr, 
storage, nil); err != nil { + return fmt.Errorf("SetContractStorage: %w", err) } } - return s, nil -} - -func intArrayToString(a []int) string { - s := "" - for _, v := range a { - s += fmt.Sprintf("%d", v) - } - return s + return nil } diff --git a/smt/pkg/smt/witness_test.go b/smt/pkg/smt/witness_test.go index 87dae548915..6d3415214f5 100644 --- a/smt/pkg/smt/witness_test.go +++ b/smt/pkg/smt/witness_test.go @@ -17,6 +17,7 @@ import ( "github.com/ledgerwatch/erigon/smt/pkg/utils" "github.com/ledgerwatch/erigon/turbo/trie" "github.com/stretchr/testify/require" + "gotest.tools/v3/assert" ) func prepareSMT(t *testing.T) (*smt.SMT, *trie.RetainList) { @@ -31,7 +32,7 @@ func prepareSMT(t *testing.T) (*smt.SMT, *trie.RetainList) { tds := state.NewTrieDbState(libcommon.Hash{}, tx, 0, state.NewPlainStateReader(tx)) - w := tds.TrieStateWriter() + w := tds.NewTrieStateWriter() intraBlockState := state.New(tds) @@ -46,7 +47,7 @@ func prepareSMT(t *testing.T) (*smt.SMT, *trie.RetainList) { intraBlockState.AddBalance(contract, balance) intraBlockState.SetState(contract, &sKey, *sVal) - err := intraBlockState.FinalizeTx(&chain.Rules{}, tds.TrieStateWriter()) + err := intraBlockState.FinalizeTx(&chain.Rules{}, tds.NewTrieStateWriter()) require.NoError(t, err, "error finalising 1st tx") err = intraBlockState.CommitBlock(&chain.Rules{}, w) @@ -112,7 +113,7 @@ func TestSMTWitnessRetainList(t *testing.T) { sKey := libcommon.HexToHash("0x5") sVal := uint256.NewInt(0xdeadbeef) - witness, err := smt.BuildWitness(smtTrie, rl, context.Background()) + witness, err := smtTrie.BuildWitness(rl, context.Background()) require.NoError(t, err, "error building witness") foundCode := findNode(t, witness, contract, libcommon.Hash{}, utils.SC_CODE) @@ -139,7 +140,7 @@ func TestSMTWitnessRetainListEmptyVal(t *testing.T) { _, err := smtTrie.SetAccountState(contract.String(), balance.ToBig(), uint256.NewInt(0).ToBig()) require.NoError(t, err) - witness, err := smt.BuildWitness(smtTrie, rl, context.Background()) + witness, err := smtTrie.BuildWitness(rl, context.Background()) require.NoError(t, err, "error building witness") foundCode := findNode(t, witness, contract, libcommon.Hash{}, utils.SC_CODE) @@ -160,10 +161,10 @@ func TestSMTWitnessRetainListEmptyVal(t *testing.T) { func TestWitnessToSMT(t *testing.T) { smtTrie, rl := prepareSMT(t) - witness, err := smt.BuildWitness(smtTrie, rl, context.Background()) + witness, err := smtTrie.BuildWitness(rl, context.Background()) require.NoError(t, err, "error building witness") - newSMT, err := smt.BuildSMTfromWitness(witness) + newSMT, err := smt.BuildSMTFromWitness(witness) require.NoError(t, err, "error building SMT from witness") root, err := newSMT.Db.GetLastRoot() @@ -190,12 +191,15 @@ func TestWitnessToSMTStateReader(t *testing.T) { expectedRoot, err := smtTrie.Db.GetLastRoot() require.NoError(t, err, "error getting last root") - witness, err := smt.BuildWitness(smtTrie, rl, context.Background()) + witness, err := smtTrie.BuildWitness(rl, context.Background()) require.NoError(t, err, "error building witness") - newSMT, err := smt.BuildSMTfromWitness(witness) + newSMT, err := smt.BuildSMTFromWitness(witness) require.NoError(t, err, "error building SMT from witness") + _, err = newSMT.BuildWitness(rl, context.Background()) + require.NoError(t, err, "error rebuilding witness") + root, err := newSMT.Db.GetLastRoot() require.NoError(t, err, "error getting the last root from db") @@ -239,3 +243,29 @@ func TestWitnessToSMTStateReader(t *testing.T) { // assert that the storage value 
is the same require.Equal(t, expectedStorageValue, newStorageValue) } + +func TestBlockWitnessLarge(t *testing.T) { + witnessBytes, err := hex.DecodeString(smt.Witness1) + require.NoError(t, err, "error decoding witness") + + w, err := trie.NewWitnessFromReader(bytes.NewReader(witnessBytes), false /* trace */) + if err != nil { + t.Error(err) + } + + smt1, err := smt.BuildSMTFromWitness(w) + require.NoError(t, err, "Could not restore trie from the block witness: %v", err) + + rl := &trie.AlwaysTrueRetainDecider{} + w2, err := smt1.BuildWitness(rl, context.Background()) + require.NoError(t, err, "error building witness") + + //create writer + var buff bytes.Buffer + w.WriteDiff(w2, &buff) + diff := buff.String() + if len(diff) > 0 { + fmt.Println(diff) + } + assert.Equal(t, 0, len(diff), "witnesses should be equal") +} diff --git a/smt/pkg/smt/witness_test_data.go b/smt/pkg/smt/witness_test_data.go new file mode 100644 index 00000000000..fab6aff4732 --- /dev/null +++ b/smt/pkg/smt/witness_test_data.go @@ -0,0 +1,6 @@ +package smt + +var ( + Witness1 = "0102030203020302030203020303ddd15247a8b234236d91271277b1059a674eaed56c29a6d8905b27ea9460c7e40344f7576ca6198b0bb6daa81b4eb6f594b46608e0f4d8d509361f0aac88eed2b50203020302030203020302030203037477c5b7ac361fa5a28f01782fc1b9577dfe27c9d91e5193c426916c166503f3033e6831fb92c6944c4869e9ff429fd40b9191f5a5a9fd8e4e26f67be29feb3d00020302030310c0064663f729ce8c12a4db054317ae8a3d309ee54378eba25ca39a4670758d03fa715595952a40ebcc9c06b02f6b1960a1f74a722c3a9fecba1aa66f32f1850e0203020303b010b79cdf4c9bd8f8164ad282defed968658e80fa57c26c19f5cadcfd9c890e0318f8d37b605fba62e9bd02f5554b8bd4784578021c737c4cb957c4ed5e8ad3b5020302030203020303c4ac3ac799860160a30a3304b765c2c90bc414edc3739a5d098bb7e18009548a0344ed07cf7b7b49fc2e7fc9c6c19d1b60e64990110188e15b445320a35660f91d02030203020303949f805ade2be05694c8011fa17fab3646a43f38f96d868386f0ba9558ba5f960302aabd9fbeceb9711f46d634513830181412c8405aea579f470a19b477d090140203020303db978a462b93b2efa3aa3da09e03370b570db692c6d361d52ae1051bdb26a3a903916d67432c505e1dc33f3617e0743d761aba44785726309191e79cb18b666e7402030203033edca13bcadc1db9305f3b15322cc6d774682fffdfe2b509f81d00b16ce2dcd003dc94780e238944094e7856154e6d3e54fec28293a9a70eaf1cc2a81e874e22170203020302010203070354000000000000000000000000000000005ca1ab1e5820e72de8a1b9696dd30f7886b15c4cc9234d52c6b41b9c33e2baaf8d88fc5b7c9f5820f8fb80310ac041e7a5e79c138d7261cda5d8a988dc9268b5a8dc5318fb610a90070354000000000000000000000000000000005ca1ab1e5820000000000000000000000000000000000000000000000000000000000000000358207d7b0aec16983b640324af57c161ae800ab5b0b61937d153540fd64ba724a431020303c6cbb686c9d7a94f49dfbee076ae1b87f1c9bb5f33b7c98a71816c33b4731a3b037514c2021b2bb805e2a6060b265dd53c069a4587e19cd7d1af99d0e9c3d0e550020303784570292bffbc3ee00153e5806a317459fed4c1d84de0515dcefc177404865003f08c92f6786e67148e8fb2bcd4eb813a665e16a270b475605d1d84b587450ff102030344c5e2a1775873020ab4e5a588e95d6702878cd46012682196dc39738fd8780703a6d5ee17fe3be7e20050e4e66c54b5188febbdd3615f832c35b073078258b214020303e5f90b59ef9f5ceee0e0a54551e41a62431ea06aa09be94779c474ca4d18683403e794dec8b1cbcd53bbecf14b61869699ed3f92ecbb4ac3d9a8bc744c09a3e69a020303476c478891a8f8d905ebf9e5c031ba1020ca1436538bf9b97c6eaa1b9512da97030e077b0c8215c8c43753d044e318788eb8e39de692fe0ccd46396d3b06ca6e0c020303ddaa569a922c9c28d9a225a4c212c076ad5e353bb7cceaab630a3884af855d2403d5ff4c122142c026a0b24b79b4a667d25ea916ef64d8a8215aa29a738c5588a50203034b1d465e96a44ba0d7983a6f4ce10a26bce7816b6d51ba8ac594c71892cc2af60381a6db28188e1b651603d41fbc2030bb2b7706e02b1eb3423
d7f38ff6ef514e6020303f30f3c3ad2db979a1c81690619a35a801e3bcd77413f37e285b0011f2b6e2a4003239d1f94c6460af24c7228a2af86326ea1199e97365bf7dc5832ad029107445f0203038518fa303494de83c9ae1f80c877b5c0e6dba41880f6df1dbaaff30fa9b9c37a03653c1b2e876da5bd8b6535ce431ae69feb7be788cc67b2fa3dbff11c792c1f13020303d5efbfce398f4205569b3fc872e7405712796b7189d6846e61c7ff33a12ab0c5037aeb2da8a9e504490ac07aee509079823397fc6e9cd25257e658f6e0021ae771020302030203033bfe86ca5a55d4d2d42f5af48205ca0ab08df68e551e61b9a1bd5d575ff9cac3037462982abd4a0437ab5e12ab2af263ab382e0ceba69ff5de751519512149c70a0203020303980043fe396689718e09b0990d71b800219da2873a8e0c3c45d25ffe12bd9e6003f2f9aba950a1023ef8e02568c683c86ef2e77e16dfad909642ddc5cc57ac8c120203020303738b4a16af664d0e0c6b7ff278d1e3b602e6277085730d77844f1430c2f71bcd032c505136023a2005bd6b8abfc49eb783514ea36233d5439525478dc102ad67e402030203020303f30cfa6f63115cc17d752bd07a3848c463334bdf554ffeb5a57f2ac2535c4650037d85b4ea9025d3512a6fafe55d8e3570fc8c968eb67042e0ded283dcadc12ae8020302030351a20a2e192372b9383e5b8ef255adf58a3633e5aa4161424f7b52912e8053f603edc4f75f70c3608079c9f0b4584da6270879e9983bb3513d7e620024f15e659f02030203037e1734c6c90368548b9b6a882b4560d78e0630f3616dc7d4b4b8d77b96a42dbf03c4ed6f8e6cdc9797199a463a51287700852a10099a1386109a37561b538d228502030203020303d3858acf0781afe0adae49a25d1724b36c9989179cc884b9a9c6481f89e57706031da6fb50879ede58d816d1624292f0c60a8133bcbc671dd92d0f9cb8d50fc8030203020303521bd9da187efcbab07451097baf98589a33e32cd33501c5a912f48cf2552bef0352124f3ffee53f7e0f9a0068d5c0b0abfca5aaa148371c91e2e81df5fba6f8bf0203020303e00ce2232f3e208dcf050f887d02c7b91170c4b98e1d098ec5238bb3d387a41e038a0379dab30ce84865bb4f0834a9c3fd7bb2da17994abf03e89fd8d754bf7aab0203020302030333f5853903cb36caedffa12d0aa0a01c39e91309629a97dddafa8da4f738fb3e038e1dc6012aecd5998053b5878c6e3a398c8a286c7ad4dc0b55f043e4b4210950020302030317e041d91830fe8051fc73631cfd56519fc5bb88b5369298da9807b40d93733703dc7745bf8dfa058bcf1328f79efc9441cf5ad5fb5763c75bdebf9492f88a6c8302030203020303bf9a2780e63ac26ffca6df07f182db72e3e65802b277ea6d40057c383d5a37e3036c1548eb1c956ece0a876fff4463cc3f2e9c3b6eef88379f07e6d71013eb4aad020302030202020103c2eebac9e260dad1f051fa75846558dcc0ed120d1a7699fd1d6b739a3419015b020103b9d64db96b69b035a74a90069691afa2b34a705f3ad3aa82f3757c7a6f2a9f17070354000000000000000000000000000000005ca1ab1e58207af492bfc857210d76ff8398a35a942af892e866b1f4c241746b3ee89ca002595820f889596e5b3d4dbe5bcab5cde2af26d3ad8d88bc086e8b4f929885f33f6eec77020303cd3edcf79c7e0e0d6b87ae523729611faeda92e77bbb59f739e9a6127d890cdf03009a5c01c3cb27d2f1ffbd3fd77ff38f66991f64174f5df1411ea21ae2e22f250203035334694d8b56a2f5e0d0ded81283e7f36b3da6fbf2e2d1b469c008d7414296be03953388b0cacc587c5ca452ba8e96a9071958ef94439690bc14866f556a35ebc1020303227561f72db4ee550290a7b85931038224b1fa9c351395f5f5777f397016d7ae03dde5f312229c20faf5b1b27273112bc022bd0d1dad4195ffeeceb49c05001a07020303d4eebbde54471ef4008ea3e23e4bd31119b1d4fa51a2bce7771c95b70efba064038c6a2b8e1f68d72b2a95ef69cd8eb0ab32781e7687049eaf3b7381596c0bb8af0203036ae82b7b420a58afe9871a632d69be8475f745405df2183722c599f94a5cf15f038a575afe8d81ea9f181bee15a971affeffcb1964ed35ec291304be393899d80f02030203020302030203020303d634ac486eb2f4e325a096c1aac56ae5a0a3bba406dcbede2e9bd4837d1759f203ce1b43774de78b19d67b133fb575ead398fae6a712ebd63e26671f199c8e674302030203020302030203036068b215f89f68246518e7d8c967f8ae78b47c69bcb9e97deca5849a813b2e400384b630ffc67a1dd7b502c1b42165171a704d68ed15ced3b7cbb98bd150cd884b020302030203020303a3c7cf45ebdd7e21dade3434624c9fd521b0ab24a6956e3b8a777d700149806703b3637d0b1cf58c5272f28f8354
a764d3cd48ff7c04f807237da8f4a1e2ef5db5020302030203020302030203020303c018dfe606efd5d17f3d45e91d33d3d7ef57d92a1d509291b1556bbb7e78dd0803b08ff5bb304aa8741af608415c541440edcd98bbc0fc849fe2a89c2c783341d502030203031e0eb0ac5664b1d0267d3c51dd66d1828c0d01a0903d599a317e4578926d4e3503330e2ccc546a3db52e796943aa8960d6d483a3b941ae0caa21cc7b9f7a3c2bbc070354a40d5f56745a118d0906a34e69aec8c0db1cb8fa5820000000000000000000000000000000000000000000000000000000000000000158206191319cb3bf48d9701195789dbbf6db5d3b99006317f5e7da37709f3d259374020303ac874a6acbf6de628134cd74ad9f336206e7aadb1ef09456b6267a770612485703ef323528b761720ce04927a54b81025a935f070420d655217e40eb2e084bd170020303a48199a63a429cf44fb39fdbb73098a49dc171e03d32800f483780adb9aa06580388796e8ab2076fc77f00a5096317ceff8a54da310b014a0310504bcd76f8b8da02030350179cb850b147782f26ff9a17895259e569b740cd6424a7a1479602bd8c822b0371b277886f0d14b6f82cfd063ecddab10fb5da5e0666e040992469d09a6bc8b0020303dee2b54587eeb7db2fe9ef0dc262b6ae679a5bfff89c8f403d181a1d79107b1d032aff27f522ef5fd88213c3865e01c7b4c1720d56778d1bd0e48e6a86fb3b07970203037d0d29240ad72800831a91d8e54019da745c6c6a630a625167723ace857bbb81031ede6b255a1c6ffd6fa2afc16d61aea6555a5cb85dc4669070b69b55a16ac58d020303335f1f02ebdb1926380c362d23b2d90d791f5ec8531287a47d3a1929d6304f1b037b80208ab1e9bc0411f128ccc859ac552945a650ebd0f9161a63fc9944e8d43f0203030d600bfcd6581d405aaed26aa7cee976fbb2bb9c1c1390bd3eb14cf5f6610223030bc731957e48cd1b0f92521fa971ca65f76bc8608eaddfa5243baf39838099810203034b176bbe991dbc4ae5738f23e31433597f9b892730ad4fdc86784eb928cbc642035fa76b91882dff00646e99735fb6accf89476e59c9dd28e0bc7b617870cc15e702030309cc884c89c3b546aecc35f4a95760a100cc3fb5457a82fc413ee2cd795345d2037ac7f4c6f9dc0e8f652f47fbda5c4b53428948acc95270be462ef8c909e5b742020303bb4f79f1339f6fc4ba3b4c61ff1940c27b29459942791fd7b160a04bc8aa411803628f665215c8c44bda1a6243487e35e5d9d922bf36f1976fd5f6c39264d16e8b0203036bdcb7f00d848df36ea8ffe2d419775be23396cb7344a2dd1ab44292769c922303710d1c2ccfa13ceec5ffc97b3469592c4f2495141e46bbaaae6f099c47b9737502030203035f97c209aeacbb5fc78e69d247a800528f4bcc5e649fdec626ae5ef510ee7a71036eeb37a43ca943f0bb09cb54bfcc7325ed27e97af16f59cab0822f88d3143888020302030373d9994e2d75a6f80adb914f45d533caf2ade8d952a0d8b73a40299199892f5f0347947690bda8388fbc8744af22af00157531bd0f37353b2407b573cff34e23c20203020303cd4c5dc3e51e3a3379cf73004c787ee7cb312c06c70800d0f08e65a0ee2313c40350adbcaba1f1a5b06ae4510704194cefdb5053ffacdca11f354a80cc04d0a2f402030203037ec1e64855ec72f6c39f1832616a45075eda4889495c393ffb673aa05f25e67d0361c764032c6e6f093f7e4e2db6e3324b29e59ee4df2f6df3536539ea135264cc02030203020302030325abb132a4c897744752a4707644448c653f00743c37cd560218074dfe1e4d2803fe62ee54fd13cf254cb8c3b2bf728d8c26703054588e529bb8b2a68a950ea4e0020302030203020303846e32cbe73ce37fdc6fb93afeed4425035df35d637127b54b8fc4c053d405ff0398c008e116cd33ceac2a28f29c392e533b755c24316cf6e847e4ef72b070dcc602030203037cc278f9b41fd17fb5eb3c839a725fcd1ef6000189fcebcb4214303f45dcd2d60386c3bc64da300f1a87efa2eb2724553e41348057fc99d5c23b5b20e216fde46d020302030376bf2ddfddca9910df80bb0785add76937d1e90e029c02b04c0cf421622a232803ba2219bc37e93a89b0effdfc3601f58645c1cb7e818f2254c8fd16fea4ba84440203020303fdf1c4799edef5fe2960f9148627fff521e591247a224eb8d05eae3f51675b560372adafa8e298a14d0da0a71e645a12a23def78db8e81f7a68ef92aac7d5700b40203020303f5a794d38718b283b47993f3bbcd67c76f84c47fcf2d35373fcb7f8a0f43a06b03a14b12d1f03790ac75797e463a8a7edcfb2bc80b65a7dc8d1b15d00cefb315d5020302010312f8462573dc83d436d2498e68019babdcc911f482e1e00c1b3fda70e1a166e40203020303adcfa2154e38e2cdbafd5d56bdaa5dca90a5bfb
9c36bfbe140bb31ec0e66716503b5aaf1a6fa2e80ad8f4e49c51808d2898fd74f539ec5de974b57c27466f5e7490203070354000000000000000000000000000000005ca1ab1e58205956a0b12f607189a063054545ab26ce76ea5eb4c9bc1e8d8161646c93ac66515820da6aba51eaf87e14a7585e52e23cc0b789c61b3e808d2aef704ae932bb2ab49d070354ee5a4826068c5326a7f06fd6c7cbf816f096846c5820701c251f0448beefca8b47dce2e42f136d224b8e89e4900d24681e46a70e7448510237426c03237429400000000067445fb80203035b2ba27d2c4b5ccd82a303efb2a86cf208d08dd952ed0494acc5aff009a9809303e498dca26358e5fd56e5464288da82073a17cbbd112e322488c12bff1661b49b02030308e7f519768ebddac099b4d79b3da7e528e3e1e7f43fb5e815cc2e7e9bdb82ca03afa06c5681e457eed414f2a781c5cf2a752257a696aa0d741799d3e5c6ac65b6020303645cd7c283714f070784b2e6a24c049ba20cc01422792ec7507818b757b4d02103c6b1fb9b858f69c598cf4359d1a43ec9faa45a7f308dfcf087b529ecf6cba0d702030364631f25391237453ea4bdf6dcd59ec33334c8bf13b3f4ebc24512193a52368203d4a0ec404056d0dd6b14481bda731e46954f9d29e4d43aba64bb8ca52ca87bd902030329cd1de4c7edfcc761f57c5f2466970add88bd8705390cb23184091c99cbdde603eca27d7686e41e3d24558d66cbc83d2a5d35469522d922ab216914a84d977e36020303aa3f3aaee4ea8cc05d8b5a9f3c4528c8de0d5b4bd9bedd4456b8816a9d7195da036dee15633cb92bdefc8f632e08b85dcb8bf1d317f82dfcbb7b76e38f7421361502030203020303f1a4bc7768286c3e023725e4f781a6b00fb11d83f1dda2647000f13ca3c58544035e062fcd2f3f81c8d4d424e25bf7e77e301465425a25afa5d0bdbeee2c6284b202030203038482b5d9958175078c790a8a1effda8f156db2faa9ff2d6473d742b4f737143903bb264f8b66371fe289f7741ae353b137695ca94cbc9ed3ececd3ef601d54181d02030203020303b9a21b649304cec7a5d6d0464d6bd8ddffb475c672c0c9799b3457e4b9fc2a12038da99cc78f04ba4eaf3df326eeb15cb038c013a9e5b76698f493170bd356b13a020302030203039c69fd3c2b5b5200c89358d29432ddc4cdadbf9d1b05f2265bf4af27d968898503389f85ccddd9ba507ac3bae9f0a830a56eaf35ebde5aeb6c374dadfd0ab39aa9020302030318b62235f3bd9e0e268b30ff1a987af5548f00006ebcf51db0448e220c17e862034465f83c3781a2e121eca23c852e6b742e52e0fd76e2eaf886471d3f5c4a3e8502030203038b2faefda31a8d8e3e5590221ea164997bdaaba30fed699932fa0b65c6ab2fda0396915914ec53b6ea1fea28b0ede76ab410d1dafbf996f2fa7cd37f1b4ddeb59e020302030203020303455b9202298fcd235ea441cc50f29ce15a2a1a9504564159a849211c899dc08003f3df85b9d03df952c76c1f9853ce686f21949c732fc9b161b5759faa36b2cd55020302030203020303508d990b34daf5a3f925c435daac3d293f6d861094cc2d343a92c62428fa66da032f8b40a9211667e9c44328d6440091ecb3a46bc15832f7d7cdfa8ec130b527fc0203020303f993f7eae6e45a6f8557c6c5d0e912cb41b71d2bf37f38affc0b2d8e054193220315eeb3ab754628ce727cd0b7028ff8ed3291de7566b99066e127185d043f595702030203032f2c132f32f21e267ab64271e8f2c0c39fedbcc509c4589616cffec21d7332eb03839857347599c19c43a0acfe53e1bb5bbe0d68ddb49cee05f1b24c5acac24a150203020303dcd0869ad1107856680f6bf164623fc709d26d1a0842bb9c60a383f255c0ec2403c92cb1692742c4e2b6a91d13c3b371a9dccd29f898d8f6457ad052b1da9efcf6020302030203031408a1feb1c4cefd2e71c1d7ce58e6b4c2d139d48c67037d40dc0d60390af539039c51675ab13cc260ab6875b12824ed60903c1755add14024e27508ac0a3b9d81020102030376fdbe16ba7e2f048d9c311cb1f99291b4f624717ddd7e9f2aa653099d19314f032ebe85ea3fef7c7033338d1ed98e187eddf75dff4772a23e19392ce61690f77f020303f901f2ba5a7a95db9ea7106268f17f341206944377d1f006921211069cf8a0a103f43daf24401f9ed2d0691570a8ccdcd016c90b722786ff590276f7cd5933ff3d0203033cb8f613c530196a2ab151996cc3eb343199c5c0c0adc212268f74f6a092666c0355b4984426bd4db31ca5d70798a18280a4d319786bd897a29365d2db7489b32d020303d7f9465d351f2c4200659307b3cd7cf34d3fea84b9b23bffe5bec395f4a2d88a03ef3e9f16053b7f799f207451eb3403eb95301e9c9e721dfde0c41ebd8362485c0203036152db0543c6381b557c70b284c75fe77b405b54d279a3
7db0ea5a382a61abd603b70663772bf3728213f272a0d02b2ded9cd31441fbb82b9a44d266c56e7fdf58020303f967722c10537809246d339e984382cc197deea70a2c433df88fd7797701dc76036e06014c6d6c4d1358aefacae43b83631ffbbb39c93874faa4d589c1f60ca07302030341a2496071d2a84dec9f60bfd3288fdcf01683618900806b1a61a740fcb95d4b0338cf0dcf2e49a0359d0d543a3ac97474876f7605800e270d1c8671dc375720250203034a347b6bdf9e875c714c0790a2ad84b01edf7b15c4d23cacab0598c704417ea7039676ef3f389061effccb4e08a0afc2971c35bf69edbda2e91d9e88486113990e02030203020303927b20cc65cbc0d70e9880b16dfc67b8379ff4a96b95309302803a1819d95ea003eb0ebe2fcfd0a9002bd0985e47dac1c4a01561de0da69bea0bc25ff1b519d5b602030203020303a4c7f2025180b6de7674fc2c91392a565d9a28a77eb193f29d9ba706c6fdb42f03d2bca54ba531de1142b06bb35aed010d55ab6e0d862cdd7807e4136c1b9d0c490203020303ec1282aa791a0b578de360336d5cf95a7f3bf1ffda9cf697b3aacf9417aa38ad03cece3331be90852d59eb04e3bc87b03657c0993626d3e36ebeef97baedd928f00203020303afb305376ba08f5bfaced38f127295f994096684417f9de1a8f496fdccbb3547036bf14c6051f3bdb18d621bed206c3ceb8daf8ec24843921de9af2dc2ba70d5ba0203020303122291009057e848a0e15edd72e47061463ab3aee368289eddc782303e9299cd03678ace78eb9da91eb3fa9105c9969a0aa9abd66ac41ab138aa70346daadd327002030203020302030203020302030203037a46bc17ebfbc47f6d99661de00074c9958e0f7fd66df7c77c236b89b165472e034b58bfe7c7506d2891367c270ca350269dfc0a08b7466ec2496c6330dd602bb302030203039b58b0df7fae59a4cef25184d849214bc145cda115b2c0dfd85fd470ecdea70f0330923d4d299efbc138d4442519d30cd33a7827557231388b81a6ea9c65eabe6f0203020303af3fee608c2e8e5a30ffc6345d86ec1b2d55f10e518b4da5e8eb59abde07b59803c2016682405d3a953eba254601d5fc0b8966a33efaa51918a4a41b8e0acbeb4602030203034b1387aa6d0ab944e2ec65ce38c8643a0ddfca5c3059718f398dee501291569603528cbab25216c4397a402fcb572f0b512a773dfeafa59e401989a4da13406bfe02030203070354000000000000000000000000000000005ca1ab1e582054b6c4d9862a1658dedebe99a0f61d94c5d1515fd031d0dfe9ebce6a1454f5c658203f14693500ccd0260659fd9eaf69570edc0504867134ac88f871d91d388b63690203070354914e7547b9051ea6226c30495190a2efa15930c95820ffffffffffffffffffffffffffffffffffffffffffffffffffffffff74873927548382be7cc5c2cd8b14f44108444ced6745c5fecb02030311ab6695ec969171698c1f56d4c05373d8505a2c7299fb05cda1d4351e22bfa403478b94ae515fbd01728835b532c7c45ccc78d200d3d004da6917337e139eb729020303ffd14369e7c7f7aec3a890a20234885f2c9fb9802ec318d8434ebcd58a696153030ddae742090ea458c3f232dc894bd8cd3378b4b4590a0523e09a44e0439fe0db020303b1688d8c7806365d931579ccac8dbf7a8d7705ac393159dfd9c0395ab7b5ca5b036a6c978a565b15267de4330de7b6166014082043c5cc80370953767ac501ccf2020303fd878c58bb70337606fc9f519700dcabaee2f175ffd956a6d246c56e38de3c5a034ece3162b251497a52be7f417b99722c20de63b35a0387e0cb1d8a1ef6bd34190203032db40fdeb2c5256d5a237b6134f844646b325bfc12c687916327e21a65b1ae6a03381ca5f3231b0698c6d69bd685fd1930924395002ee0c9f1f3dc9324570c4f52020303eaf50a55e8bc433b8c594aeb6ce2dff8d6dc8a6fa7d72076a07d6b771d13d78b0311a2827108d1c853cd8a63db81104ad8493e188969ca0339d0a01ed043b47cdd020303f7993cfe3bc67991b923b2f3470e42e23e78ad30096bf8278f293053de07b46703a0c8d263334a785d55f5b7be433841bca1d7ef1b8743e6dacb4e4fdffc52a77a020303b616a1ceb3607803c41329eee93ec3541b2ebbe690a4f29e3234441d7fe22710033646798b76e3f8d1cdcef03b5802388ad826a45b0ba508443fa26d5cd6fca96602030320f9766d80286663ec273eaab27d516a59305f6fdb96957af2602f4c0eef4b8a031c930c476ddc908dc2d5ec253fdd2c6687f32616ae7698ee6f0f6baee9871f780203032d1c40f0360f2f347afb931f1caff15b122c02dd058d53cd31e10da5eb3a5005038da2ec93073400637eda663a2b3095ba8bcf473b6bc0ddba6732c0d88ea26f0402030203020302030349147352cccb7f2119bbfbbbb0a4306ee33992973d1
777a3f176a7420854218003a44f6acf78a34c96774821d091ce968f756f12c95ad543c97e52f1c041e5c1900203020303a8a8350630628c9ac16ce93f256b9d92a9cab6a1144cd60fee0f228e02d0d04403fd17945ef7c2a783662ce43c34c9e7ff044bcfb5a3fb24299f994e4317c620010203020303cec89e1b5d9d20c59a319c536ef1ae8c0a67e0ded8f6ce3a7eb1979ef183d3870348dbae09afb5d5232bc158cd3f9c2728348ae93f0742e0d71971d3b26c301c0c020302030376e9a3f309a69b0c2c7ca3184457ba9f0ce19145bc96d4bd846742585cf4e9a903b07dbe4dab435161a33e90e991fdd8ac5c0670d77cf2b72ae5bc530519e6fbaf020302030203020303c423e16fb3487a4d9126ad5c533cf130444a4f099a85777493cbd2d231f27b71033c4bbd0160fa760c6c008ce70d7342f2cd5524690247577a0ca36e15528565cd02030203031e5c59c8eb7467fe1b1b59f78351143028717a9679b5956d1a42ab64efbbdff403bc2db4433eb1e4eb520035e06ee46cdd233cd6f74e4ce438a0743af21cf67ba10203020303c1da641e5501813afe9c4653f2179710154bfe94ebce827d0bf64d70bd3baf7a03e2bf953702f6287b134eee978e1b18a36f65b41c2c673d75876215604661dd50020302030203020303a35901b035cd24570a277362d9ece906ef4d6e00821b55212d69b6fd6775472d037568928f5eecc9599b391e6cb75468d91ac18de51d7e984eb678105c39fc8a4a0203020303791a9ee8b5057a6ca65118869d354dba135fd5c518d63144d3860987f084bbcb033c8c390d481f51cf6b43c22677a971beae0e62e8b2ecfdaaed05b48ac0f60294020302030203020302030203070054000000000000000000000000000000005ca1ab1e45e8d4a5100002010341305ecddd1b56329ac9f09a1235eec6ce6be69492a9788db13e9187dc21e9dc020303fb1c6d1aa6d3f3bef7a0bf4130218b3b168f9447e69ebcd3b68c2b2f41d9b2ef03652ba6f9b69aee3d28404079416c2f8fba4078d66b558c7a8d9615cfe7d3bd30020303d9f042d0d2f152e24d8cde02d3a7d7a1fa234efc5dc259572d412a2e607215ba03c5b76ff595e1d74a22eb44a5aed94f3225b6126c2c28ef04bb75e1d3804925ad02030314a2b125da4db5ba673cd5c0aaae8c5bf0857fd45728b868cff3f40eaf9f82790393e93c4f4b58f6f9d397d136319a29aa6b691b652651513bfc2297107379ce62020303f00359907dd68b2ae8e2d252d3313f3ba2bba16d21995333b2162b24c9bbeac4036435af585f0f75e60d362629108f6768756f7b39f1c70ab7f79e6b4e1bd9f08f020303929e2f8eb833089a3773b497247338865ef336de61e7da4a362eb3e5d5601a7203323197b010e3205d910c230463758f39cd6c01258db0a11b9b47f4c278db049402030328314d2f79ba26dc4f34afce51e50e0e05d61b253861e5ab47cc47dab500310e038502bfdf255197b6c7929c445580eddc7013470aa85f531e89cd595628576ef6020303295e1973d07a067f281e3337e756bacf10dcc295f7074564874ea4401eb2a4e503de12047d931a054019fb113a4c5d531be2d56ec3d20f99b1628f4d617b15da1c02030361cb373dd54af082c98abe4331b16f360ae70b82b3f111dfe54eab9bb47a85f0031bf72cc92e51f3f38f18d4d0a77c173ee78ae62dce6027288dd37d7f1024df600203032d8279aaf065d93b0e811dfa25bb7c19325ad2e7f99cad95d0737c5390500982036bdc41d82cbe612f8caa639dda471df1d8efe19aba0f39e884b0569c597f68ea020302030320c7fa871d9cbf1112255d49920d07bf151532323d32ceb6ad4da291fad9327403fccd5f970aaf4f45086402c560eeb209d84b4da278dc69f17e3426ba0b273f890203020303c3d3043a6c5a67ae707239a66070748c2efc09d25efbcca01ed86206919ee23d03407bd9bd8d77985f52cc5d8781fc24a200ae2f8bdbaa77b753f7f245f6814c87020302030203020302030365f66ec8e09bf15d73a83402fdc462cbcc40578fdf5d4ef85bbfbf9b5ea5e002039ca41cc26f222ece8fb37316d9436cb914d7041ed51f1d5d3831b735ae2f0721020302030203020302030203020303ce8a414b8283b20263f621799a194ddf5d753bef21ab8253b41de0ba8adf661003a8e38716cdd8a09095a7036c686009bd8236b1c7eb9507540fb981baa9a8bc4b020302030203037ca01b97c87ac12c8995d3c80f7aab3313747ace5a829f08eb68381a5a9fc54003e5554fbb47341d48f82f64a26d175a7d3559378657e77cf2de2eff917b95be300203020303512c2cf0ab4340a1623bdddc301aa586932f9413ea9bf8c0f1849d2d70d5d0ff0375d0cc499c7f76c70939fd8d633c658747eebf0eb138c15d902c34f0de9098030203020303b78dcbd59a3668396357cbda038d7e5bc77aac4acdb3cd3a75e96eb05079a6bf03ceb3
ed2850bca5df0bd69ce2e85e9daff43cdb4c79f58685340f521521a0943f0203020302030201034b33ab5a3b8d3b01c374c1d7fcfc714398b7f0704ba2f1ee757670269fd5a7f7020302020203070354000000000000000000000000000000005ca1ab1e582000000000000000000000000000000000000000000000000000000000000000004383b69e070354000000000000000000000000000000005ca1ab1e582010c18923d58801103b7e76ccd81e81a281713d174575a74b2ef0341f6b9a42fd5820b8b76bb549992d9bfc44e3b36d087a175b2e78b9584fc752eaa3013e0bdd31e8070354000000000000000000000000000000005ca1ab1e58209bd14ac8c1cf553e0ad3a2c109b9871eb74f3c116bf0bf492ef04d2983722555582090629dad0a40430445b7d2b25a8d19c5d7a929608ed7890877a499aaca01ca5002030315edee06840e36ef17d13817ab5475d12f7bd50113984febf31e2cd80c08952c03d360a7d78676862429feb7c95d052c1e63379b8ad3becf085a21baa353ab93d30203037a2fb952c2cf8e85d9706bcbcb5a69b83b13403b58f06b0767f4204acc5917930310de142eb3b2790cf1e3694b72eecc7e8ab3860f543c15cc24274ff69570f009020303879875563fe8a079ef71e84b6840b187c681499095de9d02d8b101c9dfcd111e0395e9fc3b000e49b65678f256d247786f72c91494c960d117b7668045c35502720203034c4d4621925c12b3878ebf267a1a013cc3d5675903cb0e22bc6d1df0bace3f8d03c092e19f1fd097b76813fc2412338735dab2f62302645b0e72d195b68a1e4d4702030350620914ec3787f2d03c118a874edb853c9678a3949ce426fc19489744df65e2033a2bcd06528de10b0bf7c316956d5af798ce85d8618011a8db4df56202c17f27020303c27ba5c9e177fdba8afc9cd524bb5616116bb12aac5aa30d1918e36228883fda03003a4d0233fc2ff4bfd5cb02b70ae195150d4d18b59449829e612204e831187b0203038cec528699f0b6819a574be7bea0d083f5999e462c6a464a37c582392140762a0393afd21f19e4329c0ef6b1b06baf963080c2980a73c5937cd6322ef7dc631dc00203038cf4931c97d6aa8c453db3175ebdf27d40e4e34b2b3ac67e8888dc34556a99b603cd716cb8821688b0df7e56b2c31036c17c53a5f6d50b50cfd4e68d30d2420120020303b81ba13ab693dd6dffd70ba32f7bd51fbd5ecd3f58bd8ee96d7b081dbe45efa803dff9ee8db1218deb4733e71215a2e1629d8c9f5e36bc0b8184d70f2ea6e8e01d0203031aafd4025804cbeabfe796224eda42a75577ec804c615abc88953b7c966766a4034baee3dbeedfb1b839869d087bbadb64bd8d4007cef5bfcd038c7f8436c4b7e5020303f659d8fb79866e5a2f9479b24ca74b34dae4e211e6a758e376a1407294fd840e032e9950f2c2283fc366c78f61d806a412a244bebf4dca45f250dceff31fd3a2a802030203020303375268372cd898f2295ec6c9a9838412658bf8a9ba5c309854a92dd747e4eb3c03bf0048ab25caf15956b958175c59038226d0331be1767f2c00ae19bc9f70f9ff020302030203030290f4c412920a6ea22d4ec8091a90d63fc62609d3e55e44da20097cdd8204430338962fdeb56eeda46eb38c254e32bd4fa863167913801664a58d773fa3a4882f02030203020303b83955a533913a8e816c0a9e001379dcbb9a89e48410b365841c552e93987a4a03e42aa480068387d975b85b52ab67acc0d5de816085765f419fec172afc69df34020302030203030e1f9af6f9a3833c51c53d2ee2b598421c0227dc651646350725e51762077ea3039ad12ef2e43458f28d5267d58f355ca92e3f625a595042518e0ccf8b0d4e96e002030203020302030203020303e8bb2dae757d043417195292bac773cda990500845f91a94d00179fe89525c3e039f7fc724f0bd8bd083a725fa7d2c8169bd8ca33d31c9146805940f0ee480c3dd02030203037d8dcb012bdde19a0dd178c1de91d21cc866a76b9b6315554fec4bc4f5daa7920383f4a4890e8cd73e6e32096e4b11c5c0c50991dff65297720ea9ab7b8ccf3ef302030203020303c998c4c602a03cfa0c92a1542165567f11d23f2ae5fb91d04e02292f8e297548039447848097d9500f21ebe819789f98461e01aff7cfcd442c8afe8e07b87a95690203020303f6441de6ba2bc5cc9ead4300519a94a14a80b85f2fb0fa41e2211d7c02af0e6703703a99e4c2133e89a6e892863f14f143cf5f2ad94bd527081c8be6143f14f3db020302030203031da626649ee092857f195eb0059639309d744972389a4e748c471f16b0fc3c2e03f4072cd036d5bbb777ad24fa0b1a235806ef25737404d7ce4f83babb76bf090802030203020303c7f6a9615846a2d627db4c940098479bce3c61c8fc1170d0b7f8c9ac2bec5ea4033664667108158e9377b49cf3632952090b6eab3
ba6eaed4f48ca9d5beb273fd002010203070354000000000000000000000000000000005ca1ab1e5820eff9a5f21a1dc5ce907981aedce8e1f0d94116f871970a4c9488b2a6813ffd41582021bb230cc7b5a15416d28b65e85327b729b384a46e7d1208f17d1d74e498f445020102030203070354000000000000000000000000000000005ca1ab1e5820cfe58c94626d82e54c34444223348c504ae148f1e79868731da9b44fc91ddfd4582040fc722808ecb16a4f1cb2e145abfb2b8eb9d731283dbb46fe013c0e3441dfbc070354000000000000000000000000000000005ca1ab1e58200000000000000000000000000000000000000000000000000000000000000002446745baae070354000000000000000000000000000000005ca1ab1e5820bc32590bee71a54e9565024aca9df30307a01cf8f23561baed0ef54b30f5be68582007b7aa19b2ab0ca977cf557ea4cec4c0b84b00a9430cfe0323877011fa03269c020203e752c67cd3ffa4dacba7194a973b10998b056b5b936d17da2a8eae591a8e175e020303abdf2e1db4950317aadeff604ae51ac75e8281b1ea22e7f17349757c71dca21d03fd88dafa9a26e9c8f617b5de3e1a6021092dbff5c1fdb5d8efaeecb1f333565c020303b8a363b96519cb0eed132d807f6e42ea35f115584c443a74b14a19bbeac463d7038247bf369e033fcc19a5797960f1387f04f80cf396babac560060887288632db0203033497b9767463d12616a5b29b2d66156e49b3cccfe6598e2e73d90190e04a15120384203647fe683ea28ce78395875f0bc39f1fe1ce6c9670b8393161514dab4701020303be9a5bc3511c4f8466f2316f7d83370249d71f17357eda7cd7135b86b136090703f4ab8e9e9441ad40280807e7462de0147c3471983825b8f70b6066331bc7fdac020303fedefede445ae76cbb5f94df4e543537d9404b97450cef183b95f2fade25aa250384deb1e0e5121e700221d4f1ed13d488aaa4275c46416c2003d8d1eddfd78c180203035ed3f2ee894a97e0bd924d79ee091e8430799ba07601928b7912280a2260717b0365ca974c72e3e2db5912a9c87c5d5f84c2825a1f0196fa4793cee99d4d173351020303678452244e14f477cf17190203e3d2741cde4dada731914ad7987d94a95fe953030018accfc6cce1d6b0b884be4fdf54fc21d7f2b0f320711abea4aaf2dfe49d52020303a72a40bab31ce553d4d303b80f98837eb346dc83decd766ed763739ccaeb1d0f0334cd6263be1472af1b73f09f382ab57941ac5043ccd4feb8c4abeb0f2b0a869702030320b1e0149d703853b7ff9219e776215178aeed36a0c047635656c8a804e0f73f031e01e1d5be3de353277a132daf82f484854a9f96ba3555c42f03f63dac8a72db02030203020302030376f04ec9d36ba7e80f947ada6e0259810101c9e7d45de9422ca4db530e69bced032f63219f0d7ee4e45f66963dffc99e8c00abdc81eba9881462b2586539a99a280203020303608497d825d71973307cda5edcd6d1f94aaf6ffebc7430ce2f8a9d7d58116881030188ddfeeb33494d9da289334e6a850d240512bc570857f475ef749bf48e83a502030203020303053f6a4c851f9d91a49431b1d1a85baeaf2538b0f94b8cbb089e9c440263e6de03bd1e02a14a5262aa6cbec4a59e8040bf5765c7b59d08656f0e7be78b166a80690203020303856ac3a92e4d8741331982bc77755fc5831ec683050767f2b9d5aff1fa786cc60386b7ce083e8e52be86e0780b6ed5bb8680acbc96259692245ec1f521c01e26d702030203031bedfc4c9c092921f7e7093b07a76dfcc37b9d1c851d1f62a66f209c22913b4c03956a3522834623943651a19db780c3e98710a341789ae868713e46e1b9c1b98202030203020303b9828062d71ab57eb548db7b526b960e119dbc14ae39cac02d7d00f8a195db8803c44a3b5fecff2afe00f98bf696d082a6a5175c5df8dfe72521b2c5659911b8480203020303b59aba5f6d921a646bbebb06a5e66188f856d56c93703805fe05e98a54ba2cee034576a02a1bd3f5e7b0a9f8d07e5a1fc02840fb40cae2f9f6400700f945b53a3102030203032cd57ed327e45cf463bdcaee1f1a638c75288b1ed961644cb925f5fdc451a63903bd691a351ce40e663ca27d33cbc4a814e08f6f9ca16661964166d7efcc7e711f0203020302030381d90d25cc12dca6684d3ffebcf1e5408d0a365c9242224f678548c5963ea95003b33603045dce8c8ff12133b56e2a2d3ed094cf962edfb62711e929ea2ec4f37f02030203020302030203020303beae4badab0dddd8f2223233bd5c4be6b07ced210a07e1372dd0f271fede38a903c41fa2d194a23c8b2c481a227801390c6671150c1d39cda7c9c0e883a05d629f0203070354000000000000000000000000000000005ca1ab1e5820966c9e067cc4e8a48ff6ae64ca4d8bf749f01266043ce38d3671782a
c86c738058209971e52f05a7ac040ea951a9f045f6b0ca4f28805a30e62273f331e4980b3a37070354000000000000000000000000000000005ca1ab1e5820693c07ff6229368d9eeeeacfb0e582d270d5bbb89324a80731633d2bbd5c77535820e893eedb484a91d508d6a0fa224172039b6c2fb9415b8e17729aa55b42ba046c02030389e3fca41e0d8f09375c382623317ca86dbc11e768d3092378ee9200e7d24b29037cf6827003ef81b19f0742ffe1d5ae37c11f93b8d3d4043889bf098c271f2e720203038a710f76a0386b80e437c9bb7e2b0795891cc8dea00d59b49f1e62c95c385b7303aab1cd0e8ec8a91b65dcd0e5b347817bb11adde535c72ada7effe7988d7fd7ec020303eba38e93550f3ac5a10e031758339464c3e9bb984e93c5eed408d709b33e437203374d10d55d31d4afc6583fc4cc50332529ed4926608f647c2137442ca096f1ef02030328c04fd39c18b7b519c893153c0291b3cebd6f815290312e4200f9fc82c92db30322339478fdcca672932963533acfa941a5a526cd3c8b8639135df8f9914afe0b02030327a8ae74de2be2de1caf4666e833e43d52e360ca878143e3897046b3c3690e56030915b77a3bc6ba544017c15f038d9d5aae50e7db83f6d6feec452467c41ff98f0203037511f8af75f75f986a1c8714ba6c45599dcaa9f61e250f7099d27915b46a8ee403a79923041d4666433c3bbc2b46c8c137c489a36d2fa5c84872f3e254780666af020303dc8a20e01f59403e932ea67c29b74d1615fed5768abcf1df2432e56a0bb0ee2c03462f1a21b3bf910d6ca08e90a47a11d25ad48c6e4d6bff7369f2f28dad30ac7802030379e75e6dec2ffd37919ce25da5a5a67adcff93790a7cef7ba0c1534aab2208c0034a95a94a94ae7a317e153903a888ab404b75042483abfc1b53994584fed445ba0203020303674f36f2a847c25e092483b595d6d69338bbf807516d5b424e3ab05fc95719cd031b713460225852cb9a5005429fdfdc44c8d71153e1aa04364570343a434c6388020302030325b7b7ced4578ad2120d4703c775f82d4fcf6ff0578d60d76b1b3d5bf982812e03ee4f0964b55a782cc4128ae4291d947dfd63300081d89319ddc357b6e9a7d365020302030360d8ba62d7544d9d354731fc286a41c33869212a45b7322d2c4e67504738e65103b0ae0178f65708516a57beaa38a2cd4d344fe8f3a217f1fe9c4cb2d41b2975b502030203020303b5419117efdf04c24efdb20fe6e1786d53529a5630a98108d2118c7dca7c136e03aaa09bc73d06dc35034f97fec917652f664c4d768d0c036b2539e661f2d8fc380203020302030203020303d51b1bcd3eab3b6a6c384a831ec8080cab63c1c44d898bd626248194376fe1de037ecc252f46692f6e8959f226a5e94a4ac314d38000dabd3c66c06489789651bc0203020303248599a8b4c29a8dfd17c29080088b4573842ac4a1fc2fb628f1f65350cbc2bb034f7ae2704632668f91a5605aa808807c7c83f999d35def3d40f72a825eb561ec020302030203020302030203020302030392af697495712b4013883b3f5ad2d370bdb93f0ed60416692b0267f10d9a3caa0386fa8ccd91ab622b232223f9a347f1785ca9c4b7323a2e0d19a7971c3afd63ff0203020303b4f12607fb8df583b854d7b235f4a64ccb2f4bc9819dc50f3a03ed0d4906910e038f64a125d14bb92752d65593faae8e41bb5e80e4f147b20f0c247078f6e7ca77070354000000000000000000000000000000005ca1ab1e58202d11035f2912c26c30c4f8957d3910a20622ea8709c8cd3e0ad87fa0f4460bbb5820c0bf0b2ab68768eaabe5fda7814227beaeaf4c4ee7e67f5d07aefaf5f0410ab80203034d5eb602925f13a2147a2c1439d43faa74e2561bb3d27811f02042466fb2804f035d9458bc537a1957fddbf6c5f13c6bfc9349abf1251df9d6dd48b5b574f6f48f020303bbf6401ad2a6b95a3e749f5b31224fc7fcdd083e7aeac9671ec3bebda312fe5c03393a914dd0b171b4cca2f5cef52cb4ed4b564278c0fb678e5e8f3d911b4addb302030356cdb16849ae7aff540b8724f73974149f71cd3f984360537159a273a5bfcc1d03791ad7bed137c9501bcee55dc6c9b030157a30c37fca14d39c25d9d5137ae88b02030392940198d6b1df58f0a6c3cc1da02efd9547043d626487166ec858a5aae7b61903efbf993292364275b60efda91d4c18f5a87549ebd407ba16763b1f0c6113a6cb0203039691d481d60a086435d9f914e8e2b5e5a68abfafb82dcc9d6de2176920c35ded03347f67f0fbbc63fa8a3b826c6491f42b13869a2abd2b6326d75d51cb30ea9cf1020303d822fd2ee43799eb3f714589ce7bea36550c8fe4a90b5a2aa68a95486490962c03718887bb3381bda095c52048955b7ce2e5b35e5bec11515e9fa86187fa9e0fa70203032d3bd6d97e29b994d2d73867b8e19f6b3b2a43d5992a6
b9b3f350f8834c0da9a039f1aca3992b3c769a6735830359c9153c2daee33d937c9b903a05ed9ada8f5d0020303bddef4e591830295d3c1f6e27decf301748434c3df51aa8b451558ee7962deea03926ffe488854b96b623872e9a54c6e5bb737fa12f402bd8a2a79f34000ba21520203037b0ddd3f35449b7e435243005ad2a536fa28167cf7da21ecd2d4c3a55e6421a6030fd4916c5757cb6e137fac5d203ba3d5d50a5077a06e3804faa80e9783e2367d0203036fff8395e24c14d9e40936920b141c84e2127ed823a1625699eaebd1f26b69c703069db41eccbdb4aa227cb482a97d6b342d0413855bef3c9b432d74ef0be43e0b" + witness2 = "0102030203020302030203020303ddd15247a8b234236d91271277b1059a674eaed56c29a6d8905b27ea9460c7e40344f7576ca6198b0bb6daa81b4eb6f594b46608e0f4d8d509361f0aac88eed2b50203020302030203020302030203037477c5b7ac361fa5a28f01782fc1b9577dfe27c9d91e5193c426916c166503f3033e6831fb92c6944c4869e9ff429fd40b9191f5a5a9fd8e4e26f67be29feb3d00020302030310c0064663f729ce8c12a4db054317ae8a3d309ee54378eba25ca39a4670758d03fa715595952a40ebcc9c06b02f6b1960a1f74a722c3a9fecba1aa66f32f1850e0203020303b010b79cdf4c9bd8f8164ad282defed968658e80fa57c26c19f5cadcfd9c890e0318f8d37b605fba62e9bd02f5554b8bd4784578021c737c4cb957c4ed5e8ad3b5020302030203020303c4ac3ac799860160a30a3304b765c2c90bc414edc3739a5d098bb7e18009548a0344ed07cf7b7b49fc2e7fc9c6c19d1b60e64990110188e15b445320a35660f91d02030203020303949f805ade2be05694c8011fa17fab3646a43f38f96d868386f0ba9558ba5f960302aabd9fbeceb9711f46d634513830181412c8405aea579f470a19b477d090140203020303db978a462b93b2efa3aa3da09e03370b570db692c6d361d52ae1051bdb26a3a903916d67432c505e1dc33f3617e0743d761aba44785726309191e79cb18b666e7402030203033edca13bcadc1db9305f3b15322cc6d774682fffdfe2b509f81d00b16ce2dcd003dc94780e238944094e7856154e6d3e54fec28293a9a70eaf1cc2a81e874e22170203020302010203070354000000000000000000000000000000005ca1ab1e5820e72de8a1b9696dd30f7886b15c4cc9234d52c6b41b9c33e2baaf8d88fc5b7c9f5820f8fb80310ac041e7a5e79c138d7261cda5d8a988dc9268b5a8dc5318fb610a90070354000000000000000000000000000000005ca1ab1e5820000000000000000000000000000000000000000000000000000000000000000358207d7b0aec16983b640324af57c161ae800ab5b0b61937d153540fd64ba724a431020303c6cbb686c9d7a94f49dfbee076ae1b87f1c9bb5f33b7c98a71816c33b4731a3b037514c2021b2bb805e2a6060b265dd53c069a4587e19cd7d1af99d0e9c3d0e550020303784570292bffbc3ee00153e5806a317459fed4c1d84de0515dcefc177404865003f08c92f6786e67148e8fb2bcd4eb813a665e16a270b475605d1d84b587450ff102030344c5e2a1775873020ab4e5a588e95d6702878cd46012682196dc39738fd8780703a6d5ee17fe3be7e20050e4e66c54b5188febbdd3615f832c35b073078258b214020303e5f90b59ef9f5ceee0e0a54551e41a62431ea06aa09be94779c474ca4d18683403e794dec8b1cbcd53bbecf14b61869699ed3f92ecbb4ac3d9a8bc744c09a3e69a020303476c478891a8f8d905ebf9e5c031ba1020ca1436538bf9b97c6eaa1b9512da97030e077b0c8215c8c43753d044e318788eb8e39de692fe0ccd46396d3b06ca6e0c020303ddaa569a922c9c28d9a225a4c212c076ad5e353bb7cceaab630a3884af855d2403d5ff4c122142c026a0b24b79b4a667d25ea916ef64d8a8215aa29a738c5588a50203034b1d465e96a44ba0d7983a6f4ce10a26bce7816b6d51ba8ac594c71892cc2af60381a6db28188e1b651603d41fbc2030bb2b7706e02b1eb3423d7f38ff6ef514e6020303f30f3c3ad2db979a1c81690619a35a801e3bcd77413f37e285b0011f2b6e2a4003239d1f94c6460af24c7228a2af86326ea1199e97365bf7dc5832ad029107445f0203038518fa303494de83c9ae1f80c877b5c0e6dba41880f6df1dbaaff30fa9b9c37a03653c1b2e876da5bd8b6535ce431ae69feb7be788cc67b2fa3dbff11c792c1f13020303d5efbfce398f4205569b3fc872e7405712796b7189d6846e61c7ff33a12ab0c5037aeb2da8a9e504490ac07aee509079823397fc6e9cd25257e658f6e0021ae771020302030203033bfe86ca5a55d4d2d42f5af48205ca0ab08df68e551e61b9a1bd5d575ff9cac3037462982abd4a0437ab5e12ab2af263ab382e0ceba69ff5de75151951
2149c70a0203020303980043fe396689718e09b0990d71b800219da2873a8e0c3c45d25ffe12bd9e6003f2f9aba950a1023ef8e02568c683c86ef2e77e16dfad909642ddc5cc57ac8c120203020303738b4a16af664d0e0c6b7ff278d1e3b602e6277085730d77844f1430c2f71bcd032c505136023a2005bd6b8abfc49eb783514ea36233d5439525478dc102ad67e402030203020303f30cfa6f63115cc17d752bd07a3848c463334bdf554ffeb5a57f2ac2535c4650037d85b4ea9025d3512a6fafe55d8e3570fc8c968eb67042e0ded283dcadc12ae8020302030351a20a2e192372b9383e5b8ef255adf58a3633e5aa4161424f7b52912e8053f603edc4f75f70c3608079c9f0b4584da6270879e9983bb3513d7e620024f15e659f02030203037e1734c6c90368548b9b6a882b4560d78e0630f3616dc7d4b4b8d77b96a42dbf03c4ed6f8e6cdc9797199a463a51287700852a10099a1386109a37561b538d228502030203020303d3858acf0781afe0adae49a25d1724b36c9989179cc884b9a9c6481f89e57706031da6fb50879ede58d816d1624292f0c60a8133bcbc671dd92d0f9cb8d50fc8030203020303521bd9da187efcbab07451097baf98589a33e32cd33501c5a912f48cf2552bef0352124f3ffee53f7e0f9a0068d5c0b0abfca5aaa148371c91e2e81df5fba6f8bf0203020303e00ce2232f3e208dcf050f887d02c7b91170c4b98e1d098ec5238bb3d387a41e038a0379dab30ce84865bb4f0834a9c3fd7bb2da17994abf03e89fd8d754bf7aab0203020302030333f5853903cb36caedffa12d0aa0a01c39e91309629a97dddafa8da4f738fb3e038e1dc6012aecd5998053b5878c6e3a398c8a286c7ad4dc0b55f043e4b4210950020302030317e041d91830fe8051fc73631cfd56519fc5bb88b5369298da9807b40d93733703dc7745bf8dfa058bcf1328f79efc9441cf5ad5fb5763c75bdebf9492f88a6c8302030203020303bf9a2780e63ac26ffca6df07f182db72e3e65802b277ea6d40057c383d5a37e3036c1548eb1c956ece0a876fff4463cc3f2e9c3b6eef88379f07e6d71013eb4aad020302030202020103c2eebac9e260dad1f051fa75846558dcc0ed120d1a7699fd1d6b739a3419015b020103b9d64db96b69b035a74a90069691afa2b34a705f3ad3aa82f3757c7a6f2a9f17070354000000000000000000000000000000005ca1ab1e58207af492bfc857210d76ff8398a35a942af892e866b1f4c241746b3ee89ca002595820f889596e5b3d4dbe5bcab5cde2af26d3ad8d88bc086e8b4f929885f33f6eec77020303cd3edcf79c7e0e0d6b87ae523729611faeda92e77bbb59f739e9a6127d890cdf03009a5c01c3cb27d2f1ffbd3fd77ff38f66991f64174f5df1411ea21ae2e22f250203035334694d8b56a2f5e0d0ded81283e7f36b3da6fbf2e2d1b469c008d7414296be03953388b0cacc587c5ca452ba8e96a9071958ef94439690bc14866f556a35ebc1020303227561f72db4ee550290a7b85931038224b1fa9c351395f5f5777f397016d7ae03dde5f312229c20faf5b1b27273112bc022bd0d1dad4195ffeeceb49c05001a07020303d4eebbde54471ef4008ea3e23e4bd31119b1d4fa51a2bce7771c95b70efba064038c6a2b8e1f68d72b2a95ef69cd8eb0ab32781e7687049eaf3b7381596c0bb8af0203036ae82b7b420a58afe9871a632d69be8475f745405df2183722c599f94a5cf15f038a575afe8d81ea9f181bee15a971affeffcb1964ed35ec291304be393899d80f02030203020302030203020303d634ac486eb2f4e325a096c1aac56ae5a0a3bba406dcbede2e9bd4837d1759f203ce1b43774de78b19d67b133fb575ead398fae6a712ebd63e26671f199c8e674302030203020302030203036068b215f89f68246518e7d8c967f8ae78b47c69bcb9e97deca5849a813b2e400384b630ffc67a1dd7b502c1b42165171a704d68ed15ced3b7cbb98bd150cd884b020302030203020303a3c7cf45ebdd7e21dade3434624c9fd521b0ab24a6956e3b8a777d700149806703b3637d0b1cf58c5272f28f8354a764d3cd48ff7c04f807237da8f4a1e2ef5db5020302030203020302030203020303c018dfe606efd5d17f3d45e91d33d3d7ef57d92a1d509291b1556bbb7e78dd0803b08ff5bb304aa8741af608415c541440edcd98bbc0fc849fe2a89c2c783341d502030203031e0eb0ac5664b1d0267d3c51dd66d1828c0d01a0903d599a317e4578926d4e3503330e2ccc546a3db52e796943aa8960d6d483a3b941ae0caa21cc7b9f7a3c2bbc070354a40d5f56745a118d0906a34e69aec8c0db1cb8fa5820000000000000000000000000000000000000000000000000000000000000000158206191319cb3bf48d9701195789dbbf6db5d3b99006317f5e7da37709f3d259374020303ac874a6acbf6de628134cd74ad9f33620
6e7aadb1ef09456b6267a770612485703ef323528b761720ce04927a54b81025a935f070420d655217e40eb2e084bd170020303a48199a63a429cf44fb39fdbb73098a49dc171e03d32800f483780adb9aa06580388796e8ab2076fc77f00a5096317ceff8a54da310b014a0310504bcd76f8b8da02030350179cb850b147782f26ff9a17895259e569b740cd6424a7a1479602bd8c822b0371b277886f0d14b6f82cfd063ecddab10fb5da5e0666e040992469d09a6bc8b0020303dee2b54587eeb7db2fe9ef0dc262b6ae679a5bfff89c8f403d181a1d79107b1d032aff27f522ef5fd88213c3865e01c7b4c1720d56778d1bd0e48e6a86fb3b07970203037d0d29240ad72800831a91d8e54019da745c6c6a630a625167723ace857bbb81031ede6b255a1c6ffd6fa2afc16d61aea6555a5cb85dc4669070b69b55a16ac58d020303335f1f02ebdb1926380c362d23b2d90d791f5ec8531287a47d3a1929d6304f1b037b80208ab1e9bc0411f128ccc859ac552945a650ebd0f9161a63fc9944e8d43f0203030d600bfcd6581d405aaed26aa7cee976fbb2bb9c1c1390bd3eb14cf5f6610223030bc731957e48cd1b0f92521fa971ca65f76bc8608eaddfa5243baf39838099810203034b176bbe991dbc4ae5738f23e31433597f9b892730ad4fdc86784eb928cbc642035fa76b91882dff00646e99735fb6accf89476e59c9dd28e0bc7b617870cc15e702030309cc884c89c3b546aecc35f4a95760a100cc3fb5457a82fc413ee2cd795345d2037ac7f4c6f9dc0e8f652f47fbda5c4b53428948acc95270be462ef8c909e5b742020303bb4f79f1339f6fc4ba3b4c61ff1940c27b29459942791fd7b160a04bc8aa411803628f665215c8c44bda1a6243487e35e5d9d922bf36f1976fd5f6c39264d16e8b0203036bdcb7f00d848df36ea8ffe2d419775be23396cb7344a2dd1ab44292769c922303710d1c2ccfa13ceec5ffc97b3469592c4f2495141e46bbaaae6f099c47b9737502030203035f97c209aeacbb5fc78e69d247a800528f4bcc5e649fdec626ae5ef510ee7a71036eeb37a43ca943f0bb09cb54bfcc7325ed27e97af16f59cab0822f88d3143888020302030373d9994e2d75a6f80adb914f45d533caf2ade8d952a0d8b73a40299199892f5f0347947690bda8388fbc8744af22af00157531bd0f37353b2407b573cff34e23c20203020303cd4c5dc3e51e3a3379cf73004c787ee7cb312c06c70800d0f08e65a0ee2313c40350adbcaba1f1a5b06ae4510704194cefdb5053ffacdca11f354a80cc04d0a2f402030203037ec1e64855ec72f6c39f1832616a45075eda4889495c393ffb673aa05f25e67d0361c764032c6e6f093f7e4e2db6e3324b29e59ee4df2f6df3536539ea135264cc02030203020302030325abb132a4c897744752a4707644448c653f00743c37cd560218074dfe1e4d2803fe62ee54fd13cf254cb8c3b2bf728d8c26703054588e529bb8b2a68a950ea4e0020302030203020303846e32cbe73ce37fdc6fb93afeed4425035df35d637127b54b8fc4c053d405ff0398c008e116cd33ceac2a28f29c392e533b755c24316cf6e847e4ef72b070dcc602030203037cc278f9b41fd17fb5eb3c839a725fcd1ef6000189fcebcb4214303f45dcd2d60386c3bc64da300f1a87efa2eb2724553e41348057fc99d5c23b5b20e216fde46d020302030376bf2ddfddca9910df80bb0785add76937d1e90e029c02b04c0cf421622a232803ba2219bc37e93a89b0effdfc3601f58645c1cb7e818f2254c8fd16fea4ba84440203020303fdf1c4799edef5fe2960f9148627fff521e591247a224eb8d05eae3f51675b560372adafa8e298a14d0da0a71e645a12a23def78db8e81f7a68ef92aac7d5700b40203020303f5a794d38718b283b47993f3bbcd67c76f84c47fcf2d35373fcb7f8a0f43a06b03a14b12d1f03790ac75797e463a8a7edcfb2bc80b65a7dc8d1b15d00cefb315d5020302010312f8462573dc83d436d2498e68019babdcc911f482e1e00c1b3fda70e1a166e40203020303adcfa2154e38e2cdbafd5d56bdaa5dca90a5bfb9c36bfbe140bb31ec0e66716503b5aaf1a6fa2e80ad8f4e49c51808d2898fd74f539ec5de974b57c27466f5e7490203070354000000000000000000000000000000005ca1ab1e58205956a0b12f607189a063054545ab26ce76ea5eb4c9bc1e8d8161646c93ac66515820da6aba51eaf87e14a7585e52e23cc0b789c61b3e808d2aef704ae932bb2ab49d070354ee5a4826068c5326a7f06fd6c7cbf816f096846c5820701c251f0448beefca8b47dce2e42f136d224b8e89e4900d24681e46a70e7448510237426c03237429400000000067445fb80203035b2ba27d2c4b5ccd82a303efb2a86cf208d08dd952ed0494acc5aff009a9809303e498dca26358e5fd56e5464288da82073a17cbbd112e322488c12bff1661
b49b02030308e7f519768ebddac099b4d79b3da7e528e3e1e7f43fb5e815cc2e7e9bdb82ca03afa06c5681e457eed414f2a781c5cf2a752257a696aa0d741799d3e5c6ac65b6020303645cd7c283714f070784b2e6a24c049ba20cc01422792ec7507818b757b4d02103c6b1fb9b858f69c598cf4359d1a43ec9faa45a7f308dfcf087b529ecf6cba0d702030364631f25391237453ea4bdf6dcd59ec33334c8bf13b3f4ebc24512193a52368203d4a0ec404056d0dd6b14481bda731e46954f9d29e4d43aba64bb8ca52ca87bd902030329cd1de4c7edfcc761f57c5f2466970add88bd8705390cb23184091c99cbdde603eca27d7686e41e3d24558d66cbc83d2a5d35469522d922ab216914a84d977e36020303aa3f3aaee4ea8cc05d8b5a9f3c4528c8de0d5b4bd9bedd4456b8816a9d7195da036dee15633cb92bdefc8f632e08b85dcb8bf1d317f82dfcbb7b76e38f7421361502030203020303f1a4bc7768286c3e023725e4f781a6b00fb11d83f1dda2647000f13ca3c58544035e062fcd2f3f81c8d4d424e25bf7e77e301465425a25afa5d0bdbeee2c6284b202030203038482b5d9958175078c790a8a1effda8f156db2faa9ff2d6473d742b4f737143903bb264f8b66371fe289f7741ae353b137695ca94cbc9ed3ececd3ef601d54181d02030203020303b9a21b649304cec7a5d6d0464d6bd8ddffb475c672c0c9799b3457e4b9fc2a12038da99cc78f04ba4eaf3df326eeb15cb038c013a9e5b76698f493170bd356b13a020302030203039c69fd3c2b5b5200c89358d29432ddc4cdadbf9d1b05f2265bf4af27d968898503389f85ccddd9ba507ac3bae9f0a830a56eaf35ebde5aeb6c374dadfd0ab39aa9020302030318b62235f3bd9e0e268b30ff1a987af5548f00006ebcf51db0448e220c17e862034465f83c3781a2e121eca23c852e6b742e52e0fd76e2eaf886471d3f5c4a3e8502030203038b2faefda31a8d8e3e5590221ea164997bdaaba30fed699932fa0b65c6ab2fda0396915914ec53b6ea1fea28b0ede76ab410d1dafbf996f2fa7cd37f1b4ddeb59e020302030203020303455b9202298fcd235ea441cc50f29ce15a2a1a9504564159a849211c899dc08003f3df85b9d03df952c76c1f9853ce686f21949c732fc9b161b5759faa36b2cd55020302030203020303508d990b34daf5a3f925c435daac3d293f6d861094cc2d343a92c62428fa66da032f8b40a9211667e9c44328d6440091ecb3a46bc15832f7d7cdfa8ec130b527fc0203020303f993f7eae6e45a6f8557c6c5d0e912cb41b71d2bf37f38affc0b2d8e054193220315eeb3ab754628ce727cd0b7028ff8ed3291de7566b99066e127185d043f595702030203032f2c132f32f21e267ab64271e8f2c0c39fedbcc509c4589616cffec21d7332eb03839857347599c19c43a0acfe53e1bb5bbe0d68ddb49cee05f1b24c5acac24a150203020303dcd0869ad1107856680f6bf164623fc709d26d1a0842bb9c60a383f255c0ec2403c92cb1692742c4e2b6a91d13c3b371a9dccd29f898d8f6457ad052b1da9efcf6020302030203031408a1feb1c4cefd2e71c1d7ce58e6b4c2d139d48c67037d40dc0d60390af539039c51675ab13cc260ab6875b12824ed60903c1755add14024e27508ac0a3b9d81020102030376fdbe16ba7e2f048d9c311cb1f99291b4f624717ddd7e9f2aa653099d19314f032ebe85ea3fef7c7033338d1ed98e187eddf75dff4772a23e19392ce61690f77f020303f901f2ba5a7a95db9ea7106268f17f341206944377d1f006921211069cf8a0a103f43daf24401f9ed2d0691570a8ccdcd016c90b722786ff590276f7cd5933ff3d0203033cb8f613c530196a2ab151996cc3eb343199c5c0c0adc212268f74f6a092666c0355b4984426bd4db31ca5d70798a18280a4d319786bd897a29365d2db7489b32d020303d7f9465d351f2c4200659307b3cd7cf34d3fea84b9b23bffe5bec395f4a2d88a03ef3e9f16053b7f799f207451eb3403eb95301e9c9e721dfde0c41ebd8362485c0203036152db0543c6381b557c70b284c75fe77b405b54d279a37db0ea5a382a61abd603b70663772bf3728213f272a0d02b2ded9cd31441fbb82b9a44d266c56e7fdf58020303f967722c10537809246d339e984382cc197deea70a2c433df88fd7797701dc76036e06014c6d6c4d1358aefacae43b83631ffbbb39c93874faa4d589c1f60ca07302030341a2496071d2a84dec9f60bfd3288fdcf01683618900806b1a61a740fcb95d4b0338cf0dcf2e49a0359d0d543a3ac97474876f7605800e270d1c8671dc375720250203034a347b6bdf9e875c714c0790a2ad84b01edf7b15c4d23cacab0598c704417ea7039676ef3f389061effccb4e08a0afc2971c35bf69edbda2e91d9e88486113990e02030203020303927b20cc65cbc0d70e9880b16dfc67b8379ff4a96b95309302803
a1819d95ea003eb0ebe2fcfd0a9002bd0985e47dac1c4a01561de0da69bea0bc25ff1b519d5b602030203020303a4c7f2025180b6de7674fc2c91392a565d9a28a77eb193f29d9ba706c6fdb42f03d2bca54ba531de1142b06bb35aed010d55ab6e0d862cdd7807e4136c1b9d0c490203020303ec1282aa791a0b578de360336d5cf95a7f3bf1ffda9cf697b3aacf9417aa38ad03cece3331be90852d59eb04e3bc87b03657c0993626d3e36ebeef97baedd928f00203020303afb305376ba08f5bfaced38f127295f994096684417f9de1a8f496fdccbb3547036bf14c6051f3bdb18d621bed206c3ceb8daf8ec24843921de9af2dc2ba70d5ba0203020303122291009057e848a0e15edd72e47061463ab3aee368289eddc782303e9299cd03678ace78eb9da91eb3fa9105c9969a0aa9abd66ac41ab138aa70346daadd327002030203020302030203020302030203037a46bc17ebfbc47f6d99661de00074c9958e0f7fd66df7c77c236b89b165472e034b58bfe7c7506d2891367c270ca350269dfc0a08b7466ec2496c6330dd602bb302030203039b58b0df7fae59a4cef25184d849214bc145cda115b2c0dfd85fd470ecdea70f0330923d4d299efbc138d4442519d30cd33a7827557231388b81a6ea9c65eabe6f0203020303af3fee608c2e8e5a30ffc6345d86ec1b2d55f10e518b4da5e8eb59abde07b59803c2016682405d3a953eba254601d5fc0b8966a33efaa51918a4a41b8e0acbeb4602030203034b1387aa6d0ab944e2ec65ce38c8643a0ddfca5c3059718f398dee501291569603528cbab25216c4397a402fcb572f0b512a773dfeafa59e401989a4da13406bfe02030203070354000000000000000000000000000000005ca1ab1e582054b6c4d9862a1658dedebe99a0f61d94c5d1515fd031d0dfe9ebce6a1454f5c658203f14693500ccd0260659fd9eaf69570edc0504867134ac88f871d91d388b63690203070354914e7547b9051ea6226c30495190a2efa15930c95820ffffffffffffffffffffffffffffffffffffffffffffffffffffffff74873927548382be7cc5c2cd8b14f44108444ced6745c5fecb02030311ab6695ec969171698c1f56d4c05373d8505a2c7299fb05cda1d4351e22bfa403478b94ae515fbd01728835b532c7c45ccc78d200d3d004da6917337e139eb729020303ffd14369e7c7f7aec3a890a20234885f2c9fb9802ec318d8434ebcd58a696153030ddae742090ea458c3f232dc894bd8cd3378b4b4590a0523e09a44e0439fe0db020303b1688d8c7806365d931579ccac8dbf7a8d7705ac393159dfd9c0395ab7b5ca5b036a6c978a565b15267de4330de7b6166014082043c5cc80370953767ac501ccf2020303fd878c58bb70337606fc9f519700dcabaee2f175ffd956a6d246c56e38de3c5a034ece3162b251497a52be7f417b99722c20de63b35a0387e0cb1d8a1ef6bd34190203032db40fdeb2c5256d5a237b6134f844646b325bfc12c687916327e21a65b1ae6a03381ca5f3231b0698c6d69bd685fd1930924395002ee0c9f1f3dc9324570c4f52020303eaf50a55e8bc433b8c594aeb6ce2dff8d6dc8a6fa7d72076a07d6b771d13d78b0311a2827108d1c853cd8a63db81104ad8493e188969ca0339d0a01ed043b47cdd020303f7993cfe3bc67991b923b2f3470e42e23e78ad30096bf8278f293053de07b46703a0c8d263334a785d55f5b7be433841bca1d7ef1b8743e6dacb4e4fdffc52a77a020303b616a1ceb3607803c41329eee93ec3541b2ebbe690a4f29e3234441d7fe22710033646798b76e3f8d1cdcef03b5802388ad826a45b0ba508443fa26d5cd6fca96602030320f9766d80286663ec273eaab27d516a59305f6fdb96957af2602f4c0eef4b8a031c930c476ddc908dc2d5ec253fdd2c6687f32616ae7698ee6f0f6baee9871f780203032d1c40f0360f2f347afb931f1caff15b122c02dd058d53cd31e10da5eb3a5005038da2ec93073400637eda663a2b3095ba8bcf473b6bc0ddba6732c0d88ea26f0402030203020302030349147352cccb7f2119bbfbbbb0a4306ee33992973d1777a3f176a7420854218003a44f6acf78a34c96774821d091ce968f756f12c95ad543c97e52f1c041e5c19002030203020302030203020303b990df7130026def95a6bd8c75f955e81827b81134286380b827ccc8d59020bb03acc511b7a7e46ccf36ec7d94e25df35ebd2b1bdb6754891973d081dbb84b74c3020302030203037bcfbafd729a64f6a285d2cb27f169b7f38544bcd685a9a551029d47527b7dc70315cfee27b492bf2f3b5144c78e9ffaadbcb1ab045fa6e58965731a597f48e9eb02030203020303d9b9c9b6e08cb6200a2ead0a7b44aa81f526cec46dd91a23e67370c74e198a1703502ff4229c44844597f1659b073f9ea36ead050cc08aa533e40f9a3a38d1407f02030203039708356582bcd2add8
89a2bafd2ad4c93eb63fa742f601c045b9e98e1149112903727c6112bad2314490ec2e9b95bdf87c45c93b0ee92fb4a0707bc806c0723c280203020302030309c10778ca4fd1e78c03c22cf95624e6d9b1845103efbf1dd6e56c4d47beab4a0357fd6003666e25f92b0e831fd0f7a0574664f4355a1cf4073937a1664bccea64020302030382b763d46efd8db57bde8120303a4dfd77ee10456b1b2a46916e75b6f0a29b140350037449c92721dfd8d234901d464a1cd6403666af10a822691082f192df864502030203020302030203070354ba42ee5864884c77a683e1dda390c6f6ae144167582089780709b74f02d53045c2451635aa24be6675c290344ab3fd48e15b49e3ea685713fc1f351312af1faacc6d6ecf4013834144b9d9b99c83070354000000000000000000000000000000005ca1ab1e5820565d8b0ba637731af59e8597dc7f3c3f039a169ba1e83c6daef2d43a8117db505820f4cbb10315f5e55053e70d60059ccf403d4e9e90cf36a791de2607fa7a89f1ed0203037d9fcfae52b9e7cb960a57b588a5e913fc59bcd8e1e3a72545680ae787cd9a080340f594bc38ee796cbfa7f62e7375b6e27cf3b90da8baef7cb8225b98a6dc06f9020303fe38e4536c064741e62c787ceaff5b8252f552b1e7c0dc2bd09b0ad991b62b0803d4a9342427b3fb884bbbbfdbef9db4edcfc599bcf4918022291891ff47a4473c020303133b5e6a3d0e759a8df9838b021551845f1e112062741d51919df0ba3110621c030b0cb1f937b231765bdfb0ada592cb4ec4e37b7ce68ad21315051f0bca23e93602030360b50daa81d1eebf433a86cdc63d639bb88efd0837df61c20ef4a07b86d4df9103c38b3536ac0104b6bbdc2fb47f57e4eef152aa4c727f7040d362604644adeb43020303685524b040ca7b4e87a1e2e05e3c0c0e289d68a623eb6b014a9b08d3525c072a03e2f0a7769adbb870f5ba21929643c23ce8a0e8149c6003bf635a14218eff4307020303c3d7afa90b5337e37f369666a5fe1e26675836c3adbf5b685277769112a2445e038d8f4c4e40c45232c7da072bf7ec8aa1feee967030d0ac4beb626cb50f2dbc8b0203038590c5066fd108dc4907febbdfb860f25f821349acf99d458bf4df063c0941d303d1c8f444e3b9f496780241daf46ee0ca3dfed98ccd58102a13bee062db56089802030381362d398fbe328b72f0cee739e72b7a5ace40a66aeaf298ef98f620c2b3b3da039ba3165764fca29bf2a7c01813fe58996e8e705882dfd43f6c5e17c54b307a4702030326b74aee4a5123aae28720835cc1a727db62addf31878a96a703fc43875a400203f2e248472501c5a6bb2e13c3ae7a208a35d01dbc41e44affa9e4c0300e2c2912020303ac5ba43d3be112366057444e9a2db12b96222bb7bb88139738320cc53924cacd03462978b4844a138be32fc8d45c274d7d1e77539a2839950eca4dbb779ade3db00203020303cec89e1b5d9d20c59a319c536ef1ae8c0a67e0ded8f6ce3a7eb1979ef183d3870348dbae09afb5d5232bc158cd3f9c2728348ae93f0742e0d71971d3b26c301c0c020302030376e9a3f309a69b0c2c7ca3184457ba9f0ce19145bc96d4bd846742585cf4e9a903b07dbe4dab435161a33e90e991fdd8ac5c0670d77cf2b72ae5bc530519e6fbaf020302030203020303c423e16fb3487a4d9126ad5c533cf130444a4f099a85777493cbd2d231f27b71033c4bbd0160fa760c6c008ce70d7342f2cd5524690247577a0ca36e15528565cd02030203031e5c59c8eb7467fe1b1b59f78351143028717a9679b5956d1a42ab64efbbdff403bc2db4433eb1e4eb520035e06ee46cdd233cd6f74e4ce438a0743af21cf67ba10203020303c1da641e5501813afe9c4653f2179710154bfe94ebce827d0bf64d70bd3baf7a03e2bf953702f6287b134eee978e1b18a36f65b41c2c673d75876215604661dd50020302030203020303a35901b035cd24570a277362d9ece906ef4d6e00821b55212d69b6fd6775472d037568928f5eecc9599b391e6cb75468d91ac18de51d7e984eb678105c39fc8a4a0203020303791a9ee8b5057a6ca65118869d354dba135fd5c518d63144d3860987f084bbcb033c8c390d481f51cf6b43c22677a971beae0e62e8b2ecfdaaed05b48ac0f60294020302030203020302030203070054000000000000000000000000000000005ca1ab1e45e8d4a5100002010341305ecddd1b56329ac9f09a1235eec6ce6be69492a9788db13e9187dc21e9dc020303fb1c6d1aa6d3f3bef7a0bf4130218b3b168f9447e69ebcd3b68c2b2f41d9b2ef03652ba6f9b69aee3d28404079416c2f8fba4078d66b558c7a8d9615cfe7d3bd30020303d9f042d0d2f152e24d8cde02d3a7d7a1fa234efc5dc259572d412a2e607215ba03c5b76ff595e1d74a22eb44a5aed94f3225b6126c2c28ef04bb75e1d3804925ad02030314a2b125d
a4db5ba673cd5c0aaae8c5bf0857fd45728b868cff3f40eaf9f82790393e93c4f4b58f6f9d397d136319a29aa6b691b652651513bfc2297107379ce62020303f00359907dd68b2ae8e2d252d3313f3ba2bba16d21995333b2162b24c9bbeac4036435af585f0f75e60d362629108f6768756f7b39f1c70ab7f79e6b4e1bd9f08f020303929e2f8eb833089a3773b497247338865ef336de61e7da4a362eb3e5d5601a7203323197b010e3205d910c230463758f39cd6c01258db0a11b9b47f4c278db049402030328314d2f79ba26dc4f34afce51e50e0e05d61b253861e5ab47cc47dab500310e038502bfdf255197b6c7929c445580eddc7013470aa85f531e89cd595628576ef6020303295e1973d07a067f281e3337e756bacf10dcc295f7074564874ea4401eb2a4e503de12047d931a054019fb113a4c5d531be2d56ec3d20f99b1628f4d617b15da1c02030361cb373dd54af082c98abe4331b16f360ae70b82b3f111dfe54eab9bb47a85f0031bf72cc92e51f3f38f18d4d0a77c173ee78ae62dce6027288dd37d7f1024df600203032d8279aaf065d93b0e811dfa25bb7c19325ad2e7f99cad95d0737c5390500982036bdc41d82cbe612f8caa639dda471df1d8efe19aba0f39e884b0569c597f68ea020302030320c7fa871d9cbf1112255d49920d07bf151532323d32ceb6ad4da291fad9327403fccd5f970aaf4f45086402c560eeb209d84b4da278dc69f17e3426ba0b273f890203020303c3d3043a6c5a67ae707239a66070748c2efc09d25efbcca01ed86206919ee23d03407bd9bd8d77985f52cc5d8781fc24a200ae2f8bdbaa77b753f7f245f6814c87020302030203020302030365f66ec8e09bf15d73a83402fdc462cbcc40578fdf5d4ef85bbfbf9b5ea5e002039ca41cc26f222ece8fb37316d9436cb914d7041ed51f1d5d3831b735ae2f0721020302030203020302030203020303ce8a414b8283b20263f621799a194ddf5d753bef21ab8253b41de0ba8adf661003a8e38716cdd8a09095a7036c686009bd8236b1c7eb9507540fb981baa9a8bc4b020302030203037ca01b97c87ac12c8995d3c80f7aab3313747ace5a829f08eb68381a5a9fc54003e5554fbb47341d48f82f64a26d175a7d3559378657e77cf2de2eff917b95be300203020303512c2cf0ab4340a1623bdddc301aa586932f9413ea9bf8c0f1849d2d70d5d0ff0375d0cc499c7f76c70939fd8d633c658747eebf0eb138c15d902c34f0de9098030203020303b78dcbd59a3668396357cbda038d7e5bc77aac4acdb3cd3a75e96eb05079a6bf03ceb3ed2850bca5df0bd69ce2e85e9daff43cdb4c79f58685340f521521a0943f0203020302030201034b33ab5a3b8d3b01c374c1d7fcfc714398b7f0704ba2f1ee757670269fd5a7f7020302020203070354000000000000000000000000000000005ca1ab1e582000000000000000000000000000000000000000000000000000000000000000004383b69e070354000000000000000000000000000000005ca1ab1e582010c18923d58801103b7e76ccd81e81a281713d174575a74b2ef0341f6b9a42fd5820b8b76bb549992d9bfc44e3b36d087a175b2e78b9584fc752eaa3013e0bdd31e8070354000000000000000000000000000000005ca1ab1e58209bd14ac8c1cf553e0ad3a2c109b9871eb74f3c116bf0bf492ef04d2983722555582090629dad0a40430445b7d2b25a8d19c5d7a929608ed7890877a499aaca01ca5002030315edee06840e36ef17d13817ab5475d12f7bd50113984febf31e2cd80c08952c03d360a7d78676862429feb7c95d052c1e63379b8ad3becf085a21baa353ab93d30203037a2fb952c2cf8e85d9706bcbcb5a69b83b13403b58f06b0767f4204acc5917930310de142eb3b2790cf1e3694b72eecc7e8ab3860f543c15cc24274ff69570f009020303879875563fe8a079ef71e84b6840b187c681499095de9d02d8b101c9dfcd111e0395e9fc3b000e49b65678f256d247786f72c91494c960d117b7668045c35502720203034c4d4621925c12b3878ebf267a1a013cc3d5675903cb0e22bc6d1df0bace3f8d03c092e19f1fd097b76813fc2412338735dab2f62302645b0e72d195b68a1e4d4702030350620914ec3787f2d03c118a874edb853c9678a3949ce426fc19489744df65e2033a2bcd06528de10b0bf7c316956d5af798ce85d8618011a8db4df56202c17f27020303c27ba5c9e177fdba8afc9cd524bb5616116bb12aac5aa30d1918e36228883fda03003a4d0233fc2ff4bfd5cb02b70ae195150d4d18b59449829e612204e831187b0203038cec528699f0b6819a574be7bea0d083f5999e462c6a464a37c582392140762a0393afd21f19e4329c0ef6b1b06baf963080c2980a73c5937cd6322ef7dc631dc00203038cf4931c97d6aa8c453db3175ebdf27d40e4e34b2b3ac67e8888dc34556a99b6
03cd716cb8821688b0df7e56b2c31036c17c53a5f6d50b50cfd4e68d30d2420120020303b81ba13ab693dd6dffd70ba32f7bd51fbd5ecd3f58bd8ee96d7b081dbe45efa803dff9ee8db1218deb4733e71215a2e1629d8c9f5e36bc0b8184d70f2ea6e8e01d0203031aafd4025804cbeabfe796224eda42a75577ec804c615abc88953b7c966766a4034baee3dbeedfb1b839869d087bbadb64bd8d4007cef5bfcd038c7f8436c4b7e5020303f659d8fb79866e5a2f9479b24ca74b34dae4e211e6a758e376a1407294fd840e032e9950f2c2283fc366c78f61d806a412a244bebf4dca45f250dceff31fd3a2a802030203020303375268372cd898f2295ec6c9a9838412658bf8a9ba5c309854a92dd747e4eb3c03bf0048ab25caf15956b958175c59038226d0331be1767f2c00ae19bc9f70f9ff020302030203030290f4c412920a6ea22d4ec8091a90d63fc62609d3e55e44da20097cdd8204430338962fdeb56eeda46eb38c254e32bd4fa863167913801664a58d773fa3a4882f02030203020303b83955a533913a8e816c0a9e001379dcbb9a89e48410b365841c552e93987a4a03e42aa480068387d975b85b52ab67acc0d5de816085765f419fec172afc69df34020302030203030e1f9af6f9a3833c51c53d2ee2b598421c0227dc651646350725e51762077ea3039ad12ef2e43458f28d5267d58f355ca92e3f625a595042518e0ccf8b0d4e96e002030203020302030203020303e8bb2dae757d043417195292bac773cda990500845f91a94d00179fe89525c3e039f7fc724f0bd8bd083a725fa7d2c8169bd8ca33d31c9146805940f0ee480c3dd02030203037d8dcb012bdde19a0dd178c1de91d21cc866a76b9b6315554fec4bc4f5daa7920383f4a4890e8cd73e6e32096e4b11c5c0c50991dff65297720ea9ab7b8ccf3ef302030203020303c998c4c602a03cfa0c92a1542165567f11d23f2ae5fb91d04e02292f8e297548039447848097d9500f21ebe819789f98461e01aff7cfcd442c8afe8e07b87a95690203020303f6441de6ba2bc5cc9ead4300519a94a14a80b85f2fb0fa41e2211d7c02af0e6703703a99e4c2133e89a6e892863f14f143cf5f2ad94bd527081c8be6143f14f3db020302030203031da626649ee092857f195eb0059639309d744972389a4e748c471f16b0fc3c2e03f4072cd036d5bbb777ad24fa0b1a235806ef25737404d7ce4f83babb76bf090802030203020303c7f6a9615846a2d627db4c940098479bce3c61c8fc1170d0b7f8c9ac2bec5ea4033664667108158e9377b49cf3632952090b6eab3ba6eaed4f48ca9d5beb273fd002010203070354000000000000000000000000000000005ca1ab1e5820eff9a5f21a1dc5ce907981aedce8e1f0d94116f871970a4c9488b2a6813ffd41582021bb230cc7b5a15416d28b65e85327b729b384a46e7d1208f17d1d74e498f445020102030203070354000000000000000000000000000000005ca1ab1e5820cfe58c94626d82e54c34444223348c504ae148f1e79868731da9b44fc91ddfd4582040fc722808ecb16a4f1cb2e145abfb2b8eb9d731283dbb46fe013c0e3441dfbc070354000000000000000000000000000000005ca1ab1e58200000000000000000000000000000000000000000000000000000000000000002446745baae070354000000000000000000000000000000005ca1ab1e5820bc32590bee71a54e9565024aca9df30307a01cf8f23561baed0ef54b30f5be68582007b7aa19b2ab0ca977cf557ea4cec4c0b84b00a9430cfe0323877011fa03269c020203e752c67cd3ffa4dacba7194a973b10998b056b5b936d17da2a8eae591a8e175e020303abdf2e1db4950317aadeff604ae51ac75e8281b1ea22e7f17349757c71dca21d03fd88dafa9a26e9c8f617b5de3e1a6021092dbff5c1fdb5d8efaeecb1f333565c020303b8a363b96519cb0eed132d807f6e42ea35f115584c443a74b14a19bbeac463d7038247bf369e033fcc19a5797960f1387f04f80cf396babac560060887288632db0203033497b9767463d12616a5b29b2d66156e49b3cccfe6598e2e73d90190e04a15120384203647fe683ea28ce78395875f0bc39f1fe1ce6c9670b8393161514dab4701020303be9a5bc3511c4f8466f2316f7d83370249d71f17357eda7cd7135b86b136090703f4ab8e9e9441ad40280807e7462de0147c3471983825b8f70b6066331bc7fdac020303fedefede445ae76cbb5f94df4e543537d9404b97450cef183b95f2fade25aa250384deb1e0e5121e700221d4f1ed13d488aaa4275c46416c2003d8d1eddfd78c180203035ed3f2ee894a97e0bd924d79ee091e8430799ba07601928b7912280a2260717b0365ca974c72e3e2db5912a9c87c5d5f84c2825a1f0196fa4793cee99d4d173351020303678452244e14f477cf17190203e3d2741cde4dada731914ad7987d9
4a95fe953030018accfc6cce1d6b0b884be4fdf54fc21d7f2b0f320711abea4aaf2dfe49d52020303a72a40bab31ce553d4d303b80f98837eb346dc83decd766ed763739ccaeb1d0f0334cd6263be1472af1b73f09f382ab57941ac5043ccd4feb8c4abeb0f2b0a869702030320b1e0149d703853b7ff9219e776215178aeed36a0c047635656c8a804e0f73f031e01e1d5be3de353277a132daf82f484854a9f96ba3555c42f03f63dac8a72db020302030341ad95a71f5d9ac2a4472b72437b529d9683cd3110874426bf5a3cf9fcb979a703a054c300828ecfa19bda2ca0f4d770134b6812dadef254990956f75f418010920203020303674f36f2a847c25e092483b595d6d69338bbf807516d5b424e3ab05fc95719cd031b713460225852cb9a5005429fdfdc44c8d71153e1aa04364570343a434c6388020302030325b7b7ced4578ad2120d4703c775f82d4fcf6ff0578d60d76b1b3d5bf982812e03ee4f0964b55a782cc4128ae4291d947dfd63300081d89319ddc357b6e9a7d365020302030360d8ba62d7544d9d354731fc286a41c33869212a45b7322d2c4e67504738e65103b0ae0178f65708516a57beaa38a2cd4d344fe8f3a217f1fe9c4cb2d41b2975b502030203020303b5419117efdf04c24efdb20fe6e1786d53529a5630a98108d2118c7dca7c136e03aaa09bc73d06dc35034f97fec917652f664c4d768d0c036b2539e661f2d8fc380203020302030203020303d51b1bcd3eab3b6a6c384a831ec8080cab63c1c44d898bd626248194376fe1de037ecc252f46692f6e8959f226a5e94a4ac314d38000dabd3c66c06489789651bc0203020303248599a8b4c29a8dfd17c29080088b4573842ac4a1fc2fb628f1f65350cbc2bb034f7ae2704632668f91a5605aa808807c7c83f999d35def3d40f72a825eb561ec020302030203020302030203020302030392af697495712b4013883b3f5ad2d370bdb93f0ed60416692b0267f10d9a3caa0386fa8ccd91ab622b232223f9a347f1785ca9c4b7323a2e0d19a7971c3afd63ff0203020303b4f12607fb8df583b854d7b235f4a64ccb2f4bc9819dc50f3a03ed0d4906910e038f64a125d14bb92752d65593faae8e41bb5e80e4f147b20f0c247078f6e7ca77070354000000000000000000000000000000005ca1ab1e58202d11035f2912c26c30c4f8957d3910a20622ea8709c8cd3e0ad87fa0f4460bbb5820c0bf0b2ab68768eaabe5fda7814227beaeaf4c4ee7e67f5d07aefaf5f0410ab80203034d5eb602925f13a2147a2c1439d43faa74e2561bb3d27811f02042466fb2804f035d9458bc537a1957fddbf6c5f13c6bfc9349abf1251df9d6dd48b5b574f6f48f020303bbf6401ad2a6b95a3e749f5b31224fc7fcdd083e7aeac9671ec3bebda312fe5c03393a914dd0b171b4cca2f5cef52cb4ed4b564278c0fb678e5e8f3d911b4addb302030356cdb16849ae7aff540b8724f73974149f71cd3f984360537159a273a5bfcc1d03791ad7bed137c9501bcee55dc6c9b030157a30c37fca14d39c25d9d5137ae88b02030392940198d6b1df58f0a6c3cc1da02efd9547043d626487166ec858a5aae7b61903efbf993292364275b60efda91d4c18f5a87549ebd407ba16763b1f0c6113a6cb0203039691d481d60a086435d9f914e8e2b5e5a68abfafb82dcc9d6de2176920c35ded03347f67f0fbbc63fa8a3b826c6491f42b13869a2abd2b6326d75d51cb30ea9cf1020303d822fd2ee43799eb3f714589ce7bea36550c8fe4a90b5a2aa68a95486490962c03718887bb3381bda095c52048955b7ce2e5b35e5bec11515e9fa86187fa9e0fa70203032d3bd6d97e29b994d2d73867b8e19f6b3b2a43d5992a6b9b3f350f8834c0da9a039f1aca3992b3c769a6735830359c9153c2daee33d937c9b903a05ed9ada8f5d0020303bddef4e591830295d3c1f6e27decf301748434c3df51aa8b451558ee7962deea03926ffe488854b96b623872e9a54c6e5bb737fa12f402bd8a2a79f34000ba21520203037b0ddd3f35449b7e435243005ad2a536fa28167cf7da21ecd2d4c3a55e6421a6030fd4916c5757cb6e137fac5d203ba3d5d50a5077a06e3804faa80e9783e2367d0203036fff8395e24c14d9e40936920b141c84e2127ed823a1625699eaebd1f26b69c703069db41eccbdb4aa227cb482a97d6b342d0413855bef3c9b432d74ef0be43e0b" +) diff --git a/smt/pkg/smt/witness_utils.go b/smt/pkg/smt/witness_utils.go new file mode 100644 index 00000000000..5aadf4d6cdf --- /dev/null +++ b/smt/pkg/smt/witness_utils.go @@ -0,0 +1,11 @@ +package smt + +import "fmt" + +func intArrayToString(a []int) string { + s := "" + for _, v := range a { + s += fmt.Sprintf("%d", v) + } + return s +} diff --git 
a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index 618a6c5bdc1..35a753021c2 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -291,5 +291,7 @@ var DefaultFlags = []cli.Flag{ &utils.InfoTreeUpdateInterval, &utils.SealBatchImmediatelyOnOverflow, &utils.MockWitnessGeneration, + &utils.WitnessCacheEnable, + &utils.WitnessCacheLimit, &utils.WitnessContractInclusion, } diff --git a/turbo/cli/flags_zkevm.go b/turbo/cli/flags_zkevm.go index 9aa4cecca5b..e71fb621ecc 100644 --- a/turbo/cli/flags_zkevm.go +++ b/turbo/cli/flags_zkevm.go @@ -131,6 +131,13 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { badBatches = append(badBatches, val) } + // witness cache flags + // if disabled, set limit to 0 and only check for it to be 0 or not + witnessCacheEnabled := ctx.Bool(utils.WitnessCacheEnable.Name) + witnessCacheLimit := ctx.Uint64(utils.WitnessCacheLimit.Name) + if !witnessCacheEnabled { + witnessCacheLimit = 0 + } var witnessInclusion []libcommon.Address for _, s := range strings.Split(ctx.String(utils.WitnessContractInclusion.Name), ",") { if s == "" { @@ -219,6 +226,7 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { InfoTreeUpdateInterval: ctx.Duration(utils.InfoTreeUpdateInterval.Name), SealBatchImmediatelyOnOverflow: ctx.Bool(utils.SealBatchImmediatelyOnOverflow.Name), MockWitnessGeneration: ctx.Bool(utils.MockWitnessGeneration.Name), + WitnessCacheLimit: witnessCacheLimit, WitnessContractInclusion: witnessInclusion, } diff --git a/turbo/jsonrpc/zkevm_api.go b/turbo/jsonrpc/zkevm_api.go index 14cf4baaea5..dfd8814026c 100644 --- a/turbo/jsonrpc/zkevm_api.go +++ b/turbo/jsonrpc/zkevm_api.go @@ -9,7 +9,6 @@ import ( "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" - libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/log/v3" @@ -35,6 +34,7 @@ import ( "github.com/ledgerwatch/erigon/smt/pkg/smt" smtUtils "github.com/ledgerwatch/erigon/smt/pkg/utils" "github.com/ledgerwatch/erigon/turbo/rpchelper" + "github.com/ledgerwatch/erigon/turbo/trie" "github.com/ledgerwatch/erigon/zk/datastream/server" "github.com/ledgerwatch/erigon/zk/hermez_db" "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" @@ -44,7 +44,6 @@ import ( "github.com/ledgerwatch/erigon/zk/syncer" zktx "github.com/ledgerwatch/erigon/zk/tx" "github.com/ledgerwatch/erigon/zk/utils" - zkUtils "github.com/ledgerwatch/erigon/zk/utils" "github.com/ledgerwatch/erigon/zk/witness" "github.com/ledgerwatch/erigon/zkevm/hex" "github.com/ledgerwatch/erigon/zkevm/jsonrpc/client" @@ -832,7 +831,7 @@ func (api *ZkEvmAPIImpl) GetFullBlockByNumber(ctx context.Context, number rpc.Bl // GetFullBlockByHash returns a full block from the current canonical chain. If number is nil, the // latest known block is returned.
-func (api *ZkEvmAPIImpl) GetFullBlockByHash(ctx context.Context, hash libcommon.Hash, fullTx bool) (types.Block, error) { +func (api *ZkEvmAPIImpl) GetFullBlockByHash(ctx context.Context, hash common.Hash, fullTx bool) (types.Block, error) { tx, err := api.db.BeginRo(ctx) if err != nil { return types.Block{}, err @@ -974,7 +973,6 @@ func (api *ZkEvmAPIImpl) GetBlockRangeWitness(ctx context.Context, startBlockNrO } func (api *ZkEvmAPIImpl) getBatchWitness(ctx context.Context, tx kv.Tx, batchNum uint64, debug bool, mode WitnessMode) (hexutility.Bytes, error) { - // limit in-flight requests by name semaphore := api.semaphores[getBatchWitness] if semaphore != nil { @@ -989,14 +987,44 @@ func (api *ZkEvmAPIImpl) getBatchWitness(ctx context.Context, tx kv.Tx, batchNum if api.ethApi.historyV3(tx) { return nil, fmt.Errorf("not supported by Erigon3") } - - generator, fullWitness, err := api.buildGenerator(ctx, tx, mode) + reader := hermez_db.NewHermezDbReader(tx) + badBatch, err := reader.GetInvalidBatch(batchNum) if err != nil { return nil, err } - return generator.GetWitnessByBatch(tx, ctx, batchNum, debug, fullWitness) + if !badBatch { + blockNumbers, err := reader.GetL2BlockNosByBatch(batchNum) + if err != nil { + return nil, err + } + if len(blockNumbers) == 0 { + return nil, fmt.Errorf("no blocks found for batch %d", batchNum) + } + var startBlock, endBlock uint64 + for _, blockNumber := range blockNumbers { + if startBlock == 0 || blockNumber < startBlock { + startBlock = blockNumber + } + if blockNumber > endBlock { + endBlock = blockNumber + } + } + + startBlockInt := rpc.BlockNumber(startBlock) + endBlockInt := rpc.BlockNumber(endBlock) + + startBlockRpc := rpc.BlockNumberOrHash{BlockNumber: &startBlockInt} + endBlockNrOrHash := rpc.BlockNumberOrHash{BlockNumber: &endBlockInt} + return api.getBlockRangeWitness(ctx, api.db, startBlockRpc, endBlockNrOrHash, debug, mode) + } else { + generator, fullWitness, err := api.buildGenerator(ctx, tx, mode) + if err != nil { + return nil, err + } + return generator.GetWitnessByBadBatch(tx, ctx, batchNum, debug, fullWitness) + } } func (api *ZkEvmAPIImpl) buildGenerator(ctx context.Context, tx kv.Tx, witnessMode WitnessMode) (*witness.Generator, bool, error) { @@ -1043,7 +1071,6 @@ func (api *ZkEvmAPIImpl) getBlockRangeWitness(ctx context.Context, db kv.RoDB, s } endBlockNr, _, _, err := rpchelper.GetCanonicalBlockNumber_zkevm(endBlockNrOrHash, tx, api.ethApi.filters) // DoCall cannot be executed on non-canonical blocks - if err != nil { return nil, err } @@ -1052,6 +1079,41 @@ func (api *ZkEvmAPIImpl) getBlockRangeWitness(ctx context.Context, db kv.RoDB, s return nil, fmt.Errorf("start block number must be less than or equal to end block number, start=%d end=%d", blockNr, endBlockNr) } + hermezDb := hermez_db.NewHermezDbReader(tx) + + // we only keep trimmed witnesses in the db + if witnessMode == WitnessModeTrimmed { + blockWitnesses := make([]*trie.Witness, 0, endBlockNr-blockNr+1) + //try to get them from the db, if all are available - do not unwind and generate + for blockNum := blockNr; blockNum <= endBlockNr; blockNum++ { + witnessBytes, err := hermezDb.GetWitnessCache(blockNum) + if err != nil { + return nil, err + } + + if len(witnessBytes) == 0 { + break + } + + blockWitness, err := witness.ParseWitnessFromBytes(witnessBytes, false) + if err != nil { + return nil, err + } + + blockWitnesses = append(blockWitnesses, blockWitness) + } + + if len(blockWitnesses) == int(endBlockNr-blockNr+1) { + // found all, calculate + baseWitness, err := 
witness.MergeWitnesses(ctx, blockWitnesses) + if err != nil { + return nil, err + } + + return witness.GetWitnessBytes(baseWitness, debug) + } + } + generator, fullWitness, err := api.buildGenerator(ctx, tx, witnessMode) if err != nil { return nil, err @@ -1296,11 +1358,6 @@ func getLastBlockInBatchNumber(tx kv.Tx, batchNumber uint64) (uint64, error) { return blocks[len(blocks)-1], nil } -func getAllBlocksInBatchNumber(tx kv.Tx, batchNumber uint64) ([]uint64, error) { - reader := hermez_db.NewHermezDbReader(tx) - return reader.GetL2BlockNosByBatch(batchNumber) -} - func getLatestBatchNumber(tx kv.Tx) (uint64, error) { c, err := tx.Cursor(hermez_db.BLOCKBATCHES) if err != nil { @@ -1374,68 +1431,6 @@ func getForkIntervals(tx kv.Tx) ([]rpc.ForkInterval, error) { return result, nil } -func convertTransactionsReceipts( - txs []eritypes.Transaction, - receipts eritypes.Receipts, - hermezReader hermez_db.HermezDbReader, - block eritypes.Block) ([]types.Transaction, error) { - if len(txs) != len(receipts) { - return nil, errors.New("transactions and receipts length mismatch") - } - - result := make([]types.Transaction, 0, len(txs)) - - for idx, tx := range txs { - effectiveGasPricePercentage, err := hermezReader.GetEffectiveGasPricePercentage(tx.Hash()) - if err != nil { - return nil, err - } - gasPrice := tx.GetPrice() - v, r, s := tx.RawSignatureValues() - var sender common.Address - - // TODO: senders! - - var receipt *types.Receipt - if len(receipts) > idx { - receipt = convertReceipt(receipts[idx], sender, tx.GetTo(), gasPrice, effectiveGasPricePercentage) - } - - bh := block.Hash() - blockNumber := block.NumberU64() - - tran := types.Transaction{ - Nonce: types.ArgUint64(tx.GetNonce()), - GasPrice: types.ArgBig(*gasPrice.ToBig()), - Gas: types.ArgUint64(tx.GetGas()), - To: tx.GetTo(), - Value: types.ArgBig(*tx.GetValue().ToBig()), - Input: tx.GetData(), - V: types.ArgBig(*v.ToBig()), - R: types.ArgBig(*r.ToBig()), - S: types.ArgBig(*s.ToBig()), - Hash: tx.Hash(), - From: sender, - BlockHash: &bh, - BlockNumber: types.ArgUint64Ptr(types.ArgUint64(blockNumber)), - TxIndex: types.ArgUint64Ptr(types.ArgUint64(idx)), - Type: types.ArgUint64(tx.Type()), - Receipt: receipt, - } - - cid := tx.GetChainID() - var cidAB *types.ArgBig - if cid.Cmp(uint256.NewInt(0)) != 0 { - cidAB = (*types.ArgBig)(cid.ToBig()) - tran.ChainID = cidAB - } - - result = append(result, tran) - } - - return result, nil -} - func convertBlockToRpcBlock( orig *eritypes.Block, receipts eritypes.Receipts, @@ -1653,7 +1648,7 @@ func (zkapi *ZkEvmAPIImpl) GetProof(ctx context.Context, address common.Address, batch := membatchwithdb.NewMemoryBatch(tx, api.dirs.Tmp, api.logger) defer batch.Rollback() - if err = zkUtils.PopulateMemoryMutationTables(batch); err != nil { + if err = utils.PopulateMemoryMutationTables(batch); err != nil { return nil, err } @@ -1713,13 +1708,12 @@ func (zkapi *ZkEvmAPIImpl) GetProof(ctx context.Context, address common.Address, plainState := state.NewPlainState(tx, blockNumber, systemcontracts.SystemContractCodeLookup[chainCfg.ChainName]) defer plainState.Close() - inclusion := make(map[libcommon.Address][]libcommon.Hash) + inclusion := make(map[common.Address][]common.Hash) for _, contract := range zkapi.config.WitnessContractInclusion { - err = plainState.ForEachStorage(contract, libcommon.Hash{}, func(key, secKey libcommon.Hash, value uint256.Int) bool { + if err = plainState.ForEachStorage(contract, common.Hash{}, func(key, secKey common.Hash, value uint256.Int) bool { inclusion[contract] = 
append(inclusion[contract], key) return false - }, math.MaxInt64) - if err != nil { + }, math.MaxInt64); err != nil { return nil, err } } @@ -1779,7 +1773,7 @@ func (zkapi *ZkEvmAPIImpl) GetProof(ctx context.Context, address common.Address, accProof := &accounts.SMTAccProofResult{ Address: address, Balance: (*hexutil.Big)(balance), - CodeHash: libcommon.BytesToHash(codeHash), + CodeHash: common.BytesToHash(codeHash), CodeLength: hexutil.Uint64(codeLength), Nonce: hexutil.Uint64(nonce), BalanceProof: balanceProofs, @@ -1921,7 +1915,7 @@ func (api *ZkEvmAPIImpl) GetRollupManagerAddress(ctx context.Context) (res json. return rollupManagerAddressJson, err } -func (api *ZkEvmAPIImpl) getInjectedBatchAccInputHashFromSequencer(rpcUrl string) (*libcommon.Hash, error) { +func (api *ZkEvmAPIImpl) getInjectedBatchAccInputHashFromSequencer(rpcUrl string) (*common.Hash, error) { res, err := client.JSONRPCCall(rpcUrl, "zkevm_getBatchByNumber", 1) if err != nil { return nil, err @@ -1948,7 +1942,7 @@ func (api *ZkEvmAPIImpl) getInjectedBatchAccInputHashFromSequencer(rpcUrl string return nil, fmt.Errorf("accInputHash is not a string") } - decoded := libcommon.HexToHash(hash) + decoded := common.HexToHash(hash) return &decoded, nil } diff --git a/turbo/stages/zk_stages.go b/turbo/stages/zk_stages.go index a585503c0e0..88d0deb1acb 100644 --- a/turbo/stages/zk_stages.go +++ b/turbo/stages/zk_stages.go @@ -80,6 +80,7 @@ func NewDefaultZkStages(ctx context.Context, ), stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3, agg), zkStages.StageZkInterHashesCfg(db, true, true, false, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg, cfg.Zk), + zkStages.StageWitnessCfg(db, cfg.Zk, controlServer.ChainConfig, engine, blockReader, agg, cfg.HistoryV3, dirs, cfg.WitnessContractInclusion), stagedsync.StageHistoryCfg(db, cfg.Prune, dirs.Tmp), stagedsync.StageLogIndexCfg(db, cfg.Prune, dirs.Tmp, cfg.Genesis.Config.NoPruneContracts), stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, dirs.Tmp), diff --git a/turbo/trie/witness.go b/turbo/trie/witness.go index 3f309be40e5..874fe5eb966 100644 --- a/turbo/trie/witness.go +++ b/turbo/trie/witness.go @@ -118,6 +118,8 @@ func NewWitnessFromReader(input io.Reader, trace bool) (*Witness, error) { op = &OperatorCode{} case OpBranch: op = &OperatorBranch{} + case OpSMTLeaf: + op = &OperatorSMTLeafValue{} case OpEmptyRoot: op = &OperatorEmptyRoot{} case OpExtension: @@ -173,81 +175,98 @@ func (w *Witness) WriteDiff(w2 *Witness, output io.Writer) { op = w.Operators[i] } if i >= len(w2.Operators) { - fmt.Fprintf(output, "unexpected o1[%d] = %T %v; o2[%d] = nil\n", i, op, op, i) + fmt.Fprintf(output, "missing in o2: o1[%d] = %T %v;\n", i, op, op) continue } + op2 := w2.Operators[i] switch o1 := op.(type) { case *OperatorBranch: - o2, ok := w2.Operators[i].(*OperatorBranch) + o2, ok := op2.(*OperatorBranch) if !ok { - fmt.Fprintf(output, "o1[%d] = %T %+v; o2[%d] = %T %+v\n", i, o1, o1, i, o2, o2) - } - if o1.Mask != o2.Mask { - fmt.Fprintf(output, "o1[%d].Mask = %v; o2[%d].Mask = %v", i, o1.Mask, i, o2.Mask) + fmt.Fprintf(output, "OperatorBranch: o1[%d] = %T; o2[%d] = %T\n", i, o1, i, op2) + } else if o1.Mask != o2.Mask { + fmt.Fprintf(output, "OperatorBranch: o1[%d].Mask = %v; o2[%d].Mask = %v", i, o1.Mask, i, o2.Mask) } case *OperatorHash: - o2, ok := w2.Operators[i].(*OperatorHash) + o2, ok := op2.(*OperatorHash) if !ok { - fmt.Fprintf(output, "o1[%d] = %T %+v; o2[%d] = %T %+v\n", i, o1, o1, i, o2, o2) - } - if !bytes.Equal(o1.Hash.Bytes(), o2.Hash.Bytes()) { - 
fmt.Fprintf(output, "o1[%d].Hash = %s; o2[%d].Hash = %s\n", i, o1.Hash.Hex(), i, o2.Hash.Hex()) + fmt.Fprintf(output, "OperatorHash: o1[%d] = %T; o2[%d] = %T\n", i, o1, i, op2) + } else if !bytes.Equal(o1.Hash.Bytes(), o2.Hash.Bytes()) { + fmt.Fprintf(output, "OperatorHash: o1[%d].Hash = %s; o2[%d].Hash = %s\n", i, o1.Hash.Hex(), i, o2.Hash.Hex()) } case *OperatorCode: - o2, ok := w2.Operators[i].(*OperatorCode) + o2, ok := op2.(*OperatorCode) if !ok { - fmt.Fprintf(output, "o1[%d] = %T %+v; o2[%d] = %T %+v\n", i, o1, o1, i, o2, o2) - } - if !bytes.Equal(o1.Code, o2.Code) { - fmt.Fprintf(output, "o1[%d].Code = %x; o2[%d].Code = %x\n", i, o1.Code, i, o2.Code) + fmt.Fprintf(output, "OperatorCode: o1[%d] = %T; o2[%d] = %T\n", i, o1, i, op2) + } else if !bytes.Equal(o1.Code, o2.Code) { + fmt.Fprintf(output, "OperatorCode: o1[%d].Code = %x; o2[%d].Code = %x\n", i, o1.Code, i, o2.Code) } case *OperatorEmptyRoot: - o2, ok := w2.Operators[i].(*OperatorEmptyRoot) + _, ok := op2.(*OperatorEmptyRoot) if !ok { - fmt.Fprintf(output, "o1[%d] = %T %+v; o2[%d] = %T %+v\n", i, o1, o1, i, o2, o2) + fmt.Fprintf(output, "OperatorEmptyRoot: o1[%d] = %T; o2[%d] = %T\n", i, o1, i, op2) } case *OperatorExtension: - o2, ok := w2.Operators[i].(*OperatorExtension) + o2, ok := op2.(*OperatorExtension) if !ok { - fmt.Fprintf(output, "o1[%d] = %T %+v; o2[%d] = %T %+v\n", i, o1, o1, i, o2, o2) - } - if !bytes.Equal(o1.Key, o2.Key) { - fmt.Fprintf(output, "extension o1[%d].Key = %x; o2[%d].Key = %x\n", i, o1.Key, i, o2.Key) + fmt.Fprintf(output, "OperatorExtension: o1[%d] = %T; o2[%d] = %T\n", i, o1, i, op2) + } else if !bytes.Equal(o1.Key, o2.Key) { + fmt.Fprintf(output, "OperatorExtension: o1[%d].Key = %x; o2[%d].Key = %x\n", i, o1.Key, i, o2.Key) } case *OperatorLeafAccount: - o2, ok := w2.Operators[i].(*OperatorLeafAccount) + o2, ok := op2.(*OperatorLeafAccount) if !ok { - fmt.Fprintf(output, "o1[%d] = %T %+v; o2[%d] = %T %+v\n", i, o1, o1, i, o2, o2) - } - if !bytes.Equal(o1.Key, o2.Key) { - fmt.Fprintf(output, "leafAcc o1[%d].Key = %x; o2[%d].Key = %x\n", i, o1.Key, i, o2.Key) - } - if o1.Nonce != o2.Nonce { - fmt.Fprintf(output, "leafAcc o1[%d].Nonce = %v; o2[%d].Nonce = %v\n", i, o1.Nonce, i, o2.Nonce) - } - if o1.Balance.String() != o2.Balance.String() { - fmt.Fprintf(output, "leafAcc o1[%d].Balance = %v; o2[%d].Balance = %v\n", i, o1.Balance.String(), i, o2.Balance.String()) - } - if o1.HasCode != o2.HasCode { - fmt.Fprintf(output, "leafAcc o1[%d].HasCode = %v; o2[%d].HasCode = %v\n", i, o1.HasCode, i, o2.HasCode) - } - if o1.HasStorage != o2.HasStorage { - fmt.Fprintf(output, "leafAcc o1[%d].HasStorage = %v; o2[%d].HasStorage = %v\n", i, o1.HasStorage, i, o2.HasStorage) + fmt.Fprintf(output, "OperatorLeafAccount: o1[%d] = %T; o2[%d] = %T\n", i, o1, i, op2) + } else { + if !bytes.Equal(o1.Key, o2.Key) { + fmt.Fprintf(output, "OperatorLeafAccount: o1[%d].Key = %x; o2[%d].Key = %x\n", i, o1.Key, i, o2.Key) + } + if o1.Nonce != o2.Nonce { + fmt.Fprintf(output, "OperatorLeafAccount: o1[%d].Nonce = %v; o2[%d].Nonce = %v\n", i, o1.Nonce, i, o2.Nonce) + } + if o1.Balance.String() != o2.Balance.String() { + fmt.Fprintf(output, "OperatorLeafAccount: o1[%d].Balance = %v; o2[%d].Balance = %v\n", i, o1.Balance.String(), i, o2.Balance.String()) + } + if o1.HasCode != o2.HasCode { + fmt.Fprintf(output, "OperatorLeafAccount: o1[%d].HasCode = %v; o2[%d].HasCode = %v\n", i, o1.HasCode, i, o2.HasCode) + } + if o1.HasStorage != o2.HasStorage { + fmt.Fprintf(output, "OperatorLeafAccount: o1[%d].HasStorage = %v; o2[%d].HasStorage = 
%v\n", i, o1.HasStorage, i, o2.HasStorage) + } } case *OperatorLeafValue: - o2, ok := w2.Operators[i].(*OperatorLeafValue) + o2, ok := op2.(*OperatorLeafValue) if !ok { - fmt.Fprintf(output, "o1[%d] = %T %+v; o2[%d] = %T %+v\n", i, o1, o1, i, o2, o2) + fmt.Fprintf(output, "OperatorLeafValue: o1[%d] = %T; o2[%d] = %T\n", i, o1, i, op2) + } else { + if !bytes.Equal(o1.Key, o2.Key) { + fmt.Fprintf(output, "OperatorLeafValue: o1[%d].Key = %x; o2[%d].Key = %x\n", i, o1.Key, i, o2.Key) + } + if !bytes.Equal(o1.Value, o2.Value) { + fmt.Fprintf(output, "OperatorLeafValue: o1[%d].Value = %x; o2[%d].Value = %x\n", i, o1.Value, i, o2.Value) + } } - if !bytes.Equal(o1.Key, o2.Key) { - fmt.Fprintf(output, "leafVal o1[%d].Key = %x; o2[%d].Key = %x\n", i, o1.Key, i, o2.Key) - } - if !bytes.Equal(o1.Value, o2.Value) { - fmt.Fprintf(output, "leafVal o1[%d].Value = %x; o2[%d].Value = %x\n", i, o1.Value, i, o2.Value) + case *OperatorSMTLeafValue: + o2, ok := op2.(*OperatorSMTLeafValue) + if !ok { + fmt.Fprintf(output, "OperatorSMTLeafValue: o1[%d] = %T; o2[%d] = %T\n", i, o1, i, op2) + } else { + if !bytes.Equal(o1.Address, o2.Address) { + fmt.Fprintf(output, "OperatorSMTLeafValue: o1[%d].Address = %x; o2[%d].Address = %x\n", i, o1.Address, i, o2.Address) + } + if !bytes.Equal(o1.StorageKey, o2.StorageKey) { + fmt.Fprintf(output, "OperatorSMTLeafValue: o1[%d].StorageKey = %x; o2[%d].StorageKey = %x\n", i, o1.StorageKey, i, o2.StorageKey) + } + if !bytes.Equal(o1.Value, o2.Value) { + fmt.Fprintf(output, "OperatorSMTLeafValue: o1[%d].Value = %x; o2[%d].Value = %x\n", i, o1.Value, i, o2.Value) + } + if o1.NodeType != o2.NodeType { + fmt.Fprintf(output, "OperatorSMTLeafValue: o1[%d].NodeType = %d; o2[%d].NodeType = %d\n", i, o1.NodeType, i, o2.NodeType) + } } + default: - o2 := w2.Operators[i] - fmt.Fprintf(output, "unexpected o1[%d] = %T %+v; o2[%d] = %T %+v\n", i, o1, o1, i, o2, o2) + fmt.Fprintf(output, "unexpected operator: o1[%d] = %T; o2[%d] = %T\n", i, o1, i, op2) } } } diff --git a/zk/hermez_db/db.go b/zk/hermez_db/db.go index 01e8bdfe3e4..d2eb4961500 100644 --- a/zk/hermez_db/db.go +++ b/zk/hermez_db/db.go @@ -51,7 +51,8 @@ const FORK_HISTORY = "fork_history" // index const JUST_UNWOUND = "just_unwound" // batch number -> true const PLAIN_STATE_VERSION = "plain_state_version" // batch number -> true const ERIGON_VERSIONS = "erigon_versions" // erigon version -> timestamp of startup -const BATCH_ENDS = "batch_ends" // +const BATCH_ENDS = "batch_ends" // batch number -> true +const WITNESS_CACHE = "witness_cache" // block number -> witness for 1 block var HermezDbTables = []string{ L1VERIFICATIONS, @@ -88,6 +89,7 @@ var HermezDbTables = []string{ PLAIN_STATE_VERSION, ERIGON_VERSIONS, BATCH_ENDS, + WITNESS_CACHE, } type HermezDb struct { @@ -1887,3 +1889,20 @@ func (db *HermezDbReader) getForkIntervals(forkIdFilter *uint64) ([]types.ForkIn return forkIntervals, nil } + +func (db *HermezDb) WriteWitnessCache(blockNo uint64, witnessBytes []byte) error { + key := Uint64ToBytes(blockNo) + return db.tx.Put(WITNESS_CACHE, key, witnessBytes) +} + +func (db *HermezDbReader) GetWitnessCache(blockNo uint64) ([]byte, error) { + v, err := db.tx.GetOne(WITNESS_CACHE, Uint64ToBytes(blockNo)) + if err != nil { + return nil, err + } + return v, nil +} + +func (db *HermezDb) DeleteWitnessCaches(from, to uint64) error { + return db.deleteFromBucketWithUintKeysRange(WITNESS_CACHE, from, to) +} diff --git a/zk/l1_data/l1_decoder.go b/zk/l1_data/l1_decoder.go index 4427d9760fa..003e9d0ec5d 100644 --- a/zk/l1_data/l1_decoder.go 
+++ b/zk/l1_data/l1_decoder.go @@ -14,7 +14,6 @@ import ( "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/zk/contracts" "github.com/ledgerwatch/erigon/zk/da" - "github.com/ledgerwatch/erigon/zk/hermez_db" zktx "github.com/ledgerwatch/erigon/zk/tx" ) @@ -195,7 +194,12 @@ type DecodedL1Data struct { LimitTimestamp uint64 } -func BreakDownL1DataByBatch(batchNo uint64, forkId uint64, reader *hermez_db.HermezDbReader) (*DecodedL1Data, error) { +type l1DecoderHermezReader interface { + GetL1BatchData(batchNo uint64) ([]byte, error) + GetLastL1BatchData() (uint64, error) +} + +func BreakDownL1DataByBatch(batchNo uint64, forkId uint64, reader l1DecoderHermezReader) (*DecodedL1Data, error) { decoded := &DecodedL1Data{} // we expect that the batch we're going to load in next should be in the db already because of the l1 block sync // stage, if it is not there we need to panic as we're in a bad state diff --git a/zk/smt/changes_getter.go b/zk/smt/changes_getter.go new file mode 100644 index 00000000000..0e89700e14a --- /dev/null +++ b/zk/smt/changes_getter.go @@ -0,0 +1,200 @@ +package smt + +import ( + "errors" + "fmt" + + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/length" + "github.com/ledgerwatch/erigon-lib/kv" + + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/kv/dbutils" + "github.com/ledgerwatch/erigon/core/types/accounts" + + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/systemcontracts" + "github.com/status-im/keycard-go/hexutils" +) + +var ( + ErrAlreadyOpened = errors.New("already opened") + ErrNotOpened = errors.New("not opened") +) + +type changesGetter struct { + tx kv.Tx + + ac kv.CursorDupSort + sc kv.CursorDupSort + psr *state.PlainState + currentPsr *state.PlainStateReader + + accChanges map[common.Address]*accounts.Account + codeChanges map[common.Address]string + storageChanges map[common.Address]map[string]string + + opened bool +} + +func NewChangesGetter(tx kv.Tx) *changesGetter { + return &changesGetter{ + tx: tx, + accChanges: make(map[common.Address]*accounts.Account), + codeChanges: make(map[common.Address]string), + storageChanges: make(map[common.Address]map[string]string), + } +} +func (cg *changesGetter) addDeletedAcc(addr common.Address) { + deletedAcc := new(accounts.Account) + deletedAcc.Balance = *uint256.NewInt(0) + deletedAcc.Nonce = 0 + cg.accChanges[addr] = deletedAcc +} + +func (cg *changesGetter) openChangesGetter(from uint64) error { + if cg.opened { + return ErrAlreadyOpened + } + + ac, err := cg.tx.CursorDupSort(kv.AccountChangeSet) + if err != nil { + return fmt.Errorf("CursorDupSort: %w", err) + } + + sc, err := cg.tx.CursorDupSort(kv.StorageChangeSet) + if err != nil { + return fmt.Errorf("CursorDupSort: %w", err) + } + + cg.ac = ac + cg.sc = sc + cg.psr = state.NewPlainState(cg.tx, from, systemcontracts.SystemContractCodeLookup["Hermez"]) + cg.currentPsr = state.NewPlainStateReader(cg.tx) + + cg.opened = true + + return nil +} + +func (cg *changesGetter) closeChangesGetter() { + if cg.ac != nil { + cg.ac.Close() + } + + if cg.sc != nil { + cg.sc.Close() + } + + if cg.psr != nil { + cg.psr.Close() + } +} + +func (cg *changesGetter) getChangesForBlock(blockNum uint64) error { + if !cg.opened { + return ErrNotOpened + } + + cg.psr.SetBlockNr(blockNum) + dupSortKey := dbutils.EncodeBlockNumber(blockNum) + + // collect changes to accounts and code + for _, v, err2 := cg.ac.SeekExact(dupSortKey); err2 == nil && v != nil; _, v, err2 = 
cg.ac.NextDup() { + if err := cg.setAccountChangesFromV(v); err != nil { + return fmt.Errorf("failed to get account changes: %w", err) + } + } + + if err := cg.tx.ForPrefix(kv.StorageChangeSet, dupSortKey, cg.setStorageChangesFromKv); err != nil { + return fmt.Errorf("failed to get storage changes: %w", err) + } + + return nil +} + +func (cg *changesGetter) setAccountChangesFromV(v []byte) error { + addr := common.BytesToAddress(v[:length.Addr]) + + // if the account was created in this changeset we should delete it + if len(v[length.Addr:]) == 0 { + cg.codeChanges[addr] = "" + cg.addDeletedAcc(addr) + return nil + } + + oldAcc, err := cg.psr.ReadAccountData(addr) + if err != nil { + return fmt.Errorf("ReadAccountData: %w", err) + } + + // currAcc at block we're unwinding from + currAcc, err := cg.currentPsr.ReadAccountData(addr) + if err != nil { + return fmt.Errorf("ReadAccountData: %w", err) + } + + if oldAcc.Incarnation > 0 { + if len(v) == 0 { // self-destructed + cg.addDeletedAcc(addr) + } else { + if currAcc.Incarnation > oldAcc.Incarnation { + cg.addDeletedAcc(addr) + } + } + } + + // store the account + cg.accChanges[addr] = oldAcc + + if oldAcc.CodeHash != currAcc.CodeHash { + hexcc, err := cg.getCodehashChanges(addr, oldAcc) + if err != nil { + return fmt.Errorf("getCodehashChanges: %w", err) + } + cg.codeChanges[addr] = hexcc + } + + return nil +} + +func (cg *changesGetter) getCodehashChanges(addr common.Address, oldAcc *accounts.Account) (string, error) { + cc, err := cg.currentPsr.ReadAccountCode(addr, oldAcc.Incarnation, oldAcc.CodeHash) + if err != nil { + return "", fmt.Errorf("ReadAccountCode: %w", err) + } + + ach := hexutils.BytesToHex(cc) + hexcc := "" + if len(ach) > 0 { + hexcc = "0x" + ach + } + + return hexcc, nil +} + +func (cg *changesGetter) setStorageChangesFromKv(sk, sv []byte) error { + changesetKey := sk[length.BlockNum:] + address, _ := dbutils.PlainParseStoragePrefix(changesetKey) + + sstorageKey := sv[:length.Hash] + stk := common.BytesToHash(sstorageKey) + + value := []byte{0} + if len(sv[length.Hash:]) != 0 { + value = sv[length.Hash:] + } + + stkk := fmt.Sprintf("0x%032x", stk) + v := fmt.Sprintf("0x%032x", common.BytesToHash(value)) + + m := make(map[string]string) + m[stkk] = v + + if cg.storageChanges[address] == nil { + cg.storageChanges[address] = make(map[string]string) + } + cg.storageChanges[address][stkk] = v + + return nil +} diff --git a/zk/smt/unwind_smt.go b/zk/smt/unwind_smt.go new file mode 100644 index 00000000000..e02203ce115 --- /dev/null +++ b/zk/smt/unwind_smt.go @@ -0,0 +1,91 @@ +package smt + +import ( + "context" + "fmt" + "math" + + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv" + db2 "github.com/ledgerwatch/erigon/smt/pkg/db" + + "github.com/ledgerwatch/erigon-lib/kv/membatchwithdb" + + "github.com/ledgerwatch/erigon/smt/pkg/smt" + "github.com/ledgerwatch/erigon/turbo/trie" + "github.com/ledgerwatch/erigon/zk" + "github.com/ledgerwatch/erigon/zkevm/log" +) + +func UnwindZkSMT(ctx context.Context, logPrefix string, from, to uint64, tx kv.RwTx, checkRoot bool, expectedRootHash *common.Hash, quiet bool) (common.Hash, error) { + if !quiet { + log.Info(fmt.Sprintf("[%s] Unwind trie hashes started", logPrefix)) + defer log.Info(fmt.Sprintf("[%s] Unwind ended", logPrefix)) + } + + eridb := db2.NewEriDb(tx) + eridb.RollbackBatch() + + dbSmt := smt.NewSMT(eridb, false) + + if !quiet { + log.Info(fmt.Sprintf("[%s]", logPrefix), "last root", common.BigToHash(dbSmt.LastRoot())) + } + + // only open the 
batch if tx is not already one + if _, ok := tx.(*membatchwithdb.MemoryMutation); !ok { + quit := make(chan struct{}) + eridb.OpenBatch(quit) + } + + changesGetter := NewChangesGetter(tx) + if err := changesGetter.openChangesGetter(from); err != nil { + return trie.EmptyRoot, fmt.Errorf("OpenChangesGetter: %w", err) + } + defer changesGetter.closeChangesGetter() + + total := uint64(math.Abs(float64(from) - float64(to) + 1)) + progressChan, stopPrinter := zk.ProgressPrinter(fmt.Sprintf("[%s] Progress unwinding", logPrefix), total, quiet) + defer stopPrinter() + + // walk backwards through the blocks, applying state changes, and deletes + // PlainState contains data AT the block + // History tables contain data BEFORE the block - so need a +1 offset + for i := from; i >= to+1; i-- { + select { + case <-ctx.Done(): + return trie.EmptyRoot, fmt.Errorf("context done") + default: + } + + if err := changesGetter.getChangesForBlock(i); err != nil { + return trie.EmptyRoot, fmt.Errorf("getChangesForBlock: %w", err) + } + + progressChan <- 1 + } + + stopPrinter() + + if _, _, err := dbSmt.SetStorage(ctx, logPrefix, changesGetter.accChanges, changesGetter.codeChanges, changesGetter.storageChanges); err != nil { + return trie.EmptyRoot, err + } + + lr := dbSmt.LastRoot() + + hash := common.BigToHash(lr) + if checkRoot && hash != *expectedRootHash { + log.Error("failed to verify hash") + return trie.EmptyRoot, fmt.Errorf("wrong trie root: %x, expected (from header): %x", hash, expectedRootHash) + } + + if !quiet { + log.Info(fmt.Sprintf("[%s] Trie root matches", logPrefix), "hash", hash.Hex()) + } + + if err := eridb.CommitBatch(); err != nil { + return trie.EmptyRoot, err + } + + return hash, nil +} diff --git a/zk/stages/stage_interhashes.go b/zk/stages/stage_interhashes.go index b4cd61c10d3..c2f708f6967 100644 --- a/zk/stages/stage_interhashes.go +++ b/zk/stages/stage_interhashes.go @@ -3,9 +3,7 @@ package stages import ( "fmt" - "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/common" - libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/state" @@ -25,10 +23,7 @@ import ( "os" - "math" - "github.com/ledgerwatch/erigon-lib/kv/dbutils" - "github.com/ledgerwatch/erigon-lib/kv/membatchwithdb" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/systemcontracts" "github.com/ledgerwatch/erigon/core/types/accounts" @@ -39,6 +34,7 @@ import ( "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/ledgerwatch/erigon/turbo/trie" "github.com/ledgerwatch/erigon/zk" + zkSmt "github.com/ledgerwatch/erigon/zk/smt" "github.com/status-im/keycard-go/hexutils" ) @@ -81,7 +77,7 @@ func StageZkInterHashesCfg( } } -func SpawnZkIntermediateHashesStage(s *stagedsync.StageState, u stagedsync.Unwinder, tx kv.RwTx, cfg ZkInterHashesCfg, ctx context.Context) (root libcommon.Hash, err error) { +func SpawnZkIntermediateHashesStage(s *stagedsync.StageState, u stagedsync.Unwinder, tx kv.RwTx, cfg ZkInterHashesCfg, ctx context.Context) (root common.Hash, err error) { logPrefix := s.LogPrefix() quit := ctx.Done() @@ -90,7 +86,7 @@ func SpawnZkIntermediateHashesStage(s *stagedsync.StageState, u stagedsync.Unwin useExternalTx := tx != nil if !useExternalTx { var err error - tx, err = cfg.db.BeginRw(context.Background()) + tx, err = cfg.db.BeginRw(ctx) if err != nil { return trie.EmptyRoot, err } @@ -195,7 +191,6 @@ func SpawnZkIntermediateHashesStage(s 
*stagedsync.StageState, u stagedsync.Unwin } func UnwindZkIntermediateHashesStage(u *stagedsync.UnwindState, s *stagedsync.StageState, tx kv.RwTx, cfg ZkInterHashesCfg, ctx context.Context, silent bool) (err error) { - quit := ctx.Done() useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) @@ -219,12 +214,9 @@ func UnwindZkIntermediateHashesStage(u *stagedsync.UnwindState, s *stagedsync.St expectedRootHash = syncHeadHeader.Root } - root, err := unwindZkSMT(ctx, s.LogPrefix(), s.BlockNumber, u.UnwindPoint, tx, cfg.checkRoot, &expectedRootHash, silent, quit) - if err != nil { + if _, err = zkSmt.UnwindZkSMT(ctx, s.LogPrefix(), s.BlockNumber, u.UnwindPoint, tx, cfg.checkRoot, &expectedRootHash, silent); err != nil { return err } - _ = root - hermezDb := hermez_db.NewHermezDb(tx) if err := hermezDb.TruncateSmtDepths(u.UnwindPoint); err != nil { return err @@ -454,197 +446,6 @@ func zkIncrementIntermediateHashes(ctx context.Context, logPrefix string, s *sta return hash, nil } -func unwindZkSMT(ctx context.Context, logPrefix string, from, to uint64, db kv.RwTx, checkRoot bool, expectedRootHash *common.Hash, quiet bool, quit <-chan struct{}) (common.Hash, error) { - if !quiet { - log.Info(fmt.Sprintf("[%s] Unwind trie hashes started", logPrefix)) - defer log.Info(fmt.Sprintf("[%s] Unwind ended", logPrefix)) - } - - eridb := db2.NewEriDb(db) - dbSmt := smt.NewSMT(eridb, false) - - if !quiet { - log.Info(fmt.Sprintf("[%s]", logPrefix), "last root", common.BigToHash(dbSmt.LastRoot())) - } - - if quit == nil { - log.Warn("quit channel is nil, creating a new one") - quit = make(chan struct{}) - } - - // only open the batch if tx is not already one - if _, ok := db.(*membatchwithdb.MemoryMutation); !ok { - eridb.OpenBatch(quit) - } - - ac, err := db.CursorDupSort(kv.AccountChangeSet) - if err != nil { - return trie.EmptyRoot, err - } - defer ac.Close() - - sc, err := db.CursorDupSort(kv.StorageChangeSet) - if err != nil { - return trie.EmptyRoot, err - } - defer sc.Close() - - currentPsr := state2.NewPlainStateReader(db) - - total := uint64(math.Abs(float64(from) - float64(to) + 1)) - printerStopped := false - progressChan, stopPrinter := zk.ProgressPrinter(fmt.Sprintf("[%s] Progress unwinding", logPrefix), total, quiet) - defer func() { - if !printerStopped { - stopPrinter() - } - }() - - // walk backwards through the blocks, applying state changes, and deletes - // PlainState contains data AT the block - // History tables contain data BEFORE the block - so need a +1 offset - accChanges := make(map[common.Address]*accounts.Account) - codeChanges := make(map[common.Address]string) - storageChanges := make(map[common.Address]map[string]string) - - addDeletedAcc := func(addr common.Address) { - deletedAcc := new(accounts.Account) - deletedAcc.Balance = *uint256.NewInt(0) - deletedAcc.Nonce = 0 - accChanges[addr] = deletedAcc - } - - psr := state2.NewPlainState(db, from, systemcontracts.SystemContractCodeLookup["Hermez"]) - defer psr.Close() - - for i := from; i >= to+1; i-- { - select { - case <-ctx.Done(): - return trie.EmptyRoot, fmt.Errorf("[%s] Context done", logPrefix) - default: - } - - psr.SetBlockNr(i) - - dupSortKey := dbutils.EncodeBlockNumber(i) - - // collect changes to accounts and code - for _, v, err2 := ac.SeekExact(dupSortKey); err2 == nil && v != nil; _, v, err2 = ac.NextDup() { - - addr := common.BytesToAddress(v[:length.Addr]) - - // if the account was created in this changeset we should delete it - if len(v[length.Addr:]) == 0 { - codeChanges[addr] = "" - 
addDeletedAcc(addr) - continue - } - - oldAcc, err := psr.ReadAccountData(addr) - if err != nil { - return trie.EmptyRoot, err - } - - // currAcc at block we're unwinding from - currAcc, err := currentPsr.ReadAccountData(addr) - if err != nil { - return trie.EmptyRoot, err - } - - if oldAcc.Incarnation > 0 { - if len(v) == 0 { // self-destructed - addDeletedAcc(addr) - } else { - if currAcc.Incarnation > oldAcc.Incarnation { - addDeletedAcc(addr) - } - } - } - - // store the account - accChanges[addr] = oldAcc - - if oldAcc.CodeHash != currAcc.CodeHash { - cc, err := currentPsr.ReadAccountCode(addr, oldAcc.Incarnation, oldAcc.CodeHash) - if err != nil { - return trie.EmptyRoot, err - } - - ach := hexutils.BytesToHex(cc) - hexcc := "" - if len(ach) > 0 { - hexcc = "0x" + ach - } - codeChanges[addr] = hexcc - } - } - - err = db.ForPrefix(kv.StorageChangeSet, dupSortKey, func(sk, sv []byte) error { - changesetKey := sk[length.BlockNum:] - address, _ := dbutils.PlainParseStoragePrefix(changesetKey) - - sstorageKey := sv[:length.Hash] - stk := common.BytesToHash(sstorageKey) - - value := []byte{0} - if len(sv[length.Hash:]) != 0 { - value = sv[length.Hash:] - } - - stkk := fmt.Sprintf("0x%032x", stk) - v := fmt.Sprintf("0x%032x", common.BytesToHash(value)) - - m := make(map[string]string) - m[stkk] = v - - if storageChanges[address] == nil { - storageChanges[address] = make(map[string]string) - } - storageChanges[address][stkk] = v - return nil - }) - if err != nil { - return trie.EmptyRoot, err - } - - progressChan <- 1 - } - - stopPrinter() - printerStopped = true - - if _, _, err := dbSmt.SetStorage(ctx, logPrefix, accChanges, codeChanges, storageChanges); err != nil { - return trie.EmptyRoot, err - } - - if err := verifyLastHash(dbSmt, expectedRootHash, checkRoot, logPrefix, quiet); err != nil { - log.Error("failed to verify hash") - eridb.RollbackBatch() - return trie.EmptyRoot, err - } - - if err := eridb.CommitBatch(); err != nil { - return trie.EmptyRoot, err - } - - lr := dbSmt.LastRoot() - - hash := common.BigToHash(lr) - return hash, nil -} - -func verifyLastHash(dbSmt *smt.SMT, expectedRootHash *common.Hash, checkRoot bool, logPrefix string, quiet bool) error { - hash := common.BigToHash(dbSmt.LastRoot()) - - if checkRoot && hash != *expectedRootHash { - panic(fmt.Sprintf("[%s] Wrong trie root: %x, expected (from header): %x", logPrefix, hash, expectedRootHash)) - } - if !quiet { - log.Info(fmt.Sprintf("[%s] Trie root matches", logPrefix), "hash", hash.Hex()) - } - return nil -} - func processAccount(db smt.DB, a *accounts.Account, as map[string]string, inc uint64, psr *state2.PlainStateReader, addr common.Address, keys []utils.NodeKey) ([]utils.NodeKey, error) { // get the account balance and nonce keys, err := insertAccountStateToKV(db, keys, addr.String(), a.Balance.ToBig(), new(big.Int).SetUint64(a.Nonce)) diff --git a/zk/stages/stage_witness.go b/zk/stages/stage_witness.go new file mode 100644 index 00000000000..34f928ef6e7 --- /dev/null +++ b/zk/stages/stage_witness.go @@ -0,0 +1,327 @@ +package stages + +import ( + "context" + "fmt" + "time" + + "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/datadir" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/membatchwithdb" + eristate "github.com/ledgerwatch/erigon-lib/state" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/systemcontracts" + eritypes "github.com/ledgerwatch/erigon/core/types" 
+ "github.com/ledgerwatch/erigon/core/vm" + zkUtils "github.com/ledgerwatch/erigon/zk/utils" + "github.com/ledgerwatch/erigon/zk/witness" + + "github.com/ledgerwatch/erigon/consensus" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/eth/stagedsync" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/zk/hermez_db" + "github.com/ledgerwatch/erigon/zk/sequencer" + + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/log/v3" +) + +type WitnessDb interface { +} + +type WitnessCfg struct { + db kv.RwDB + zkCfg *ethconfig.Zk + chainConfig *chain.Config + engine consensus.Engine + blockReader services.FullBlockReader + agg *eristate.Aggregator + historyV3 bool + dirs datadir.Dirs + forcedContracs []common.Address +} + +func StageWitnessCfg(db kv.RwDB, zkCfg *ethconfig.Zk, chainConfig *chain.Config, engine consensus.Engine, blockReader services.FullBlockReader, agg *eristate.Aggregator, historyV3 bool, dirs datadir.Dirs, forcedContracs []common.Address) WitnessCfg { + cfg := WitnessCfg{ + db: db, + zkCfg: zkCfg, + chainConfig: chainConfig, + engine: engine, + blockReader: blockReader, + agg: agg, + historyV3: historyV3, + dirs: dirs, + forcedContracs: forcedContracs, + } + + return cfg +} + +// /////////////////////////////////////////// +// 1. Check to which block it should calculate witnesses +// 2. Unwind to that block +// 3. Calculate witnesses up to current executed block +// 4. Delete old block witnesses +// //////////////////////////////////////////// +func SpawnStageWitness( + s *stagedsync.StageState, + u stagedsync.Unwinder, + ctx context.Context, + tx kv.RwTx, + cfg WitnessCfg, +) error { + logPrefix := s.LogPrefix() + if cfg.zkCfg.WitnessCacheLimit == 0 { + log.Info(fmt.Sprintf("[%s] Skipping witness cache stage. 
Cache not set or limit is set to 0", logPrefix)) + return nil + } + log.Info(fmt.Sprintf("[%s] Starting witness cache stage", logPrefix)) + if sequencer.IsSequencer() { + log.Info(fmt.Sprintf("[%s] skipping -- sequencer", logPrefix)) + return nil + } + defer log.Info(fmt.Sprintf("[%s] Finished witness cache stage", logPrefix)) + + freshTx := false + if tx == nil { + freshTx = true + log.Debug(fmt.Sprintf("[%s] no tx provided, creating a new one", logPrefix)) + var err error + tx, err = cfg.db.BeginRw(ctx) + if err != nil { + return fmt.Errorf("cfg.db.BeginRw, %w", err) + } + defer tx.Rollback() + } + + stageWitnessProgressBlockNo, err := stages.GetStageProgress(tx, stages.Witness) + if err != nil { + return fmt.Errorf("GetStageProgress: %w", err) + } + + stageInterhashesProgressBlockNo, err := stages.GetStageProgress(tx, stages.IntermediateHashes) + if err != nil { + return fmt.Errorf("GetStageProgress: %w", err) + } + + if stageInterhashesProgressBlockNo <= stageWitnessProgressBlockNo { + log.Info(fmt.Sprintf("[%s] Skipping stage, no new blocks", logPrefix)) + return nil + } + + unwindPoint := stageWitnessProgressBlockNo + if stageInterhashesProgressBlockNo-cfg.zkCfg.WitnessCacheLimit > unwindPoint { + unwindPoint = stageInterhashesProgressBlockNo - cfg.zkCfg.WitnessCacheLimit + } + + //get unwind point to be end of previous batch + hermezDb := hermez_db.NewHermezDb(tx) + blocks, err := getBlocks(tx, unwindPoint, stageInterhashesProgressBlockNo) + if err != nil { + return fmt.Errorf("getBlocks: %w", err) + } + + // generator := witness.NewGenerator(cfg.dirs, cfg.historyV3, cfg.agg, cfg.blockReader, cfg.chainConfig, cfg.zkCfg, cfg.engine) + memTx := membatchwithdb.NewMemoryBatchWithSize(tx, cfg.dirs.Tmp, cfg.zkCfg.WitnessMemdbSize) + defer memTx.Rollback() + if err := zkUtils.PopulateMemoryMutationTables(memTx); err != nil { + return fmt.Errorf("PopulateMemoryMutationTables: %w", err) + } + memHermezDb := hermez_db.NewHermezDbReader(memTx) + + log.Info(fmt.Sprintf("[%s] Unwinding tree and hashess for witness generation", logPrefix), "from", unwindPoint, "to", stageInterhashesProgressBlockNo) + if err := witness.UnwindForWitness(ctx, memTx, unwindPoint, stageInterhashesProgressBlockNo, cfg.dirs, cfg.historyV3, cfg.agg); err != nil { + return fmt.Errorf("UnwindForWitness: %w", err) + } + log.Info(fmt.Sprintf("[%s] Unwind done", logPrefix)) + startBlock := blocks[0].NumberU64() + + prevHeader, err := cfg.blockReader.HeaderByNumber(ctx, tx, startBlock-1) + if err != nil { + return fmt.Errorf("blockReader.HeaderByNumber: %w", err) + } + + getHeader := func(hash common.Hash, number uint64) *eritypes.Header { + h, e := cfg.blockReader.Header(ctx, tx, hash, number) + if e != nil { + log.Error("getHeader error", "number", number, "hash", hash, "err", e) + } + return h + } + + reader := state.NewPlainState(tx, blocks[0].NumberU64(), systemcontracts.SystemContractCodeLookup[cfg.chainConfig.ChainName]) + defer reader.Close() + prevStateRoot := prevHeader.Root + + log.Info(fmt.Sprintf("[%s] Executing blocks and collecting witnesses", logPrefix), "from", startBlock, "to", stageInterhashesProgressBlockNo) + + now := time.Now() + for _, block := range blocks { + reader.SetBlockNr(block.NumberU64()) + tds := state.NewTrieDbState(prevHeader.Root, tx, startBlock-1, nil) + tds.SetResolveReads(true) + tds.StartNewBuffer() + tds.SetStateReader(reader) + + trieStateWriter := tds.NewTrieStateWriter() + if err := witness.PrepareGersForWitness(block, memHermezDb, tds, trieStateWriter); err != nil { + return 
fmt.Errorf("PrepareGersForWitness: %w", err) + } + + getHashFn := core.GetHashFn(block.Header(), getHeader) + + chainReader := stagedsync.NewChainReaderImpl(cfg.chainConfig, tx, nil, log.New()) + + vmConfig := vm.Config{} + if _, err = core.ExecuteBlockEphemerallyZk(cfg.chainConfig, &vmConfig, getHashFn, cfg.engine, block, tds, trieStateWriter, chainReader, nil, hermezDb, &prevStateRoot); err != nil { + return fmt.Errorf("ExecuteBlockEphemerallyZk: %w", err) + } + + prevStateRoot = block.Root() + + w, err := witness.BuildWitnessFromTrieDbState(ctx, memTx, tds, reader, cfg.forcedContracs, false) + if err != nil { + return fmt.Errorf("BuildWitnessFromTrieDbState: %w", err) + } + + bytes, err := witness.GetWitnessBytes(w, false) + if err != nil { + return fmt.Errorf("GetWitnessBytes: %w", err) + } + + if hermezDb.WriteWitnessCache(block.NumberU64(), bytes); err != nil { + return fmt.Errorf("WriteWitnessCache: %w", err) + } + if time.Since(now) > 10*time.Second { + log.Info(fmt.Sprintf("[%s] Executing blocks and collecting witnesses", logPrefix), "block", block.NumberU64()) + now = time.Now() + } + } + log.Info(fmt.Sprintf("[%s] Witnesses collected", logPrefix)) + + // delete cache for blocks lower than the limit + log.Info(fmt.Sprintf("[%s] Deleting old witness caches", logPrefix)) + if err := hermezDb.DeleteWitnessCaches(0, stageInterhashesProgressBlockNo-cfg.zkCfg.WitnessCacheLimit); err != nil { + return fmt.Errorf("DeleteWitnessCache: %w", err) + } + + if err := stages.SaveStageProgress(tx, stages.Witness, stageInterhashesProgressBlockNo); err != nil { + return fmt.Errorf("SaveStageProgress: %w", err) + } + + log.Info(fmt.Sprintf("[%s] Saving stage progress", logPrefix), "lastBlockNumber", stageInterhashesProgressBlockNo) + + if freshTx { + if err := tx.Commit(); err != nil { + return fmt.Errorf("tx.Commit: %w", err) + } + } + + return nil +} + +func getBlocks(tx kv.Tx, startBlock, endBlock uint64) (blocks []*eritypes.Block, err error) { + idx := 0 + blocks = make([]*eritypes.Block, endBlock-startBlock+1) + for blockNum := startBlock; blockNum <= endBlock; blockNum++ { + block, err := rawdb.ReadBlockByNumber(tx, blockNum) + if err != nil { + return nil, fmt.Errorf("ReadBlockByNumber: %w", err) + } + blocks[idx] = block + idx++ + } + + return blocks, nil +} + +func UnwindWitnessStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg WitnessCfg, ctx context.Context) (err error) { + logPrefix := u.LogPrefix() + if cfg.zkCfg.WitnessCacheLimit == 0 { + log.Info(fmt.Sprintf("[%s] Skipping witness cache stage. Cache not set or limit is set to 0", logPrefix)) + return nil + } + useExternalTx := tx != nil + if !useExternalTx { + if tx, err = cfg.db.BeginRw(ctx); err != nil { + return fmt.Errorf("cfg.db.BeginRw: %w", err) + } + defer tx.Rollback() + } + + if cfg.zkCfg.WitnessCacheLimit == 0 { + log.Info(fmt.Sprintf("[%s] Skipping witness cache stage. 
Cache not set or limit is set to 0", logPrefix))
+		return nil
+	}
+	useExternalTx := tx != nil
+	if !useExternalTx {
+		if tx, err = cfg.db.BeginRw(ctx); err != nil {
+			return fmt.Errorf("cfg.db.BeginRw: %w", err)
+		}
+		defer tx.Rollback()
+	}
+
+	if cfg.zkCfg.WitnessCacheLimit == 0 {
+		log.Info(fmt.Sprintf("[%s] Skipping witness cache stage. Cache not set or limit is set to 0", logPrefix))
+		return nil
+	}
+
+	fromBlock := u.UnwindPoint + 1
+	toBlock := u.CurrentBlockNumber
+	log.Info(fmt.Sprintf("[%s] Unwinding witness cache stage from block number", logPrefix), "fromBlock", fromBlock, "toBlock", toBlock)
+	defer log.Info(fmt.Sprintf("[%s] Unwinding witness cache complete", logPrefix))
+
+	hermezDb := hermez_db.NewHermezDb(tx)
+	if err := hermezDb.DeleteWitnessCaches(fromBlock, toBlock); err != nil {
+		return fmt.Errorf("DeleteWitnessCache: %w", err)
+	}
+
+	if err := stages.SaveStageProgress(tx, stages.Witness, fromBlock); err != nil {
+		return fmt.Errorf("SaveStageProgress: %w", err)
+	}
+
+	if err := u.Done(tx); err != nil {
+		return fmt.Errorf("u.Done: %w", err)
+	}
+	if !useExternalTx {
+		if err := tx.Commit(); err != nil {
+			return fmt.Errorf("tx.Commit: %w", err)
+		}
+	}
+	return nil
+}
+
+func PruneWitnessStage(s *stagedsync.PruneState, tx kv.RwTx, cfg WitnessCfg, ctx context.Context) (err error) {
+	logPrefix := s.LogPrefix()
+	if cfg.zkCfg.WitnessCacheLimit == 0 {
+		log.Info(fmt.Sprintf("[%s] Skipping witness cache stage. Cache not set or limit is set to 0", logPrefix))
+		return nil
+	}
+	useExternalTx := tx != nil
+	if !useExternalTx {
+		tx, err = cfg.db.BeginRw(ctx)
+		if err != nil {
+			return fmt.Errorf("cfg.db.BeginRw: %w", err)
+		}
+		defer tx.Rollback()
+	}
+
+	log.Info(fmt.Sprintf("[%s] Pruning witness caches...", logPrefix))
+	defer log.Info(fmt.Sprintf("[%s] Pruning witness caches complete", logPrefix))
+
+	hermezDb := hermez_db.NewHermezDb(tx)
+
+	toBlock, err := stages.GetStageProgress(tx, stages.Witness)
+	if err != nil {
+		return fmt.Errorf("GetStageProgress: %w", err)
+	}
+
+	if err := hermezDb.DeleteWitnessCaches(0, toBlock); err != nil {
+		return fmt.Errorf("DeleteWitnessCache: %w", err)
+	}
+
+	log.Info(fmt.Sprintf("[%s] Saving stage progress", logPrefix), "stageProgress", 0)
+	if err := stages.SaveStageProgress(tx, stages.Witness, 0); err != nil {
+		return fmt.Errorf("SaveStageProgress: %w", err)
+	}
+
+	if !useExternalTx {
+		if err := tx.Commit(); err != nil {
+			return fmt.Errorf("tx.Commit: %w", err)
+		}
+	}
+	return nil
+}
diff --git a/zk/stages/stages.go b/zk/stages/stages.go
index 4ada15e99ec..3e0097dd642 100644
--- a/zk/stages/stages.go
+++ b/zk/stages/stages.go
@@ -233,6 +233,7 @@ func DefaultZkStages(
 	exec stages.ExecuteBlockCfg,
 	hashState stages.HashStateCfg,
 	zkInterHashesCfg ZkInterHashesCfg,
+	stageWitnessCfg WitnessCfg,
 	history stages.HistoryCfg,
 	logIndex stages.LogIndexCfg,
 	callTraces stages.CallTracesCfg,
@@ -439,6 +440,20 @@ func DefaultZkStages(
 				return nil
 			},
 		},
+		{
+			ID: stages2.Witness,
+			Description: "Generate witness caches for each block",
+			Disabled: false,
+			Forward: func(firstCycle bool, badBlockUnwind bool, s *stages.StageState, u stages.Unwinder, txc wrap.TxContainer, logger log.Logger) error {
+				return SpawnStageWitness(s, u, ctx, txc.Tx, stageWitnessCfg)
+			},
+			Unwind: func(firstCycle bool, u *stages.UnwindState, s *stages.StageState, txc wrap.TxContainer, logger log.Logger) error {
+				return UnwindWitnessStage(u, txc.Tx, stageWitnessCfg, ctx)
+			},
+			Prune: func(firstCycle bool, p *stages.PruneState, tx kv.RwTx, logger log.Logger) error {
+				return PruneWitnessStage(p, tx, stageWitnessCfg, ctx)
+			},
+		},
 		{
 			ID: stages2.Finish,
 			Description: "Final: update current block for the RPC API",
diff --git a/zk/witness/witness.go b/zk/witness/witness.go
index 5ae7ac04bcf..66346367db4 100644
--- a/zk/witness/witness.go
+++ b/zk/witness/witness.go
@@ -1,15 +1,15 @@
 package witness
 
 import (
- "bytes" "context" "errors" "fmt" "math/big" "time" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/chain" - libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/kv" libstate "github.com/ledgerwatch/erigon-lib/state" @@ -23,20 +23,14 @@ import ( "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - db2 "github.com/ledgerwatch/erigon/smt/pkg/db" - "github.com/ledgerwatch/erigon/smt/pkg/smt" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/trie" - dstypes "github.com/ledgerwatch/erigon/zk/datastream/types" "github.com/ledgerwatch/erigon/zk/hermez_db" "github.com/ledgerwatch/erigon/zk/l1_data" - zkStages "github.com/ledgerwatch/erigon/zk/stages" zkUtils "github.com/ledgerwatch/erigon/zk/utils" "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon-lib/kv/membatchwithdb" - "github.com/holiman/uint256" - "math" ) var ( @@ -54,7 +48,7 @@ type Generator struct { chainCfg *chain.Config zkConfig *ethconfig.Zk engine consensus.EngineReader - forcedContracts []libcommon.Address + forcedContracts []common.Address } func NewGenerator( @@ -65,7 +59,7 @@ func NewGenerator( chainCfg *chain.Config, zkConfig *ethconfig.Zk, engine consensus.EngineReader, - forcedContracs []libcommon.Address, + forcedContracs []common.Address, ) *Generator { return &Generator{ dirs: dirs, @@ -79,80 +73,55 @@ func NewGenerator( } } -func (g *Generator) GetWitnessByBatch(tx kv.Tx, ctx context.Context, batchNum uint64, debug, witnessFull bool) (witness []byte, err error) { - t := zkUtils.StartTimer("witness", "getwitnessbybatch") +func (g *Generator) GetWitnessByBadBatch(tx kv.Tx, ctx context.Context, batchNum uint64, debug, witnessFull bool) (witness []byte, err error) { + t := zkUtils.StartTimer("witness", "getwitnessbybadbatch") defer t.LogTimer() reader := hermez_db.NewHermezDbReader(tx) - badBatch, err := reader.GetInvalidBatch(batchNum) + // we need the header of the block prior to this batch to build up the blocks + previousHeight, _, err := reader.GetHighestBlockInBatch(batchNum - 1) if err != nil { return nil, err } - if badBatch { - // we need the header of the block prior to this batch to build up the blocks - previousHeight, _, err := reader.GetHighestBlockInBatch(batchNum - 1) - if err != nil { - return nil, err - } - previousHeader := rawdb.ReadHeaderByNumber(tx, previousHeight) - if previousHeader == nil { - return nil, fmt.Errorf("failed to get header for block %d", previousHeight) - } + previousHeader := rawdb.ReadHeaderByNumber(tx, previousHeight) + if previousHeader == nil { + return nil, fmt.Errorf("failed to get header for block %d", previousHeight) + } - // 1. get l1 batch data for the bad batch - fork, err := reader.GetForkId(batchNum) - if err != nil { - return nil, err - } + // 1. 
get l1 batch data for the bad batch + fork, err := reader.GetForkId(batchNum) + if err != nil { + return nil, err + } - decoded, err := l1_data.BreakDownL1DataByBatch(batchNum, fork, reader) - if err != nil { - return nil, err - } + decoded, err := l1_data.BreakDownL1DataByBatch(batchNum, fork, reader) + if err != nil { + return nil, err + } - nextNum := previousHeader.Number.Uint64() - parentHash := previousHeader.Hash() - timestamp := previousHeader.Time - blocks := make([]*eritypes.Block, len(decoded.DecodedData)) - for i, d := range decoded.DecodedData { - timestamp += uint64(d.DeltaTimestamp) - nextNum++ - newHeader := &eritypes.Header{ - ParentHash: parentHash, - Coinbase: decoded.Coinbase, - Difficulty: new(big.Int).SetUint64(0), - Number: new(big.Int).SetUint64(nextNum), - GasLimit: zkUtils.GetBlockGasLimitForFork(fork), - Time: timestamp, - } - - parentHash = newHeader.Hash() - transactions := d.Transactions - block := eritypes.NewBlock(newHeader, transactions, nil, nil, nil) - blocks[i] = block + nextNum := previousHeader.Number.Uint64() + parentHash := previousHeader.Hash() + timestamp := previousHeader.Time + blocks := make([]*eritypes.Block, len(decoded.DecodedData)) + for i, d := range decoded.DecodedData { + timestamp += uint64(d.DeltaTimestamp) + nextNum++ + newHeader := &eritypes.Header{ + ParentHash: parentHash, + Coinbase: decoded.Coinbase, + Difficulty: new(big.Int).SetUint64(0), + Number: new(big.Int).SetUint64(nextNum), + GasLimit: zkUtils.GetBlockGasLimitForFork(fork), + Time: timestamp, } - return g.generateWitness(tx, ctx, batchNum, blocks, debug, witnessFull) - } else { - blockNumbers, err := reader.GetL2BlockNosByBatch(batchNum) - if err != nil { - return nil, err - } - if len(blockNumbers) == 0 { - return nil, fmt.Errorf("no blocks found for batch %d", batchNum) - } - blocks := make([]*eritypes.Block, len(blockNumbers)) - idx := 0 - for _, blockNum := range blockNumbers { - block, err := rawdb.ReadBlockByNumber(tx, blockNum) - if err != nil { - return nil, err - } - blocks[idx] = block - idx++ - } - return g.generateWitness(tx, ctx, batchNum, blocks, debug, witnessFull) + parentHash = newHeader.Hash() + transactions := d.Transactions + block := eritypes.NewBlock(newHeader, transactions, nil, nil, nil) + blocks[i] = block } + + return g.generateWitness(tx, ctx, batchNum, blocks, debug, witnessFull) } func (g *Generator) GetWitnessByBlockRange(tx kv.Tx, ctx context.Context, startBlock, endBlock uint64, debug, witnessFull bool) ([]byte, error) { @@ -164,9 +133,10 @@ func (g *Generator) GetWitnessByBlockRange(tx kv.Tx, ctx context.Context, startB } if endBlock == 0 { witness := trie.NewWitness([]trie.WitnessOperator{}) - return getWitnessBytes(witness, debug) + return GetWitnessBytes(witness, debug) } hermezDb := hermez_db.NewHermezDbReader(tx) + idx := 0 blocks := make([]*eritypes.Block, endBlock-startBlock+1) var firstBatch uint64 = 0 @@ -214,9 +184,9 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, batchNum uint return nil, fmt.Errorf("block number is in the future latest=%d requested=%d", latestBlock, endBlock) } - batch := membatchwithdb.NewMemoryBatchWithSize(tx, g.dirs.Tmp, g.zkConfig.WitnessMemdbSize) - defer batch.Rollback() - if err = zkUtils.PopulateMemoryMutationTables(batch); err != nil { + rwtx := membatchwithdb.NewMemoryBatchWithSize(tx, g.dirs.Tmp, g.zkConfig.WitnessMemdbSize) + defer rwtx.Rollback() + if err = zkUtils.PopulateMemoryMutationTables(rwtx); err != nil { return nil, err } @@ -230,21 +200,11 @@ func (g *Generator) 
generateWitness(tx kv.Tx, ctx context.Context, batchNum uint return nil, fmt.Errorf("requested block is too old, block must be within %d blocks of the head block number (currently %d)", maxGetProofRewindBlockCount, latestBlock) } - unwindState := &stagedsync.UnwindState{UnwindPoint: startBlock - 1} - stageState := &stagedsync.StageState{BlockNumber: latestBlock} - - hashStageCfg := stagedsync.StageHashStateCfg(nil, g.dirs, g.historyV3, g.agg) - if err := stagedsync.UnwindHashStateStage(unwindState, stageState, batch, hashStageCfg, ctx, log.New(), true); err != nil { - return nil, fmt.Errorf("unwind hash state: %w", err) + if err := UnwindForWitness(ctx, rwtx, startBlock, latestBlock, g.dirs, g.historyV3, g.agg); err != nil { + return nil, fmt.Errorf("UnwindForWitness: %w", err) } - interHashStageCfg := zkStages.StageZkInterHashesCfg(nil, true, true, false, g.dirs.Tmp, g.blockReader, nil, g.historyV3, g.agg, nil) - - if err = zkStages.UnwindZkIntermediateHashesStage(unwindState, stageState, batch, interHashStageCfg, ctx, true); err != nil { - return nil, fmt.Errorf("unwind intermediate hashes: %w", err) - } - - tx = batch + tx = rwtx } prevHeader, err := g.blockReader.HeaderByNumber(ctx, tx, startBlock-1) @@ -255,9 +215,9 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, batchNum uint tds := state.NewTrieDbState(prevHeader.Root, tx, startBlock-1, nil) tds.SetResolveReads(true) tds.StartNewBuffer() - trieStateWriter := tds.TrieStateWriter() + trieStateWriter := tds.NewTrieStateWriter() - getHeader := func(hash libcommon.Hash, number uint64) *eritypes.Header { + getHeader := func(hash common.Hash, number uint64) *eritypes.Header { h, e := g.blockReader.Header(ctx, tx, hash, number) if e != nil { log.Error("getHeader error", "number", number, "hash", hash, "err", e) @@ -278,48 +238,8 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, batchNum uint hermezDb := hermez_db.NewHermezDbReader(tx) - //[zkevm] get batches between last block and this one - // plus this blocks ger - lastBatchInserted, err := hermezDb.GetBatchNoByL2Block(blockNum - 1) - if err != nil { - return nil, fmt.Errorf("failed to get batch for block %d: %v", blockNum-1, err) - } - - currentBatch, err := hermezDb.GetBatchNoByL2Block(blockNum) - if err != nil { - return nil, fmt.Errorf("failed to get batch for block %d: %v", blockNum, err) - } - - gersInBetween, err := hermezDb.GetBatchGlobalExitRoots(lastBatchInserted, currentBatch) - if err != nil { - return nil, err - } - - var globalExitRoots []dstypes.GerUpdate - - if gersInBetween != nil { - globalExitRoots = append(globalExitRoots, *gersInBetween...) 
- } - - blockGer, err := hermezDb.GetBlockGlobalExitRoot(blockNum) - if err != nil { - return nil, err - } - emptyHash := libcommon.Hash{} - - if blockGer != emptyHash { - blockGerUpdate := dstypes.GerUpdate{ - GlobalExitRoot: blockGer, - Timestamp: block.Header().Time, - } - globalExitRoots = append(globalExitRoots, blockGerUpdate) - } - - for _, ger := range globalExitRoots { - // [zkevm] - add GER if there is one for this batch - if err := zkUtils.WriteGlobalExitRoot(tds, trieStateWriter, ger.GlobalExitRoot, ger.Timestamp); err != nil { - return nil, err - } + if err := PrepareGersForWitness(block, hermezDb, tds, trieStateWriter); err != nil { + return nil, fmt.Errorf("PrepareGersForWitness: %w", err) } engine, ok := g.engine.(consensus.Engine) @@ -328,60 +248,24 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, batchNum uint return nil, fmt.Errorf("engine is not consensus.Engine") } - vmConfig := vm.Config{} - getHashFn := core.GetHashFn(block.Header(), getHeader) chainReader := stagedsync.NewChainReaderImpl(g.chainCfg, tx, nil, log.New()) - _, err = core.ExecuteBlockEphemerallyZk(g.chainCfg, &vmConfig, getHashFn, engine, block, tds, trieStateWriter, chainReader, nil, hermezDb, &prevStateRoot) - if err != nil { - return nil, err + vmConfig := vm.Config{} + if _, err = core.ExecuteBlockEphemerallyZk(g.chainCfg, &vmConfig, getHashFn, engine, block, tds, trieStateWriter, chainReader, nil, hermezDb, &prevStateRoot); err != nil { + return nil, fmt.Errorf("ExecuteBlockEphemerallyZk: %w", err) } prevStateRoot = block.Root() } - inclusion := make(map[libcommon.Address][]libcommon.Hash) - for _, contract := range g.forcedContracts { - err = reader.ForEachStorage(contract, libcommon.Hash{}, func(key, secKey libcommon.Hash, value uint256.Int) bool { - inclusion[contract] = append(inclusion[contract], key) - return false - }, math.MaxInt64) - if err != nil { - return nil, err - } - } - - var rl trie.RetainDecider - // if full is true, we will send all the nodes to the witness - rl = &trie.AlwaysTrueRetainDecider{} - - if !witnessFull { - rl, err = tds.ResolveSMTRetainList(inclusion) - if err != nil { - return nil, err - } - } - - eridb := db2.NewEriDb(batch) - smtTrie := smt.NewSMT(eridb, false) - - witness, err := smt.BuildWitness(smtTrie, rl, ctx) + witness, err := BuildWitnessFromTrieDbState(ctx, rwtx, tds, reader, g.forcedContracts, witnessFull) if err != nil { - return nil, fmt.Errorf("build witness: %v", err) + return nil, fmt.Errorf("BuildWitnessFromTrieDbState: %w", err) } - return getWitnessBytes(witness, debug) -} - -func getWitnessBytes(witness *trie.Witness, debug bool) ([]byte, error) { - var buf bytes.Buffer - _, err := witness.WriteInto(&buf, debug) - if err != nil { - return nil, err - } - return buf.Bytes(), nil + return GetWitnessBytes(witness, debug) } func (g *Generator) generateMockWitness(batchNum uint64, blocks []*eritypes.Block, debug bool) ([]byte, error) { diff --git a/zk/witness/witness_merge_test_data.go b/zk/witness/witness_merge_test_data.go new file mode 100644 index 00000000000..1bfe7b9cd14 --- /dev/null +++ b/zk/witness/witness_merge_test_data.go @@ -0,0 +1,8 @@ +package witness + +var ( + witness1 = 
"01020302030203020302030203034b4c181607792b3c46ea253af79666ab9bbfa3d29e8855be6c4e045b3424f6a503fdb52981685167cdab219ae57b3c5869e539e89eb29845d6406b3229247e982e020302030203020302030203020303dc378377acad40e16af2de6482d7a60c1e5f087d067fc716c2485742ac2e29330339535728bf0c5d72ec789110ff3691dfb9cf434399ad849a86ca6725977d3e4f0203020303481a1fc812bcc98ce37225fff9f28a6d8d0ea5c63aeda93b031e8e4603cc8e7c032952530fef71561f9028c37b944df439c0d2968c4f7e247a2ad12dd4969ffc8302030203031ce6733d3a496a34cb114cad924070b0dfad8ff6891f629ed2ae31326540fe120345057d6cbecce08aeecc475c91403549f4fe82bdb953895bdeded2fae6f8688a020302030203020303c4ac3ac799860160a30a3304b765c2c90bc414edc3739a5d098bb7e18009548a039042e98ef239f418f2bf7ad10868e1fa7d0f644458488adf684313dc3f683a5202030203020303949f805ade2be05694c8011fa17fab3646a43f38f96d868386f0ba9558ba5f960302aabd9fbeceb9711f46d634513830181412c8405aea579f470a19b477d090140203020303db978a462b93b2efa3aa3da09e03370b570db692c6d361d52ae1051bdb26a3a903916d67432c505e1dc33f3617e0743d761aba44785726309191e79cb18b666e7402030203033edca13bcadc1db9305f3b15322cc6d774682fffdfe2b509f81d00b16ce2dcd003dc94780e238944094e7856154e6d3e54fec28293a9a70eaf1cc2a81e874e22170203020302010203070354000000000000000000000000000000005ca1ab1e5820e72de8a1b9696dd30f7886b15c4cc9234d52c6b41b9c33e2baaf8d88fc5b7c9f5820f8fb80310ac041e7a5e79c138d7261cda5d8a988dc9268b5a8dc5318fb610a90070354000000000000000000000000000000005ca1ab1e5820000000000000000000000000000000000000000000000000000000000000000358206e82d18bde430935057c321f6c30812e0eae2122da6af753e25974c92f0d7b50020303c6cbb686c9d7a94f49dfbee076ae1b87f1c9bb5f33b7c98a71816c33b4731a3b037514c2021b2bb805e2a6060b265dd53c069a4587e19cd7d1af99d0e9c3d0e550020303784570292bffbc3ee00153e5806a317459fed4c1d84de0515dcefc177404865003f08c92f6786e67148e8fb2bcd4eb813a665e16a270b475605d1d84b587450ff102030344c5e2a1775873020ab4e5a588e95d6702878cd46012682196dc39738fd8780703a6d5ee17fe3be7e20050e4e66c54b5188febbdd3615f832c35b073078258b214020303a42b38dcef18f890c02cdb90473211c95582727b83af287cbfc8a3f10e29649103380623684a9b3b341e01ee65908a6aac96fdf1444ca255b9dd5193537d58709b020303476c478891a8f8d905ebf9e5c031ba1020ca1436538bf9b97c6eaa1b9512da97038c77314895fccd4edafbfd73b531f4dec6f4671b6acde83926907ab376982f310203036416706411fa678c78f77dbfb609d65f63d6b04a8aae3fae4cad23419f6e738b03b6ec59ff099f23c5a528e805fbd9457736b100ea0e96390eb536046b88da3db102030334468b79fd36c8bc812c6613d176983aa4be53642e7e56421faa4ef25031fc73032869ca46586018725007aac483055d85131fcc4432c9a72175a8c6263b65c1ed020303676f9f98ef2cdc44ec8d98d0153be2aeb90b08386286887c94567950df1216440385bdebccb7559d68f55e26ba0980bcf7120609c7bb43cfc1f701e92f670ac1280203031117969a5ad58cb9a441ddd498cf3bebc13ab5aea1ceb29ddb1a226c5343c6e703425597c542fab13f686a7053f6c1e2635a729f8d9da4c01d763ffe9965ddd63402030345f2b9e446c9e743f6899409a4567a9b7f8770f711d39e39773d8173c4ea3a0c03cbc17bc3c54426fc8cf2b13b1ddb800509579856ce251beae01d924a92a8edb8020302030203030560b956a67a313d6d8939eed4cd80cc385eb49f7b6dd269ccde33a145f1216e037b3e0569695b777df45db97a41b025b57c680ad61231b61225fc7825824c4c0502030203033f1ce9dde58980c5bc34a88467f3b8cfd334dab19f28050acc53f33aab0b366f036092ba2243e1d8e20c2aa4ba0aee9ca063e8e8e6da493269065c227232020a590203020303921f9061d1b4082d20b7f9df21566609ca6dc64cd0ffac2625e9ff3090ac73570371757934d2c7d4dfe9b7b1e5c71fe18b66cf56c540c3d04310873976f79ef9f602030203020303e8e045e00ad70f879f31e498fe49aa2fd4c81849b6c55dd98391681351aac7df036e509788bd99ed4034d5fa3901bbda4cb6f94d709b2d54eca545336569791f36020302030310125b6177d5086fcdca8e0607e49e6cb21bebc95404329c9769a7d3ed59e2c403
4e9acfa4214d459d1c81a304d862b2dbd4d832e71ab851656bfcc0e9c5b3e6f60203020303ad656bdacec77a2e7e591bddde7b2c7ab9b928945ee65898ff35900e25f0c21f03e47d9766945c4649bd44422f5fa779e92267d76ce44f396ef0b672215e43ce7802030203020303d3858acf0781afe0adae49a25d1724b36c9989179cc884b9a9c6481f89e57706031da6fb50879ede58d816d1624292f0c60a8133bcbc671dd92d0f9cb8d50fc803020302030317221db9e049aebabc83cefc3ebe7040ec1e82022d104d2c78f796753f76f0120352124f3ffee53f7e0f9a0068d5c0b0abfca5aaa148371c91e2e81df5fba6f8bf0203020303e00ce2232f3e208dcf050f887d02c7b91170c4b98e1d098ec5238bb3d387a41e038a0379dab30ce84865bb4f0834a9c3fd7bb2da17994abf03e89fd8d754bf7aab0203020302030333f5853903cb36caedffa12d0aa0a01c39e91309629a97dddafa8da4f738fb3e038e1dc6012aecd5998053b5878c6e3a398c8a286c7ad4dc0b55f043e4b4210950020302030317e041d91830fe8051fc73631cfd56519fc5bb88b5369298da9807b40d93733703dc7745bf8dfa058bcf1328f79efc9441cf5ad5fb5763c75bdebf9492f88a6c8302030203020303bf9a2780e63ac26ffca6df07f182db72e3e65802b277ea6d40057c383d5a37e3036c1548eb1c956ece0a876fff4463cc3f2e9c3b6eef88379f07e6d71013eb4aad020302030202020103c2eebac9e260dad1f051fa75846558dcc0ed120d1a7699fd1d6b739a3419015b020103b9d64db96b69b035a74a90069691afa2b34a705f3ad3aa82f3757c7a6f2a9f17070354000000000000000000000000000000005ca1ab1e58207af492bfc857210d76ff8398a35a942af892e866b1f4c241746b3ee89ca002595820f889596e5b3d4dbe5bcab5cde2af26d3ad8d88bc086e8b4f929885f33f6eec77020303cd3edcf79c7e0e0d6b87ae523729611faeda92e77bbb59f739e9a6127d890cdf03009a5c01c3cb27d2f1ffbd3fd77ff38f66991f64174f5df1411ea21ae2e22f250203035334694d8b56a2f5e0d0ded81283e7f36b3da6fbf2e2d1b469c008d7414296be03953388b0cacc587c5ca452ba8e96a9071958ef94439690bc14866f556a35ebc1020303ac2aae05bc7a68d238e9a9bbd2d5d07a001f8f3651bb25f5a6d6dcbb155569090335b6f55bf3d56419cbc3a45d4fa6bed330d9e0391f8806c97a7aa4149d06725b0203033dfe4a2f0555ff318ad12e49515e712f339134af0237edaef08553d9d67e260b039cd50a46feb34ab47c24391a2579e601956897ad6299bd14a4c8d9628a37d46e02030348f01fedf98979a5fb3df07daded956331fa6a02f697dfe29dd26e71111de5540387829f9a96ed82303e86550747e311d5dbfe94cc71113600595360abb512cb7b02030203020302030203020303eac48d9dbf7d162797293e0acd54382d4fd53e80e29c9c43c51dafb05c0880060306b13c75c66a6e267236b6579bcca576ff889e323ac6ffd0ee317e07623a3866020302030203020302030352e23af8570aeca858a6aa5bd20d2c63a92eb08529a9e6e5fb245aa72c5b72ce0334d7cfef6cb28d62f63cf907e3273d76a8bb858423c6ef446b056fb4f06210e002030203020302030315bf4cd3a7f33296bb4778e216bd18adacf25c97f8f4df9e1052dcba7b6edf2203b3637d0b1cf58c5272f28f8354a764d3cd48ff7c04f807237da8f4a1e2ef5db5020302030203020302030203020303c018dfe606efd5d17f3d45e91d33d3d7ef57d92a1d509291b1556bbb7e78dd0803b08ff5bb304aa8741af608415c541440edcd98bbc0fc849fe2a89c2c783341d502030203031e0eb0ac5664b1d0267d3c51dd66d1828c0d01a0903d599a317e4578926d4e3503330e2ccc546a3db52e796943aa8960d6d483a3b941ae0caa21cc7b9f7a3c2bbc070354a40d5f56745a118d0906a34e69aec8c0db1cb8fa582000000000000000000000000000000000000000000000000000000000000000015820421c2cc0dce9b0fbdb85cbe43bd6c2a1af5a6f5da756cdb8b6f2bb948e3a90da020303ac874a6acbf6de628134cd74ad9f336206e7aadb1ef09456b6267a770612485703ef323528b761720ce04927a54b81025a935f070420d655217e40eb2e084bd170020303a48199a63a429cf44fb39fdbb73098a49dc171e03d32800f483780adb9aa06580388796e8ab2076fc77f00a5096317ceff8a54da310b014a0310504bcd76f8b8da020303f8698a6f24140e0e37f49032fb2da6db2c8bcaea8961a6e1976baded0d9a8bd80371b277886f0d14b6f82cfd063ecddab10fb5da5e0666e040992469d09a6bc8b0020303dee2b54587eeb7db2fe9ef0dc262b6ae679a5bfff89c8f403d181a1d79107b1d032aff27f522ef5fd88213c3865e01c7b4c1720d56778d1bd0e48e6a86fb3b0797020
3037d0d29240ad72800831a91d8e54019da745c6c6a630a625167723ace857bbb810305edd49a8cfb1eea157734968e95e8b8620c474c3cfc6f3285d3dad36893114302030349b1bd34664838889a2133d716143cb8707a15745738917bfbbeecbe871e6e90035ba74ef0008ce80ac6d199cc4d217aaa9b8a5fd58f2d329aba4e061c16d99b620203030d600bfcd6581d405aaed26aa7cee976fbb2bb9c1c1390bd3eb14cf5f661022303acf3369b84f876dc556ed93718d616864020b1969d24170f4970ddbd944e1bd9020303d5bb847f016f33d8cac460756aad70173d8d6e37c37d1b69e1b1b45c52e5996103c3777105bd00820b49c89486e7589f0ccd0244ab6fd4b1409ba86dece7506f9102030356b2929dbde358b52b652bc842c7a42aea162f0d79bd7d653b5cfee34e9f0e6c03656a686adb3bff7a9d8841d3e296b0dc61c389b399677222ebbd70cf0c19e70a020303e5bf4a0779ccfa6d42a01e532bb6120b168699bfd3f4f44a62780481d5f86588036efb82ef530fb604bdff43bf1ad1a7dde41522bf8a7f5e724dd3074562b0c0ef020303036a50ac7a6e425842820d2a4e07a80f416706903e9d88b5824559515a901aa80303e3c58c1dfb4f5a5a5d1180dd010ceb33a42a0ff7cab200ced5562202261aa0020302030385d60697f5b4482fcbecfaf5f53111681c9b48ed7bbd2cdb1a257bb7f26db9d103ae21f016eadf6448b913ba498fe3d678b8bcdf9569b026053de69bd24563ef0202030203032fd50f1a5b8eddbd5ccb90e37d9c190092927af9b26a1cf8b4576d7982476fb603436882f441f09768b000722da7ec7c74b6f0252c24e16b9e6461ce4f4eeb791d02030203034eb00b994d3a8d439f47981f68baf7fb0f0e88e2167243c6b005de3c48b5c3ec03ac5626fd3f4030d088d0f41834de11510b59739353238241138d70bd8e05c22e02030203030a51165872abbe7260a6777cbbd2f6d81dfcd07c1b7c0783659bf8e8ca8e77b9032f78c81c54fd31d1a25214fa464424ae6e6399f15c1bd8987825f1f0d0dfccde020302030203020303215472473dede3eebcfdd93b1ee898e4d6cf33261a1fba12ff77dff2fb8a0f27037938ac733af661730414e88da9633c04a8914c9ae4263a4f8cea7066e6cefb840203020302030203034d6713bc006056011f31ac6f935e71e33ab8045353e9e138ec9743e8574a8d2f03fcaee2f22e1561702d029c465b755ff5491e4114264dfdf16fe9efd34864a83802030203037cc278f9b41fd17fb5eb3c839a725fcd1ef6000189fcebcb4214303f45dcd2d60386c3bc64da300f1a87efa2eb2724553e41348057fc99d5c23b5b20e216fde46d020302030376bf2ddfddca9910df80bb0785add76937d1e90e029c02b04c0cf421622a232803ba2219bc37e93a89b0effdfc3601f58645c1cb7e818f2254c8fd16fea4ba84440203020303fdf1c4799edef5fe2960f9148627fff521e591247a224eb8d05eae3f51675b560372adafa8e298a14d0da0a71e645a12a23def78db8e81f7a68ef92aac7d5700b40203020303f5a794d38718b283b47993f3bbcd67c76f84c47fcf2d35373fcb7f8a0f43a06b03a14b12d1f03790ac75797e463a8a7edcfb2bc80b65a7dc8d1b15d00cefb315d5020302010312f8462573dc83d436d2498e68019babdcc911f482e1e00c1b3fda70e1a166e40203020303adcfa2154e38e2cdbafd5d56bdaa5dca90a5bfb9c36bfbe140bb31ec0e66716503b5aaf1a6fa2e80ad8f4e49c51808d2898fd74f539ec5de974b57c27466f5e7490203070354000000000000000000000000000000005ca1ab1e58205956a0b12f607189a063054545ab26ce76ea5eb4c9bc1e8d8161646c93ac66515820da6aba51eaf87e14a7585e52e23cc0b789c61b3e808d2aef704ae932bb2ab49d070354ee5a4826068c5326a7f06fd6c7cbf816f096846c5820701c251f0448beefca8b47dce2e42f136d224b8e89e4900d24681e46a70e7448510237426c03237429400000000067445fb8020303dd24d6adc0d7b321eb19905b22f1780707b0d7e30026716c3b0d7ea311cbfeab03e498dca26358e5fd56e5464288da82073a17cbbd112e322488c12bff1661b49b020303413600069144f3379227184b3d365f22778695ad2b812ffe56bdec80df882877033b1e22049401430c9208943101b5ef3e70d99c9853e08591c4729e0f31f4bf56020303ffce2337b88b26e7b0582d1679484fa995b68c0418d72f650531db342e25f12e03493c1bb3f993e9aa63e2736b9e0826f1309ed298bd95bfc169f89b6a62cbed420203031bacbf380b1eafbec9c534577f8972d28087bc6e94bc276ec91e66a11396f07903bb137addf6042ee1a1eb0170ac09f0a092b2f7682f718d5986152d56d192b347020303b89984a9ec10a5bc5835efef55fbf26f3477d21372a55ae4abd26c55ee5e323d035ab47c29775484efde5ad8cf
b1a399e9008bcb66f6cd77f28c255980633aeb5d0203037902d8528b89dce0e6a41ff89888121f42520936f3684bdc8481094f1e046b4f03cedf898a501b7bc036d92797f971bf9caa1028994a8d6b15ceb79e4ca532e7cc02030203020303a366f69c8b19d47be34a2a6333298d705692f65daf3fba95d6f48b9676b6cd3b0351f190ff80b28f339034b6be161060cbe4837cf22e0c01b3d5a77b8f349c4f1d02030203038d8eae2b45a21838dbf9f517dae99ff0bac7a25d4756a7a3315c43cfa7dbfb9803785e2e17b8cdb9628ca4c2f963eb5722918462cf75f91dd6fd00ae84d17ba2a90203020302030312a3a949b95a27ae6f73e9d879bc9c9c6eb6757f1c20ee76d1f52e1d4c9ec4eb03d38f8911a661255b0ebcabbadd44e38903841386863c97499f3e57a06bc5c3e702030203020303763e3e4c8cc4a4b30afaaae229ff20ac282d74c923a88be140293d62b2d812bb03b4b4e3386c676de1012a2bdced3714094e57803a98920b0eefe63e186abdd4d902030203032ee550fc2b119e46e3338c971e6f44ea838020e442fce0c4a34b359306a00379038c72343f5e2ac968c7f1edfd71f18128db6b52aa476fbec372eaa58a2acf45220203020303221a1371f01a251478f2a6673db891a3c412d954dc9e741ea2bfd249abf428bf0325059126652b0c2c46d78a02eba6c4df473b674ed378b17827c634bd119f5422020302030203020303313abcaaf43f5d42589a57c6fc0bec04526b43a3dc139415af1de50f8846c004037ee72e1eb97ffd7dfe0c7d40b575103edd3e62c030b86362c41630c6e97bf6bf020302030203020303508d990b34daf5a3f925c435daac3d293f6d861094cc2d343a92c62428fa66da032f8b40a9211667e9c44328d6440091ecb3a46bc15832f7d7cdfa8ec130b527fc0203020303f993f7eae6e45a6f8557c6c5d0e912cb41b71d2bf37f38affc0b2d8e054193220315eeb3ab754628ce727cd0b7028ff8ed3291de7566b99066e127185d043f595702030203032f2c132f32f21e267ab64271e8f2c0c39fedbcc509c4589616cffec21d7332eb03839857347599c19c43a0acfe53e1bb5bbe0d68ddb49cee05f1b24c5acac24a150203020303dcd0869ad1107856680f6bf164623fc709d26d1a0842bb9c60a383f255c0ec2403c92cb1692742c4e2b6a91d13c3b371a9dccd29f898d8f6457ad052b1da9efcf6020302030203031408a1feb1c4cefd2e71c1d7ce58e6b4c2d139d48c67037d40dc0d60390af539039c51675ab13cc260ab6875b12824ed60903c1755add14024e27508ac0a3b9d81020102030376fdbe16ba7e2f048d9c311cb1f99291b4f624717ddd7e9f2aa653099d19314f032ebe85ea3fef7c7033338d1ed98e187eddf75dff4772a23e19392ce61690f77f020303f901f2ba5a7a95db9ea7106268f17f341206944377d1f006921211069cf8a0a103f43daf24401f9ed2d0691570a8ccdcd016c90b722786ff590276f7cd5933ff3d02030378ab72606d2d32782ceccc9c11af9496f599dec259281c01f0c18a3b875518ed0355b4984426bd4db31ca5d70798a18280a4d319786bd897a29365d2db7489b32d02030335706adc0febe81255c960be521ae4c7a6201b2db502fb7016a5d4d9ba36c58803ef3e9f16053b7f799f207451eb3403eb95301e9c9e721dfde0c41ebd8362485c0203033b59831b753c1ca3ed58d3293aab0099027f87ff97f3f7e92d9dfb095839497a03821fc506f41f2a0bcce20367ebd6ae4b461e110e1788d190416c8345ec72c364020303cf6d91b6b57705a8f02a367997e807f49dba00a5bb3e8d0de25eacad5486b88f03abc64c2300b90b30ae3b11fb71095675d1a62860a6471a1a2defcf624b8bb4d4020303be890b95c3a4c5c381f1a00d6d98da4cd8467e002746a8c52f2564e41319d3780394b620da3f2c277f0d4a70c7a54a7245503ed2e808bf722cce0b503e242ae7d10203039f6bac7e82bf632c8b003eed17f050a49d2ea83b6a93e09295b3b3c51c55ada6038d01937127f83a85e3e655363f467385226f7e406409528791f6e2375184ef5e02030203020303e2ba22bcf2fd6923a2ffd1ae073bcffad33e81f4a7cb9cab82e130c63a213b6e031dd2e6a82a0638b027a1f15eac2bceca26ef1519de70dc99bd5275791bab4bb0020302030203031d0be4b4d178c76d39a7689aaa3a9866e63b999a2d11dbec2f04787c714dabbe03e5880788e24aeb6314512538d4cf7382b37132d4d2870122f47de8ac0d09eb020203020303b9af076d8b0e683e730de94273fbcdb5d2ac9f29273a9ffb38875892722f439903e22b2cbffaa7b1ed370a3d8b87199e1f1485703145dd3de0945cede9629702600203020303a019468f5d28919dfcc2d7bfd844492f2ab1df6400a17627b31c29ea02d583f5038dd13cd4ecf8c4151cebaf6e2637913a2310a81d4ecbd5f5fd2f4a4c315558ac0203020
303167bb488d1aff473f1027bdeadb8e0e7a439f6a589c78caae1a3d045e78da60303ddda65ddb3f7e0fe430faaeb49419075391fd2559659f2ba88d3655454e079e802030203020302030203020302030203037a46bc17ebfbc47f6d99661de00074c9958e0f7fd66df7c77c236b89b165472e034b58bfe7c7506d2891367c270ca350269dfc0a08b7466ec2496c6330dd602bb302030203039b58b0df7fae59a4cef25184d849214bc145cda115b2c0dfd85fd470ecdea70f0330923d4d299efbc138d4442519d30cd33a7827557231388b81a6ea9c65eabe6f0203020303af3fee608c2e8e5a30ffc6345d86ec1b2d55f10e518b4da5e8eb59abde07b59803c2016682405d3a953eba254601d5fc0b8966a33efaa51918a4a41b8e0acbeb4602030203034b1387aa6d0ab944e2ec65ce38c8643a0ddfca5c3059718f398dee501291569603528cbab25216c4397a402fcb572f0b512a773dfeafa59e401989a4da13406bfe02030203070354000000000000000000000000000000005ca1ab1e582054b6c4d9862a1658dedebe99a0f61d94c5d1515fd031d0dfe9ebce6a1454f5c658203f14693500ccd0260659fd9eaf69570edc0504867134ac88f871d91d388b63690203070354914e7547b9051ea6226c30495190a2efa15930c95820ffffffffffffffffffffffffffffffffffffffffffffffffffffffff74873927548382be7cc5c2cd8b14f44108444ced6745c5fecb02030311ab6695ec969171698c1f56d4c05373d8505a2c7299fb05cda1d4351e22bfa403478b94ae515fbd01728835b532c7c45ccc78d200d3d004da6917337e139eb729020303ffd14369e7c7f7aec3a890a20234885f2c9fb9802ec318d8434ebcd58a696153030ddae742090ea458c3f232dc894bd8cd3378b4b4590a0523e09a44e0439fe0db020303b1688d8c7806365d931579ccac8dbf7a8d7705ac393159dfd9c0395ab7b5ca5b036a6c978a565b15267de4330de7b6166014082043c5cc80370953767ac501ccf2020303f181e47adf88e965d55e1153d76b731c261ad7d7720823919fc11d98bc144d2a03c480f344ef22a4532900fb9d7cb9d8b5ce1e4f11a231e682142f9ffe1962807d0203032db40fdeb2c5256d5a237b6134f844646b325bfc12c687916327e21a65b1ae6a03c73ddb4116e07a00066b925a207dda51fbbfadce21a7459c6c2ae7f598721089020303e81fa28f73bf124de71b54c67334292e397e000428de699c89947d793cacb9da03173e567d72ac2c265860d9103e791fdfe3cad72a9a1dae15d9bec6687eb506d702030305a683365eb32bb92967becff0dba79d1c23ff75b2fc3d40f9a1573b993747b703b8b1075b12927a8f483dc7b802c96483206f98c640e49e22d4b426f9a9eb750f0203031276db0802c8235f9f248bbafaa6cbabb75baead95ede989894ea6d8585c3c8703527ea0179a8814d423775e1f381cc8eee0797216d71c79729ab186714e4daf3702030330b1e1f7a1f7dcbf5cd00932de20748e546bc1a8da9381fa3d5066f3c02b61da033f7308aca0fa70a938e45539d5dcd1864bc233ef232c6d38fa1dd331e536a400020303ad8fe61eca50a88286f382461ecaa93dc71e9aed12e91a2e9930325e5ffd1d7903fd046a02679f734a91031aacb4194ada537220167cfa68306b651433026e6478020302030203020303b7e72973952f51f913dc6818649ddb3c5619982f21e56347003ebe3b3788eadb0384757ebf158021f4bfc0d9a1bf844d13747328fd367727cb0a2d9b7c91926c400203020303593dd6ef2d4c6f8ab3253bec454072a6cf779b5acd194d43cf4d30191d4b24fe03d80a7ee4528b16cb482fd73c259b2e6e4fde5d5d31be6b97703fbbb17c3e61d20203020303992d90fe15b918f58e8dac35e96d0ebf33834ccacc8a69b6a075b263d0df655e0301b8df4b987fcf3a98000ca00d3191fd2292dc9210d7f1ab382035b2e2d02be9020302030328797f5226ad9a63c859dc61073e8ef33fe15094e61db64bcde0379f055f733403b50fe3e685c2e442a3a81715f64a840afaf1f81b49ed21b3fc2ead0620f6caae020302030203020303189a1bc58c5621e4845025a9c534fb9ad2bb2f5be276faee403d59266561d652038325fb098a4b3a402690994212511e710d20cb7966fb26b3687fea719eca217a0203020303ca11813aa459d051b0411eeddd18070506e8fe2054a2e22a763b05454e87cefd03b2cb46d28f3bcf15305b0654ca442442420ccc1b28e44e2e2c84498571b5375a02030203039385ca432e99a05cca8aa7fe5868222cdb6c928c8bbdd7eb13c22c5abe1b11cd03e8cb7cbe434eae4b8b7910183b3b006a1b3df70ae7b30248fef24d64a004c3c90203020302030203035fb731b403c8979aa552e74b3534a247c638547dc7c957467a4b08855b29b74703d49a5d90635d403354f849daf9976a4f4dfd7dab5517b254
638eb893511ebcaa02030203032fddd404fe9317d561378c78f3afbe75e18c27face10d4e6ea03fc2888b22e33033c8c390d481f51cf6b43c22677a971beae0e62e8b2ecfdaaed05b48ac0f60294020302030203020302030203070054000000000000000000000000000000005ca1ab1e45e8d4a5100002010341305ecddd1b56329ac9f09a1235eec6ce6be69492a9788db13e9187dc21e9dc020303fb1c6d1aa6d3f3bef7a0bf4130218b3b168f9447e69ebcd3b68c2b2f41d9b2ef03652ba6f9b69aee3d28404079416c2f8fba4078d66b558c7a8d9615cfe7d3bd30020303d9f042d0d2f152e24d8cde02d3a7d7a1fa234efc5dc259572d412a2e607215ba03c5b76ff595e1d74a22eb44a5aed94f3225b6126c2c28ef04bb75e1d3804925ad02030314a2b125da4db5ba673cd5c0aaae8c5bf0857fd45728b868cff3f40eaf9f82790393e93c4f4b58f6f9d397d136319a29aa6b691b652651513bfc2297107379ce62020303f00359907dd68b2ae8e2d252d3313f3ba2bba16d21995333b2162b24c9bbeac4036435af585f0f75e60d362629108f6768756f7b39f1c70ab7f79e6b4e1bd9f08f020303929e2f8eb833089a3773b497247338865ef336de61e7da4a362eb3e5d5601a7203323197b010e3205d910c230463758f39cd6c01258db0a11b9b47f4c278db0494020303ab4bdc2dbea0c00b12cedf9e968135b62101bc1e20e270a1f694ae6a4686627c03140686262c769436fdaece3afe58e8a4423cbf381295a85237e52fac66c57879020303295e1973d07a067f281e3337e756bacf10dcc295f7074564874ea4401eb2a4e503cfec4348d3a697dd4f1835bc31c2615f56f92a02c1935cceec2501c12b8628f10203033892c29a2de6aee7888c2448fdbb3252d32b426bf74edf79223e4ee886fc0f6b03ef287d8ccaa574ebdac646e6d35bfb3ce52b00eda1e671d7d7bbf31bd59ff7ee020303c58f22b2dc782f914b31e3b87185b727a0bd2e2dcc41481e31ab1b26f222fdf703f0dcf8a2ce85de4d96bdc4c1a9c52a7ec54cc771750f0ed7d6c1113b93df65ce02030203039a7c26055306c8884baf96dccb2e3bb3cb30deceafdc73491bbdf0333400efc0036ee70bfe41de62ab49a9a63ca415bb881a92980f87fc044f2f5ae2e84185dfea0203020303c4332d86dee9e03fbda2dc0eb81cb20a6f6a20c7df95090f09e47d8e7efa1d7b03a698f30a106768bc9d451fd96a6808beb2b799deec6423688d02a9ba34b4af280203020302030203020303398dee7348cac5f07e4865c2049207722ec9572e2ae69b21a8cbd1c053c44a0e03612d7861c014aed261de20fd1109fc86ae090eb2c37a02b8a6072bba1c77c8b50203020302030203020302030203031f28ae8c421086878704ec730445ebf2ff23d186ffed24802f0ae24259c8d21403a8e38716cdd8a09095a7036c686009bd8236b1c7eb9507540fb981baa9a8bc4b020302030203030fe638892efa1dbdc2881def87e77dbbba95d91c8debdd9b242bbf0745455a7403e5554fbb47341d48f82f64a26d175a7d3559378657e77cf2de2eff917b95be300203020303512c2cf0ab4340a1623bdddc301aa586932f9413ea9bf8c0f1849d2d70d5d0ff0375d0cc499c7f76c70939fd8d633c658747eebf0eb138c15d902c34f0de9098030203020303b78dcbd59a3668396357cbda038d7e5bc77aac4acdb3cd3a75e96eb05079a6bf03ceb3ed2850bca5df0bd69ce2e85e9daff43cdb4c79f58685340f521521a0943f0203020302030201034b33ab5a3b8d3b01c374c1d7fcfc714398b7f0704ba2f1ee757670269fd5a7f7020302020203070354000000000000000000000000000000005ca1ab1e5820000000000000000000000000000000000000000000000000000000000000000043840c77070354000000000000000000000000000000005ca1ab1e582010c18923d58801103b7e76ccd81e81a281713d174575a74b2ef0341f6b9a42fd5820b8b76bb549992d9bfc44e3b36d087a175b2e78b9584fc752eaa3013e0bdd31e8070354000000000000000000000000000000005ca1ab1e58209bd14ac8c1cf553e0ad3a2c109b9871eb74f3c116bf0bf492ef04d2983722555582090629dad0a40430445b7d2b25a8d19c5d7a929608ed7890877a499aaca01ca5002030315edee06840e36ef17d13817ab5475d12f7bd50113984febf31e2cd80c08952c03d360a7d78676862429feb7c95d052c1e63379b8ad3becf085a21baa353ab93d30203037a2fb952c2cf8e85d9706bcbcb5a69b83b13403b58f06b0767f4204acc5917930310de142eb3b2790cf1e3694b72eecc7e8ab3860f543c15cc24274ff69570f009020303879875563fe8a079ef71e84b6840b187c681499095de9d02d8b101c9dfcd111e0395e9fc3b000e49b65678f256d247786f72c91494c960d117b7668045c35502720203033d99bf4088229
f273fa4910aad8f0aae6f7de8fd1832ddd14c8fa3d083aac51603b40316099ecb013c6dcc6ac2a3e831521afa35ea0ee52485c2e8cd40bd81fd870203030b576686ff79ae37ff5ae2d4240131369472a24104dcabaaf3c348da66a638bf03dddfa8283748687718b9672b0a69a6b7758ce10eff383d83986c1a2aca2910e002030313ac1b1da5a5a4232e4e2b766aaba01f45f9444b926476f01882e30d4cc6ae1a0323f57d2012e1874436ddc007ea8bc6dcbeae6e0dac6fd044c8375d2fe593904502030381ee4d8ef714022c3c5fad435af845d213cb988ef7561ddf65929553b70dd69a03178f9fbf18b1d12feb522330c82fe96d15bc4964e1c1053093c4903149652e6b02030323cdd6298c89fc39f87595dedfd8bae3ae7a40b66f312333394482169297dc8d033517a6ff26c035b9f822da8d2abd642c858696e0d970b1026cb524cb0844195a02030391dd21f4c52970493d439537192b245ccd2e4e3e8e16d90dc74e417718b12f9103d56b5ff3ad5ab9205b2d6f9c508e744643224a7ebca8c1a4aea71f01e48b186b02030304375ae3a357e874c2a10fe3596adee75d0ccb96e63838d8db70c9e402663e9903bd8d2e9ed97a66281cbb0733a92dcc92158740088acc7a9c834d8204c0acc1da0203033c9cd711a378c8153572663cfc686ea0324eaabf0feca614928eab900755299f030bdcf7033e475ad4a147377e1bb9ed8619b0c88b728f7935ecbe7bcd2fa82c7c0203020302030313f878d66e026ade0b2e4ffec8b114291f4d832aae729e6da9fe98f316651f67031b597cff5ad0e3ec8b6baa5f15993d6e950c3cf473b0c796f339d7e9e28da24002030203020303ac0a0e8df4bc9026b7b241c34d72dce10c8424eacea17d1670103c8ded2446be03f7a62663d338b5b7e9219d01266b1772ca3720daf925bd302b7dafcf8abebcba0203020302030366c76d1cd3e3136e15f9f29f3dcc4bded2c760f251e06403ea022bf5d67ae2d503c574a334d06a611fa0340a1213e317efb125ace4eda7a487ea00075e9a6b67a902030203020303ed1e8a1502eb462bb7f836f6d72486908e1b2cce7cb00e387cc1aadc827874d103bdbfb8f970bcc72256ac4e1eca0809217c625c6412289a6dc5dff7c91454436602030203020302030203020303e8bb2dae757d043417195292bac773cda990500845f91a94d00179fe89525c3e03e385244b15001ddd43b0180922bbccf4040a86bd116ade66fc3aced03dffecff02030203037d8dcb012bdde19a0dd178c1de91d21cc866a76b9b6315554fec4bc4f5daa79203bc46b61f50585799762df053271c52844c6fe83156fde628c8bc4369c4fed18202030203020303c998c4c602a03cfa0c92a1542165567f11d23f2ae5fb91d04e02292f8e297548039447848097d9500f21ebe819789f98461e01aff7cfcd442c8afe8e07b87a95690203020303f6441de6ba2bc5cc9ead4300519a94a14a80b85f2fb0fa41e2211d7c02af0e6703703a99e4c2133e89a6e892863f14f143cf5f2ad94bd527081c8be6143f14f3db020302030203031da626649ee092857f195eb0059639309d744972389a4e748c471f16b0fc3c2e03f4072cd036d5bbb777ad24fa0b1a235806ef25737404d7ce4f83babb76bf090802030203020303c7f6a9615846a2d627db4c940098479bce3c61c8fc1170d0b7f8c9ac2bec5ea4033664667108158e9377b49cf3632952090b6eab3ba6eaed4f48ca9d5beb273fd002010203070354000000000000000000000000000000005ca1ab1e5820eff9a5f21a1dc5ce907981aedce8e1f0d94116f871970a4c9488b2a6813ffd41582021bb230cc7b5a15416d28b65e85327b729b384a46e7d1208f17d1d74e498f445020102030203070354000000000000000000000000000000005ca1ab1e5820cfe58c94626d82e54c34444223348c504ae148f1e79868731da9b44fc91ddfd4582040fc722808ecb16a4f1cb2e145abfb2b8eb9d731283dbb46fe013c0e3441dfbc070354000000000000000000000000000000005ca1ab1e58200000000000000000000000000000000000000000000000000000000000000002446746e71f070354000000000000000000000000000000005ca1ab1e5820bc32590bee71a54e9565024aca9df30307a01cf8f23561baed0ef54b30f5be68582007b7aa19b2ab0ca977cf557ea4cec4c0b84b00a9430cfe0323877011fa03269c020203e752c67cd3ffa4dacba7194a973b10998b056b5b936d17da2a8eae591a8e175e020303abdf2e1db4950317aadeff604ae51ac75e8281b1ea22e7f17349757c71dca21d03fd88dafa9a26e9c8f617b5de3e1a6021092dbff5c1fdb5d8efaeecb1f333565c020303b8a363b96519cb0eed132d807f6e42ea35f115584c443a74b14a19bbeac463d7038247bf369e033fcc19a5797960f1387f04f80cf396babac560060887288632db0203032a89
3ec5bee53177a40777945189164675444d0087d703c8129196df58b4ffd10384203647fe683ea28ce78395875f0bc39f1fe1ce6c9670b8393161514dab47010203039cd9d80315aa2688e25cdcc210d01a64e57404ec24bd81538fcfd3880c7a1485031ced4693c4d71b2e97ea6287a2d22ed1af991abfe52dd764bfcdb56f3084e85e0203039067a4614e2e410a883b1cf0ccfdc298c978614ca1a472330e5d63e1ea9ae095035bfc8cc6e977317e3dea3bdea3406975ae2384e72f6e5e09ebc3ff358e4d9725020303f30c3bcd31fed704d2c67d83ece97bb8fc518746b11b291f9ff5c12ea436f92703800f22b2fc6b77bdb96880866086a8f4d621ef386020c90fe2a678b1bc3a063d02030203035d2afadc42d28ae8d74c7b5f96e56bcaecc01423bc9555ef9d9686271ddd238b033852af41b0f8f922418b3f525cd77f039ce8a0e41034e8a9c51de2daf331a7cc02030203020303dedf2c8185299a3cdcf5805fd538243eafabea31d99f30e0c56119453cbe0595035fd0c51fc95c362deb97f4d34a367c56d9c3bae67f33a75541d47299bf8c85d002030203033a34a2ec21ba01bdffa3e14bdc6234b1177f58fb0f8d20ded1e0d337abc9097303f2a2ca0856cfc4409a556f408436e6112049837ca240449b521ce77ded9bbb4502030203020302030355b79241b57362ed5a29e40e42647066862077123d3363d2776ae9a5235aa625031a0d841893cc3c11eefec6fcff6687e1f2d52c667b72e9896d185cfac2d52f200203020303267a5ba100569955e1248dd2758afbe9cabcc9fb5256aeadf2d9de2bd50fa9d3031c3657155c2172893ad0ceacfd6dbaac96e7450dd3572516816664bbad57307e0203020303bfdb95766031cea080daeba2879e20c2c9212e98699aa1a9ddd0f35b3f4f14d1031cb570e01fa4fd83227e9e7423cedcb4d1f2aa49a4b379bfb5532267cb41d1ed0203020303d26a86e0cde80dcb3dddc5689ee7aff7cc4aa63c69a65db071604f2d22821f5003453e11710c67ffb8aee8ecd4e3d9e482a3c3b6473055a8fda9141761be2a2cfd0203020303eed4e48df11288a42497f24b09a60c194737347e0f0324ace547906015c46763030f3541edd630e75e0ecfad8204446c4b04e707f29a911034b0d990df202058b6020302030357f21f30a7d272dc3b763a0ba582826c2888cd791ea5cfebf8c6eeba97688cff03942b80bd4855b40d077eb25e2677767cd9e3e32548b948133c53d5cfd98fb4120201020303039a912ac6df3a5af5d7cdbebd9c86dfc4d667901d38d17f5e265b4ec92851a3039a13ede5f8fe8fc936a9c053045c21b7cfac59232ed14acebe5a1270684c7ba402030366f89b9e4af2d9333431a7252441386158c0cd4f1416c432bbfeddeaf9a94fd303ea0e7f59ba22f8e1c16d8662786956816da4d6c44b3af63dbaeff9fa26ff58a8020303087927425293ead337b03b12cc3be21e324869c328321da791feace40848626c0320fde6ec582d5275f6c1b21b4ad7130f8e54c52d05483ef9effefa3cae9eaf51020303dd266e9e532095a3ef2479e8543f52ee9386405aadc619a2e962ad2f4ca7940003015c36f881ff87d7cdce55b31157699432c553b1c2be328b4b041688853ec960020303d58b82e1f5dc744c3e99a29dae08c0cacdd92b28e0966a5fb3b143479649353e0381584029a53e6c7f0dee68619e681482b9e36e43858e57bacb3554d7af2a8ad1020303f6ca9ca2515d3662f23cde1e54e67e0817607d0b9f501818a528ca1b43ffcce603bd381317706701d336e83e27c1cd699d0b616b349b0e28de4cd010cfec1a2bad0203020303af2d5e74e0ba57395bd0c11be5508a506eee906defac2ac84fba6ce7b577205703dddb21150e7c057c4df77ad73836cefe1e746adc52dfe903bcb543bea8eba9d502030203036cb57c550ffabdb39fe5531fac6c603b79b2551bfac7e208e7a1b1628607ff9303f46bdcac887fc8561c752bc45e1c98389a6a35cc0572575245a8f2ae513bea3f02030203035dff75d9bd1da1247aa0fc644e204d8fe7a916636d465210ba9a828a93bd8fde03f50e7e2741b63ce73e98ef6a769fa9339d941cf993b7e4b26305f22e9f18bc560203020303ec8a5f20ba3d3385c3ce7cd26702f5e40a4432f72ac566a3df649c1af87741fb036a000d8ceda0fcfe3ba4e6ae633e3abbd3deee0db83107e5ce0e0469b26e7324020302030203036058e9f8cd448caadf126fd3b7d50fbbdd8e2f7a8de9160a484ad79f8829bf5a03be9a1646b44327a504c96d0b2ac009d73adb23ba21ba3df5a5dfff32b74403680203020302030203020303ebee5c234bc2f660a9b3efe1bd2fb7d340182d904429b1f2a4e89bb51b1c47c903e51438724a9cf3725c22e07d59ba15acf0bbf473b37744164f122ac475ec42d20203020303bf9c131a0283cc46ca74d21b68d0b3a62d131dc9f4787ab60772569aaba63fd
703f011de292bb236c3b08513f7b82ab7d311d0f80d4d3e173c2f8445028ed1cbf8020302030203020302030203020302030392af697495712b4013883b3f5ad2d370bdb93f0ed60416692b0267f10d9a3caa0386fa8ccd91ab622b232223f9a347f1785ca9c4b7323a2e0d19a7971c3afd63ff0203020303b4f12607fb8df583b854d7b235f4a64ccb2f4bc9819dc50f3a03ed0d4906910e038f64a125d14bb92752d65593faae8e41bb5e80e4f147b20f0c247078f6e7ca77070354000000000000000000000000000000005ca1ab1e58202d11035f2912c26c30c4f8957d3910a20622ea8709c8cd3e0ad87fa0f4460bbb5820c0bf0b2ab68768eaabe5fda7814227beaeaf4c4ee7e67f5d07aefaf5f0410ab80203034d5eb602925f13a2147a2c1439d43faa74e2561bb3d27811f02042466fb2804f035d9458bc537a1957fddbf6c5f13c6bfc9349abf1251df9d6dd48b5b574f6f48f020303bbf6401ad2a6b95a3e749f5b31224fc7fcdd083e7aeac9671ec3bebda312fe5c03393a914dd0b171b4cca2f5cef52cb4ed4b564278c0fb678e5e8f3d911b4addb302030356cdb16849ae7aff540b8724f73974149f71cd3f984360537159a273a5bfcc1d03791ad7bed137c9501bcee55dc6c9b030157a30c37fca14d39c25d9d5137ae88b020303e43916580d350f4da396c5763989f003085f6c468cf815846a95571702f1f53903e88243a0e60743a8285f13df8159992bd95c7f9546a8b5ef0ea2166fa211b8f70203039691d481d60a086435d9f914e8e2b5e5a68abfafb82dcc9d6de2176920c35ded03347f67f0fbbc63fa8a3b826c6491f42b13869a2abd2b6326d75d51cb30ea9cf1020303a06d3787a49c8745205aae2c80c6aed35adaa5a8e829f8ce8c29d55ffe8cadef032b843523c93d41eee41def0561be9ad7414c5bd9591d8e3723fcb0aea6170c72020303e56edd97325fff9e9a09d439d966a37ab63cdb3a3328b157445b60c3b91a86aa0381354b5bad8afeb2c183556c5f20e5d25c565cb8a738add05fc71bfb086737a102030301fa96c592fe444b2504b86acb3efb7befb3e241223f2d697c162be93668231d037f5346f59d4e0e4737f7b5cdde5494c43dcf2b583098022afa1d40024d434625020303299100220dba6b0afe91d1fb4a5c16f6cdc90da62bd73bd75b66063366a950f90315d7adf6a555d635edb76f96c7aeed7b5e3990ab1d13e0b01acd386ddeb43e0e0203034a527f4391b236f6ed15aeb5eb8839bca31aceadf3b8b5b7f5208d22f6a01b8903ecb9612fb023bcc161bfacadd2003a53d264c5555c4d65107fa01d984fc66017" + witness2 = 
"01020302030203020302030203034b4c181607792b3c46ea253af79666ab9bbfa3d29e8855be6c4e045b3424f6a503fdb52981685167cdab219ae57b3c5869e539e89eb29845d6406b3229247e982e020302030203020302030203020303dc378377acad40e16af2de6482d7a60c1e5f087d067fc716c2485742ac2e29330339535728bf0c5d72ec789110ff3691dfb9cf434399ad849a86ca6725977d3e4f0203020303481a1fc812bcc98ce37225fff9f28a6d8d0ea5c63aeda93b031e8e4603cc8e7c032952530fef71561f9028c37b944df439c0d2968c4f7e247a2ad12dd4969ffc8302030203031ce6733d3a496a34cb114cad924070b0dfad8ff6891f629ed2ae31326540fe120345057d6cbecce08aeecc475c91403549f4fe82bdb953895bdeded2fae6f8688a020302030203020303c4ac3ac799860160a30a3304b765c2c90bc414edc3739a5d098bb7e18009548a039042e98ef239f418f2bf7ad10868e1fa7d0f644458488adf684313dc3f683a5202030203020303949f805ade2be05694c8011fa17fab3646a43f38f96d868386f0ba9558ba5f960302aabd9fbeceb9711f46d634513830181412c8405aea579f470a19b477d090140203020303db978a462b93b2efa3aa3da09e03370b570db692c6d361d52ae1051bdb26a3a903916d67432c505e1dc33f3617e0743d761aba44785726309191e79cb18b666e7402030203033edca13bcadc1db9305f3b15322cc6d774682fffdfe2b509f81d00b16ce2dcd003dc94780e238944094e7856154e6d3e54fec28293a9a70eaf1cc2a81e874e22170203020302010203070354000000000000000000000000000000005ca1ab1e5820e72de8a1b9696dd30f7886b15c4cc9234d52c6b41b9c33e2baaf8d88fc5b7c9f5820f8fb80310ac041e7a5e79c138d7261cda5d8a988dc9268b5a8dc5318fb610a90070354000000000000000000000000000000005ca1ab1e5820000000000000000000000000000000000000000000000000000000000000000358206e82d18bde430935057c321f6c30812e0eae2122da6af753e25974c92f0d7b50020303c6cbb686c9d7a94f49dfbee076ae1b87f1c9bb5f33b7c98a71816c33b4731a3b037514c2021b2bb805e2a6060b265dd53c069a4587e19cd7d1af99d0e9c3d0e550020303784570292bffbc3ee00153e5806a317459fed4c1d84de0515dcefc177404865003f08c92f6786e67148e8fb2bcd4eb813a665e16a270b475605d1d84b587450ff102030344c5e2a1775873020ab4e5a588e95d6702878cd46012682196dc39738fd8780703a6d5ee17fe3be7e20050e4e66c54b5188febbdd3615f832c35b073078258b214020303a42b38dcef18f890c02cdb90473211c95582727b83af287cbfc8a3f10e29649103380623684a9b3b341e01ee65908a6aac96fdf1444ca255b9dd5193537d58709b020303476c478891a8f8d905ebf9e5c031ba1020ca1436538bf9b97c6eaa1b9512da97038c77314895fccd4edafbfd73b531f4dec6f4671b6acde83926907ab376982f310203036416706411fa678c78f77dbfb609d65f63d6b04a8aae3fae4cad23419f6e738b03b6ec59ff099f23c5a528e805fbd9457736b100ea0e96390eb536046b88da3db102030334468b79fd36c8bc812c6613d176983aa4be53642e7e56421faa4ef25031fc73032869ca46586018725007aac483055d85131fcc4432c9a72175a8c6263b65c1ed020303676f9f98ef2cdc44ec8d98d0153be2aeb90b08386286887c94567950df1216440385bdebccb7559d68f55e26ba0980bcf7120609c7bb43cfc1f701e92f670ac1280203031117969a5ad58cb9a441ddd498cf3bebc13ab5aea1ceb29ddb1a226c5343c6e703425597c542fab13f686a7053f6c1e2635a729f8d9da4c01d763ffe9965ddd63402030345f2b9e446c9e743f6899409a4567a9b7f8770f711d39e39773d8173c4ea3a0c03cbc17bc3c54426fc8cf2b13b1ddb800509579856ce251beae01d924a92a8edb8020302030203030560b956a67a313d6d8939eed4cd80cc385eb49f7b6dd269ccde33a145f1216e037b3e0569695b777df45db97a41b025b57c680ad61231b61225fc7825824c4c0502030203033f1ce9dde58980c5bc34a88467f3b8cfd334dab19f28050acc53f33aab0b366f036092ba2243e1d8e20c2aa4ba0aee9ca063e8e8e6da493269065c227232020a590203020303921f9061d1b4082d20b7f9df21566609ca6dc64cd0ffac2625e9ff3090ac73570371757934d2c7d4dfe9b7b1e5c71fe18b66cf56c540c3d04310873976f79ef9f602030203020303e8e045e00ad70f879f31e498fe49aa2fd4c81849b6c55dd98391681351aac7df036e509788bd99ed4034d5fa3901bbda4cb6f94d709b2d54eca545336569791f36020302030310125b6177d5086fcdca8e0607e49e6cb21bebc95404329c9769a7d3ed59e2c403
4e9acfa4214d459d1c81a304d862b2dbd4d832e71ab851656bfcc0e9c5b3e6f60203020303ad656bdacec77a2e7e591bddde7b2c7ab9b928945ee65898ff35900e25f0c21f03e47d9766945c4649bd44422f5fa779e92267d76ce44f396ef0b672215e43ce7802030203020303d3858acf0781afe0adae49a25d1724b36c9989179cc884b9a9c6481f89e57706031da6fb50879ede58d816d1624292f0c60a8133bcbc671dd92d0f9cb8d50fc803020302030317221db9e049aebabc83cefc3ebe7040ec1e82022d104d2c78f796753f76f0120352124f3ffee53f7e0f9a0068d5c0b0abfca5aaa148371c91e2e81df5fba6f8bf0203020303e00ce2232f3e208dcf050f887d02c7b91170c4b98e1d098ec5238bb3d387a41e038a0379dab30ce84865bb4f0834a9c3fd7bb2da17994abf03e89fd8d754bf7aab0203020302030333f5853903cb36caedffa12d0aa0a01c39e91309629a97dddafa8da4f738fb3e038e1dc6012aecd5998053b5878c6e3a398c8a286c7ad4dc0b55f043e4b4210950020302030317e041d91830fe8051fc73631cfd56519fc5bb88b5369298da9807b40d93733703dc7745bf8dfa058bcf1328f79efc9441cf5ad5fb5763c75bdebf9492f88a6c8302030203020303bf9a2780e63ac26ffca6df07f182db72e3e65802b277ea6d40057c383d5a37e3036c1548eb1c956ece0a876fff4463cc3f2e9c3b6eef88379f07e6d71013eb4aad020302030202020103c2eebac9e260dad1f051fa75846558dcc0ed120d1a7699fd1d6b739a3419015b020103b9d64db96b69b035a74a90069691afa2b34a705f3ad3aa82f3757c7a6f2a9f17070354000000000000000000000000000000005ca1ab1e58207af492bfc857210d76ff8398a35a942af892e866b1f4c241746b3ee89ca002595820f889596e5b3d4dbe5bcab5cde2af26d3ad8d88bc086e8b4f929885f33f6eec77020303cd3edcf79c7e0e0d6b87ae523729611faeda92e77bbb59f739e9a6127d890cdf03009a5c01c3cb27d2f1ffbd3fd77ff38f66991f64174f5df1411ea21ae2e22f250203035334694d8b56a2f5e0d0ded81283e7f36b3da6fbf2e2d1b469c008d7414296be03953388b0cacc587c5ca452ba8e96a9071958ef94439690bc14866f556a35ebc1020303ac2aae05bc7a68d238e9a9bbd2d5d07a001f8f3651bb25f5a6d6dcbb155569090335b6f55bf3d56419cbc3a45d4fa6bed330d9e0391f8806c97a7aa4149d06725b0203033dfe4a2f0555ff318ad12e49515e712f339134af0237edaef08553d9d67e260b039cd50a46feb34ab47c24391a2579e601956897ad6299bd14a4c8d9628a37d46e02030348f01fedf98979a5fb3df07daded956331fa6a02f697dfe29dd26e71111de5540387829f9a96ed82303e86550747e311d5dbfe94cc71113600595360abb512cb7b02030203020302030203020303eac48d9dbf7d162797293e0acd54382d4fd53e80e29c9c43c51dafb05c0880060306b13c75c66a6e267236b6579bcca576ff889e323ac6ffd0ee317e07623a3866020302030203020302030352e23af8570aeca858a6aa5bd20d2c63a92eb08529a9e6e5fb245aa72c5b72ce0334d7cfef6cb28d62f63cf907e3273d76a8bb858423c6ef446b056fb4f06210e002030203020302030315bf4cd3a7f33296bb4778e216bd18adacf25c97f8f4df9e1052dcba7b6edf2203b3637d0b1cf58c5272f28f8354a764d3cd48ff7c04f807237da8f4a1e2ef5db5020302030203020302030203020303c018dfe606efd5d17f3d45e91d33d3d7ef57d92a1d509291b1556bbb7e78dd0803b08ff5bb304aa8741af608415c541440edcd98bbc0fc849fe2a89c2c783341d502030203031e0eb0ac5664b1d0267d3c51dd66d1828c0d01a0903d599a317e4578926d4e3503330e2ccc546a3db52e796943aa8960d6d483a3b941ae0caa21cc7b9f7a3c2bbc070354a40d5f56745a118d0906a34e69aec8c0db1cb8fa582000000000000000000000000000000000000000000000000000000000000000015820421c2cc0dce9b0fbdb85cbe43bd6c2a1af5a6f5da756cdb8b6f2bb948e3a90da020303ac874a6acbf6de628134cd74ad9f336206e7aadb1ef09456b6267a770612485703ef323528b761720ce04927a54b81025a935f070420d655217e40eb2e084bd170020303a48199a63a429cf44fb39fdbb73098a49dc171e03d32800f483780adb9aa06580388796e8ab2076fc77f00a5096317ceff8a54da310b014a0310504bcd76f8b8da020303f8698a6f24140e0e37f49032fb2da6db2c8bcaea8961a6e1976baded0d9a8bd80371b277886f0d14b6f82cfd063ecddab10fb5da5e0666e040992469d09a6bc8b0020303dee2b54587eeb7db2fe9ef0dc262b6ae679a5bfff89c8f403d181a1d79107b1d032aff27f522ef5fd88213c3865e01c7b4c1720d56778d1bd0e48e6a86fb3b0797020
3037d0d29240ad72800831a91d8e54019da745c6c6a630a625167723ace857bbb810305edd49a8cfb1eea157734968e95e8b8620c474c3cfc6f3285d3dad36893114302030349b1bd34664838889a2133d716143cb8707a15745738917bfbbeecbe871e6e90035ba74ef0008ce80ac6d199cc4d217aaa9b8a5fd58f2d329aba4e061c16d99b620203030d600bfcd6581d405aaed26aa7cee976fbb2bb9c1c1390bd3eb14cf5f661022303acf3369b84f876dc556ed93718d616864020b1969d24170f4970ddbd944e1bd9020303d5bb847f016f33d8cac460756aad70173d8d6e37c37d1b69e1b1b45c52e5996103c3777105bd00820b49c89486e7589f0ccd0244ab6fd4b1409ba86dece7506f9102030356b2929dbde358b52b652bc842c7a42aea162f0d79bd7d653b5cfee34e9f0e6c03656a686adb3bff7a9d8841d3e296b0dc61c389b399677222ebbd70cf0c19e70a020303e5bf4a0779ccfa6d42a01e532bb6120b168699bfd3f4f44a62780481d5f86588036efb82ef530fb604bdff43bf1ad1a7dde41522bf8a7f5e724dd3074562b0c0ef020303036a50ac7a6e425842820d2a4e07a80f416706903e9d88b5824559515a901aa80303e3c58c1dfb4f5a5a5d1180dd010ceb33a42a0ff7cab200ced5562202261aa0020302030385d60697f5b4482fcbecfaf5f53111681c9b48ed7bbd2cdb1a257bb7f26db9d103ae21f016eadf6448b913ba498fe3d678b8bcdf9569b026053de69bd24563ef0202030203032fd50f1a5b8eddbd5ccb90e37d9c190092927af9b26a1cf8b4576d7982476fb603436882f441f09768b000722da7ec7c74b6f0252c24e16b9e6461ce4f4eeb791d02030203034eb00b994d3a8d439f47981f68baf7fb0f0e88e2167243c6b005de3c48b5c3ec03ac5626fd3f4030d088d0f41834de11510b59739353238241138d70bd8e05c22e02030203030a51165872abbe7260a6777cbbd2f6d81dfcd07c1b7c0783659bf8e8ca8e77b9032f78c81c54fd31d1a25214fa464424ae6e6399f15c1bd8987825f1f0d0dfccde020302030203020303215472473dede3eebcfdd93b1ee898e4d6cf33261a1fba12ff77dff2fb8a0f27037938ac733af661730414e88da9633c04a8914c9ae4263a4f8cea7066e6cefb840203020302030203034d6713bc006056011f31ac6f935e71e33ab8045353e9e138ec9743e8574a8d2f03fcaee2f22e1561702d029c465b755ff5491e4114264dfdf16fe9efd34864a83802030203037cc278f9b41fd17fb5eb3c839a725fcd1ef6000189fcebcb4214303f45dcd2d60386c3bc64da300f1a87efa2eb2724553e41348057fc99d5c23b5b20e216fde46d020302030376bf2ddfddca9910df80bb0785add76937d1e90e029c02b04c0cf421622a232803ba2219bc37e93a89b0effdfc3601f58645c1cb7e818f2254c8fd16fea4ba84440203020303fdf1c4799edef5fe2960f9148627fff521e591247a224eb8d05eae3f51675b560372adafa8e298a14d0da0a71e645a12a23def78db8e81f7a68ef92aac7d5700b40203020303f5a794d38718b283b47993f3bbcd67c76f84c47fcf2d35373fcb7f8a0f43a06b03a14b12d1f03790ac75797e463a8a7edcfb2bc80b65a7dc8d1b15d00cefb315d5020302010312f8462573dc83d436d2498e68019babdcc911f482e1e00c1b3fda70e1a166e40203020303adcfa2154e38e2cdbafd5d56bdaa5dca90a5bfb9c36bfbe140bb31ec0e66716503b5aaf1a6fa2e80ad8f4e49c51808d2898fd74f539ec5de974b57c27466f5e7490203070354000000000000000000000000000000005ca1ab1e58205956a0b12f607189a063054545ab26ce76ea5eb4c9bc1e8d8161646c93ac66515820da6aba51eaf87e14a7585e52e23cc0b789c61b3e808d2aef704ae932bb2ab49d070354ee5a4826068c5326a7f06fd6c7cbf816f096846c5820701c251f0448beefca8b47dce2e42f136d224b8e89e4900d24681e46a70e7448510237426c03237429400000000067445fb8020303dd24d6adc0d7b321eb19905b22f1780707b0d7e30026716c3b0d7ea311cbfeab03e498dca26358e5fd56e5464288da82073a17cbbd112e322488c12bff1661b49b020303413600069144f3379227184b3d365f22778695ad2b812ffe56bdec80df882877033b1e22049401430c9208943101b5ef3e70d99c9853e08591c4729e0f31f4bf56020303ffce2337b88b26e7b0582d1679484fa995b68c0418d72f650531db342e25f12e03493c1bb3f993e9aa63e2736b9e0826f1309ed298bd95bfc169f89b6a62cbed420203031bacbf380b1eafbec9c534577f8972d28087bc6e94bc276ec91e66a11396f07903bb137addf6042ee1a1eb0170ac09f0a092b2f7682f718d5986152d56d192b347020303b89984a9ec10a5bc5835efef55fbf26f3477d21372a55ae4abd26c55ee5e323d035ab47c29775484efde5ad8cf
b1a399e9008bcb66f6cd77f28c255980633aeb5d0203037902d8528b89dce0e6a41ff89888121f42520936f3684bdc8481094f1e046b4f03cedf898a501b7bc036d92797f971bf9caa1028994a8d6b15ceb79e4ca532e7cc02030203020303a366f69c8b19d47be34a2a6333298d705692f65daf3fba95d6f48b9676b6cd3b0351f190ff80b28f339034b6be161060cbe4837cf22e0c01b3d5a77b8f349c4f1d02030203038d8eae2b45a21838dbf9f517dae99ff0bac7a25d4756a7a3315c43cfa7dbfb9803785e2e17b8cdb9628ca4c2f963eb5722918462cf75f91dd6fd00ae84d17ba2a90203020302030312a3a949b95a27ae6f73e9d879bc9c9c6eb6757f1c20ee76d1f52e1d4c9ec4eb03d38f8911a661255b0ebcabbadd44e38903841386863c97499f3e57a06bc5c3e702030203020303763e3e4c8cc4a4b30afaaae229ff20ac282d74c923a88be140293d62b2d812bb03b4b4e3386c676de1012a2bdced3714094e57803a98920b0eefe63e186abdd4d902030203032ee550fc2b119e46e3338c971e6f44ea838020e442fce0c4a34b359306a00379038c72343f5e2ac968c7f1edfd71f18128db6b52aa476fbec372eaa58a2acf45220203020303221a1371f01a251478f2a6673db891a3c412d954dc9e741ea2bfd249abf428bf0325059126652b0c2c46d78a02eba6c4df473b674ed378b17827c634bd119f5422020302030203020303313abcaaf43f5d42589a57c6fc0bec04526b43a3dc139415af1de50f8846c004037ee72e1eb97ffd7dfe0c7d40b575103edd3e62c030b86362c41630c6e97bf6bf020302030203020303508d990b34daf5a3f925c435daac3d293f6d861094cc2d343a92c62428fa66da032f8b40a9211667e9c44328d6440091ecb3a46bc15832f7d7cdfa8ec130b527fc0203020303f993f7eae6e45a6f8557c6c5d0e912cb41b71d2bf37f38affc0b2d8e054193220315eeb3ab754628ce727cd0b7028ff8ed3291de7566b99066e127185d043f595702030203032f2c132f32f21e267ab64271e8f2c0c39fedbcc509c4589616cffec21d7332eb03839857347599c19c43a0acfe53e1bb5bbe0d68ddb49cee05f1b24c5acac24a150203020303dcd0869ad1107856680f6bf164623fc709d26d1a0842bb9c60a383f255c0ec2403c92cb1692742c4e2b6a91d13c3b371a9dccd29f898d8f6457ad052b1da9efcf6020302030203031408a1feb1c4cefd2e71c1d7ce58e6b4c2d139d48c67037d40dc0d60390af539039c51675ab13cc260ab6875b12824ed60903c1755add14024e27508ac0a3b9d81020102030376fdbe16ba7e2f048d9c311cb1f99291b4f624717ddd7e9f2aa653099d19314f032ebe85ea3fef7c7033338d1ed98e187eddf75dff4772a23e19392ce61690f77f020303f901f2ba5a7a95db9ea7106268f17f341206944377d1f006921211069cf8a0a103f43daf24401f9ed2d0691570a8ccdcd016c90b722786ff590276f7cd5933ff3d02030378ab72606d2d32782ceccc9c11af9496f599dec259281c01f0c18a3b875518ed0355b4984426bd4db31ca5d70798a18280a4d319786bd897a29365d2db7489b32d02030335706adc0febe81255c960be521ae4c7a6201b2db502fb7016a5d4d9ba36c58803ef3e9f16053b7f799f207451eb3403eb95301e9c9e721dfde0c41ebd8362485c0203033b59831b753c1ca3ed58d3293aab0099027f87ff97f3f7e92d9dfb095839497a03821fc506f41f2a0bcce20367ebd6ae4b461e110e1788d190416c8345ec72c364020303cf6d91b6b57705a8f02a367997e807f49dba00a5bb3e8d0de25eacad5486b88f03abc64c2300b90b30ae3b11fb71095675d1a62860a6471a1a2defcf624b8bb4d4020303be890b95c3a4c5c381f1a00d6d98da4cd8467e002746a8c52f2564e41319d3780394b620da3f2c277f0d4a70c7a54a7245503ed2e808bf722cce0b503e242ae7d10203039f6bac7e82bf632c8b003eed17f050a49d2ea83b6a93e09295b3b3c51c55ada6038d01937127f83a85e3e655363f467385226f7e406409528791f6e2375184ef5e02030203020303e2ba22bcf2fd6923a2ffd1ae073bcffad33e81f4a7cb9cab82e130c63a213b6e031dd2e6a82a0638b027a1f15eac2bceca26ef1519de70dc99bd5275791bab4bb0020302030203031d0be4b4d178c76d39a7689aaa3a9866e63b999a2d11dbec2f04787c714dabbe03e5880788e24aeb6314512538d4cf7382b37132d4d2870122f47de8ac0d09eb020203020303b9af076d8b0e683e730de94273fbcdb5d2ac9f29273a9ffb38875892722f439903e22b2cbffaa7b1ed370a3d8b87199e1f1485703145dd3de0945cede9629702600203020303a019468f5d28919dfcc2d7bfd844492f2ab1df6400a17627b31c29ea02d583f5038dd13cd4ecf8c4151cebaf6e2637913a2310a81d4ecbd5f5fd2f4a4c315558ac0203020
303167bb488d1aff473f1027bdeadb8e0e7a439f6a589c78caae1a3d045e78da60303ddda65ddb3f7e0fe430faaeb49419075391fd2559659f2ba88d3655454e079e802030203020302030203020302030203037a46bc17ebfbc47f6d99661de00074c9958e0f7fd66df7c77c236b89b165472e034b58bfe7c7506d2891367c270ca350269dfc0a08b7466ec2496c6330dd602bb302030203039b58b0df7fae59a4cef25184d849214bc145cda115b2c0dfd85fd470ecdea70f0330923d4d299efbc138d4442519d30cd33a7827557231388b81a6ea9c65eabe6f0203020303af3fee608c2e8e5a30ffc6345d86ec1b2d55f10e518b4da5e8eb59abde07b59803c2016682405d3a953eba254601d5fc0b8966a33efaa51918a4a41b8e0acbeb4602030203034b1387aa6d0ab944e2ec65ce38c8643a0ddfca5c3059718f398dee501291569603528cbab25216c4397a402fcb572f0b512a773dfeafa59e401989a4da13406bfe02030203070354000000000000000000000000000000005ca1ab1e582054b6c4d9862a1658dedebe99a0f61d94c5d1515fd031d0dfe9ebce6a1454f5c658203f14693500ccd0260659fd9eaf69570edc0504867134ac88f871d91d388b63690203070354914e7547b9051ea6226c30495190a2efa15930c95820ffffffffffffffffffffffffffffffffffffffffffffffffffffffff74873927548382be7cc5c2cd8b14f44108444ced6745c5fecb02030311ab6695ec969171698c1f56d4c05373d8505a2c7299fb05cda1d4351e22bfa403478b94ae515fbd01728835b532c7c45ccc78d200d3d004da6917337e139eb729020303ffd14369e7c7f7aec3a890a20234885f2c9fb9802ec318d8434ebcd58a696153030ddae742090ea458c3f232dc894bd8cd3378b4b4590a0523e09a44e0439fe0db020303b1688d8c7806365d931579ccac8dbf7a8d7705ac393159dfd9c0395ab7b5ca5b036a6c978a565b15267de4330de7b6166014082043c5cc80370953767ac501ccf2020303f181e47adf88e965d55e1153d76b731c261ad7d7720823919fc11d98bc144d2a03c480f344ef22a4532900fb9d7cb9d8b5ce1e4f11a231e682142f9ffe1962807d0203032db40fdeb2c5256d5a237b6134f844646b325bfc12c687916327e21a65b1ae6a03c73ddb4116e07a00066b925a207dda51fbbfadce21a7459c6c2ae7f598721089020303e81fa28f73bf124de71b54c67334292e397e000428de699c89947d793cacb9da03173e567d72ac2c265860d9103e791fdfe3cad72a9a1dae15d9bec6687eb506d702030305a683365eb32bb92967becff0dba79d1c23ff75b2fc3d40f9a1573b993747b703b8b1075b12927a8f483dc7b802c96483206f98c640e49e22d4b426f9a9eb750f0203031276db0802c8235f9f248bbafaa6cbabb75baead95ede989894ea6d8585c3c8703527ea0179a8814d423775e1f381cc8eee0797216d71c79729ab186714e4daf3702030330b1e1f7a1f7dcbf5cd00932de20748e546bc1a8da9381fa3d5066f3c02b61da033f7308aca0fa70a938e45539d5dcd1864bc233ef232c6d38fa1dd331e536a400020303ad8fe61eca50a88286f382461ecaa93dc71e9aed12e91a2e9930325e5ffd1d7903fd046a02679f734a91031aacb4194ada537220167cfa68306b651433026e6478020302030203020303b7e72973952f51f913dc6818649ddb3c5619982f21e56347003ebe3b3788eadb0384757ebf158021f4bfc0d9a1bf844d13747328fd367727cb0a2d9b7c91926c400203020303593dd6ef2d4c6f8ab3253bec454072a6cf779b5acd194d43cf4d30191d4b24fe03d80a7ee4528b16cb482fd73c259b2e6e4fde5d5d31be6b97703fbbb17c3e61d20203020303992d90fe15b918f58e8dac35e96d0ebf33834ccacc8a69b6a075b263d0df655e0301b8df4b987fcf3a98000ca00d3191fd2292dc9210d7f1ab382035b2e2d02be9020302030328797f5226ad9a63c859dc61073e8ef33fe15094e61db64bcde0379f055f733403b50fe3e685c2e442a3a81715f64a840afaf1f81b49ed21b3fc2ead0620f6caae020302030203020303189a1bc58c5621e4845025a9c534fb9ad2bb2f5be276faee403d59266561d652038325fb098a4b3a402690994212511e710d20cb7966fb26b3687fea719eca217a0203020303ca11813aa459d051b0411eeddd18070506e8fe2054a2e22a763b05454e87cefd03b2cb46d28f3bcf15305b0654ca442442420ccc1b28e44e2e2c84498571b5375a02030203039385ca432e99a05cca8aa7fe5868222cdb6c928c8bbdd7eb13c22c5abe1b11cd03e8cb7cbe434eae4b8b7910183b3b006a1b3df70ae7b30248fef24d64a004c3c90203020302030203035fb731b403c8979aa552e74b3534a247c638547dc7c957467a4b08855b29b74703d49a5d90635d403354f849daf9976a4f4dfd7dab5517b254
638eb893511ebcaa02030203032fddd404fe9317d561378c78f3afbe75e18c27face10d4e6ea03fc2888b22e33033c8c390d481f51cf6b43c22677a971beae0e62e8b2ecfdaaed05b48ac0f60294020302030203020302030203070054000000000000000000000000000000005ca1ab1e45e8d4a5100002010341305ecddd1b56329ac9f09a1235eec6ce6be69492a9788db13e9187dc21e9dc020303fb1c6d1aa6d3f3bef7a0bf4130218b3b168f9447e69ebcd3b68c2b2f41d9b2ef03652ba6f9b69aee3d28404079416c2f8fba4078d66b558c7a8d9615cfe7d3bd30020303d9f042d0d2f152e24d8cde02d3a7d7a1fa234efc5dc259572d412a2e607215ba03c5b76ff595e1d74a22eb44a5aed94f3225b6126c2c28ef04bb75e1d3804925ad02030314a2b125da4db5ba673cd5c0aaae8c5bf0857fd45728b868cff3f40eaf9f82790393e93c4f4b58f6f9d397d136319a29aa6b691b652651513bfc2297107379ce62020303f00359907dd68b2ae8e2d252d3313f3ba2bba16d21995333b2162b24c9bbeac4036435af585f0f75e60d362629108f6768756f7b39f1c70ab7f79e6b4e1bd9f08f020303929e2f8eb833089a3773b497247338865ef336de61e7da4a362eb3e5d5601a7203323197b010e3205d910c230463758f39cd6c01258db0a11b9b47f4c278db0494020303ab4bdc2dbea0c00b12cedf9e968135b62101bc1e20e270a1f694ae6a4686627c03140686262c769436fdaece3afe58e8a4423cbf381295a85237e52fac66c57879020303295e1973d07a067f281e3337e756bacf10dcc295f7074564874ea4401eb2a4e503cfec4348d3a697dd4f1835bc31c2615f56f92a02c1935cceec2501c12b8628f10203033892c29a2de6aee7888c2448fdbb3252d32b426bf74edf79223e4ee886fc0f6b03ef287d8ccaa574ebdac646e6d35bfb3ce52b00eda1e671d7d7bbf31bd59ff7ee020303c58f22b2dc782f914b31e3b87185b727a0bd2e2dcc41481e31ab1b26f222fdf703f0dcf8a2ce85de4d96bdc4c1a9c52a7ec54cc771750f0ed7d6c1113b93df65ce02030203039a7c26055306c8884baf96dccb2e3bb3cb30deceafdc73491bbdf0333400efc0036ee70bfe41de62ab49a9a63ca415bb881a92980f87fc044f2f5ae2e84185dfea0203020303c4332d86dee9e03fbda2dc0eb81cb20a6f6a20c7df95090f09e47d8e7efa1d7b03a698f30a106768bc9d451fd96a6808beb2b799deec6423688d02a9ba34b4af280203020302030203020303398dee7348cac5f07e4865c2049207722ec9572e2ae69b21a8cbd1c053c44a0e03612d7861c014aed261de20fd1109fc86ae090eb2c37a02b8a6072bba1c77c8b50203020302030203020302030203031f28ae8c421086878704ec730445ebf2ff23d186ffed24802f0ae24259c8d21403a8e38716cdd8a09095a7036c686009bd8236b1c7eb9507540fb981baa9a8bc4b020302030203030fe638892efa1dbdc2881def87e77dbbba95d91c8debdd9b242bbf0745455a7403e5554fbb47341d48f82f64a26d175a7d3559378657e77cf2de2eff917b95be300203020303512c2cf0ab4340a1623bdddc301aa586932f9413ea9bf8c0f1849d2d70d5d0ff0375d0cc499c7f76c70939fd8d633c658747eebf0eb138c15d902c34f0de9098030203020303b78dcbd59a3668396357cbda038d7e5bc77aac4acdb3cd3a75e96eb05079a6bf03ceb3ed2850bca5df0bd69ce2e85e9daff43cdb4c79f58685340f521521a0943f0203020302030201034b33ab5a3b8d3b01c374c1d7fcfc714398b7f0704ba2f1ee757670269fd5a7f7020302020203070354000000000000000000000000000000005ca1ab1e5820000000000000000000000000000000000000000000000000000000000000000043840c77070354000000000000000000000000000000005ca1ab1e582010c18923d58801103b7e76ccd81e81a281713d174575a74b2ef0341f6b9a42fd5820b8b76bb549992d9bfc44e3b36d087a175b2e78b9584fc752eaa3013e0bdd31e8070354000000000000000000000000000000005ca1ab1e58209bd14ac8c1cf553e0ad3a2c109b9871eb74f3c116bf0bf492ef04d2983722555582090629dad0a40430445b7d2b25a8d19c5d7a929608ed7890877a499aaca01ca5002030315edee06840e36ef17d13817ab5475d12f7bd50113984febf31e2cd80c08952c03d360a7d78676862429feb7c95d052c1e63379b8ad3becf085a21baa353ab93d30203037a2fb952c2cf8e85d9706bcbcb5a69b83b13403b58f06b0767f4204acc5917930310de142eb3b2790cf1e3694b72eecc7e8ab3860f543c15cc24274ff69570f009020303879875563fe8a079ef71e84b6840b187c681499095de9d02d8b101c9dfcd111e0395e9fc3b000e49b65678f256d247786f72c91494c960d117b7668045c35502720203033d99bf4088229
f273fa4910aad8f0aae6f7de8fd1832ddd14c8fa3d083aac51603b40316099ecb013c6dcc6ac2a3e831521afa35ea0ee52485c2e8cd40bd81fd870203030b576686ff79ae37ff5ae2d4240131369472a24104dcabaaf3c348da66a638bf03dddfa8283748687718b9672b0a69a6b7758ce10eff383d83986c1a2aca2910e002030313ac1b1da5a5a4232e4e2b766aaba01f45f9444b926476f01882e30d4cc6ae1a0323f57d2012e1874436ddc007ea8bc6dcbeae6e0dac6fd044c8375d2fe593904502030381ee4d8ef714022c3c5fad435af845d213cb988ef7561ddf65929553b70dd69a03178f9fbf18b1d12feb522330c82fe96d15bc4964e1c1053093c4903149652e6b02030323cdd6298c89fc39f87595dedfd8bae3ae7a40b66f312333394482169297dc8d033517a6ff26c035b9f822da8d2abd642c858696e0d970b1026cb524cb0844195a02030391dd21f4c52970493d439537192b245ccd2e4e3e8e16d90dc74e417718b12f9103d56b5ff3ad5ab9205b2d6f9c508e744643224a7ebca8c1a4aea71f01e48b186b02030304375ae3a357e874c2a10fe3596adee75d0ccb96e63838d8db70c9e402663e9903bd8d2e9ed97a66281cbb0733a92dcc92158740088acc7a9c834d8204c0acc1da0203033c9cd711a378c8153572663cfc686ea0324eaabf0feca614928eab900755299f030bdcf7033e475ad4a147377e1bb9ed8619b0c88b728f7935ecbe7bcd2fa82c7c02030203020302030344d578674b6be511832af115d2669571dda0918b6cc734aa4acda37260458f3303fa95439e418988542df6cc8a12cd2e59ddd44643f035364c14b146d8665ab538020302030203034f9b9d5ccea861bd0aa15a5fb7fecc1a6d49b66bc7eb1e8905701e3e5728957003a6bfd6ce49840bddcf6502294487dcf1e2b5d6b06100a0b1259dbe8c8bd8e44f0203020303dc08ac42d157ac7d835fabb64048b54993bf6636eff62863d99d2c8905f1e6050362a972a91cfac6bfbaf2c40c724f947a405ce6e647aac0a61ea8f467a49b41cc020302030203020303a4be360a2b33a98faf83a47c1611d2114b590f1a626d48f700e1b0d8361f65f6030e4a6c2e589051b01393778e2bd41764d047b0394238c514e3cff1bcd9f17fde0203020303a19150f49f5fa1e3a3e09c7b8e3a80ad9f901086b2acacc8a5712c945ab79d3903374e7d15b75adda866c38fbbe1cb5bcad247ad095de306706d40855b922df14f020302030203020302030354772bf7e2a00683456b752e674df624b9b8419fd126964d66a82f8ba678977a03dd8f48954ed2bb272c5b94a49d1ef09d545062536065580bbd306776bc135f8e02030203032108ea8ac4227399387099ff7faacb8c1e424f5543edb67d7d8ed0f04a4e0dfb0392659304959ceea896f45666a76214b0f96c0d0ac9ddb78a96f9a0271e7b579a02030203020303870c4f9820964a725c45a91364107661534dff05c30e966b1946f2157844ec0603bf64c46a8bfb74f75acb660d0a43078c21cdab2627c014fd463a56ad85cb7e6a020302030203034b81bf62e5171445bc7bb3e154c4236543feb39907364512e7f8bf3010d0bcd103c1e217970454c195c8cefeedb6eb556772703cdfcbb9473b1251407e3af45d4d0203020203a8dd420db1a92952522be68028b8762b9c2c45f11efe01d4e2b2a17a8aeca76202020203037c03317c701ee7c858e7c429134f07bc4f3bb40047681a2995924386b065a44003eeb2124d66ad9fe030707b71b337ead87239fbfec018f78a36cf83ffe6c1f3090203034479d72706bfadbfc681e4b1e0c17fd702e94ff5cce085697fa4915b9ddf8e5503978f813e60f47989d365c08ad74b7b5697ac63a4d729225fef5cbbf858dd9e360203031e3fe72c68bad17795f3ab1c89427a9db9297c750e25a03f4d5cc7f4300ccf25033477174075c81e1ea46067ae9766ac42b6e37b0122ca757914f2d38d5a5b0fd90203037c82934570e0e51dadfe294202f68ff1baa30ec7f3d972fd309af51bb73233b003c73c4ff799c5d7f7900bab9bed27acfd777778f080034d266e4b3a8cb275180e0203032ec060cb265b14a46177f0b9263af186c22d8fad7466efd3dda1a76839916f720322d842fbac43297665301e5a01f595b5961a7617045e6f90903794e64ae970f3020303bd26ad01b4a6d5fc9578bb889728e38b0cd1929f289dd0733beea3122035d8050305574e7ff67c46b4d58103152ffd94f950e5bf9a67a405de972849bfaa7a335e0203033c9f565b7511511ebda8b766512d87d572c4958008f933b4e55604a5f3c36e82036a24bb5153ae46e102a28f022b5305705a84d70a4d2d5b399a09ae90bec4c86d020303ca003945b6df159b5c900df34d54d18e81551ef946e6ec76aa5105912bd41228031937941108c7513a3bcf7e078b1b35a9816cf095dc7413079922c0eef235cd950203032c581d00b2b34c
68be72f5453d8d67f30797a26d4b0df66f004fc0075cc8eb1003e71d380a7d686d28aca8fa3508c37b30fb5e30bcd348e19dfa6b547f2fda4fb602030203020303ac0a0e8df4bc9026b7b241c34d72dce10c8424eacea17d1670103c8ded2446be03f7a62663d338b5b7e9219d01266b1772ca3720daf925bd302b7dafcf8abebcba0203020302030366c76d1cd3e3136e15f9f29f3dcc4bded2c760f251e06403ea022bf5d67ae2d503c574a334d06a611fa0340a1213e317efb125ace4eda7a487ea00075e9a6b67a902030203020303ed1e8a1502eb462bb7f836f6d72486908e1b2cce7cb00e387cc1aadc827874d103bdbfb8f970bcc72256ac4e1eca0809217c625c6412289a6dc5dff7c91454436602030203020302030203020303e8bb2dae757d043417195292bac773cda990500845f91a94d00179fe89525c3e03e385244b15001ddd43b0180922bbccf4040a86bd116ade66fc3aced03dffecff02030203037d8dcb012bdde19a0dd178c1de91d21cc866a76b9b6315554fec4bc4f5daa79203bc46b61f50585799762df053271c52844c6fe83156fde628c8bc4369c4fed18202030203020303c998c4c602a03cfa0c92a1542165567f11d23f2ae5fb91d04e02292f8e297548039447848097d9500f21ebe819789f98461e01aff7cfcd442c8afe8e07b87a95690203020303f6441de6ba2bc5cc9ead4300519a94a14a80b85f2fb0fa41e2211d7c02af0e6703703a99e4c2133e89a6e892863f14f143cf5f2ad94bd527081c8be6143f14f3db020302030203031da626649ee092857f195eb0059639309d744972389a4e748c471f16b0fc3c2e03f4072cd036d5bbb777ad24fa0b1a235806ef25737404d7ce4f83babb76bf090802030203020303c7f6a9615846a2d627db4c940098479bce3c61c8fc1170d0b7f8c9ac2bec5ea4033664667108158e9377b49cf3632952090b6eab3ba6eaed4f48ca9d5beb273fd002010203070354000000000000000000000000000000005ca1ab1e5820eff9a5f21a1dc5ce907981aedce8e1f0d94116f871970a4c9488b2a6813ffd41582021bb230cc7b5a15416d28b65e85327b729b384a46e7d1208f17d1d74e498f445020102030203070354000000000000000000000000000000005ca1ab1e5820cfe58c94626d82e54c34444223348c504ae148f1e79868731da9b44fc91ddfd4582040fc722808ecb16a4f1cb2e145abfb2b8eb9d731283dbb46fe013c0e3441dfbc070354000000000000000000000000000000005ca1ab1e58200000000000000000000000000000000000000000000000000000000000000002446746e71f070354000000000000000000000000000000005ca1ab1e5820bc32590bee71a54e9565024aca9df30307a01cf8f23561baed0ef54b30f5be68582007b7aa19b2ab0ca977cf557ea4cec4c0b84b00a9430cfe0323877011fa03269c020203e752c67cd3ffa4dacba7194a973b10998b056b5b936d17da2a8eae591a8e175e020303abdf2e1db4950317aadeff604ae51ac75e8281b1ea22e7f17349757c71dca21d03fd88dafa9a26e9c8f617b5de3e1a6021092dbff5c1fdb5d8efaeecb1f333565c020303b8a363b96519cb0eed132d807f6e42ea35f115584c443a74b14a19bbeac463d7038247bf369e033fcc19a5797960f1387f04f80cf396babac560060887288632db0203032a893ec5bee53177a40777945189164675444d0087d703c8129196df58b4ffd10384203647fe683ea28ce78395875f0bc39f1fe1ce6c9670b8393161514dab47010203039cd9d80315aa2688e25cdcc210d01a64e57404ec24bd81538fcfd3880c7a1485031ced4693c4d71b2e97ea6287a2d22ed1af991abfe52dd764bfcdb56f3084e85e0203039067a4614e2e410a883b1cf0ccfdc298c978614ca1a472330e5d63e1ea9ae095035bfc8cc6e977317e3dea3bdea3406975ae2384e72f6e5e09ebc3ff358e4d9725020303f30c3bcd31fed704d2c67d83ece97bb8fc518746b11b291f9ff5c12ea436f92703800f22b2fc6b77bdb96880866086a8f4d621ef386020c90fe2a678b1bc3a063d020303fb752c12ae75e534126c45ee4aaa0e80c44afa5f5ac85f491d47c6c232479cb203f4091664c7e58a48ec6c8343fd713184f2195f17153a9b10439f3aa99461a425020303d58b82e1f5dc744c3e99a29dae08c0cacdd92b28e0966a5fb3b143479649353e0381584029a53e6c7f0dee68619e681482b9e36e43858e57bacb3554d7af2a8ad1020303f6ca9ca2515d3662f23cde1e54e67e0817607d0b9f501818a528ca1b43ffcce603bd381317706701d336e83e27c1cd699d0b616b349b0e28de4cd010cfec1a2bad0203020303af2d5e74e0ba57395bd0c11be5508a506eee906defac2ac84fba6ce7b577205703dddb21150e7c057c4df77ad73836cefe1e746adc52dfe903bcb543bea8eba9d502030203036cb57
c550ffabdb39fe5531fac6c603b79b2551bfac7e208e7a1b1628607ff9303f46bdcac887fc8561c752bc45e1c98389a6a35cc0572575245a8f2ae513bea3f02030203035dff75d9bd1da1247aa0fc644e204d8fe7a916636d465210ba9a828a93bd8fde03f50e7e2741b63ce73e98ef6a769fa9339d941cf993b7e4b26305f22e9f18bc560203020303ec8a5f20ba3d3385c3ce7cd26702f5e40a4432f72ac566a3df649c1af87741fb036a000d8ceda0fcfe3ba4e6ae633e3abbd3deee0db83107e5ce0e0469b26e7324020302030203036058e9f8cd448caadf126fd3b7d50fbbdd8e2f7a8de9160a484ad79f8829bf5a03be9a1646b44327a504c96d0b2ac009d73adb23ba21ba3df5a5dfff32b74403680203020302030203020303ebee5c234bc2f660a9b3efe1bd2fb7d340182d904429b1f2a4e89bb51b1c47c903e51438724a9cf3725c22e07d59ba15acf0bbf473b37744164f122ac475ec42d20203020303bf9c131a0283cc46ca74d21b68d0b3a62d131dc9f4787ab60772569aaba63fd703f011de292bb236c3b08513f7b82ab7d311d0f80d4d3e173c2f8445028ed1cbf8020302030203020302030203020302030392af697495712b4013883b3f5ad2d370bdb93f0ed60416692b0267f10d9a3caa0386fa8ccd91ab622b232223f9a347f1785ca9c4b7323a2e0d19a7971c3afd63ff0203020303b4f12607fb8df583b854d7b235f4a64ccb2f4bc9819dc50f3a03ed0d4906910e038f64a125d14bb92752d65593faae8e41bb5e80e4f147b20f0c247078f6e7ca77070354000000000000000000000000000000005ca1ab1e58202d11035f2912c26c30c4f8957d3910a20622ea8709c8cd3e0ad87fa0f4460bbb5820c0bf0b2ab68768eaabe5fda7814227beaeaf4c4ee7e67f5d07aefaf5f0410ab80203034d5eb602925f13a2147a2c1439d43faa74e2561bb3d27811f02042466fb2804f035d9458bc537a1957fddbf6c5f13c6bfc9349abf1251df9d6dd48b5b574f6f48f020303bbf6401ad2a6b95a3e749f5b31224fc7fcdd083e7aeac9671ec3bebda312fe5c03393a914dd0b171b4cca2f5cef52cb4ed4b564278c0fb678e5e8f3d911b4addb302030356cdb16849ae7aff540b8724f73974149f71cd3f984360537159a273a5bfcc1d03791ad7bed137c9501bcee55dc6c9b030157a30c37fca14d39c25d9d5137ae88b020303e43916580d350f4da396c5763989f003085f6c468cf815846a95571702f1f53903e88243a0e60743a8285f13df8159992bd95c7f9546a8b5ef0ea2166fa211b8f70203039691d481d60a086435d9f914e8e2b5e5a68abfafb82dcc9d6de2176920c35ded03347f67f0fbbc63fa8a3b826c6491f42b13869a2abd2b6326d75d51cb30ea9cf1020303a06d3787a49c8745205aae2c80c6aed35adaa5a8e829f8ce8c29d55ffe8cadef032b843523c93d41eee41def0561be9ad7414c5bd9591d8e3723fcb0aea6170c72020303e56edd97325fff9e9a09d439d966a37ab63cdb3a3328b157445b60c3b91a86aa0381354b5bad8afeb2c183556c5f20e5d25c565cb8a738add05fc71bfb086737a102030301fa96c592fe444b2504b86acb3efb7befb3e241223f2d697c162be93668231d037f5346f59d4e0e4737f7b5cdde5494c43dcf2b583098022afa1d40024d434625020303299100220dba6b0afe91d1fb4a5c16f6cdc90da62bd73bd75b66063366a950f90315d7adf6a555d635edb76f96c7aeed7b5e3990ab1d13e0b01acd386ddeb43e0e0203034a527f4391b236f6ed15aeb5eb8839bca31aceadf3b8b5b7f5208d22f6a01b8903ecb9612fb023bcc161bfacadd2003a53d264c5555c4d65107fa01d984fc66017" + + resultWitness = 
"01020302030203020302030203034b4c181607792b3c46ea253af79666ab9bbfa3d29e8855be6c4e045b3424f6a503fdb52981685167cdab219ae57b3c5869e539e89eb29845d6406b3229247e982e020302030203020302030203020303dc378377acad40e16af2de6482d7a60c1e5f087d067fc716c2485742ac2e29330339535728bf0c5d72ec789110ff3691dfb9cf434399ad849a86ca6725977d3e4f0203020303481a1fc812bcc98ce37225fff9f28a6d8d0ea5c63aeda93b031e8e4603cc8e7c032952530fef71561f9028c37b944df439c0d2968c4f7e247a2ad12dd4969ffc8302030203031ce6733d3a496a34cb114cad924070b0dfad8ff6891f629ed2ae31326540fe120345057d6cbecce08aeecc475c91403549f4fe82bdb953895bdeded2fae6f8688a020302030203020303c4ac3ac799860160a30a3304b765c2c90bc414edc3739a5d098bb7e18009548a039042e98ef239f418f2bf7ad10868e1fa7d0f644458488adf684313dc3f683a5202030203020303949f805ade2be05694c8011fa17fab3646a43f38f96d868386f0ba9558ba5f960302aabd9fbeceb9711f46d634513830181412c8405aea579f470a19b477d090140203020303db978a462b93b2efa3aa3da09e03370b570db692c6d361d52ae1051bdb26a3a903916d67432c505e1dc33f3617e0743d761aba44785726309191e79cb18b666e7402030203033edca13bcadc1db9305f3b15322cc6d774682fffdfe2b509f81d00b16ce2dcd003dc94780e238944094e7856154e6d3e54fec28293a9a70eaf1cc2a81e874e22170203020302010203070354000000000000000000000000000000005ca1ab1e5820e72de8a1b9696dd30f7886b15c4cc9234d52c6b41b9c33e2baaf8d88fc5b7c9f5820f8fb80310ac041e7a5e79c138d7261cda5d8a988dc9268b5a8dc5318fb610a90070354000000000000000000000000000000005ca1ab1e5820000000000000000000000000000000000000000000000000000000000000000358206e82d18bde430935057c321f6c30812e0eae2122da6af753e25974c92f0d7b50020303c6cbb686c9d7a94f49dfbee076ae1b87f1c9bb5f33b7c98a71816c33b4731a3b037514c2021b2bb805e2a6060b265dd53c069a4587e19cd7d1af99d0e9c3d0e550020303784570292bffbc3ee00153e5806a317459fed4c1d84de0515dcefc177404865003f08c92f6786e67148e8fb2bcd4eb813a665e16a270b475605d1d84b587450ff102030344c5e2a1775873020ab4e5a588e95d6702878cd46012682196dc39738fd8780703a6d5ee17fe3be7e20050e4e66c54b5188febbdd3615f832c35b073078258b214020303a42b38dcef18f890c02cdb90473211c95582727b83af287cbfc8a3f10e29649103380623684a9b3b341e01ee65908a6aac96fdf1444ca255b9dd5193537d58709b020303476c478891a8f8d905ebf9e5c031ba1020ca1436538bf9b97c6eaa1b9512da97038c77314895fccd4edafbfd73b531f4dec6f4671b6acde83926907ab376982f310203036416706411fa678c78f77dbfb609d65f63d6b04a8aae3fae4cad23419f6e738b03b6ec59ff099f23c5a528e805fbd9457736b100ea0e96390eb536046b88da3db102030334468b79fd36c8bc812c6613d176983aa4be53642e7e56421faa4ef25031fc73032869ca46586018725007aac483055d85131fcc4432c9a72175a8c6263b65c1ed020303676f9f98ef2cdc44ec8d98d0153be2aeb90b08386286887c94567950df1216440385bdebccb7559d68f55e26ba0980bcf7120609c7bb43cfc1f701e92f670ac1280203031117969a5ad58cb9a441ddd498cf3bebc13ab5aea1ceb29ddb1a226c5343c6e703425597c542fab13f686a7053f6c1e2635a729f8d9da4c01d763ffe9965ddd63402030345f2b9e446c9e743f6899409a4567a9b7f8770f711d39e39773d8173c4ea3a0c03cbc17bc3c54426fc8cf2b13b1ddb800509579856ce251beae01d924a92a8edb8020302030203030560b956a67a313d6d8939eed4cd80cc385eb49f7b6dd269ccde33a145f1216e037b3e0569695b777df45db97a41b025b57c680ad61231b61225fc7825824c4c0502030203033f1ce9dde58980c5bc34a88467f3b8cfd334dab19f28050acc53f33aab0b366f036092ba2243e1d8e20c2aa4ba0aee9ca063e8e8e6da493269065c227232020a590203020303921f9061d1b4082d20b7f9df21566609ca6dc64cd0ffac2625e9ff3090ac73570371757934d2c7d4dfe9b7b1e5c71fe18b66cf56c540c3d04310873976f79ef9f602030203020303e8e045e00ad70f879f31e498fe49aa2fd4c81849b6c55dd98391681351aac7df036e509788bd99ed4034d5fa3901bbda4cb6f94d709b2d54eca545336569791f36020302030310125b6177d5086fcdca8e0607e49e6cb21bebc95404329c9769a7d3ed59e2c403
4e9acfa4214d459d1c81a304d862b2dbd4d832e71ab851656bfcc0e9c5b3e6f60203020303ad656bdacec77a2e7e591bddde7b2c7ab9b928945ee65898ff35900e25f0c21f03e47d9766945c4649bd44422f5fa779e92267d76ce44f396ef0b672215e43ce7802030203020303d3858acf0781afe0adae49a25d1724b36c9989179cc884b9a9c6481f89e57706031da6fb50879ede58d816d1624292f0c60a8133bcbc671dd92d0f9cb8d50fc803020302030317221db9e049aebabc83cefc3ebe7040ec1e82022d104d2c78f796753f76f0120352124f3ffee53f7e0f9a0068d5c0b0abfca5aaa148371c91e2e81df5fba6f8bf0203020303e00ce2232f3e208dcf050f887d02c7b91170c4b98e1d098ec5238bb3d387a41e038a0379dab30ce84865bb4f0834a9c3fd7bb2da17994abf03e89fd8d754bf7aab0203020302030333f5853903cb36caedffa12d0aa0a01c39e91309629a97dddafa8da4f738fb3e038e1dc6012aecd5998053b5878c6e3a398c8a286c7ad4dc0b55f043e4b4210950020302030317e041d91830fe8051fc73631cfd56519fc5bb88b5369298da9807b40d93733703dc7745bf8dfa058bcf1328f79efc9441cf5ad5fb5763c75bdebf9492f88a6c8302030203020303bf9a2780e63ac26ffca6df07f182db72e3e65802b277ea6d40057c383d5a37e3036c1548eb1c956ece0a876fff4463cc3f2e9c3b6eef88379f07e6d71013eb4aad020302030202020103c2eebac9e260dad1f051fa75846558dcc0ed120d1a7699fd1d6b739a3419015b020103b9d64db96b69b035a74a90069691afa2b34a705f3ad3aa82f3757c7a6f2a9f17070354000000000000000000000000000000005ca1ab1e58207af492bfc857210d76ff8398a35a942af892e866b1f4c241746b3ee89ca002595820f889596e5b3d4dbe5bcab5cde2af26d3ad8d88bc086e8b4f929885f33f6eec77020303cd3edcf79c7e0e0d6b87ae523729611faeda92e77bbb59f739e9a6127d890cdf03009a5c01c3cb27d2f1ffbd3fd77ff38f66991f64174f5df1411ea21ae2e22f250203035334694d8b56a2f5e0d0ded81283e7f36b3da6fbf2e2d1b469c008d7414296be03953388b0cacc587c5ca452ba8e96a9071958ef94439690bc14866f556a35ebc1020303ac2aae05bc7a68d238e9a9bbd2d5d07a001f8f3651bb25f5a6d6dcbb155569090335b6f55bf3d56419cbc3a45d4fa6bed330d9e0391f8806c97a7aa4149d06725b0203033dfe4a2f0555ff318ad12e49515e712f339134af0237edaef08553d9d67e260b039cd50a46feb34ab47c24391a2579e601956897ad6299bd14a4c8d9628a37d46e02030348f01fedf98979a5fb3df07daded956331fa6a02f697dfe29dd26e71111de5540387829f9a96ed82303e86550747e311d5dbfe94cc71113600595360abb512cb7b02030203020302030203020303eac48d9dbf7d162797293e0acd54382d4fd53e80e29c9c43c51dafb05c0880060306b13c75c66a6e267236b6579bcca576ff889e323ac6ffd0ee317e07623a3866020302030203020302030352e23af8570aeca858a6aa5bd20d2c63a92eb08529a9e6e5fb245aa72c5b72ce0334d7cfef6cb28d62f63cf907e3273d76a8bb858423c6ef446b056fb4f06210e002030203020302030315bf4cd3a7f33296bb4778e216bd18adacf25c97f8f4df9e1052dcba7b6edf2203b3637d0b1cf58c5272f28f8354a764d3cd48ff7c04f807237da8f4a1e2ef5db5020302030203020302030203020303c018dfe606efd5d17f3d45e91d33d3d7ef57d92a1d509291b1556bbb7e78dd0803b08ff5bb304aa8741af608415c541440edcd98bbc0fc849fe2a89c2c783341d502030203031e0eb0ac5664b1d0267d3c51dd66d1828c0d01a0903d599a317e4578926d4e3503330e2ccc546a3db52e796943aa8960d6d483a3b941ae0caa21cc7b9f7a3c2bbc070354a40d5f56745a118d0906a34e69aec8c0db1cb8fa582000000000000000000000000000000000000000000000000000000000000000015820421c2cc0dce9b0fbdb85cbe43bd6c2a1af5a6f5da756cdb8b6f2bb948e3a90da020303ac874a6acbf6de628134cd74ad9f336206e7aadb1ef09456b6267a770612485703ef323528b761720ce04927a54b81025a935f070420d655217e40eb2e084bd170020303a48199a63a429cf44fb39fdbb73098a49dc171e03d32800f483780adb9aa06580388796e8ab2076fc77f00a5096317ceff8a54da310b014a0310504bcd76f8b8da020303f8698a6f24140e0e37f49032fb2da6db2c8bcaea8961a6e1976baded0d9a8bd80371b277886f0d14b6f82cfd063ecddab10fb5da5e0666e040992469d09a6bc8b0020303dee2b54587eeb7db2fe9ef0dc262b6ae679a5bfff89c8f403d181a1d79107b1d032aff27f522ef5fd88213c3865e01c7b4c1720d56778d1bd0e48e6a86fb3b0797020
3037d0d29240ad72800831a91d8e54019da745c6c6a630a625167723ace857bbb810305edd49a8cfb1eea157734968e95e8b8620c474c3cfc6f3285d3dad36893114302030349b1bd34664838889a2133d716143cb8707a15745738917bfbbeecbe871e6e90035ba74ef0008ce80ac6d199cc4d217aaa9b8a5fd58f2d329aba4e061c16d99b620203030d600bfcd6581d405aaed26aa7cee976fbb2bb9c1c1390bd3eb14cf5f661022303acf3369b84f876dc556ed93718d616864020b1969d24170f4970ddbd944e1bd9020303d5bb847f016f33d8cac460756aad70173d8d6e37c37d1b69e1b1b45c52e5996103c3777105bd00820b49c89486e7589f0ccd0244ab6fd4b1409ba86dece7506f9102030356b2929dbde358b52b652bc842c7a42aea162f0d79bd7d653b5cfee34e9f0e6c03656a686adb3bff7a9d8841d3e296b0dc61c389b399677222ebbd70cf0c19e70a020303e5bf4a0779ccfa6d42a01e532bb6120b168699bfd3f4f44a62780481d5f86588036efb82ef530fb604bdff43bf1ad1a7dde41522bf8a7f5e724dd3074562b0c0ef020303036a50ac7a6e425842820d2a4e07a80f416706903e9d88b5824559515a901aa80303e3c58c1dfb4f5a5a5d1180dd010ceb33a42a0ff7cab200ced5562202261aa0020302030385d60697f5b4482fcbecfaf5f53111681c9b48ed7bbd2cdb1a257bb7f26db9d103ae21f016eadf6448b913ba498fe3d678b8bcdf9569b026053de69bd24563ef0202030203032fd50f1a5b8eddbd5ccb90e37d9c190092927af9b26a1cf8b4576d7982476fb603436882f441f09768b000722da7ec7c74b6f0252c24e16b9e6461ce4f4eeb791d02030203034eb00b994d3a8d439f47981f68baf7fb0f0e88e2167243c6b005de3c48b5c3ec03ac5626fd3f4030d088d0f41834de11510b59739353238241138d70bd8e05c22e02030203030a51165872abbe7260a6777cbbd2f6d81dfcd07c1b7c0783659bf8e8ca8e77b9032f78c81c54fd31d1a25214fa464424ae6e6399f15c1bd8987825f1f0d0dfccde020302030203020303215472473dede3eebcfdd93b1ee898e4d6cf33261a1fba12ff77dff2fb8a0f27037938ac733af661730414e88da9633c04a8914c9ae4263a4f8cea7066e6cefb840203020302030203034d6713bc006056011f31ac6f935e71e33ab8045353e9e138ec9743e8574a8d2f03fcaee2f22e1561702d029c465b755ff5491e4114264dfdf16fe9efd34864a83802030203037cc278f9b41fd17fb5eb3c839a725fcd1ef6000189fcebcb4214303f45dcd2d60386c3bc64da300f1a87efa2eb2724553e41348057fc99d5c23b5b20e216fde46d020302030376bf2ddfddca9910df80bb0785add76937d1e90e029c02b04c0cf421622a232803ba2219bc37e93a89b0effdfc3601f58645c1cb7e818f2254c8fd16fea4ba84440203020303fdf1c4799edef5fe2960f9148627fff521e591247a224eb8d05eae3f51675b560372adafa8e298a14d0da0a71e645a12a23def78db8e81f7a68ef92aac7d5700b40203020303f5a794d38718b283b47993f3bbcd67c76f84c47fcf2d35373fcb7f8a0f43a06b03a14b12d1f03790ac75797e463a8a7edcfb2bc80b65a7dc8d1b15d00cefb315d5020302010312f8462573dc83d436d2498e68019babdcc911f482e1e00c1b3fda70e1a166e40203020303adcfa2154e38e2cdbafd5d56bdaa5dca90a5bfb9c36bfbe140bb31ec0e66716503b5aaf1a6fa2e80ad8f4e49c51808d2898fd74f539ec5de974b57c27466f5e7490203070354000000000000000000000000000000005ca1ab1e58205956a0b12f607189a063054545ab26ce76ea5eb4c9bc1e8d8161646c93ac66515820da6aba51eaf87e14a7585e52e23cc0b789c61b3e808d2aef704ae932bb2ab49d070354ee5a4826068c5326a7f06fd6c7cbf816f096846c5820701c251f0448beefca8b47dce2e42f136d224b8e89e4900d24681e46a70e7448510237426c03237429400000000067445fb8020303dd24d6adc0d7b321eb19905b22f1780707b0d7e30026716c3b0d7ea311cbfeab03e498dca26358e5fd56e5464288da82073a17cbbd112e322488c12bff1661b49b020303413600069144f3379227184b3d365f22778695ad2b812ffe56bdec80df882877033b1e22049401430c9208943101b5ef3e70d99c9853e08591c4729e0f31f4bf56020303ffce2337b88b26e7b0582d1679484fa995b68c0418d72f650531db342e25f12e03493c1bb3f993e9aa63e2736b9e0826f1309ed298bd95bfc169f89b6a62cbed420203031bacbf380b1eafbec9c534577f8972d28087bc6e94bc276ec91e66a11396f07903bb137addf6042ee1a1eb0170ac09f0a092b2f7682f718d5986152d56d192b347020303b89984a9ec10a5bc5835efef55fbf26f3477d21372a55ae4abd26c55ee5e323d035ab47c29775484efde5ad8cf
b1a399e9008bcb66f6cd77f28c255980633aeb5d0203037902d8528b89dce0e6a41ff89888121f42520936f3684bdc8481094f1e046b4f03cedf898a501b7bc036d92797f971bf9caa1028994a8d6b15ceb79e4ca532e7cc02030203020303a366f69c8b19d47be34a2a6333298d705692f65daf3fba95d6f48b9676b6cd3b0351f190ff80b28f339034b6be161060cbe4837cf22e0c01b3d5a77b8f349c4f1d02030203038d8eae2b45a21838dbf9f517dae99ff0bac7a25d4756a7a3315c43cfa7dbfb9803785e2e17b8cdb9628ca4c2f963eb5722918462cf75f91dd6fd00ae84d17ba2a90203020302030312a3a949b95a27ae6f73e9d879bc9c9c6eb6757f1c20ee76d1f52e1d4c9ec4eb03d38f8911a661255b0ebcabbadd44e38903841386863c97499f3e57a06bc5c3e702030203020303763e3e4c8cc4a4b30afaaae229ff20ac282d74c923a88be140293d62b2d812bb03b4b4e3386c676de1012a2bdced3714094e57803a98920b0eefe63e186abdd4d902030203032ee550fc2b119e46e3338c971e6f44ea838020e442fce0c4a34b359306a00379038c72343f5e2ac968c7f1edfd71f18128db6b52aa476fbec372eaa58a2acf45220203020303221a1371f01a251478f2a6673db891a3c412d954dc9e741ea2bfd249abf428bf0325059126652b0c2c46d78a02eba6c4df473b674ed378b17827c634bd119f5422020302030203020303313abcaaf43f5d42589a57c6fc0bec04526b43a3dc139415af1de50f8846c004037ee72e1eb97ffd7dfe0c7d40b575103edd3e62c030b86362c41630c6e97bf6bf020302030203020303508d990b34daf5a3f925c435daac3d293f6d861094cc2d343a92c62428fa66da032f8b40a9211667e9c44328d6440091ecb3a46bc15832f7d7cdfa8ec130b527fc0203020303f993f7eae6e45a6f8557c6c5d0e912cb41b71d2bf37f38affc0b2d8e054193220315eeb3ab754628ce727cd0b7028ff8ed3291de7566b99066e127185d043f595702030203032f2c132f32f21e267ab64271e8f2c0c39fedbcc509c4589616cffec21d7332eb03839857347599c19c43a0acfe53e1bb5bbe0d68ddb49cee05f1b24c5acac24a150203020303dcd0869ad1107856680f6bf164623fc709d26d1a0842bb9c60a383f255c0ec2403c92cb1692742c4e2b6a91d13c3b371a9dccd29f898d8f6457ad052b1da9efcf6020302030203031408a1feb1c4cefd2e71c1d7ce58e6b4c2d139d48c67037d40dc0d60390af539039c51675ab13cc260ab6875b12824ed60903c1755add14024e27508ac0a3b9d81020102030376fdbe16ba7e2f048d9c311cb1f99291b4f624717ddd7e9f2aa653099d19314f032ebe85ea3fef7c7033338d1ed98e187eddf75dff4772a23e19392ce61690f77f020303f901f2ba5a7a95db9ea7106268f17f341206944377d1f006921211069cf8a0a103f43daf24401f9ed2d0691570a8ccdcd016c90b722786ff590276f7cd5933ff3d02030378ab72606d2d32782ceccc9c11af9496f599dec259281c01f0c18a3b875518ed0355b4984426bd4db31ca5d70798a18280a4d319786bd897a29365d2db7489b32d02030335706adc0febe81255c960be521ae4c7a6201b2db502fb7016a5d4d9ba36c58803ef3e9f16053b7f799f207451eb3403eb95301e9c9e721dfde0c41ebd8362485c0203033b59831b753c1ca3ed58d3293aab0099027f87ff97f3f7e92d9dfb095839497a03821fc506f41f2a0bcce20367ebd6ae4b461e110e1788d190416c8345ec72c364020303cf6d91b6b57705a8f02a367997e807f49dba00a5bb3e8d0de25eacad5486b88f03abc64c2300b90b30ae3b11fb71095675d1a62860a6471a1a2defcf624b8bb4d4020303be890b95c3a4c5c381f1a00d6d98da4cd8467e002746a8c52f2564e41319d3780394b620da3f2c277f0d4a70c7a54a7245503ed2e808bf722cce0b503e242ae7d10203039f6bac7e82bf632c8b003eed17f050a49d2ea83b6a93e09295b3b3c51c55ada6038d01937127f83a85e3e655363f467385226f7e406409528791f6e2375184ef5e02030203020303e2ba22bcf2fd6923a2ffd1ae073bcffad33e81f4a7cb9cab82e130c63a213b6e031dd2e6a82a0638b027a1f15eac2bceca26ef1519de70dc99bd5275791bab4bb0020302030203031d0be4b4d178c76d39a7689aaa3a9866e63b999a2d11dbec2f04787c714dabbe03e5880788e24aeb6314512538d4cf7382b37132d4d2870122f47de8ac0d09eb020203020303b9af076d8b0e683e730de94273fbcdb5d2ac9f29273a9ffb38875892722f439903e22b2cbffaa7b1ed370a3d8b87199e1f1485703145dd3de0945cede9629702600203020303a019468f5d28919dfcc2d7bfd844492f2ab1df6400a17627b31c29ea02d583f5038dd13cd4ecf8c4151cebaf6e2637913a2310a81d4ecbd5f5fd2f4a4c315558ac0203020
303167bb488d1aff473f1027bdeadb8e0e7a439f6a589c78caae1a3d045e78da60303ddda65ddb3f7e0fe430faaeb49419075391fd2559659f2ba88d3655454e079e802030203020302030203020302030203037a46bc17ebfbc47f6d99661de00074c9958e0f7fd66df7c77c236b89b165472e034b58bfe7c7506d2891367c270ca350269dfc0a08b7466ec2496c6330dd602bb302030203039b58b0df7fae59a4cef25184d849214bc145cda115b2c0dfd85fd470ecdea70f0330923d4d299efbc138d4442519d30cd33a7827557231388b81a6ea9c65eabe6f0203020303af3fee608c2e8e5a30ffc6345d86ec1b2d55f10e518b4da5e8eb59abde07b59803c2016682405d3a953eba254601d5fc0b8966a33efaa51918a4a41b8e0acbeb4602030203034b1387aa6d0ab944e2ec65ce38c8643a0ddfca5c3059718f398dee501291569603528cbab25216c4397a402fcb572f0b512a773dfeafa59e401989a4da13406bfe02030203070354000000000000000000000000000000005ca1ab1e582054b6c4d9862a1658dedebe99a0f61d94c5d1515fd031d0dfe9ebce6a1454f5c658203f14693500ccd0260659fd9eaf69570edc0504867134ac88f871d91d388b63690203070354914e7547b9051ea6226c30495190a2efa15930c95820ffffffffffffffffffffffffffffffffffffffffffffffffffffffff74873927548382be7cc5c2cd8b14f44108444ced6745c5fecb02030311ab6695ec969171698c1f56d4c05373d8505a2c7299fb05cda1d4351e22bfa403478b94ae515fbd01728835b532c7c45ccc78d200d3d004da6917337e139eb729020303ffd14369e7c7f7aec3a890a20234885f2c9fb9802ec318d8434ebcd58a696153030ddae742090ea458c3f232dc894bd8cd3378b4b4590a0523e09a44e0439fe0db020303b1688d8c7806365d931579ccac8dbf7a8d7705ac393159dfd9c0395ab7b5ca5b036a6c978a565b15267de4330de7b6166014082043c5cc80370953767ac501ccf2020303f181e47adf88e965d55e1153d76b731c261ad7d7720823919fc11d98bc144d2a03c480f344ef22a4532900fb9d7cb9d8b5ce1e4f11a231e682142f9ffe1962807d0203032db40fdeb2c5256d5a237b6134f844646b325bfc12c687916327e21a65b1ae6a03c73ddb4116e07a00066b925a207dda51fbbfadce21a7459c6c2ae7f598721089020303e81fa28f73bf124de71b54c67334292e397e000428de699c89947d793cacb9da03173e567d72ac2c265860d9103e791fdfe3cad72a9a1dae15d9bec6687eb506d702030305a683365eb32bb92967becff0dba79d1c23ff75b2fc3d40f9a1573b993747b703b8b1075b12927a8f483dc7b802c96483206f98c640e49e22d4b426f9a9eb750f0203031276db0802c8235f9f248bbafaa6cbabb75baead95ede989894ea6d8585c3c8703527ea0179a8814d423775e1f381cc8eee0797216d71c79729ab186714e4daf3702030330b1e1f7a1f7dcbf5cd00932de20748e546bc1a8da9381fa3d5066f3c02b61da033f7308aca0fa70a938e45539d5dcd1864bc233ef232c6d38fa1dd331e536a400020303ad8fe61eca50a88286f382461ecaa93dc71e9aed12e91a2e9930325e5ffd1d7903fd046a02679f734a91031aacb4194ada537220167cfa68306b651433026e6478020302030203020303b7e72973952f51f913dc6818649ddb3c5619982f21e56347003ebe3b3788eadb0384757ebf158021f4bfc0d9a1bf844d13747328fd367727cb0a2d9b7c91926c400203020303593dd6ef2d4c6f8ab3253bec454072a6cf779b5acd194d43cf4d30191d4b24fe03d80a7ee4528b16cb482fd73c259b2e6e4fde5d5d31be6b97703fbbb17c3e61d20203020303992d90fe15b918f58e8dac35e96d0ebf33834ccacc8a69b6a075b263d0df655e0301b8df4b987fcf3a98000ca00d3191fd2292dc9210d7f1ab382035b2e2d02be9020302030328797f5226ad9a63c859dc61073e8ef33fe15094e61db64bcde0379f055f733403b50fe3e685c2e442a3a81715f64a840afaf1f81b49ed21b3fc2ead0620f6caae020302030203020303189a1bc58c5621e4845025a9c534fb9ad2bb2f5be276faee403d59266561d652038325fb098a4b3a402690994212511e710d20cb7966fb26b3687fea719eca217a0203020303ca11813aa459d051b0411eeddd18070506e8fe2054a2e22a763b05454e87cefd03b2cb46d28f3bcf15305b0654ca442442420ccc1b28e44e2e2c84498571b5375a02030203039385ca432e99a05cca8aa7fe5868222cdb6c928c8bbdd7eb13c22c5abe1b11cd03e8cb7cbe434eae4b8b7910183b3b006a1b3df70ae7b30248fef24d64a004c3c90203020302030203035fb731b403c8979aa552e74b3534a247c638547dc7c957467a4b08855b29b74703d49a5d90635d403354f849daf9976a4f4dfd7dab5517b254
638eb893511ebcaa02030203032fddd404fe9317d561378c78f3afbe75e18c27face10d4e6ea03fc2888b22e33033c8c390d481f51cf6b43c22677a971beae0e62e8b2ecfdaaed05b48ac0f60294020302030203020302030203070054000000000000000000000000000000005ca1ab1e45e8d4a5100002010341305ecddd1b56329ac9f09a1235eec6ce6be69492a9788db13e9187dc21e9dc020303fb1c6d1aa6d3f3bef7a0bf4130218b3b168f9447e69ebcd3b68c2b2f41d9b2ef03652ba6f9b69aee3d28404079416c2f8fba4078d66b558c7a8d9615cfe7d3bd30020303d9f042d0d2f152e24d8cde02d3a7d7a1fa234efc5dc259572d412a2e607215ba03c5b76ff595e1d74a22eb44a5aed94f3225b6126c2c28ef04bb75e1d3804925ad02030314a2b125da4db5ba673cd5c0aaae8c5bf0857fd45728b868cff3f40eaf9f82790393e93c4f4b58f6f9d397d136319a29aa6b691b652651513bfc2297107379ce62020303f00359907dd68b2ae8e2d252d3313f3ba2bba16d21995333b2162b24c9bbeac4036435af585f0f75e60d362629108f6768756f7b39f1c70ab7f79e6b4e1bd9f08f020303929e2f8eb833089a3773b497247338865ef336de61e7da4a362eb3e5d5601a7203323197b010e3205d910c230463758f39cd6c01258db0a11b9b47f4c278db0494020303ab4bdc2dbea0c00b12cedf9e968135b62101bc1e20e270a1f694ae6a4686627c03140686262c769436fdaece3afe58e8a4423cbf381295a85237e52fac66c57879020303295e1973d07a067f281e3337e756bacf10dcc295f7074564874ea4401eb2a4e503cfec4348d3a697dd4f1835bc31c2615f56f92a02c1935cceec2501c12b8628f10203033892c29a2de6aee7888c2448fdbb3252d32b426bf74edf79223e4ee886fc0f6b03ef287d8ccaa574ebdac646e6d35bfb3ce52b00eda1e671d7d7bbf31bd59ff7ee020303c58f22b2dc782f914b31e3b87185b727a0bd2e2dcc41481e31ab1b26f222fdf703f0dcf8a2ce85de4d96bdc4c1a9c52a7ec54cc771750f0ed7d6c1113b93df65ce02030203039a7c26055306c8884baf96dccb2e3bb3cb30deceafdc73491bbdf0333400efc0036ee70bfe41de62ab49a9a63ca415bb881a92980f87fc044f2f5ae2e84185dfea0203020303c4332d86dee9e03fbda2dc0eb81cb20a6f6a20c7df95090f09e47d8e7efa1d7b03a698f30a106768bc9d451fd96a6808beb2b799deec6423688d02a9ba34b4af280203020302030203020303398dee7348cac5f07e4865c2049207722ec9572e2ae69b21a8cbd1c053c44a0e03612d7861c014aed261de20fd1109fc86ae090eb2c37a02b8a6072bba1c77c8b50203020302030203020302030203031f28ae8c421086878704ec730445ebf2ff23d186ffed24802f0ae24259c8d21403a8e38716cdd8a09095a7036c686009bd8236b1c7eb9507540fb981baa9a8bc4b020302030203030fe638892efa1dbdc2881def87e77dbbba95d91c8debdd9b242bbf0745455a7403e5554fbb47341d48f82f64a26d175a7d3559378657e77cf2de2eff917b95be300203020303512c2cf0ab4340a1623bdddc301aa586932f9413ea9bf8c0f1849d2d70d5d0ff0375d0cc499c7f76c70939fd8d633c658747eebf0eb138c15d902c34f0de9098030203020303b78dcbd59a3668396357cbda038d7e5bc77aac4acdb3cd3a75e96eb05079a6bf03ceb3ed2850bca5df0bd69ce2e85e9daff43cdb4c79f58685340f521521a0943f0203020302030201034b33ab5a3b8d3b01c374c1d7fcfc714398b7f0704ba2f1ee757670269fd5a7f7020302020203070354000000000000000000000000000000005ca1ab1e5820000000000000000000000000000000000000000000000000000000000000000043840c77070354000000000000000000000000000000005ca1ab1e582010c18923d58801103b7e76ccd81e81a281713d174575a74b2ef0341f6b9a42fd5820b8b76bb549992d9bfc44e3b36d087a175b2e78b9584fc752eaa3013e0bdd31e8070354000000000000000000000000000000005ca1ab1e58209bd14ac8c1cf553e0ad3a2c109b9871eb74f3c116bf0bf492ef04d2983722555582090629dad0a40430445b7d2b25a8d19c5d7a929608ed7890877a499aaca01ca5002030315edee06840e36ef17d13817ab5475d12f7bd50113984febf31e2cd80c08952c03d360a7d78676862429feb7c95d052c1e63379b8ad3becf085a21baa353ab93d30203037a2fb952c2cf8e85d9706bcbcb5a69b83b13403b58f06b0767f4204acc5917930310de142eb3b2790cf1e3694b72eecc7e8ab3860f543c15cc24274ff69570f009020303879875563fe8a079ef71e84b6840b187c681499095de9d02d8b101c9dfcd111e0395e9fc3b000e49b65678f256d247786f72c91494c960d117b7668045c35502720203033d99bf4088229
f273fa4910aad8f0aae6f7de8fd1832ddd14c8fa3d083aac51603b40316099ecb013c6dcc6ac2a3e831521afa35ea0ee52485c2e8cd40bd81fd870203030b576686ff79ae37ff5ae2d4240131369472a24104dcabaaf3c348da66a638bf03dddfa8283748687718b9672b0a69a6b7758ce10eff383d83986c1a2aca2910e002030313ac1b1da5a5a4232e4e2b766aaba01f45f9444b926476f01882e30d4cc6ae1a0323f57d2012e1874436ddc007ea8bc6dcbeae6e0dac6fd044c8375d2fe593904502030381ee4d8ef714022c3c5fad435af845d213cb988ef7561ddf65929553b70dd69a03178f9fbf18b1d12feb522330c82fe96d15bc4964e1c1053093c4903149652e6b02030323cdd6298c89fc39f87595dedfd8bae3ae7a40b66f312333394482169297dc8d033517a6ff26c035b9f822da8d2abd642c858696e0d970b1026cb524cb0844195a02030391dd21f4c52970493d439537192b245ccd2e4e3e8e16d90dc74e417718b12f9103d56b5ff3ad5ab9205b2d6f9c508e744643224a7ebca8c1a4aea71f01e48b186b02030304375ae3a357e874c2a10fe3596adee75d0ccb96e63838d8db70c9e402663e9903bd8d2e9ed97a66281cbb0733a92dcc92158740088acc7a9c834d8204c0acc1da0203033c9cd711a378c8153572663cfc686ea0324eaabf0feca614928eab900755299f030bdcf7033e475ad4a147377e1bb9ed8619b0c88b728f7935ecbe7bcd2fa82c7c02030203020302030344d578674b6be511832af115d2669571dda0918b6cc734aa4acda37260458f3303fa95439e418988542df6cc8a12cd2e59ddd44643f035364c14b146d8665ab538020302030203034f9b9d5ccea861bd0aa15a5fb7fecc1a6d49b66bc7eb1e8905701e3e5728957003a6bfd6ce49840bddcf6502294487dcf1e2b5d6b06100a0b1259dbe8c8bd8e44f0203020303dc08ac42d157ac7d835fabb64048b54993bf6636eff62863d99d2c8905f1e6050362a972a91cfac6bfbaf2c40c724f947a405ce6e647aac0a61ea8f467a49b41cc020302030203020303a4be360a2b33a98faf83a47c1611d2114b590f1a626d48f700e1b0d8361f65f6030e4a6c2e589051b01393778e2bd41764d047b0394238c514e3cff1bcd9f17fde0203020303a19150f49f5fa1e3a3e09c7b8e3a80ad9f901086b2acacc8a5712c945ab79d3903374e7d15b75adda866c38fbbe1cb5bcad247ad095de306706d40855b922df14f020302030203020302030354772bf7e2a00683456b752e674df624b9b8419fd126964d66a82f8ba678977a03dd8f48954ed2bb272c5b94a49d1ef09d545062536065580bbd306776bc135f8e02030203032108ea8ac4227399387099ff7faacb8c1e424f5543edb67d7d8ed0f04a4e0dfb0392659304959ceea896f45666a76214b0f96c0d0ac9ddb78a96f9a0271e7b579a02030203020303870c4f9820964a725c45a91364107661534dff05c30e966b1946f2157844ec0603bf64c46a8bfb74f75acb660d0a43078c21cdab2627c014fd463a56ad85cb7e6a020302030203034b81bf62e5171445bc7bb3e154c4236543feb39907364512e7f8bf3010d0bcd103c1e217970454c195c8cefeedb6eb556772703cdfcbb9473b1251407e3af45d4d0203020203a8dd420db1a92952522be68028b8762b9c2c45f11efe01d4e2b2a17a8aeca76202020203037c03317c701ee7c858e7c429134f07bc4f3bb40047681a2995924386b065a44003eeb2124d66ad9fe030707b71b337ead87239fbfec018f78a36cf83ffe6c1f3090203034479d72706bfadbfc681e4b1e0c17fd702e94ff5cce085697fa4915b9ddf8e5503978f813e60f47989d365c08ad74b7b5697ac63a4d729225fef5cbbf858dd9e360203031e3fe72c68bad17795f3ab1c89427a9db9297c750e25a03f4d5cc7f4300ccf25033477174075c81e1ea46067ae9766ac42b6e37b0122ca757914f2d38d5a5b0fd90203037c82934570e0e51dadfe294202f68ff1baa30ec7f3d972fd309af51bb73233b003c73c4ff799c5d7f7900bab9bed27acfd777778f080034d266e4b3a8cb275180e0203032ec060cb265b14a46177f0b9263af186c22d8fad7466efd3dda1a76839916f720322d842fbac43297665301e5a01f595b5961a7617045e6f90903794e64ae970f3020303bd26ad01b4a6d5fc9578bb889728e38b0cd1929f289dd0733beea3122035d8050305574e7ff67c46b4d58103152ffd94f950e5bf9a67a405de972849bfaa7a335e0203033c9f565b7511511ebda8b766512d87d572c4958008f933b4e55604a5f3c36e82036a24bb5153ae46e102a28f022b5305705a84d70a4d2d5b399a09ae90bec4c86d020303ca003945b6df159b5c900df34d54d18e81551ef946e6ec76aa5105912bd41228031937941108c7513a3bcf7e078b1b35a9816cf095dc7413079922c0eef235cd950203032c581d00b2b34c
68be72f5453d8d67f30797a26d4b0df66f004fc0075cc8eb1003e71d380a7d686d28aca8fa3508c37b30fb5e30bcd348e19dfa6b547f2fda4fb602030203020303ac0a0e8df4bc9026b7b241c34d72dce10c8424eacea17d1670103c8ded2446be03f7a62663d338b5b7e9219d01266b1772ca3720daf925bd302b7dafcf8abebcba0203020302030366c76d1cd3e3136e15f9f29f3dcc4bded2c760f251e06403ea022bf5d67ae2d503c574a334d06a611fa0340a1213e317efb125ace4eda7a487ea00075e9a6b67a902030203020303ed1e8a1502eb462bb7f836f6d72486908e1b2cce7cb00e387cc1aadc827874d103bdbfb8f970bcc72256ac4e1eca0809217c625c6412289a6dc5dff7c91454436602030203020302030203020303e8bb2dae757d043417195292bac773cda990500845f91a94d00179fe89525c3e03e385244b15001ddd43b0180922bbccf4040a86bd116ade66fc3aced03dffecff02030203037d8dcb012bdde19a0dd178c1de91d21cc866a76b9b6315554fec4bc4f5daa79203bc46b61f50585799762df053271c52844c6fe83156fde628c8bc4369c4fed18202030203020303c998c4c602a03cfa0c92a1542165567f11d23f2ae5fb91d04e02292f8e297548039447848097d9500f21ebe819789f98461e01aff7cfcd442c8afe8e07b87a95690203020303f6441de6ba2bc5cc9ead4300519a94a14a80b85f2fb0fa41e2211d7c02af0e6703703a99e4c2133e89a6e892863f14f143cf5f2ad94bd527081c8be6143f14f3db020302030203031da626649ee092857f195eb0059639309d744972389a4e748c471f16b0fc3c2e03f4072cd036d5bbb777ad24fa0b1a235806ef25737404d7ce4f83babb76bf090802030203020303c7f6a9615846a2d627db4c940098479bce3c61c8fc1170d0b7f8c9ac2bec5ea4033664667108158e9377b49cf3632952090b6eab3ba6eaed4f48ca9d5beb273fd002010203070354000000000000000000000000000000005ca1ab1e5820eff9a5f21a1dc5ce907981aedce8e1f0d94116f871970a4c9488b2a6813ffd41582021bb230cc7b5a15416d28b65e85327b729b384a46e7d1208f17d1d74e498f445020102030203070354000000000000000000000000000000005ca1ab1e5820cfe58c94626d82e54c34444223348c504ae148f1e79868731da9b44fc91ddfd4582040fc722808ecb16a4f1cb2e145abfb2b8eb9d731283dbb46fe013c0e3441dfbc070354000000000000000000000000000000005ca1ab1e58200000000000000000000000000000000000000000000000000000000000000002446746e71f070354000000000000000000000000000000005ca1ab1e5820bc32590bee71a54e9565024aca9df30307a01cf8f23561baed0ef54b30f5be68582007b7aa19b2ab0ca977cf557ea4cec4c0b84b00a9430cfe0323877011fa03269c020203e752c67cd3ffa4dacba7194a973b10998b056b5b936d17da2a8eae591a8e175e020303abdf2e1db4950317aadeff604ae51ac75e8281b1ea22e7f17349757c71dca21d03fd88dafa9a26e9c8f617b5de3e1a6021092dbff5c1fdb5d8efaeecb1f333565c020303b8a363b96519cb0eed132d807f6e42ea35f115584c443a74b14a19bbeac463d7038247bf369e033fcc19a5797960f1387f04f80cf396babac560060887288632db0203032a893ec5bee53177a40777945189164675444d0087d703c8129196df58b4ffd10384203647fe683ea28ce78395875f0bc39f1fe1ce6c9670b8393161514dab47010203039cd9d80315aa2688e25cdcc210d01a64e57404ec24bd81538fcfd3880c7a1485031ced4693c4d71b2e97ea6287a2d22ed1af991abfe52dd764bfcdb56f3084e85e0203039067a4614e2e410a883b1cf0ccfdc298c978614ca1a472330e5d63e1ea9ae095035bfc8cc6e977317e3dea3bdea3406975ae2384e72f6e5e09ebc3ff358e4d9725020303f30c3bcd31fed704d2c67d83ece97bb8fc518746b11b291f9ff5c12ea436f92703800f22b2fc6b77bdb96880866086a8f4d621ef386020c90fe2a678b1bc3a063d02030203035d2afadc42d28ae8d74c7b5f96e56bcaecc01423bc9555ef9d9686271ddd238b033852af41b0f8f922418b3f525cd77f039ce8a0e41034e8a9c51de2daf331a7cc02030203020303dedf2c8185299a3cdcf5805fd538243eafabea31d99f30e0c56119453cbe0595035fd0c51fc95c362deb97f4d34a367c56d9c3bae67f33a75541d47299bf8c85d002030203033a34a2ec21ba01bdffa3e14bdc6234b1177f58fb0f8d20ded1e0d337abc9097303f2a2ca0856cfc4409a556f408436e6112049837ca240449b521ce77ded9bbb4502030203020302030355b79241b57362ed5a29e40e42647066862077123d3363d2776ae9a5235aa625031a0d841893cc3c11eefec6fcff6687e1f2d52c667b72e9896d185cf
ac2d52f200203020303267a5ba100569955e1248dd2758afbe9cabcc9fb5256aeadf2d9de2bd50fa9d3031c3657155c2172893ad0ceacfd6dbaac96e7450dd3572516816664bbad57307e0203020303bfdb95766031cea080daeba2879e20c2c9212e98699aa1a9ddd0f35b3f4f14d1031cb570e01fa4fd83227e9e7423cedcb4d1f2aa49a4b379bfb5532267cb41d1ed0203020303d26a86e0cde80dcb3dddc5689ee7aff7cc4aa63c69a65db071604f2d22821f5003453e11710c67ffb8aee8ecd4e3d9e482a3c3b6473055a8fda9141761be2a2cfd0203020303eed4e48df11288a42497f24b09a60c194737347e0f0324ace547906015c46763030f3541edd630e75e0ecfad8204446c4b04e707f29a911034b0d990df202058b6020302030357f21f30a7d272dc3b763a0ba582826c2888cd791ea5cfebf8c6eeba97688cff03942b80bd4855b40d077eb25e2677767cd9e3e32548b948133c53d5cfd98fb4120201020303039a912ac6df3a5af5d7cdbebd9c86dfc4d667901d38d17f5e265b4ec92851a3039a13ede5f8fe8fc936a9c053045c21b7cfac59232ed14acebe5a1270684c7ba402030366f89b9e4af2d9333431a7252441386158c0cd4f1416c432bbfeddeaf9a94fd303ea0e7f59ba22f8e1c16d8662786956816da4d6c44b3af63dbaeff9fa26ff58a8020303087927425293ead337b03b12cc3be21e324869c328321da791feace40848626c0320fde6ec582d5275f6c1b21b4ad7130f8e54c52d05483ef9effefa3cae9eaf51020303dd266e9e532095a3ef2479e8543f52ee9386405aadc619a2e962ad2f4ca7940003015c36f881ff87d7cdce55b31157699432c553b1c2be328b4b041688853ec960020303d58b82e1f5dc744c3e99a29dae08c0cacdd92b28e0966a5fb3b143479649353e0381584029a53e6c7f0dee68619e681482b9e36e43858e57bacb3554d7af2a8ad1020303f6ca9ca2515d3662f23cde1e54e67e0817607d0b9f501818a528ca1b43ffcce603bd381317706701d336e83e27c1cd699d0b616b349b0e28de4cd010cfec1a2bad0203020303af2d5e74e0ba57395bd0c11be5508a506eee906defac2ac84fba6ce7b577205703dddb21150e7c057c4df77ad73836cefe1e746adc52dfe903bcb543bea8eba9d502030203036cb57c550ffabdb39fe5531fac6c603b79b2551bfac7e208e7a1b1628607ff9303f46bdcac887fc8561c752bc45e1c98389a6a35cc0572575245a8f2ae513bea3f02030203035dff75d9bd1da1247aa0fc644e204d8fe7a916636d465210ba9a828a93bd8fde03f50e7e2741b63ce73e98ef6a769fa9339d941cf993b7e4b26305f22e9f18bc560203020303ec8a5f20ba3d3385c3ce7cd26702f5e40a4432f72ac566a3df649c1af87741fb036a000d8ceda0fcfe3ba4e6ae633e3abbd3deee0db83107e5ce0e0469b26e7324020302030203036058e9f8cd448caadf126fd3b7d50fbbdd8e2f7a8de9160a484ad79f8829bf5a03be9a1646b44327a504c96d0b2ac009d73adb23ba21ba3df5a5dfff32b74403680203020302030203020303ebee5c234bc2f660a9b3efe1bd2fb7d340182d904429b1f2a4e89bb51b1c47c903e51438724a9cf3725c22e07d59ba15acf0bbf473b37744164f122ac475ec42d20203020303bf9c131a0283cc46ca74d21b68d0b3a62d131dc9f4787ab60772569aaba63fd703f011de292bb236c3b08513f7b82ab7d311d0f80d4d3e173c2f8445028ed1cbf8020302030203020302030203020302030392af697495712b4013883b3f5ad2d370bdb93f0ed60416692b0267f10d9a3caa0386fa8ccd91ab622b232223f9a347f1785ca9c4b7323a2e0d19a7971c3afd63ff0203020303b4f12607fb8df583b854d7b235f4a64ccb2f4bc9819dc50f3a03ed0d4906910e038f64a125d14bb92752d65593faae8e41bb5e80e4f147b20f0c247078f6e7ca77070354000000000000000000000000000000005ca1ab1e58202d11035f2912c26c30c4f8957d3910a20622ea8709c8cd3e0ad87fa0f4460bbb5820c0bf0b2ab68768eaabe5fda7814227beaeaf4c4ee7e67f5d07aefaf5f0410ab80203034d5eb602925f13a2147a2c1439d43faa74e2561bb3d27811f02042466fb2804f035d9458bc537a1957fddbf6c5f13c6bfc9349abf1251df9d6dd48b5b574f6f48f020303bbf6401ad2a6b95a3e749f5b31224fc7fcdd083e7aeac9671ec3bebda312fe5c03393a914dd0b171b4cca2f5cef52cb4ed4b564278c0fb678e5e8f3d911b4addb302030356cdb16849ae7aff540b8724f73974149f71cd3f984360537159a273a5bfcc1d03791ad7bed137c9501bcee55dc6c9b030157a30c37fca14d39c25d9d5137ae88b020303e43916580d350f4da396c5763989f003085f6c468cf815846a95571702f1f53903e88243a0e60743a8285f13df8159992bd95c7f9546a8b5ef0ea2166fa211b8f7
0203039691d481d60a086435d9f914e8e2b5e5a68abfafb82dcc9d6de2176920c35ded03347f67f0fbbc63fa8a3b826c6491f42b13869a2abd2b6326d75d51cb30ea9cf1020303a06d3787a49c8745205aae2c80c6aed35adaa5a8e829f8ce8c29d55ffe8cadef032b843523c93d41eee41def0561be9ad7414c5bd9591d8e3723fcb0aea6170c72020303e56edd97325fff9e9a09d439d966a37ab63cdb3a3328b157445b60c3b91a86aa0381354b5bad8afeb2c183556c5f20e5d25c565cb8a738add05fc71bfb086737a102030301fa96c592fe444b2504b86acb3efb7befb3e241223f2d697c162be93668231d037f5346f59d4e0e4737f7b5cdde5494c43dcf2b583098022afa1d40024d434625020303299100220dba6b0afe91d1fb4a5c16f6cdc90da62bd73bd75b66063366a950f90315d7adf6a555d635edb76f96c7aeed7b5e3990ab1d13e0b01acd386ddeb43e0e0203034a527f4391b236f6ed15aeb5eb8839bca31aceadf3b8b5b7f5208d22f6a01b8903ecb9612fb023bcc161bfacadd2003a53d264c5555c4d65107fa01d984fc66017" +) diff --git a/zk/witness/witness_utils.go b/zk/witness/witness_utils.go new file mode 100644 index 00000000000..ce63342148e --- /dev/null +++ b/zk/witness/witness_utils.go @@ -0,0 +1,199 @@ +package witness + +import ( + "bytes" + "context" + "errors" + "fmt" + "math" + + "github.com/holiman/uint256" + coreState "github.com/ledgerwatch/erigon/core/state" + db2 "github.com/ledgerwatch/erigon/smt/pkg/db" + "github.com/ledgerwatch/erigon/smt/pkg/smt" + "github.com/ledgerwatch/erigon/turbo/trie" + + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/datadir" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/state" + corestate "github.com/ledgerwatch/erigon/core/state" + + "github.com/ledgerwatch/erigon/core/rawdb" + eritypes "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/stagedsync" + dstypes "github.com/ledgerwatch/erigon/zk/datastream/types" + zkSmt "github.com/ledgerwatch/erigon/zk/smt" + zkUtils "github.com/ledgerwatch/erigon/zk/utils" + "github.com/ledgerwatch/log/v3" +) + +var ( + ErrNoWitnesses = errors.New("witness count is 0") +) + +func UnwindForWitness(ctx context.Context, tx kv.RwTx, startBlock, latestBlock uint64, dirs datadir.Dirs, historyV3 bool, agg *state.Aggregator) (err error) { + unwindState := &stagedsync.UnwindState{UnwindPoint: startBlock - 1} + stageState := &stagedsync.StageState{BlockNumber: latestBlock} + + hashStageCfg := stagedsync.StageHashStateCfg(nil, dirs, historyV3, agg) + if err := stagedsync.UnwindHashStateStage(unwindState, stageState, tx, hashStageCfg, ctx, log.New(), true); err != nil { + return fmt.Errorf("UnwindHashStateStage: %w", err) + } + + var expectedRootHash common.Hash + syncHeadHeader, err := rawdb.ReadHeaderByNumber_zkevm(tx, unwindState.UnwindPoint) + if err != nil { + return fmt.Errorf("ReadHeaderByNumber_zkevm for block %d: %v", unwindState.UnwindPoint, err) + } + + if syncHeadHeader == nil { + log.Warn("header not found for block number", "block", unwindState.UnwindPoint) + } else { + expectedRootHash = syncHeadHeader.Root + } + + if _, err := zkSmt.UnwindZkSMT(ctx, "api.generateWitness", stageState.BlockNumber, unwindState.UnwindPoint, tx, true, &expectedRootHash, true); err != nil { + return fmt.Errorf("UnwindZkSMT: %w", err) + } + + return nil +} + +type gerForWitnessDb interface { + GetBatchNoByL2Block(blockNum uint64) (uint64, error) + GetBatchGlobalExitRoots(lastBatch, currentBatch uint64) (*[]dstypes.GerUpdate, error) + GetBlockGlobalExitRoot(blockNum uint64) (common.Hash, error) +} + +func PrepareGersForWitness(block *eritypes.Block, db gerForWitnessDb, tds *coreState.TrieDbState, trieStateWriter *coreState.TrieStateWriter) error { 
+	blockNum := block.NumberU64()
+	// [zkevm] get the batches between the last block and this one,
+	// plus this block's GER
+	lastBatchInserted, err := db.GetBatchNoByL2Block(blockNum - 1)
+	if err != nil {
+		return fmt.Errorf("GetBatchNoByL2Block for block %d: %w", blockNum-1, err)
+	}
+
+	currentBatch, err := db.GetBatchNoByL2Block(blockNum)
+	if err != nil {
+		return fmt.Errorf("GetBatchNoByL2Block for block %d: %v", blockNum, err)
+	}
+
+	gersInBetween, err := db.GetBatchGlobalExitRoots(lastBatchInserted, currentBatch)
+	if err != nil {
+		return fmt.Errorf("GetBatchGlobalExitRoots for block %d: %v", blockNum, err)
+	}
+
+	var globalExitRoots []dstypes.GerUpdate
+
+	if gersInBetween != nil {
+		globalExitRoots = append(globalExitRoots, *gersInBetween...)
+	}
+
+	blockGer, err := db.GetBlockGlobalExitRoot(blockNum)
+	if err != nil {
+		return fmt.Errorf("GetBlockGlobalExitRoot for block %d: %v", blockNum, err)
+	}
+	emptyHash := common.Hash{}
+
+	if blockGer != emptyHash {
+		blockGerUpdate := dstypes.GerUpdate{
+			GlobalExitRoot: blockGer,
+			Timestamp: block.Header().Time,
+		}
+		globalExitRoots = append(globalExitRoots, blockGerUpdate)
+	}
+
+	for _, ger := range globalExitRoots {
+		// [zkevm] - add GER if there is one for this batch
+		if err := zkUtils.WriteGlobalExitRoot(tds, trieStateWriter, ger.GlobalExitRoot, ger.Timestamp); err != nil {
+			return fmt.Errorf("WriteGlobalExitRoot: %w", err)
+		}
+	}
+
+	return nil
+}
+
+type trieDbState interface {
+	ResolveSMTRetainList(inclusion map[common.Address][]common.Hash) (*trie.RetainList, error)
+}
+
+func BuildWitnessFromTrieDbState(ctx context.Context, tx kv.Tx, tds trieDbState, reader *corestate.PlainState, forcedContracts []common.Address, witnessFull bool) (witness *trie.Witness, err error) {
+	var rl trie.RetainDecider
+	// if witnessFull is true, we will send all the nodes to the witness
+	rl = &trie.AlwaysTrueRetainDecider{}
+
+	if !witnessFull {
+		inclusion := make(map[common.Address][]common.Hash)
+		for _, contract := range forcedContracts {
+			err = reader.ForEachStorage(contract, common.Hash{}, func(key, secKey common.Hash, value uint256.Int) bool {
+				inclusion[contract] = append(inclusion[contract], key)
+				return false
+			}, math.MaxInt64)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		rl, err = tds.ResolveSMTRetainList(inclusion)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	eridb := db2.NewRoEriDb(tx)
+	smtTrie := smt.NewRoSMT(eridb)
+
+	if witness, err = smtTrie.BuildWitness(rl, ctx); err != nil {
+		return nil, fmt.Errorf("BuildWitness: %w", err)
+	}
+
+	return
+}
+
+func GetWitnessBytes(witness *trie.Witness, debug bool) ([]byte, error) {
+	var buf bytes.Buffer
+	if _, err := witness.WriteInto(&buf, debug); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+func ParseWitnessFromBytes(input []byte, trace bool) (*trie.Witness, error) {
+	return trie.NewWitnessFromReader(bytes.NewReader(input), trace)
+}
+
+// MergeWitnesses merges the given witnesses into one, corresponding to a witness
+// built over the whole range of blocks. The input witnesses should be ordered by
+// consecutive blocks; the values from the later witnesses are merged into the first one.
+func MergeWitnesses(ctx context.Context, witnesses []*trie.Witness) (*trie.Witness, error) {
+	if len(witnesses) == 0 {
+		return nil, ErrNoWitnesses
+	}
+
+	if len(witnesses) == 1 {
+		return witnesses[0], nil
+	}
+
+	baseSmt, err := smt.BuildSMTFromWitness(witnesses[0])
+	if err != nil {
+		return nil, fmt.Errorf("BuildSMTfromWitness: %w", err)
+	}
+	for i := 1; i < len(witnesses); i++ {
+		if err := smt.AddWitnessToSMT(baseSmt, 
witnesses[i]); err != nil { + return nil, fmt.Errorf("AddWitnessToSMT: %w", err) + } + } + + // if full is true, we will send all the nodes to the witness + rl := &trie.AlwaysTrueRetainDecider{} + + witness, err := baseSmt.BuildWitness(rl, ctx) + if err != nil { + return nil, fmt.Errorf("BuildWitness: %w", err) + } + + return witness, nil +} diff --git a/zk/witness/witness_utils_test.go b/zk/witness/witness_utils_test.go new file mode 100644 index 00000000000..5e14e1abbe7 --- /dev/null +++ b/zk/witness/witness_utils_test.go @@ -0,0 +1,203 @@ +package witness + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/hex" + "fmt" + "math/big" + "math/rand" + "testing" + + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/smt/pkg/smt" + "github.com/ledgerwatch/erigon/turbo/trie" + "github.com/status-im/keycard-go/hexutils" + "github.com/stretchr/testify/assert" +) + +func TestMergeWitnesses(t *testing.T) { + smt1 := smt.NewSMT(nil, false) + smt2 := smt.NewSMT(nil, false) + smtFull := smt.NewSMT(nil, false) + + random := rand.New(rand.NewSource(0)) + + numberOfAccounts := 500 + + for i := 0; i < numberOfAccounts; i++ { + a := getAddressForIndex(i) + addressBytes := crypto.Keccak256(a[:]) + address := common.BytesToAddress(addressBytes).String() + balance := new(big.Int).Rand(random, new(big.Int).Exp(common.Big2, common.Big256, nil)) + nonce := new(big.Int).Rand(random, new(big.Int).Exp(common.Big2, common.Big256, nil)) + bytecode := "afafaf" + contractStorage := make(map[string]string) + for j := 0; j < 10; j++ { + storageKey := genRandomByteArrayOfLen(32) + storageValue := genRandomByteArrayOfLen(32) + contractStorage[common.BytesToHash(storageKey).Hex()] = common.BytesToHash(storageValue).Hex() + } + var smtPart *smt.SMT + + if i&1 == 0 { + smtPart = smt1 + } else { + smtPart = smt2 + } + + if _, err := smtPart.SetAccountBalance(address, balance); err != nil { + t.Error(err) + return + } + if _, err := smtPart.SetAccountNonce(address, nonce); err != nil { + t.Error(err) + return + } + if err := smtPart.SetContractBytecode(address, bytecode); err != nil { + t.Error(err) + return + } + if err := smtPart.Db.AddCode(hexutils.HexToBytes(bytecode)); err != nil { + t.Error(err) + return + } + if _, err := smtPart.SetContractStorage(address, contractStorage, nil); err != nil { + t.Error(err) + return + } + + if _, err := smtFull.SetAccountBalance(address, balance); err != nil { + t.Error(err) + return + } + if _, err := smtFull.SetAccountNonce(address, nonce); err != nil { + t.Error(err) + return + } + if err := smtFull.SetContractBytecode(address, bytecode); err != nil { + t.Error(err) + return + } + if err := smtFull.Db.AddCode(hexutils.HexToBytes(bytecode)); err != nil { + t.Error(err) + return + } + if _, err := smtFull.SetContractStorage(address, contractStorage, nil); err != nil { + t.Error(err) + return + } + } + + rl1 := &trie.AlwaysTrueRetainDecider{} + rl2 := &trie.AlwaysTrueRetainDecider{} + rlFull := &trie.AlwaysTrueRetainDecider{} + witness1, err := smt1.BuildWitness(rl1, context.Background()) + if err != nil { + t.Error(err) + return + } + + witness2, err := smt2.BuildWitness(rl2, context.Background()) + if err != nil { + t.Error(err) + return + } + + witnessFull, err := smtFull.BuildWitness(rlFull, context.Background()) + if err != nil { + t.Error(err) + return + } + mergedWitness, err := MergeWitnesses(context.Background(), []*trie.Witness{witness1, witness2}) + assert.Nil(t, err, "should successfully merge 
witnesses") + + //create writer + var buff bytes.Buffer + mergedWitness.WriteDiff(witnessFull, &buff) + diff := buff.String() + assert.Equal(t, 0, len(diff), "witnesses should be equal") + if len(diff) > 0 { + fmt.Println(diff) + } +} + +func getAddressForIndex(index int) [20]byte { + var address [20]byte + binary.BigEndian.PutUint32(address[:], uint32(index)) + return address +} + +func genRandomByteArrayOfLen(length uint) []byte { + array := make([]byte, length) + for i := uint(0); i < length; i++ { + array[i] = byte(rand.Intn(256)) + } + return array +} + +func TestMergeRealWitnesses(t *testing.T) { + witnessBytes1, err := hex.DecodeString(witness1) + assert.NoError(t, err, "error decoding witness1") + witnessBytes2, err := hex.DecodeString(witness2) + assert.NoError(t, err, "error decoding witness2") + expectedWitnessBytes, err := hex.DecodeString(resultWitness) + assert.NoError(t, err, "error decoding expectedWitness") + + blockWitness1, err := ParseWitnessFromBytes(witnessBytes1, false) + assert.NoError(t, err, "error parsing witness1") + blockWitness2, err := ParseWitnessFromBytes(witnessBytes2, false) + assert.NoError(t, err, "error parsing witness2") + expectedWitness, err := ParseWitnessFromBytes(expectedWitnessBytes, false) + assert.NoError(t, err, "error parsing expectedWitness") + + mergedWitness, err := MergeWitnesses(context.Background(), []*trie.Witness{blockWitness1, blockWitness2}) + assert.NoError(t, err, "error merging witnesses") + + //create writer + var buff bytes.Buffer + expectedWitness.WriteDiff(mergedWitness, &buff) + diff := buff.String() + if len(diff) > 0 { + fmt.Println(diff) + } + assert.Equal(t, 0, len(diff), "witnesses should be equal") +} + +func TestMergeWitnessesWithHashNodes(t *testing.T) { + smt1 := smt.NewSMT(nil, false) + smt2 := smt.NewSMT(nil, false) + smtFull := smt.NewSMT(nil, false) + + _, err := smt1.InsertHashNode([]int{0, 0, 0}, new(big.Int).SetUint64(1)) + assert.NoError(t, err, "error inserting hash node") + _, err = smt2.InsertHashNode([]int{0, 0}, new(big.Int).SetUint64(2)) + assert.NoError(t, err, "error inserting hash node") + _, err = smtFull.InsertHashNode([]int{0, 0, 0}, new(big.Int).SetUint64(1)) + assert.NoError(t, err, "error inserting hash node") + + // get witnesses + rl1 := &trie.AlwaysTrueRetainDecider{} + rl2 := &trie.AlwaysTrueRetainDecider{} + rlFull := &trie.AlwaysTrueRetainDecider{} + blockWitness1, err := smt1.BuildWitness(rl1, context.Background()) + assert.NoError(t, err, "error building witness") + blockWitness2, err := smt2.BuildWitness(rl2, context.Background()) + assert.NoError(t, err, "error building witness") + expectedWitness, err := smtFull.BuildWitness(rlFull, context.Background()) + assert.NoError(t, err, "error building witness") + + mergedWitness, err := MergeWitnesses(context.Background(), []*trie.Witness{blockWitness1, blockWitness2}) + assert.NoError(t, err, "error merging witnesses") + + //create writer + var buff bytes.Buffer + expectedWitness.WriteDiff(mergedWitness, &buff) + diff := buff.String() + if len(diff) > 0 { + fmt.Println(diff) + } + assert.Equal(t, 0, len(diff), "witnesses should be equal") +} From c481e1f5ae752659dbe5b70b23a3ce660741129e Mon Sep 17 00:00:00 2001 From: Valentin Staykov <79150443+V-Staykov@users.noreply.github.com> Date: Tue, 3 Dec 2024 12:23:22 +0200 Subject: [PATCH 83/88] feat: add kurtosis setup composite action (#1531) * feat: add kurtosis setup composite action * feat: update queries and replace kurtosis setup wth composite * fix: move enclave name * fix: unify 
kurtosis setup action * fix: add shell bash to action * fix: export l2rpc endpint --- .github/actions/setup-kurtosis/action.yml | 74 +++++++++++++++ .github/scripts/test_resequence.sh | 8 +- .github/workflows/ci_zkevm.yml | 109 ++-------------------- .github/workflows/test-resequence.yml | 49 +--------- 4 files changed, 90 insertions(+), 150 deletions(-) create mode 100644 .github/actions/setup-kurtosis/action.yml diff --git a/.github/actions/setup-kurtosis/action.yml b/.github/actions/setup-kurtosis/action.yml new file mode 100644 index 00000000000..fcea1609f7a --- /dev/null +++ b/.github/actions/setup-kurtosis/action.yml @@ -0,0 +1,74 @@ + +name: "Setup Kurtosis" +description: "Setup Kurtosis CDK for tests" +runs: + using: "composite" + steps: + - name: Checkout cdk-erigon + uses: actions/checkout@v4 + with: + path: cdk-erigon + + - name: Checkout kurtosis-cdk + uses: actions/checkout@v4 + with: + repository: 0xPolygon/kurtosis-cdk + ref: v0.2.24 + path: kurtosis-cdk + + - name: Install Kurtosis CDK tools + uses: ./kurtosis-cdk/.github/actions/setup-kurtosis-cdk + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + + - name: Install polycli + shell: bash + run: | + tmp_dir=$(mktemp -d) && curl -L https://github.com/0xPolygon/polygon-cli/releases/download/v0.1.48/polycli_v0.1.48_linux_amd64.tar.gz | tar -xz -C "$tmp_dir" && mv "$tmp_dir"/* /usr/local/bin/polycli && rm -rf "$tmp_dir" + sudo chmod +x /usr/local/bin/polycli + /usr/local/bin/polycli version + + - name: Install yq + shell: bash + run: | + sudo curl -L https://github.com/mikefarah/yq/releases/download/v4.44.2/yq_linux_amd64 -o /usr/local/bin/yq + sudo chmod +x /usr/local/bin/yq + /usr/local/bin/yq --version + + - name: Build docker image + working-directory: ./cdk-erigon + shell: bash + run: docker build -t cdk-erigon:local --file Dockerfile . + + - name: Remove unused flags + working-directory: ./kurtosis-cdk + shell: bash + run: | + sed -i '/zkevm.sequencer-batch-seal-time:/d' templates/cdk-erigon/config.yml + sed -i '/zkevm.sequencer-non-empty-batch-seal-time:/d' templates/cdk-erigon/config.yml + sed -i '/zkevm\.sequencer-initial-fork-id/d' ./templates/cdk-erigon/config.yml + sed -i '/sentry.drop-useless-peers:/d' templates/cdk-erigon/config.yml + sed -i '/zkevm\.pool-manager-url/d' ./templates/cdk-erigon/config.yml + sed -i '$a\zkevm.disable-virtual-counters: true' ./templates/cdk-erigon/config.yml + sed -i '/zkevm.l2-datastreamer-timeout:/d' templates/cdk-erigon/config.yml + + - name: Create params.yml overrides + working-directory: ./kurtosis-cdk + shell: bash + run: | + echo 'args:' > params.yml + echo ' cdk_erigon_node_image: cdk-erigon:local' >> params.yml + echo ' el-1-geth-lighthouse: ethpandaops/lighthouse@sha256:4902d9e4a6b6b8d4c136ea54f0e51582a32f356f3dec7194a1adee13ed2d662e' >> params.yml + /usr/local/bin/yq -i '.args.data_availability_mode = "${{ matrix.da-mode }}"' params.yml + sed -i 's/"londonBlock": [0-9]\+/"londonBlock": 0/' ./templates/cdk-erigon/chainspec.json + sed -i 's/"normalcyBlock": [0-9]\+/"normalcyBlock": 0/' ./templates/cdk-erigon/chainspec.json + sed -i 's/"shanghaiTime": [0-9]\+/"shanghaiTime": 0/' ./templates/cdk-erigon/chainspec.json + sed -i 's/"cancunTime": [0-9]\+/"cancunTime": 0/' ./templates/cdk-erigon/chainspec.json + sed -i '/"terminalTotalDifficulty"/d' ./templates/cdk-erigon/chainspec.json + + - name: Deploy Kurtosis CDK package + working-directory: ./kurtosis-cdk + shell: bash + run: | + kurtosis run --enclave cdk-v1 --args-file params.yml --image-download always . 
'{"args": {"erigon_strict_mode": false, "cdk_erigon_node_image": "cdk-erigon:local"}}' \ No newline at end of file diff --git a/.github/scripts/test_resequence.sh b/.github/scripts/test_resequence.sh index b36bc878236..d59c780a33c 100755 --- a/.github/scripts/test_resequence.sh +++ b/.github/scripts/test_resequence.sh @@ -50,7 +50,7 @@ wait_for_l1_batch() { current_batch=$(cast logs --rpc-url "$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)" --address 0x1Fe038B54aeBf558638CA51C91bC8cCa06609e91 --from-block 0 --json | jq -r '.[] | select(.topics[0] == "0x3e54d0825ed78523037d00a81759237eb436ce774bd546993ee67a1b67b6e766") | .topics[1]' | tail -n 1 | sed 's/^0x//') current_batch=$((16#$current_batch)) elif [ "$batch_type" = "verified" ]; then - current_batch=$(cast rpc zkevm_verifiedBatchNumber --rpc-url "$(kurtosis port print cdk-v1 cdk-erigon-node-001 rpc)" | sed 's/^"//;s/"$//') + current_batch=$(cast rpc zkevm_verifiedBatchNumber --rpc-url "$(kurtosis port print cdk-v1 cdk-erigon-rpc-001 rpc)" | sed 's/^"//;s/"$//') else echo "Invalid batch type. Use 'virtual' or 'verified'." return 1 @@ -121,7 +121,7 @@ kurtosis service exec cdk-v1 cdk-erigon-sequencer-001 "nohup cdk-erigon --pprof= sleep 30 echo "Running loadtest using polycli" -/usr/local/bin/polycli loadtest --rpc-url "$(kurtosis port print cdk-v1 cdk-erigon-node-001 rpc)" --private-key "0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625" --verbosity 600 --requests 2000 --rate-limit 500 --mode uniswapv3 --legacy +/usr/local/bin/polycli loadtest --rpc-url "$(kurtosis port print cdk-v1 cdk-erigon-rpc-001 rpc)" --private-key "0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625" --verbosity 600 --requests 2000 --rate-limit 500 --mode uniswapv3 --legacy echo "Waiting for batch virtualization" if ! wait_for_l1_batch 600 "virtual"; then @@ -174,13 +174,13 @@ echo "Getting block hash from sequencer" sequencer_hash=$(cast block $comparison_block --rpc-url "$(kurtosis port print cdk-v1 cdk-erigon-sequencer-001 rpc)" | grep "hash" | awk '{print $2}') # wait for block to be available on sync node -if ! wait_for_l2_block_number $comparison_block "$(kurtosis port print cdk-v1 cdk-erigon-node-001 rpc)"; then +if ! 
wait_for_l2_block_number $comparison_block "$(kurtosis port print cdk-v1 cdk-erigon-rpc-001 rpc)"; then echo "Failed to wait for batch verification" exit 1 fi echo "Getting block hash from node" -node_hash=$(cast block $comparison_block --rpc-url "$(kurtosis port print cdk-v1 cdk-erigon-node-001 rpc)" | grep "hash" | awk '{print $2}') +node_hash=$(cast block $comparison_block --rpc-url "$(kurtosis port print cdk-v1 cdk-erigon-rpc-001 rpc)" | grep "hash" | awk '{print $2}') echo "Sequencer block hash: $sequencer_hash" echo "Node block hash: $node_hash" diff --git a/.github/workflows/ci_zkevm.yml b/.github/workflows/ci_zkevm.yml index 21447af3ebb..443ead2bfba 100644 --- a/.github/workflows/ci_zkevm.yml +++ b/.github/workflows/ci_zkevm.yml @@ -73,46 +73,8 @@ jobs: steps: - name: Checkout cdk-erigon uses: actions/checkout@v4 - with: - path: cdk-erigon - - - name: Checkout kurtosis-cdk - uses: actions/checkout@v4 - with: - repository: 0xPolygon/kurtosis-cdk - ref: v0.2.12 - path: kurtosis-cdk - - - name: Install Kurtosis CDK tools (Kurtosis, yq, Foundry, disable analytics) - uses: ./kurtosis-cdk/.github/actions/setup-kurtosis-cdk - - - name: Install yq - run: | - sudo curl -L https://github.com/mikefarah/yq/releases/download/v4.44.2/yq_linux_amd64 -o /usr/local/bin/yq - sudo chmod +x /usr/local/bin/yq - /usr/local/bin/yq --version - - - name: Build docker image - working-directory: ./cdk-erigon - run: docker build -t cdk-erigon:local --file Dockerfile . - - - name: Remove unused flags - working-directory: ./kurtosis-cdk - run: | - sed -i '/zkevm.sequencer-batch-seal-time:/d' templates/cdk-erigon/config.yml - sed -i '/zkevm.sequencer-non-empty-batch-seal-time:/d' templates/cdk-erigon/config.yml - sed -i '/sentry.drop-useless-peers:/d' templates/cdk-erigon/config.yml - sed -i '/zkevm.l2-datastreamer-timeout:/d' templates/cdk-erigon/config.yml - - name: Configure Kurtosis CDK - working-directory: ./kurtosis-cdk - run: | - /usr/local/bin/yq -i '.args.data_availability_mode = "${{ matrix.da-mode }}"' params.yml - /usr/local/bin/yq -i '.args.cdk_erigon_node_image = "cdk-erigon:local"' params.yml - - - name: Deploy Kurtosis CDK package - working-directory: ./kurtosis-cdk - run: | - kurtosis run --enclave cdk-v1 --image-download always . 
'{"args": {"data_availability_mode": "${{ matrix.da-mode }}", "cdk_erigon_node_image": "cdk-erigon:local"}}' + - name: Setup kurtosis + uses: ./.github/actions/setup-kurtosis - name: Run process with CPU monitoring working-directory: ./cdk-erigon @@ -134,9 +96,7 @@ jobs: - name: Monitor verified batches working-directory: ./kurtosis-cdk shell: bash - env: - ENCLAVE_NAME: cdk-v1 - run: timeout 900s .github/scripts/monitor-verified-batches.sh --rpc-url $(kurtosis port print cdk-v1 cdk-erigon-node-001 rpc) --target 20 --timeout 900 + run: timeout 900s .github/scripts/monitor-verified-batches.sh --enclave zdk-v1 --rpc-url $(kurtosis port print cdk-v1 cdk-erigon-rpc-001 rpc) --target 20 --timeout 900 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 @@ -146,9 +106,8 @@ jobs: kurtosis files download cdk-v1 bridge-config-artifact echo "BRIDGE_ADDRESS=$(/usr/local/bin/yq '.NetworkConfig.PolygonBridgeAddress' bridge-config-artifact/bridge-config.toml)" >> $GITHUB_ENV echo "ETH_RPC_URL=$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)" >> $GITHUB_ENV - echo "L2_RPC_URL=$(kurtosis port print cdk-v1 cdk-erigon-node-001 rpc)" >> $GITHUB_ENV echo "BRIDGE_API_URL=$(kurtosis port print cdk-v1 zkevm-bridge-service-001 rpc)" >> $GITHUB_ENV - + echo "L2_RPC_URL=$(kurtosis port print cdk-v1 cdk-erigon-rpc-001 rpc)" >> $GITHUB_ENV - name: Clone bridge repository run: git clone --recurse-submodules -j8 https://github.com/0xPolygonHermez/zkevm-bridge-service.git -b develop bridge @@ -186,7 +145,7 @@ jobs: run: | mkdir -p ci_logs cd ci_logs - kurtosis service logs cdk-v1 cdk-erigon-node-001 --all > cdk-erigon-node-001.log + kurtosis service logs cdk-v1 cdk-erigon-rpc-001 --all > cdk-erigon-rpc-001.log kurtosis service logs cdk-v1 cdk-erigon-sequencer-001 --all > cdk-erigon-sequencer-001.log kurtosis service logs cdk-v1 zkevm-agglayer-001 --all > zkevm-agglayer-001.log kurtosis service logs cdk-v1 zkevm-prover-001 --all > zkevm-prover-001.log @@ -210,62 +169,12 @@ jobs: - name: Checkout kurtosis-cdk uses: actions/checkout@v4 - with: - repository: 0xPolygon/kurtosis-cdk - ref: v0.2.12 - path: kurtosis-cdk - - - name: Install Kurtosis CDK tools - uses: ./kurtosis-cdk/.github/actions/setup-kurtosis-cdk - - - name: Install Foundry - uses: foundry-rs/foundry-toolchain@v1 - - - name: Install yq - run: | - sudo curl -L https://github.com/mikefarah/yq/releases/download/v4.44.2/yq_linux_amd64 -o /usr/local/bin/yq - sudo chmod +x /usr/local/bin/yq - /usr/local/bin/yq --version - - - name: Install polycli - run: | - tmp_dir=$(mktemp -d) && curl -L https://github.com/0xPolygon/polygon-cli/releases/download/v0.1.48/polycli_v0.1.48_linux_amd64.tar.gz | tar -xz -C "$tmp_dir" && mv "$tmp_dir"/* /usr/local/bin/polycli && rm -rf "$tmp_dir" - sudo chmod +x /usr/local/bin/polycli - /usr/local/bin/polycli version + - name: Setup kurtosis + uses: ./.github/actions/setup-kurtosis - - name: Build docker image - working-directory: ./cdk-erigon - run: docker build -t cdk-erigon:local --file Dockerfile . 
- - - name: Modify cdk-erigon flags - working-directory: ./kurtosis-cdk - run: | - sed -i '/zkevm.sequencer-batch-seal-time:/d' templates/cdk-erigon/config.yml - sed -i '/zkevm.sequencer-non-empty-batch-seal-time:/d' templates/cdk-erigon/config.yml - sed -i '/zkevm\.sequencer-initial-fork-id/d' ./templates/cdk-erigon/config.yml - sed -i '/sentry.drop-useless-peers:/d' templates/cdk-erigon/config.yml - sed -i '/zkevm\.pool-manager-url/d' ./templates/cdk-erigon/config.yml - sed -i '$a\zkevm.disable-virtual-counters: true' ./templates/cdk-erigon/config.yml - sed -i '/zkevm.l2-datastreamer-timeout:/d' templates/cdk-erigon/config.yml - - - - name: Configure Kurtosis CDK - working-directory: ./kurtosis-cdk - run: | - sed -i 's/"londonBlock": [0-9]\+/"londonBlock": 0/' ./templates/cdk-erigon/chainspec.json - sed -i 's/"normalcyBlock": [0-9]\+/"normalcyBlock": 0/' ./templates/cdk-erigon/chainspec.json - sed -i 's/"shanghaiTime": [0-9]\+/"shanghaiTime": 0/' ./templates/cdk-erigon/chainspec.json - sed -i 's/"cancunTime": [0-9]\+/"cancunTime": 0/' ./templates/cdk-erigon/chainspec.json - sed -i '/"terminalTotalDifficulty"/d' ./templates/cdk-erigon/chainspec.json - - - name: Deploy Kurtosis CDK package - working-directory: ./kurtosis-cdk - run: | - kurtosis run --enclave cdk-v1 --image-download always . '{"args": {"erigon_strict_mode": false, "cdk_erigon_node_image": "cdk-erigon:local"}}' - - name: Dynamic gas fee tx load test working-directory: ./kurtosis-cdk - run: /usr/local/bin/polycli loadtest --rpc-url "$(kurtosis port print cdk-v1 cdk-erigon-node-001 rpc)" --private-key "0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625" --verbosity 700 --requests 500 --rate-limit 50 --mode uniswapv3 + run: /usr/local/bin/polycli loadtest --rpc-url "$(kurtosis port print cdk-v1 cdk-erigon-rpc-001 rpc)" --private-key "0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625" --verbosity 700 --requests 500 --rate-limit 50 --mode uniswapv3 --legacy - name: Upload logs uses: actions/upload-artifact@v3 @@ -279,7 +188,7 @@ jobs: run: | mkdir -p ci_logs cd ci_logs - kurtosis service logs cdk-v1 cdk-erigon-node-001 --all > cdk-erigon-node-001.log + kurtosis service logs cdk-v1 cdk-erigon-rpc-001 --all > cdk-erigon-rpc-001.log kurtosis service logs cdk-v1 cdk-erigon-sequencer-001 --all > cdk-erigon-sequencer-001.log - name: Upload logs diff --git a/.github/workflows/test-resequence.yml b/.github/workflows/test-resequence.yml index 63029d21c56..cee0118e0e1 100644 --- a/.github/workflows/test-resequence.yml +++ b/.github/workflows/test-resequence.yml @@ -18,51 +18,8 @@ jobs: steps: - name: Checkout cdk-erigon uses: actions/checkout@v4 - with: - path: cdk-erigon - - - name: Checkout kurtosis-cdk - uses: actions/checkout@v4 - with: - repository: 0xPolygon/kurtosis-cdk - ref: v0.2.12 - path: kurtosis-cdk - - - name: Install Kurtosis CDK tools - uses: ./kurtosis-cdk/.github/actions/setup-kurtosis-cdk - - - name: Install Foundry - uses: foundry-rs/foundry-toolchain@v1 - - - name: Install yq - run: | - sudo curl -L https://github.com/mikefarah/yq/releases/download/v4.44.2/yq_linux_amd64 -o /usr/local/bin/yq - sudo chmod +x /usr/local/bin/yq - /usr/local/bin/yq --version - - name: Install polycli - run: | - tmp_dir=$(mktemp -d) && curl -L https://github.com/0xPolygon/polygon-cli/releases/download/v0.1.48/polycli_v0.1.48_linux_amd64.tar.gz | tar -xz -C "$tmp_dir" && mv "$tmp_dir"/* /usr/local/bin/polycli && rm -rf "$tmp_dir" - sudo chmod +x /usr/local/bin/polycli - /usr/local/bin/polycli version - - 
name: Build docker image - working-directory: ./cdk-erigon - run: docker build -t cdk-erigon:local --file Dockerfile . - - - name: Remove unused flags - working-directory: ./kurtosis-cdk - run: | - sed -i '/zkevm.sequencer-batch-seal-time:/d' templates/cdk-erigon/config.yml - sed -i '/zkevm.sequencer-non-empty-batch-seal-time:/d' templates/cdk-erigon/config.yml - sed -i '/sentry.drop-useless-peers:/d' templates/cdk-erigon/config.yml - sed -i '/zkevm.pool-manager-url/d' templates/cdk-erigon/config.yml - sed -i '/zkevm.l2-datastreamer-timeout:/d' templates/cdk-erigon/config.yml - - name: Configure Kurtosis CDK - working-directory: ./kurtosis-cdk - run: | - /usr/local/bin/yq -i '.args.cdk_erigon_node_image = "cdk-erigon:local"' params.yml - - name: Deploy Kurtosis CDK package - working-directory: ./kurtosis-cdk - run: kurtosis run --enclave cdk-v1 --args-file params.yml --image-download always . + - name: Setup kurtosis + uses: ./.github/actions/setup-kurtosis - name: Test resequence working-directory: ./cdk-erigon @@ -80,7 +37,7 @@ jobs: run: | mkdir -p ci_logs cd ci_logs - kurtosis service logs cdk-v1 cdk-erigon-node-001 --all > cdk-erigon-node-001.log + kurtosis service logs cdk-v1 cdk-erigon-rpc-001 --all > cdk-erigon-rpc-001.log kurtosis service logs cdk-v1 cdk-erigon-sequencer-001 --all > cdk-erigon-sequencer-001.log kurtosis service logs cdk-v1 zkevm-agglayer-001 --all > zkevm-agglayer-001.log kurtosis service logs cdk-v1 zkevm-prover-001 --all > zkevm-prover-001.log From 84d960d0bb1b4b24433cd94b85373d715cf82a50 Mon Sep 17 00:00:00 2001 From: Valentin Staykov <79150443+V-Staykov@users.noreply.github.com> Date: Tue, 3 Dec 2024 13:06:56 +0200 Subject: [PATCH 84/88] feat(rpc): use old l1 gas price if l1 not available (#1524) --- turbo/jsonrpc/eth_system_zk.go | 31 +++++++++++++------------------ 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/turbo/jsonrpc/eth_system_zk.go b/turbo/jsonrpc/eth_system_zk.go index d50f8d7016b..2d2185d3248 100644 --- a/turbo/jsonrpc/eth_system_zk.go +++ b/turbo/jsonrpc/eth_system_zk.go @@ -9,6 +9,7 @@ import ( "time" "github.com/ledgerwatch/erigon-lib/common/hexutil" + "github.com/ledgerwatch/erigon/ethclient" "github.com/ledgerwatch/erigon/zkevm/encoding" "github.com/ledgerwatch/erigon/zkevm/jsonrpc/client" "github.com/ledgerwatch/log/v3" @@ -39,23 +40,15 @@ func (api *APIImpl) GasPrice(ctx context.Context) (*hexutil.Big, error) { return &price, nil } - res, err := client.JSONRPCCall(api.l2RpcUrl, "eth_gasPrice") + client, err := ethclient.DialContext(ctx, api.l2RpcUrl) if err != nil { return nil, err } + defer client.Close() - if res.Error != nil { - return nil, fmt.Errorf("RPC error response: %s", res.Error.Message) - } - - var resultString string - if err := json.Unmarshal(res.Result, &resultString); err != nil { - return nil, fmt.Errorf("failed to unmarshal result: %v", err) - } - - price, ok := big.NewInt(0).SetString(resultString[2:], 16) - if !ok { - return nil, fmt.Errorf("failed to convert result to big.Int") + price, err := client.SuggestGasPrice(ctx) + if err != nil { + return nil, err } return (*hexutil.Big)(price), nil @@ -71,11 +64,13 @@ func (api *APIImpl) GasPrice_nonRedirected(ctx context.Context) (*hexutil.Big, e if time.Since(api.L1GasPrice.timestamp) > 3*time.Second || api.L1GasPrice.gasPrice == nil { l1GasPrice, err := api.l1GasPrice() if err != nil { - return nil, err - } - api.L1GasPrice = L1GasPrice{ - timestamp: time.Now(), - gasPrice: l1GasPrice, + log.Debug("Failed to get L1 gas price: ", err) + + } else { + 
api.L1GasPrice = L1GasPrice{ + timestamp: time.Now(), + gasPrice: l1GasPrice, + } } } From 9be5df27036ba6ceba5cde6a706f37ad0d3d579a Mon Sep 17 00:00:00 2001 From: Scott Fairclough <70711990+hexoscott@users.noreply.github.com> Date: Tue, 3 Dec 2024 11:54:11 +0000 Subject: [PATCH 85/88] sequencer to report correct block height (#1527) --- turbo/jsonrpc/eth_block_zkevm.go | 8 +++++++- turbo/rpchelper/rpc_block.go | 26 +++++++++++++++----------- 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/turbo/jsonrpc/eth_block_zkevm.go b/turbo/jsonrpc/eth_block_zkevm.go index 6f82477f685..3bf04c81dd8 100644 --- a/turbo/jsonrpc/eth_block_zkevm.go +++ b/turbo/jsonrpc/eth_block_zkevm.go @@ -26,6 +26,7 @@ import ( "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/transactions" "github.com/ledgerwatch/erigon/zk/hermez_db" + "github.com/ledgerwatch/erigon/zk/sequencer" ) func (api *APIImpl) CallBundle(ctx context.Context, txHashes []common.Hash, stateBlockNumberOrHash rpc.BlockNumberOrHash, timeoutMilliSecondsPtr *int64) (map[string]interface{}, error) { @@ -217,7 +218,12 @@ func (api *APIImpl) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber defer tx.Rollback() // get latest finished block - finishedBlock, err := stages.GetStageProgress(tx, stages.Finish) + var finishedBlock uint64 + if sequencer.IsSequencer() { + finishedBlock, err = stages.GetStageProgress(tx, stages.Execution) + } else { + finishedBlock, err = stages.GetStageProgress(tx, stages.Finish) + } if err != nil { return nil, err } diff --git a/turbo/rpchelper/rpc_block.go b/turbo/rpchelper/rpc_block.go index b5da8148c29..5835209835c 100644 --- a/turbo/rpchelper/rpc_block.go +++ b/turbo/rpchelper/rpc_block.go @@ -10,6 +10,7 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/zk/hermez_db" + "github.com/ledgerwatch/erigon/zk/sequencer" ) var UnknownBlockError = &rpc.CustomError{ @@ -18,15 +19,13 @@ var UnknownBlockError = &rpc.CustomError{ } func GetLatestFinishedBlockNumber(tx kv.Tx) (uint64, error) { - forkchoiceHeadHash := rawdb.ReadForkchoiceHead(tx) - if forkchoiceHeadHash != (libcommon.Hash{}) { - forkchoiceHeadNum := rawdb.ReadHeaderNumber(tx, forkchoiceHeadHash) - if forkchoiceHeadNum != nil { - return *forkchoiceHeadNum, nil - } + var blockNum uint64 + var err error + if sequencer.IsSequencer() { + blockNum, err = stages.GetStageProgress(tx, stages.Execution) + } else { + blockNum, err = stages.GetStageProgress(tx, stages.Finish) } - - blockNum, err := stages.GetStageProgress(tx, stages.Finish) if err != nil { return 0, fmt.Errorf("getting latest block number: %w", err) } @@ -48,14 +47,19 @@ func GetFinalizedBlockNumber(tx kv.Tx) (uint64, error) { return 0, err } - finishedBlockNumber, err := stages.GetStageProgress(tx, stages.Finish) + var highestBlockNumber uint64 + if sequencer.IsSequencer() { + highestBlockNumber, err = stages.GetStageProgress(tx, stages.Execution) + } else { + highestBlockNumber, err = stages.GetStageProgress(tx, stages.Finish) + } if err != nil { return 0, fmt.Errorf("getting latest finished block number: %w", err) } blockNumber := highestVerifiedBlock - if finishedBlockNumber < blockNumber { - blockNumber = finishedBlockNumber + if highestBlockNumber < blockNumber { + blockNumber = highestBlockNumber } return blockNumber, nil From 54c6672c4b45ce7520da9a8a0b94e462b0518a59 Mon Sep 17 00:00:00 2001 From: Moretti Georgiev Date: Tue, 3 Dec 2024 15:08:44 +0200 Subject: [PATCH 
86/88] fix: replace L1 rpc url in error for getting l1 gas price (#1533) --- turbo/jsonrpc/eth_system_zk.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/turbo/jsonrpc/eth_system_zk.go b/turbo/jsonrpc/eth_system_zk.go index 2d2185d3248..ffdce335075 100644 --- a/turbo/jsonrpc/eth_system_zk.go +++ b/turbo/jsonrpc/eth_system_zk.go @@ -6,9 +6,11 @@ import ( "fmt" "math/big" "strconv" + "strings" "time" "github.com/ledgerwatch/erigon-lib/common/hexutil" + "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/ethclient" "github.com/ledgerwatch/erigon/zkevm/encoding" "github.com/ledgerwatch/erigon/zkevm/jsonrpc/client" @@ -119,6 +121,10 @@ func (api *APIImpl) l1GasPrice() (*big.Int, error) { } if res.Error != nil { + if strings.Contains(res.Error.Message, api.L1RpcUrl) { + replacement := fmt.Sprintf("<%s>", utils.L1RpcUrlFlag.Name) + res.Error.Message = strings.ReplaceAll(res.Error.Message, api.L1RpcUrl, replacement) + } return nil, fmt.Errorf("RPC error response: %s", res.Error.Message) } From ed6199928747cac0fc6de266b2222080e1465607 Mon Sep 17 00:00:00 2001 From: Scott Fairclough <70711990+hexoscott@users.noreply.github.com> Date: Tue, 3 Dec 2024 13:28:29 +0000 Subject: [PATCH 87/88] do not build empty blocks on detecting an overflow (#1525) also stop processing transactions when the block timer has ticked to give more consistent block times --- .../test-contracts/contracts/GasBurner.sol | 12 +++++ zk/debug_tools/test-contracts/package.json | 3 +- .../test-contracts/scripts/gas-burner.js | 26 +++++++++++ zk/stages/stage_sequence_execute.go | 44 +++++++++++++++---- 4 files changed, 76 insertions(+), 9 deletions(-) create mode 100644 zk/debug_tools/test-contracts/contracts/GasBurner.sol create mode 100644 zk/debug_tools/test-contracts/scripts/gas-burner.js diff --git a/zk/debug_tools/test-contracts/contracts/GasBurner.sol b/zk/debug_tools/test-contracts/contracts/GasBurner.sol new file mode 100644 index 00000000000..bdc7a8d8db8 --- /dev/null +++ b/zk/debug_tools/test-contracts/contracts/GasBurner.sol @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.15; + +contract GasBurner { + constructor() { + //dynamic array + uint[] memory a = new uint[](12000); + for (uint i = 0; i < 2000; i++) { + a[i%10000] = i; + } + } +} \ No newline at end of file diff --git a/zk/debug_tools/test-contracts/package.json b/zk/debug_tools/test-contracts/package.json index 7022ac7abf5..3164dd36bc4 100644 --- a/zk/debug_tools/test-contracts/package.json +++ b/zk/debug_tools/test-contracts/package.json @@ -21,7 +21,8 @@ "chainCall:local": "npx hardhat compile && npx hardhat run scripts/chain-call.js --network local", "chainCall:sepolia": "npx hardhat compile && npx hardhat run scripts/chain-call.js --network sepolia", "create:local": "npx hardhat compile && npx hardhat run scripts/create.js --network local", - "keccak:local": "npx hardhat compile && npx hardhat run scripts/keccak-loop.js --network local" + "keccak:local": "npx hardhat compile && npx hardhat run scripts/keccak-loop.js --network local", + "gasBurner:local": "npx hardhat compile && npx hardhat run scripts/gas-burner.js --network local" }, "keywords": [], "author": "", diff --git a/zk/debug_tools/test-contracts/scripts/gas-burner.js b/zk/debug_tools/test-contracts/scripts/gas-burner.js new file mode 100644 index 00000000000..e3564d84582 --- /dev/null +++ b/zk/debug_tools/test-contracts/scripts/gas-burner.js @@ -0,0 +1,26 @@ +async function main() { +try { + // Get the ContractFactory of your BigLoopContract + 
const GasBurnerContract = await hre.ethers.getContractFactory("GasBurner"); + + // Deploy the contract + const contract = await GasBurnerContract.deploy(); + // Wait for the deployment transaction to be mined + await contract.waitForDeployment(); + + console.log(`GasBurner deployed to: ${await contract.getAddress()}`); + + // const result = await contract.bigLoop(10000); + // console.log(result); + } catch (error) { + console.error(error); + process.exit(1); + } +} + +main() + .then(() => process.exit(0)) + .catch(error => { + console.error(error); + process.exit(1); + }); \ No newline at end of file diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index 72718589b88..3710ed6025d 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -307,8 +307,14 @@ func sequencingBatchStep( log.Info(fmt.Sprintf("[%s] Waiting for txs from the pool...", logPrefix)) } - LOOP_TRANSACTIONS: + innerBreak := false + emptyBlockOverflow := false + + OuterLoopTransactions: for { + if innerBreak { + break + } select { case <-logTicker.C: if !batchState.isAnyRecovery() { @@ -316,7 +322,7 @@ func sequencingBatchStep( } case <-blockTicker.C: if !batchState.isAnyRecovery() { - break LOOP_TRANSACTIONS + break OuterLoopTransactions } case <-batchTicker.C: if !batchState.isAnyRecovery() { @@ -380,7 +386,19 @@ func sequencingBatchStep( badTxHashes := make([]common.Hash, 0) minedTxHashes := make([]common.Hash, 0) + + InnerLoopTransactions: for i, transaction := range batchState.blockState.transactionsForInclusion { + // quick check if we should stop handling transactions + select { + case <-blockTicker.C: + if !batchState.isAnyRecovery() { + innerBreak = true + break InnerLoopTransactions + } + default: + } + txHash := transaction.Hash() effectiveGas := batchState.blockState.getL1EffectiveGases(cfg, i) @@ -457,7 +475,10 @@ func sequencingBatchStep( if batchState.reachedOverflowTransactionLimit() || cfg.zk.SealBatchImmediatelyOnOverflow { log.Info(fmt.Sprintf("[%s] closing batch due to counters", logPrefix), "counters: ", batchState.overflowTransactions, "immediate", cfg.zk.SealBatchImmediatelyOnOverflow) runLoopBlocks = false - break LOOP_TRANSACTIONS + if len(batchState.blockState.builtBlockElements.transactions) == 0 { + emptyBlockOverflow = true + } + break OuterLoopTransactions } } @@ -474,7 +495,7 @@ func sequencingBatchStep( } log.Info(fmt.Sprintf("[%s] gas overflowed adding transaction to block", logPrefix), "block", blockNumber, "tx-hash", txHash) runLoopBlocks = false - break LOOP_TRANSACTIONS + break OuterLoopTransactions case overflowNone: } @@ -494,12 +515,12 @@ func sequencingBatchStep( if len(batchState.blockState.transactionsForInclusion) == 0 { // We need to jump to the next block here if there are no transactions in current block batchState.resequenceBatchJob.UpdateLastProcessedTx(batchState.resequenceBatchJob.CurrentBlock().L2Blockhash) - break LOOP_TRANSACTIONS + break OuterLoopTransactions } if batchState.resequenceBatchJob.AtNewBlockBoundary() { // We need to jump to the next block here if we are at the end of the current block - break LOOP_TRANSACTIONS + break OuterLoopTransactions } else { if cfg.zk.SequencerResequenceStrict { return fmt.Errorf("strict mode enabled, but resequenced batch %d has transactions that overflowed counters or failed transactions", batchState.batchNumber) @@ -533,16 +554,23 @@ func sequencingBatchStep( log.Info(fmt.Sprintf("[%s] L1 recovery no more transactions to recover", logPrefix)) } - break 
LOOP_TRANSACTIONS + break OuterLoopTransactions } if batchState.isLimboRecovery() { runLoopBlocks = false - break LOOP_TRANSACTIONS + break OuterLoopTransactions } } } + // we do not want to commit this block if it has no transactions and we detected an overflow - essentially the batch is too + // full to get any more transactions in it and we don't want to commit an empty block + if emptyBlockOverflow { + log.Info(fmt.Sprintf("[%s] Block %d overflow detected with no transactions added, skipping block for next batch", logPrefix, blockNumber)) + break + } + if block, err = doFinishBlockAndUpdateState(batchContext, ibs, header, parentBlock, batchState, ger, l1BlockHash, l1TreeUpdateIndex, infoTreeIndexProgress, batchCounters); err != nil { return err } From 82415f552ff20615c985a889088689784fe2e3de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= <93934272+Stefan-Ethernal@users.noreply.github.com> Date: Tue, 3 Dec 2024 17:09:20 +0100 Subject: [PATCH 88/88] Encode transactions to RLP format in zero tracer (#1360) * feat: set NewTxnTrieNode to TxnMeta and add some TODOs * feat: remove NewTxnTrieNode and BlockUsedCodeHashes * chore: minor simplifications --- core/types/trace.go | 3 --- eth/tracers/native/zero.go | 41 +++++++++++++++++++------------------- 2 files changed, 20 insertions(+), 24 deletions(-) diff --git a/core/types/trace.go b/core/types/trace.go index 979b7fe6fbc..73d71326fab 100644 --- a/core/types/trace.go +++ b/core/types/trace.go @@ -78,7 +78,6 @@ func (t *TxnTrace) MarshalJSON() ([]byte, error) { type TxnMeta struct { ByteCode HexBytes `json:"byte_code,omitempty"` - NewTxnTrieNode HexBytes `json:"new_txn_trie_node_byte,omitempty"` NewReceiptTrieNode HexBytes `json:"new_receipt_trie_node_byte,omitempty"` GasUsed uint64 `json:"gas_used,omitempty"` } @@ -88,8 +87,6 @@ type TxnInfo struct { Meta TxnMeta `json:"meta,omitempty"` } -type BlockUsedCodeHashes []libcommon.Hash - type CombinedPreImages struct { Compact HexBytes `json:"compact,omitempty"` } diff --git a/eth/tracers/native/zero.go b/eth/tracers/native/zero.go index 3593a38eea1..e5228bfec16 100644 --- a/eth/tracers/native/zero.go +++ b/eth/tracers/native/zero.go @@ -27,7 +27,8 @@ func init() { } type zeroTracer struct { - noopTracer // stub struct to mock not used interface methods + noopTracer // stub struct to mock not used interface methods + env *vm.EVM tx types.TxnInfo gasLimit uint64 // Amount of gas bought for the whole tx @@ -39,7 +40,7 @@ type zeroTracer struct { addrOpCodes map[libcommon.Address]map[vm.OpCode]struct{} } -func newZeroTracer(ctx *tracers.Context, cfg json.RawMessage) (tracers.Tracer, error) { +func newZeroTracer(ctx *tracers.Context, _ json.RawMessage) (tracers.Tracer, error) { return &zeroTracer{ tx: types.TxnInfo{ Traces: make(map[libcommon.Address]*types.TxnTrace), @@ -72,19 +73,22 @@ func (t *zeroTracer) CaptureStart(env *vm.EVM, from libcommon.Address, to libcom } } + receiverTxTrace := t.tx.Traces[to] + senderTxTrace := t.tx.Traces[from] + // The recipient balance includes the value transferred. - toBal := new(big.Int).Sub(t.tx.Traces[to].Balance.ToBig(), value.ToBig()) - t.tx.Traces[to].Balance = uint256.MustFromBig(toBal) + toBal := new(big.Int).Sub(receiverTxTrace.Balance.ToBig(), value.ToBig()) + receiverTxTrace.Balance = uint256.MustFromBig(toBal) // The sender balance is after reducing: value and gasLimit. // We need to re-add them to get the pre-tx balance. 
- fromBal := new(big.Int).Set(t.tx.Traces[from].Balance.ToBig()) + fromBal := new(big.Int).Set(senderTxTrace.Balance.ToBig()) gasPrice := env.TxContext.GasPrice consumedGas := new(big.Int).Mul(gasPrice.ToBig(), new(big.Int).SetUint64(t.gasLimit)) fromBal.Add(fromBal, new(big.Int).Add(value.ToBig(), consumedGas)) - t.tx.Traces[from].Balance = uint256.MustFromBig(fromBal) - if t.tx.Traces[from].Nonce.Cmp(uint256.NewInt(0)) > 0 { - t.tx.Traces[from].Nonce.Sub(t.tx.Traces[from].Nonce, uint256.NewInt(1)) + senderTxTrace.Balance = uint256.MustFromBig(fromBal) + if senderTxTrace.Nonce.Cmp(uint256.NewInt(0)) > 0 { + senderTxTrace.Nonce.Sub(senderTxTrace.Nonce, uint256.NewInt(1)) } } @@ -291,24 +295,22 @@ func (t *zeroTracer) CaptureTxEnd(restGas uint64) { // Set the receipt logs and create a bloom for filtering receipt.Logs = t.env.IntraBlockState().GetLogs(t.ctx.Txn.Hash()) receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) - receipt.BlockNumber = big.NewInt(0).SetUint64(t.ctx.BlockNum) + receipt.BlockNumber = new(big.Int).SetUint64(t.ctx.BlockNum) receipt.TransactionIndex = uint(t.ctx.TxIndex) receiptBuffer := &bytes.Buffer{} - encodeErr := receipt.EncodeRLP(receiptBuffer) - - if encodeErr != nil { - log.Error("failed to encode receipt", "err", encodeErr) + err := receipt.EncodeRLP(receiptBuffer) + if err != nil { + log.Error("failed to encode receipt", "err", err) return } t.tx.Meta.NewReceiptTrieNode = receiptBuffer.Bytes() txBuffer := &bytes.Buffer{} - encodeErr = t.ctx.Txn.MarshalBinary(txBuffer) - - if encodeErr != nil { - log.Error("failed to encode transaction", "err", encodeErr) + err = t.ctx.Txn.EncodeRLP(txBuffer) + if err != nil { + log.Error("failed to encode transaction", "err", err) return } @@ -326,10 +328,7 @@ func (t *zeroTracer) CaptureEnd(output []byte, gasUsed uint64, err error) { // GetResult returns the json-encoded nested list of call traces, and any // error arising from the encoding or forceful termination (via `Stop`). func (t *zeroTracer) GetResult() (json.RawMessage, error) { - var res []byte - var err error - res, err = json.Marshal(t.tx) - + res, err := json.Marshal(t.tx) if err != nil { return nil, err }