Erigon3 add blob snapshots #489

Merged · 6 commits · Aug 28, 2024
3 changes: 2 additions & 1 deletion cmd/capcli/cli.go
@@ -565,7 +565,8 @@ func (r *RetrieveHistoricalState) Run(ctx *Context) error {
}

var bor *freezeblocks.BorRoSnapshots
blockReader := freezeblocks.NewBlockReader(allSnapshots, bor)
var bsc *freezeblocks.BscRoSnapshots
blockReader := freezeblocks.NewBlockReader(allSnapshots, bor, bsc)
eth1Getter := getters.NewExecutionSnapshotReader(ctx, blockReader, db)
eth1Getter.SetBeaconChainConfig(beaconConfig)
csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs, log.Root())
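Existing call sites gain a third argument; passing nil appears to remain acceptable where no BSC blob segments are involved, mirroring the nil-BorSnapshots convention already used in this PR. A minimal sketch, with variable names assumed from the hunk above:

```go
// Sketch of the widened constructor call. allSnapshots is an already-opened
// *freezeblocks.RoSnapshots; bor and bsc stay nil for chains that carry no
// Bor or BSC blob segments.
var bor *freezeblocks.BorRoSnapshots
var bsc *freezeblocks.BscRoSnapshots
blockReader := freezeblocks.NewBlockReader(allSnapshots, bor, bsc)
```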
@@ -68,7 +68,7 @@ func NewHeimdallSimulator(ctx context.Context, snapDir string, logger log.Logger

h := HeimdallSimulator{
snapshots: snapshots,
blockReader: freezeblocks.NewBlockReader(nil, snapshots),
blockReader: freezeblocks.NewBlockReader(nil, snapshots, nil),

iterations: iterations,

2 changes: 1 addition & 1 deletion cmd/hack/hack.go
@@ -140,7 +140,7 @@ func printCurrentBlockNumber(chaindata string) {
}

func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) {
br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{}, "", 0, log.New()), nil /* BorSnapshots */)
br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{}, "", 0, log.New()), nil /* BorSnapshots */, nil)
bw := blockio.NewBlockWriter()
return br, bw
}
10 changes: 6 additions & 4 deletions cmd/integration/commands/reset_state.go
@@ -54,12 +54,13 @@ var cmdResetState = &cobra.Command{
}
ctx, _ := common.RootContext()
defer db.Close()
sn, borSn, agg, _ := allSnapshots(ctx, db, logger)
sn, borSn, bscSn, agg, _ := allSnapshots(ctx, db, logger)
defer sn.Close()
defer borSn.Close()
defer bscSn.Close()
defer agg.Close()

if err := db.View(ctx, func(tx kv.Tx) error { return printStages(tx, sn, borSn, agg) }); err != nil {
if err := db.View(ctx, func(tx kv.Tx) error { return printStages(tx, sn, borSn, bscSn, agg) }); err != nil {
if !errors.Is(err, context.Canceled) {
logger.Error(err.Error())
}
@@ -75,7 +76,7 @@ var cmdResetState = &cobra.Command{

// set genesis after reset all buckets
fmt.Printf("After reset: \n")
if err := db.View(ctx, func(tx kv.Tx) error { return printStages(tx, sn, borSn, agg) }); err != nil {
if err := db.View(ctx, func(tx kv.Tx) error { return printStages(tx, sn, borSn, bscSn, agg) }); err != nil {
if !errors.Is(err, context.Canceled) {
logger.Error(err.Error())
}
@@ -113,7 +114,7 @@ func init() {
rootCmd.AddCommand(cmdClearBadBlocks)
}

func printStages(tx kv.Tx, snapshots *freezeblocks.RoSnapshots, borSn *freezeblocks.BorRoSnapshots, agg *state.Aggregator) error {
func printStages(tx kv.Tx, snapshots *freezeblocks.RoSnapshots, borSn *freezeblocks.BorRoSnapshots, bscSn *freezeblocks.BscRoSnapshots, agg *state.Aggregator) error {
var err error
var progress uint64
w := new(tabwriter.Writer)
@@ -140,6 +141,7 @@ func printStages(tx kv.Tx, snapshots *freezeblocks.RoSnapshots, borSn *freezeblo
fmt.Fprintf(w, "prune distance: %s\n\n", pm.String())
fmt.Fprintf(w, "blocks: segments=%d, indices=%d\n", snapshots.SegmentsMax(), snapshots.IndicesMax())
fmt.Fprintf(w, "blocks.bor: segments=%d, indices=%d\n\n", borSn.SegmentsMax(), borSn.IndicesMax())
fmt.Fprintf(w, "blocks.bsc: segments=%d, indices=%d\n\n", bscSn.SegmentsMax(), bscSn.IndicesMax())

_, lastBlockInHistSnap, _ := rawdbv3.TxNums.FindBlockNum(tx, agg.EndTxNumMinimax())
_lb, _lt, _ := rawdbv3.TxNums.Last(tx)
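Every integration command that consumes allSnapshots now destructures five values and closes the BSC set alongside the others; a sketch of the updated calling convention, with the surrounding command setup assumed:

```go
// Sketch: consuming the widened allSnapshots return inside a command.
sn, borSn, bscSn, agg, _ := allSnapshots(ctx, db, logger)
defer sn.Close()
defer borSn.Close()
defer bscSn.Close()
defer agg.Close()

// printStages now also reports the BSC blob segments ("blocks.bsc: ...").
if err := db.View(ctx, func(tx kv.Tx) error {
	return printStages(tx, sn, borSn, bscSn, agg)
}); err != nil && !errors.Is(err, context.Canceled) {
	logger.Error(err.Error())
}
```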
2 changes: 1 addition & 1 deletion cmd/integration/commands/root.go
@@ -109,7 +109,7 @@ func openDB(opts kv2.MdbxOpts, applyMigrations bool, logger log.Logger) (kv.RwDB
}

if opts.GetLabel() == kv.ChainDB {
_, _, agg, _ := allSnapshots(context.Background(), db, logger)
_, _, _, agg, _ := allSnapshots(context.Background(), db, logger)
tdb, err := temporal.New(db, agg)
if err != nil {
return nil, err
55 changes: 35 additions & 20 deletions cmd/integration/commands/stages.go
@@ -584,9 +584,10 @@ func init() {
}

func stageSnapshots(db kv.RwDB, ctx context.Context, logger log.Logger) error {
sn, borSn, agg, _ := allSnapshots(ctx, db, logger)
sn, borSn, bscSn, agg, _ := allSnapshots(ctx, db, logger)
defer sn.Close()
defer borSn.Close()
defer bscSn.Close()
defer agg.Close()

br, bw := blocksIO(db, logger)
@@ -640,9 +641,10 @@ func stageHeaders(db kv.RwDB, ctx context.Context, logger log.Logger) error {
return err
}

sn, borSn, agg, _ := allSnapshots(ctx, db, logger)
sn, borSn, bscSn, agg, _ := allSnapshots(ctx, db, logger)
defer sn.Close()
defer borSn.Close()
defer bscSn.Close()
defer agg.Close()
br, bw := blocksIO(db, logger)
engine, _, _, _, _ := newSync(ctx, db, nil /* miningConfig */, logger)
@@ -744,7 +746,7 @@ func stageBorHeimdall(db kv.RwDB, ctx context.Context, unwindTypes []string, log
return nil
}
if unwind > 0 {
sn, borSn, agg, _ := allSnapshots(ctx, db, logger)
sn, borSn, _, agg, _ := allSnapshots(ctx, db, logger)
defer sn.Close()
defer borSn.Close()
defer agg.Close()
@@ -775,7 +777,7 @@ func stageBorHeimdall(db kv.RwDB, ctx context.Context, unwindTypes []string, log
return nil
}

sn, borSn, agg, _ := allSnapshots(ctx, db, logger)
sn, borSn, _, agg, _ := allSnapshots(ctx, db, logger)
defer sn.Close()
defer borSn.Close()
defer agg.Close()
@@ -808,9 +810,10 @@ func stageBorHeimdall(db kv.RwDB, ctx context.Context, unwindTypes []string, log
}

func stageBodies(db kv.RwDB, ctx context.Context, logger log.Logger) error {
sn, borSn, agg, _ := allSnapshots(ctx, db, logger)
sn, borSn, bscSn, agg, _ := allSnapshots(ctx, db, logger)
defer sn.Close()
defer borSn.Close()
defer bscSn.Close()
defer agg.Close()
chainConfig := fromdb.ChainConfig(db)
_, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger)
@@ -848,9 +851,10 @@ func stageBodies(db kv.RwDB, ctx context.Context, logger log.Logger) error {
func stageSenders(db kv.RwDB, ctx context.Context, logger log.Logger) error {
tmpdir := datadir.New(datadirCli).Tmp
chainConfig := fromdb.ChainConfig(db)
sn, borSn, agg, _ := allSnapshots(ctx, db, logger)
sn, borSn, bscSn, agg, _ := allSnapshots(ctx, db, logger)
defer sn.Close()
defer borSn.Close()
defer bscSn.Close()
defer agg.Close()
_, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger)

@@ -946,9 +950,10 @@ func stageExec(db kv.RwDB, ctx context.Context, logger log.Logger) error {

engine, vmConfig, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger)
must(sync.SetCurrentStage(stages.Execution))
sn, borSn, agg, _ := allSnapshots(ctx, db, logger)
sn, borSn, bscSn, agg, _ := allSnapshots(ctx, db, logger)
defer sn.Close()
defer borSn.Close()
defer bscSn.Close()
defer agg.Close()
if warmup {
return reset2.WarmupExec(ctx, db)
@@ -1048,9 +1053,10 @@ func stageCustomTrace(db kv.RwDB, ctx context.Context, logger log.Logger) error

engine, vmConfig, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger)
must(sync.SetCurrentStage(stages.Execution))
sn, borSn, agg, _ := allSnapshots(ctx, db, logger)
sn, borSn, bscSn, agg, _ := allSnapshots(ctx, db, logger)
defer sn.Close()
defer borSn.Close()
defer bscSn.Close()
defer agg.Close()
if warmup {
panic("not implemented")
@@ -1147,7 +1153,7 @@ func stageCustomTrace(db kv.RwDB, ctx context.Context, logger log.Logger) error
func stagePatriciaTrie(db kv.RwDB, ctx context.Context, logger log.Logger) error {
dirs, pm := datadir.New(datadirCli), fromdb.PruneMode(db)
_ = pm
sn, _, agg, _ := allSnapshots(ctx, db, logger)
sn, _, _, agg, _ := allSnapshots(ctx, db, logger)
defer sn.Close()
defer agg.Close()
_, _, _, _, _ = newSync(ctx, db, nil /* miningConfig */, logger)
@@ -1179,9 +1185,10 @@ func stageTxLookup(db kv.RwDB, ctx context.Context, logger log.Logger) error {
_, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger)
chainConfig := fromdb.ChainConfig(db)
must(sync.SetCurrentStage(stages.TxLookup))
sn, borSn, agg, _ := allSnapshots(ctx, db, logger)
sn, borSn, bscSn, agg, _ := allSnapshots(ctx, db, logger)
defer sn.Close()
defer borSn.Close()
defer bscSn.Close()
defer agg.Close()

if reset {
@@ -1226,11 +1233,12 @@ }
}

func printAllStages(db kv.RoDB, ctx context.Context, logger log.Logger) error {
sn, borSn, agg, _ := allSnapshots(ctx, db, logger)
sn, borSn, bscSn, agg, _ := allSnapshots(ctx, db, logger)
defer sn.Close()
defer borSn.Close()
defer bscSn.Close()
defer agg.Close()
return db.View(ctx, func(tx kv.Tx) error { return printStages(tx, sn, borSn, agg) })
return db.View(ctx, func(tx kv.Tx) error { return printStages(tx, sn, borSn, bscSn, agg) })
}

func printAppliedMigrations(db kv.RwDB, ctx context.Context, logger log.Logger) error {
@@ -1260,17 +1268,19 @@ func removeMigration(db kv.RwDB, ctx context.Context) error {
var openSnapshotOnce sync.Once
var _allSnapshotsSingleton *freezeblocks.RoSnapshots
var _allBorSnapshotsSingleton *freezeblocks.BorRoSnapshots
var _allBscSnapshotsSingleton *freezeblocks.BscRoSnapshots
var _allCaplinSnapshotsSingleton *freezeblocks.CaplinSnapshots
var _aggSingleton *libstate.Aggregator

func allSnapshots(ctx context.Context, db kv.RoDB, logger log.Logger) (*freezeblocks.RoSnapshots, *freezeblocks.BorRoSnapshots, *libstate.Aggregator, *freezeblocks.CaplinSnapshots) {
func allSnapshots(ctx context.Context, db kv.RoDB, logger log.Logger) (*freezeblocks.RoSnapshots, *freezeblocks.BorRoSnapshots, *freezeblocks.BscRoSnapshots, *libstate.Aggregator, *freezeblocks.CaplinSnapshots) {
openSnapshotOnce.Do(func() {
dirs := datadir.New(datadirCli)

snapCfg := ethconfig.NewSnapCfg(true, true, true)

_allSnapshotsSingleton = freezeblocks.NewRoSnapshots(snapCfg, dirs.Snap, 0, logger)
_allBorSnapshotsSingleton = freezeblocks.NewBorRoSnapshots(snapCfg, dirs.Snap, 0, logger)
_allBscSnapshotsSingleton = freezeblocks.NewBscRoSnapshots(snapCfg, dirs.Snap, 0, logger)
var err error
cr := rawdb.NewCanonicalReader()
_aggSingleton, err = libstate.NewAggregator(ctx, dirs, config3.HistoryV3AggregationStep, db, cr, logger)
@@ -1289,6 +1299,10 @@ func allSnapshots(ctx context.Context, db kv.RoDB, logger log.Logger) (*freezebl
_allBorSnapshotsSingleton.OptimisticalyReopenFolder()
return nil
})
g.Go(func() error {
_allBscSnapshotsSingleton.OptimisticalyReopenFolder()
return nil
})
g.Go(func() error { return _aggSingleton.OpenFolder() })
g.Go(func() error {
chainConfig := fromdb.ChainConfig(db)
@@ -1320,6 +1334,7 @@ func allSnapshots(ctx context.Context, db kv.RoDB, logger log.Logger) (*freezebl

_allSnapshotsSingleton.LogStat("blocks")
_allBorSnapshotsSingleton.LogStat("bor")
_allBscSnapshotsSingleton.LogStat("bsc")
_ = db.View(context.Background(), func(tx kv.Tx) error {
ac := _aggSingleton.BeginFilesRo()
defer ac.Close()
@@ -1330,7 +1345,7 @@ func allSnapshots(ctx context.Context, db kv.RoDB, logger log.Logger) (*freezebl
return nil
})
})
return _allSnapshotsSingleton, _allBorSnapshotsSingleton, _aggSingleton, _allCaplinSnapshotsSingleton
return _allSnapshotsSingleton, _allBorSnapshotsSingleton, _allBscSnapshotsSingleton, _aggSingleton, _allCaplinSnapshotsSingleton
}
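Opening the new BSC set rides on the same errgroup that already reopens the block and Bor folders. A self-contained sketch of that pattern follows; the folderReopener interface and the package name are illustrative only, since the real code calls the concrete singletons directly:

```go
package integration // illustrative package name

import "golang.org/x/sync/errgroup"

// folderReopener is a hypothetical interface covering the snapshot types used
// above; each exposes OptimisticalyReopenFolder (spelling as in the codebase).
type folderReopener interface {
	OptimisticalyReopenFolder()
}

// reopenAll mirrors the pattern in allSnapshots: each snapshot set reopens
// its folder on its own goroutine and Wait surfaces the first error.
func reopenAll(sets ...folderReopener) error {
	var g errgroup.Group
	for _, s := range sets {
		s := s // capture loop variable for the goroutine
		g.Go(func() error {
			s.OptimisticalyReopenFolder()
			return nil
		})
	}
	return g.Wait()
}
```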

var openBlockReaderOnce sync.Once
Expand All @@ -1339,8 +1354,8 @@ var _blockWriterSingleton *blockio.BlockWriter

func blocksIO(db kv.RoDB, logger log.Logger) (services.FullBlockReader, *blockio.BlockWriter) {
openBlockReaderOnce.Do(func() {
sn, borSn, _, _ := allSnapshots(context.Background(), db, logger)
_blockReaderSingleton = freezeblocks.NewBlockReader(sn, borSn)
sn, borSn, bscSn, _, _ := allSnapshots(context.Background(), db, logger)
_blockReaderSingleton = freezeblocks.NewBlockReader(sn, borSn, bscSn)
_blockWriterSingleton = blockio.NewBlockWriter()
})
return _blockReaderSingleton, _blockWriterSingleton
@@ -1378,7 +1393,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig,
cfg.Miner = *miningConfig
}
cfg.Dirs = datadir.New(datadirCli)
allSn, _, agg, _ := allSnapshots(ctx, db, logger)
allSn, _, _, agg, _ := allSnapshots(ctx, db, logger)
cfg.Snapshot = allSn.Cfg()

blockReader, blockWriter := blocksIO(db, logger)
@@ -1415,9 +1430,6 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig,
blockSnapBuildSema := semaphore.NewWeighted(int64(dbg.BuildSnapshotAllowance))
agg.SetSnapshotBuildSema(blockSnapBuildSema)

notifications := &shards.Notifications{}
blockRetire := freezeblocks.NewBlockRetire(1, dirs, blockReader, blockWriter, db, chainConfig, notifications.Events, blockSnapBuildSema, logger)

var (
snapDb kv.RwDB
recents *lru.ARCCache[libcommon.Hash, *bor.Snapshot]
@@ -1434,6 +1446,9 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig,
blobStore = parlia.BlobStore
}

notifications := &shards.Notifications{}
blockRetire := freezeblocks.NewBlockRetire(1, dirs, blockReader, blockWriter, db, blobStore, chainConfig, notifications.Events, blockSnapBuildSema, logger)

stages := stages2.NewDefaultStages(context.Background(), db, snapDb, blobStore, p2p.Config{}, &cfg, sentryControlServer, notifications, nil, blockReader, blockRetire, agg, nil, nil, engine, heimdallClient, recents, signatures, logger)
sync := stagedsync.New(cfg.Sync, stages, stagedsync.DefaultUnwindOrder, stagedsync.DefaultPruneOrder, logger)
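The notable change in newSync is ordering: NewBlockRetire now receives the blob store, so its construction (and the notifications value it needs) moves below the engine-specific branch that assigns blobStore. A hedged sketch of the resulting order; the blobStore declaration type and the Parlia type assertion are assumptions standing in for context elided from the hunk:

```go
// Assumed declaration; the concrete type is elided in the diff hunk above.
var blobStore services.BlobStorage

// Assumed shape of the engine-specific branch that populates blobStore.
if parlia, ok := engine.(*parlia.Parlia); ok {
	blobStore = parlia.BlobStore
}

// NewBlockRetire now takes blobStore, so it must be built afterwards.
notifications := &shards.Notifications{}
blockRetire := freezeblocks.NewBlockRetire(
	1, dirs, blockReader, blockWriter, db,
	blobStore, // new argument threaded through to blob-snapshot freezing
	chainConfig, notifications.Events, blockSnapBuildSema, logger,
)
```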

3 changes: 2 additions & 1 deletion cmd/integration/commands/state_domains.go
@@ -121,9 +121,10 @@ var readDomains = &cobra.Command{
}

func requestDomains(chainDb, stateDb kv.RwDB, ctx context.Context, readDomain string, addrs [][]byte, logger log.Logger) error {
sn, bsn, agg, _ := allSnapshots(ctx, chainDb, logger)
sn, bsn, bscSn, agg, _ := allSnapshots(ctx, chainDb, logger)
defer sn.Close()
defer bsn.Close()
defer bscSn.Close()
defer agg.Close()

aggTx := agg.BeginFilesRo()
6 changes: 4 additions & 2 deletions cmd/integration/commands/state_stages.go
@@ -152,9 +152,10 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context.
return err
}

sn, borSn, agg, _ := allSnapshots(ctx, db, logger1)
sn, borSn, bscSn, agg, _ := allSnapshots(ctx, db, logger1)
defer sn.Close()
defer borSn.Close()
defer bscSn.Close()
defer agg.Close()
engine, vmConfig, stateStages, miningStages, miner := newSync(ctx, db, &miningConfig, logger1)
chainConfig, pm := fromdb.ChainConfig(db), fromdb.PruneMode(db)
@@ -380,9 +381,10 @@ func checkMinedBlock(b1, b2 *types.Block, chainConfig *chain2.Config) {
func loopExec(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) error {
chainConfig := fromdb.ChainConfig(db)
dirs, pm := datadir.New(datadirCli), fromdb.PruneMode(db)
sn, borSn, agg, _ := allSnapshots(ctx, db, logger)
sn, borSn, bscSn, agg, _ := allSnapshots(ctx, db, logger)
defer sn.Close()
defer borSn.Close()
defer bscSn.Close()
defer agg.Close()
engine, vmConfig, sync, _, _ := newSync(ctx, db, nil, logger)

2 changes: 1 addition & 1 deletion cmd/rpcdaemon/cli/config.go
@@ -446,7 +446,7 @@ func RemoteServices(ctx context.Context, cfg *httpcfg.HttpCfg, logger log.Logger
}()
}
onNewSnapshot()
blockReader = freezeblocks.NewBlockReader(allSnapshots, allBorSnapshots)
blockReader = freezeblocks.NewBlockReader(allSnapshots, allBorSnapshots, nil)

db, err = temporal.New(rwKv, agg)
if err != nil {
10 changes: 10 additions & 0 deletions cmd/rpcdaemon/rpcservices/eth_backend.go
@@ -116,9 +116,11 @@ func (back *RemoteBackend) BlockByHash(ctx context.Context, db kv.Tx, hash commo
func (back *RemoteBackend) TxsV3Enabled() bool { panic("not implemented") }
func (back *RemoteBackend) Snapshots() services.BlockSnapshots { panic("not implemented") }
func (back *RemoteBackend) BorSnapshots() services.BlockSnapshots { panic("not implemented") }
func (back *RemoteBackend) BscSnapshots() services.BlockSnapshots { panic("not implemented") }
func (back *RemoteBackend) AllTypes() []snaptype.Type { panic("not implemented") }
func (back *RemoteBackend) FrozenBlocks() uint64 { return back.blockReader.FrozenBlocks() }
func (back *RemoteBackend) FrozenBorBlocks() uint64 { return back.blockReader.FrozenBorBlocks() }
func (back *RemoteBackend) FrozenBscBlobs() uint64 { return back.blockReader.FrozenBscBlobs() }
func (back *RemoteBackend) FrozenFiles() (list []string) { return back.blockReader.FrozenFiles() }
func (back *RemoteBackend) FreezingCfg() ethconfig.BlocksFreezing {
return back.blockReader.FreezingCfg()
@@ -324,6 +326,14 @@ func (back *RemoteBackend) BorStartEventID(ctx context.Context, tx kv.Tx, hash c
return back.blockReader.BorStartEventID(ctx, tx, hash, blockNum)
}

func (back *RemoteBackend) ReadBlobByNumber(ctx context.Context, tx kv.Getter, blockNum uint64) ([]*types.BlobSidecar, bool, error) {
return back.blockReader.ReadBlobByNumber(ctx, tx, blockNum)
}

func (back *RemoteBackend) ReadBlobTxCount(ctx context.Context, blockNum uint64, hash common.Hash) (uint32, error) {
return back.blockReader.ReadBlobTxCount(ctx, blockNum, hash)
}

func (back *RemoteBackend) LastSpanId(ctx context.Context, tx kv.Tx) (uint64, bool, error) {
return back.blockReader.LastSpanId(ctx, tx)
}
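Both new RemoteBackend methods are thin delegations to the underlying block reader; a caller-side sketch, with the transaction and RPC plumbing assumed:

```go
// Sketch: reading frozen blob sidecars through the new delegation methods.
// blockNum and blockHash would come from the RPC request; back is a *RemoteBackend.
sidecars, found, err := back.ReadBlobByNumber(ctx, tx, blockNum)
if err != nil {
	return err
}
if !found {
	return nil // no blob sidecars stored for this block
}
count, err := back.ReadBlobTxCount(ctx, blockNum, blockHash)
if err != nil {
	return err
}
_ = count // e.g. sanity-check against len(sidecars) before responding
```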
8 changes: 4 additions & 4 deletions cmd/snapshots/cmp/cmp.go
@@ -501,8 +501,8 @@ func (c comparitor) compareHeaders(ctx context.Context, f1ents []fs.DirEntry, f2
atomic.AddUint64(&compareTime, uint64(time.Since(startTime)))
}()

blockReader1 := freezeblocks.NewBlockReader(f1snaps, nil)
blockReader2 := freezeblocks.NewBlockReader(f2snaps, nil)
blockReader1 := freezeblocks.NewBlockReader(f1snaps, nil, nil)
blockReader2 := freezeblocks.NewBlockReader(f2snaps, nil, nil)

g, gctx = errgroup.WithContext(ctx)
g.SetLimit(2)
@@ -781,8 +781,8 @@ func (c comparitor) compareBodies(ctx context.Context, f1ents []*BodyEntry, f2en
atomic.AddUint64(&compareTime, uint64(time.Since(startTime)))
}()

blockReader1 := freezeblocks.NewBlockReader(f1snaps, nil)
blockReader2 := freezeblocks.NewBlockReader(f2snaps, nil)
blockReader1 := freezeblocks.NewBlockReader(f1snaps, nil, nil)
blockReader2 := freezeblocks.NewBlockReader(f2snaps, nil, nil)

return func() error {
for i := ent1.From; i < ent1.To; i++ {
2 changes: 1 addition & 1 deletion cmd/state/commands/opcode_tracer.go
@@ -441,7 +441,7 @@ func OpcodeTracer(genesis *types.Genesis, blockNum uint64, chaindata string, num
defer historyTx.Rollback()

dirs := datadir2.New(filepath.Dir(chainDb.(*mdbx.MdbxKV).Path()))
blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{}, dirs.Snap, 0, log.New()), nil /* BorSnapshots */)
blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{}, dirs.Snap, 0, log.New()), nil /* BorSnapshots */, nil)

chainConfig := genesis.Config
vmConfig := vm.Config{Tracer: ot, Debug: true}