Merge pull request #222 from bnb-chain/develop
prepare for v0.5.2 release
owen-reorg authored Nov 13, 2024
2 parents b7c798e + d15539f commit a185a91
Showing 71 changed files with 2,343 additions and 483 deletions.
25 changes: 25 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,30 @@
# Changelog

## v0.5.2

This is a minor release for opBNB Mainnet and Testnet.

It includes several optimizations and improvements, including a new feature to automatically recover from unexpected shutdowns, support for the multi-database feature, and fixes for various bugs.

Upgrading is optional.

### What's Changed
* feat: add recover node buffer list for pathdb by @sysvm in https://github.com/bnb-chain/op-geth/pull/126
* fix(op-geth): add new field in SimulateGaslessBundleResp by @redhdx in https://github.com/bnb-chain/op-geth/pull/205
* feat: support multi database feature for op by @jingjunLi in https://github.com/bnb-chain/op-geth/pull/127
* fix: Fix pbss snapshot inconsistency with engine-sync enabled when starting by @krish-nr in https://github.com/bnb-chain/op-geth/pull/189
* fix: fix StateScheme overwrite bug by @jingjunLi in https://github.com/bnb-chain/op-geth/pull/220
* fix(op-geth): fix gasless receipt l1fee by @redhdx in https://github.com/bnb-chain/op-geth/pull/219
* feat: sequencer auto recover when meet an unexpected shutdown by @krish-nr in https://github.com/bnb-chain/op-geth/pull/166

### New Contributors
* @jingjunLi made their first contribution in https://github.com/bnb-chain/op-geth/pull/127

### Docker Images
ghcr.io/bnb-chain/op-geth:v0.5.2

**Full Changelog**: https://github.com/bnb-chain/op-geth/compare/v0.5.1...v0.5.2

## v0.5.1

This release includes various optimizations and improvements to transaction processing, CI support, and network infrastructure.
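At a glance, the multi-database wiring introduced in cmd/geth/chaincmd.go below follows this shape: when the multi-database flag is set, state and block data are opened as separate freezer-backed stores under the chaindata directory and attached to the main chain database. A condensed sketch, reusing only the calls that appear in the initGenesis hunk (it is not a verbatim excerpt of the commit):

```go
// Sketch of the multi-database wiring used by initGenesis (condensed, not a
// verbatim excerpt of the commit). With the multi-database flag set, state
// and block data each get their own freezer-backed store under chaindata.
chaindb, err := stack.OpenDatabaseWithFreezer("chaindata", 0, 0, ctx.String(utils.AncientFlag.Name), "", false, false)
if err != nil {
	utils.Fatalf("Failed to open database: %v", err)
}
if ctx.IsSet(utils.MultiDataBaseFlag.Name) {
	// Separate store for trie/state data.
	statedb, err := stack.OpenDatabaseWithFreezer("chaindata/state", 0, 0, "", "", false, true)
	if err != nil {
		utils.Fatalf("Failed to open separate trie database: %v", err)
	}
	chaindb.SetStateStore(statedb)

	// Separate store for block data.
	blockdb, err := stack.OpenDatabaseWithFreezer("chaindata/block", 0, 0, "", "", false, true)
	if err != nil {
		utils.Fatalf("Failed to open separate block database: %v", err)
	}
	chaindb.SetBlockStore(blockdb)
}
```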
29 changes: 26 additions & 3 deletions cmd/geth/chaincmd.go
@@ -54,6 +54,7 @@ var (
utils.CachePreimagesFlag,
utils.OverrideCancun,
utils.OverrideVerkle,
utils.MultiDataBaseFlag,
}, utils.DatabaseFlags),
Description: `
The init command initializes a new genesis block and definition for the network.
@@ -221,13 +222,28 @@ func initGenesis(ctx *cli.Context) error {
overrides.OverrideVerkle = &v
}
for _, name := range []string{"chaindata", "lightchaindata"} {
chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.String(utils.AncientFlag.Name), "", false)
chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.String(utils.AncientFlag.Name), "", false, false)
if err != nil {
utils.Fatalf("Failed to open database: %v", err)
}
defer chaindb.Close()

triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle())
// if the multi-database flag has been set, create the trie db with a separate state database
if ctx.IsSet(utils.MultiDataBaseFlag.Name) {
statediskdb, dbErr := stack.OpenDatabaseWithFreezer(name+"/state", 0, 0, "", "", false, true)
if dbErr != nil {
utils.Fatalf("Failed to open separate trie database: %v", dbErr)
}
chaindb.SetStateStore(statediskdb)
blockdb, err := stack.OpenDatabaseWithFreezer(name+"/block", 0, 0, "", "", false, true)
if err != nil {
utils.Fatalf("Failed to open separate block database: %v", err)
}
chaindb.SetBlockStore(blockdb)
log.Warn("Multi-database is an experimental feature")
}

triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle(), true)
defer triedb.Close()

_, hash, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides)
@@ -265,6 +281,13 @@ func dumpGenesis(ctx *cli.Context) error {
}
continue
}
// set the separate state & block database
if stack.CheckIfMultiDataBase() && err == nil {
stateDiskDb := utils.MakeStateDataBase(ctx, stack, true)
db.SetStateStore(stateDiskDb)
blockDb := utils.MakeBlockDatabase(ctx, stack, true)
db.SetBlockStore(blockDb)
}
genesis, err := core.ReadGenesis(db)
if err != nil {
utils.Fatalf("failed to read genesis: %s", err)
@@ -582,7 +605,7 @@ func dump(ctx *cli.Context) error {
if err != nil {
return err
}
triedb := utils.MakeTrieDatabase(ctx, stack, db, true, true, false) // always enable preimage lookup
triedb := utils.MakeTrieDatabase(ctx, stack, db, true, true, false, false) // always enable preimage lookup
defer triedb.Close()

state, err := state.New(root, state.NewDatabaseWithNodeDB(db, triedb), nil)
108 changes: 93 additions & 15 deletions cmd/geth/dbcmd.go
@@ -377,7 +377,6 @@ func inspectTrie(ctx *cli.Context) error {

db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()

var headerBlockHash common.Hash
if ctx.NArg() >= 1 {
if ctx.Args().Get(0) == "latest" {
@@ -495,14 +494,19 @@ func checkStateContent(ctx *cli.Context) error {
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
var (
it = rawdb.NewKeyLengthIterator(db.NewIterator(prefix, start), 32)
it ethdb.Iterator
hasher = crypto.NewKeccakState()
got = make([]byte, 32)
errs int
count int
startTime = time.Now()
lastLog = time.Now()
)
if stack.CheckIfMultiDataBase() {
it = rawdb.NewKeyLengthIterator(db.StateStore().NewIterator(prefix, start), 32)
} else {
it = rawdb.NewKeyLengthIterator(db.NewIterator(prefix, start), 32)
}
for it.Next() {
count++
k := it.Key()
@@ -549,6 +553,13 @@ func dbStats(ctx *cli.Context) error {
defer db.Close()

showLeveldbStats(db)
if stack.CheckIfMultiDataBase() {
fmt.Println("show stats of state store")
showLeveldbStats(db.StateStore())
fmt.Println("show stats of block store")
showLeveldbStats(db.BlockStore())
}

return nil
}

@@ -562,13 +573,38 @@ func dbCompact(ctx *cli.Context) error {
log.Info("Stats before compaction")
showLeveldbStats(db)

if stack.CheckIfMultiDataBase() {
fmt.Println("show stats of state store")
showLeveldbStats(db.StateStore())
fmt.Println("show stats of block store")
showLeveldbStats(db.BlockStore())
}

log.Info("Triggering compaction")
if err := db.Compact(nil, nil); err != nil {
log.Info("Compact err", "error", err)
log.Error("Compact err", "error", err)
return err
}

if stack.CheckIfMultiDataBase() {
if err := db.StateStore().Compact(nil, nil); err != nil {
log.Error("Compact err", "error", err)
return err
}
if err := db.BlockStore().Compact(nil, nil); err != nil {
log.Error("Compact err", "error", err)
return err
}
}

log.Info("Stats after compaction")
showLeveldbStats(db)
if stack.CheckIfMultiDataBase() {
fmt.Println("show stats of state store after compaction")
showLeveldbStats(db.StateStore())
fmt.Println("show stats of block store after compaction")
showLeveldbStats(db.BlockStore())
}
return nil
}

@@ -588,8 +624,17 @@ func dbGet(ctx *cli.Context) error {
log.Info("Could not decode the key", "error", err)
return err
}
opDb := db
if stack.CheckIfMultiDataBase() {
keyType := rawdb.DataTypeByKey(key)
if keyType == rawdb.StateDataType {
opDb = db.StateStore()
} else if keyType == rawdb.BlockDataType {
opDb = db.BlockStore()
}
}

data, err := db.Get(key)
data, err := opDb.Get(key)
if err != nil {
log.Info("Get operation failed", "key", fmt.Sprintf("%#x", key), "error", err)
return err
@@ -606,8 +651,14 @@ func dbTrieGet(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()

db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
var db ethdb.Database
chaindb := utils.MakeChainDatabase(ctx, stack, true)
if chaindb.StateStore() != nil {
db = chaindb.StateStore()
} else {
db = chaindb
}
defer chaindb.Close()

scheme := ctx.String(utils.StateSchemeFlag.Name)
if scheme == "" {
@@ -673,8 +724,14 @@ func dbTrieDelete(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()

db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
var db ethdb.Database
chaindb := utils.MakeChainDatabase(ctx, stack, true)
if chaindb.StateStore() != nil {
db = chaindb.StateStore()
} else {
db = chaindb
}
defer chaindb.Close()

scheme := ctx.String(utils.StateSchemeFlag.Name)
if scheme == "" {
@@ -742,7 +799,17 @@ func dbDelete(ctx *cli.Context) error {
log.Error("Could not decode the key", "error", err)
return err
}
data, err := db.Get(key)
opDb := db
if stack.CheckIfMultiDataBase() {
keyType := rawdb.DataTypeByKey(key)
if keyType == rawdb.StateDataType {
opDb = db.StateStore()
} else if keyType == rawdb.BlockDataType {
opDb = db.BlockStore()
}
}

data, err := opDb.Get(key)
if err == nil {
fmt.Printf("Previous value: %#x\n", data)
}
@@ -780,11 +847,22 @@ func dbPut(ctx *cli.Context) error {
log.Error("Could not decode the value", "error", err)
return err
}
data, err = db.Get(key)

opDb := db
if stack.CheckIfMultiDataBase() {
keyType := rawdb.DataTypeByKey(key)
if keyType == rawdb.StateDataType {
opDb = db.StateStore()
} else if keyType == rawdb.BlockDataType {
opDb = db.BlockStore()
}
}

data, err = opDb.Get(key)
if err == nil {
fmt.Printf("Previous value: %#x\n", data)
}
return db.Put(key, value)
return opDb.Put(key, value)
}

// dbDumpTrie shows the key-value slots of a given storage trie
@@ -797,7 +875,7 @@ func dbDumpTrie(ctx *cli.Context) error {

db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
triedb := utils.MakeTrieDatabase(ctx, stack, db, false, true, false)
triedb := utils.MakeTrieDatabase(ctx, stack, db, false, true, false, false)
defer triedb.Close()

var (
@@ -875,7 +953,7 @@ func freezerInspect(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
ancient := stack.ResolveAncient("chaindata", ctx.String(utils.AncientFlag.Name))
stack.Close()
return rawdb.InspectFreezerTable(ancient, freezer, table, start, end)
return rawdb.InspectFreezerTable(ancient, freezer, table, start, end, stack.CheckIfMultiDataBase())
}

func importLDBdata(ctx *cli.Context) error {
@@ -1016,7 +1094,7 @@ func showMetaData(ctx *cli.Context) error {
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()

ancients, err := db.Ancients()
ancients, err := db.BlockStore().Ancients()
if err != nil {
fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
}
@@ -1061,7 +1139,7 @@ func hbss2pbss(ctx *cli.Context) error {
defer stack.Close()

db := utils.MakeChainDatabase(ctx, stack, false)
db.Sync()
db.BlockStore().Sync()
defer db.Close()

config := triedb.HashDefaults
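The dbGet, dbDelete and dbPut changes above all repeat the same prelude to pick the store that owns a key. Factored into a helper, the routing looks roughly like the sketch below; the helper itself is illustrative and does not exist in the commit, but it uses only calls visible in the diff (CheckIfMultiDataBase, rawdb.DataTypeByKey, StateStore, BlockStore):

```go
// storeForKey mirrors the routing prelude repeated in dbGet/dbDelete/dbPut:
// with multi-database enabled, state keys are served from the state store and
// block keys from the block store; everything else stays in the chain db.
// Illustrative helper only, not part of this commit.
func storeForKey(stack *node.Node, db ethdb.Database, key []byte) ethdb.Database {
	if !stack.CheckIfMultiDataBase() {
		return db
	}
	switch rawdb.DataTypeByKey(key) {
	case rawdb.StateDataType:
		return db.StateStore()
	case rawdb.BlockDataType:
		return db.BlockStore()
	default:
		return db
	}
}
```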
1 change: 1 addition & 0 deletions cmd/geth/main.go
@@ -122,6 +122,7 @@ var (
utils.CacheSnapshotFlag,
utils.CacheNoPrefetchFlag,
utils.CachePreimagesFlag,
utils.MultiDataBaseFlag,
utils.AllowInsecureNoTriesFlag,
utils.CacheLogSizeFlag,
utils.FDLimitFlag,
10 changes: 5 additions & 5 deletions cmd/geth/snapshot.go
@@ -244,7 +244,7 @@ func verifyState(ctx *cli.Context) error {
log.Error("Failed to load head block")
return errors.New("no head block")
}
triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false)
triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false, false)
defer triedb.Close()

snapConfig := snapshot.Config{
@@ -299,7 +299,7 @@ func traverseState(ctx *cli.Context) error {
chaindb := utils.MakeChainDatabase(ctx, stack, true)
defer chaindb.Close()

triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false)
triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false, false)
defer triedb.Close()

headBlock := rawdb.ReadHeadBlock(chaindb)
@@ -408,7 +408,7 @@ func traverseRawState(ctx *cli.Context) error {
chaindb := utils.MakeChainDatabase(ctx, stack, true)
defer chaindb.Close()

triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false)
triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false, false)
defer triedb.Close()

headBlock := rawdb.ReadHeadBlock(chaindb)
@@ -573,7 +573,7 @@ func dumpState(ctx *cli.Context) error {
return err
}
defer db.Close()
triedb := utils.MakeTrieDatabase(ctx, stack, db, false, true, false)
triedb := utils.MakeTrieDatabase(ctx, stack, db, false, true, false, false)
defer triedb.Close()

snapConfig := snapshot.Config{
@@ -655,7 +655,7 @@ func snapshotExportPreimages(ctx *cli.Context) error {
chaindb := utils.MakeChainDatabase(ctx, stack, true)
defer chaindb.Close()

triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false)
triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false, false)
defer triedb.Close()

var root common.Hash
6 changes: 3 additions & 3 deletions cmd/utils/cmd.go
@@ -524,13 +524,13 @@ func ImportPreimages(db ethdb.Database, fn string) error {
// Accumulate the preimages and flush when enough are gathered
preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
if len(preimages) > 1024 {
rawdb.WritePreimages(db, preimages)
rawdb.WritePreimages(db.StateStore(), preimages)
preimages = make(map[common.Hash][]byte)
}
}
// Flush the last batch preimage data
if len(preimages) > 0 {
rawdb.WritePreimages(db, preimages)
rawdb.WritePreimages(db.StateStore(), preimages)
}
return nil
}
@@ -642,7 +642,7 @@ func ExportSnapshotPreimages(chaindb ethdb.Database, snaptree *snapshot.Tree, fn
}()

for item := range hashCh {
preimage := rawdb.ReadPreimage(chaindb, item.Hash)
preimage := rawdb.ReadPreimage(chaindb.StateStore(), item.Hash)
if len(preimage) == 0 {
return fmt.Errorf("missing preimage for %v", item.Hash)
}
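The cmd/utils/cmd.go change above reflects where preimages live under the split layout: they belong with the state data, so both import and export now target db.StateStore(). A defensive variant of the same idea, illustrative only and assuming StateStore() can be nil when the feature is off (as the nil check in dbTrieGet above suggests):

```go
// preimageStore returns the database that holds preimages: the separate state
// store when one is configured, otherwise the main chain database.
// Illustrative sketch, not part of this commit.
func preimageStore(db ethdb.Database) ethdb.Database {
	if state := db.StateStore(); state != nil {
		return state
	}
	return db
}
```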