Merge remote-tracking branch 'origin/develop' into sequencer_recover_fix
krish-nr committed Oct 29, 2024
2 parents 13b52ea + bdcfeec commit 87c4614
Showing 78 changed files with 2,267 additions and 518 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/docker-release.yaml
@@ -43,4 +43,4 @@ jobs:
provenance: false
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}

platforms: linux/amd64,linux/arm64
21 changes: 21 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,26 @@
# Changelog

## v0.5.1

This release includes various optimizations and improvements to transaction processing, CI support, and network infrastructure.

This is a minor release for opBNB Mainnet and Testnet.
Upgrading is optional.

### What's Changed

* fix(ci): support building arm64 architecture (#165)
* optimization: enqueue transactions in parallel from p2p (#173)
* optimization: enlarge p2p buffer size and add some metrics for performance monitor (#171)
* optimization: txpool pricedlist only reheap when pool is full (#175)
* optimization: txpool pending cache improvement (#177)
* chore: add bootnode in US region (testnet) (#194)

### Docker Images
ghcr.io/bnb-chain/op-geth:v0.5.1

**Full Changelog**: https://github.com/bnb-chain/op-geth/compare/v0.5.0...v0.5.1

## v0.5.0
This release merges code from the upstream version v1.101315.2, along with several fixes and improvements. The Fjord fork from upstream is included.
The Fjord fork is scheduled to launch on opBNB:
3 changes: 3 additions & 0 deletions accounts/abi/bind/util_test.go
@@ -83,6 +83,7 @@ func TestWaitDeployed(t *testing.T) {

// Send and mine the transaction.
backend.Client().SendTransaction(ctx, tx)
time.Sleep(500 * time.Millisecond) // wait for the tx to reach the txpool
backend.Commit()

select {
@@ -117,6 +118,7 @@ func TestWaitDeployedCornerCases(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
backend.Client().SendTransaction(ctx, tx)
time.Sleep(500 * time.Millisecond) // wait for the tx to reach the txpool
backend.Commit()
notContractCreation := errors.New("tx is not contract creation")
if _, err := bind.WaitDeployed(ctx, backend.Client(), tx); err.Error() != notContractCreation.Error() {
@@ -135,5 +137,6 @@ func TestWaitDeployedCornerCases(t *testing.T) {
}()

backend.Client().SendTransaction(ctx, tx)
time.Sleep(500 * time.Millisecond) // wait for the tx to reach the txpool
cancel()
}
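
A note on the three test changes above: the fixed 500 ms sleep gives the now-asynchronous transaction enqueueing time to finish before the backend commits (or the context is cancelled). Where a test backend exposes a way to observe the pending pool, a bounded polling loop is generally more robust than a fixed sleep. The sketch below shows the generic shape of such a wait; the txInPool predicate is a hypothetical hook, not part of the simulated backend API shown in the diff.

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitFor polls cond until it returns true or the timeout expires. cond would
// be backed by whatever visibility the test backend offers (pending tx count,
// pending nonce, etc.).
func waitFor(cond func() bool, timeout, step time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return nil
		}
		time.Sleep(step)
	}
	return errors.New("condition not met before timeout")
}

func main() {
	// Toy stand-in for "the tx has reached the pool": flips to true after 120 ms.
	start := time.Now()
	txInPool := func() bool { return time.Since(start) > 120*time.Millisecond }

	if err := waitFor(txInPool, 2*time.Second, 10*time.Millisecond); err != nil {
		fmt.Println("gave up:", err)
		return
	}
	fmt.Println("tx visible after", time.Since(start).Round(time.Millisecond))
}
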
29 changes: 26 additions & 3 deletions cmd/geth/chaincmd.go
@@ -54,6 +54,7 @@ var (
utils.CachePreimagesFlag,
utils.OverrideCancun,
utils.OverrideVerkle,
utils.MultiDataBaseFlag,
}, utils.DatabaseFlags),
Description: `
The init command initializes a new genesis block and definition for the network.
@@ -221,13 +222,28 @@ func initGenesis(ctx *cli.Context) error {
overrides.OverrideVerkle = &v
}
for _, name := range []string{"chaindata", "lightchaindata"} {
chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.String(utils.AncientFlag.Name), "", false)
chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.String(utils.AncientFlag.Name), "", false, false)
if err != nil {
utils.Fatalf("Failed to open database: %v", err)
}
defer chaindb.Close()

triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle())
// if the multi-database flag is set, open separate state and block databases
if ctx.IsSet(utils.MultiDataBaseFlag.Name) {
statediskdb, dbErr := stack.OpenDatabaseWithFreezer(name+"/state", 0, 0, "", "", false, true)
if dbErr != nil {
utils.Fatalf("Failed to open separate trie database: %v", dbErr)
}
chaindb.SetStateStore(statediskdb)
blockdb, err := stack.OpenDatabaseWithFreezer(name+"/block", 0, 0, "", "", false, true)
if err != nil {
utils.Fatalf("Failed to open separate block database: %v", err)
}
chaindb.SetBlockStore(blockdb)
log.Warn("Multi-database is an experimental feature")
}

triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle(), true)
defer triedb.Close()

_, hash, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides)
@@ -265,6 +281,13 @@ func dumpGenesis(ctx *cli.Context) error {
}
continue
}
// attach the separate state & block databases
if stack.CheckIfMultiDataBase() && err == nil {
stateDiskDb := utils.MakeStateDataBase(ctx, stack, true)
db.SetStateStore(stateDiskDb)
blockDb := utils.MakeBlockDatabase(ctx, stack, true)
db.SetBlockStore(blockDb)
}
genesis, err := core.ReadGenesis(db)
if err != nil {
utils.Fatalf("failed to read genesis: %s", err)
@@ -582,7 +605,7 @@ func dump(ctx *cli.Context) error {
if err != nil {
return err
}
triedb := utils.MakeTrieDatabase(ctx, stack, db, true, true, false) // always enable preimage lookup
triedb := utils.MakeTrieDatabase(ctx, stack, db, true, true, false, false) // always enable preimage lookup
defer triedb.Close()

state, err := state.New(root, state.NewDatabaseWithNodeDB(db, triedb), nil)
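
For orientation on the multi-database branch added to initGenesis above: when utils.MultiDataBaseFlag is set, two extra stores are opened under the chain database directory and attached through SetStateStore and SetBlockStore. The sketch below only illustrates the resulting on-disk layout; the sub-directory names ("state" and "block" under chaindata) come from the diff, while the datadir value and the MkdirAll calls are illustrative rather than the node's actual initialization code.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Stand-in for the node's data directory; a real node resolves this from --datadir.
	datadir := filepath.Join(os.TempDir(), "opgeth-multidb-demo")

	// Directories implied by the multi-database branch in initGenesis: the main
	// chaindata store plus the two separate stores attached to it.
	for _, sub := range []string{
		"chaindata",       // main chain database
		"chaindata/state", // attached via chaindb.SetStateStore(...)
		"chaindata/block", // attached via chaindb.SetBlockStore(...)
	} {
		dir := filepath.Join(datadir, sub)
		if err := os.MkdirAll(dir, 0o755); err != nil {
			panic(err)
		}
		fmt.Println("created", dir)
	}
}
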
108 changes: 93 additions & 15 deletions cmd/geth/dbcmd.go
@@ -377,7 +376,6 @@ func inspectTrie(ctx *cli.Context) error {

db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()

var headerBlockHash common.Hash
if ctx.NArg() >= 1 {
if ctx.Args().Get(0) == "latest" {
@@ -495,14 +494,19 @@ func checkStateContent(ctx *cli.Context) error {
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
var (
it = rawdb.NewKeyLengthIterator(db.NewIterator(prefix, start), 32)
it ethdb.Iterator
hasher = crypto.NewKeccakState()
got = make([]byte, 32)
errs int
count int
startTime = time.Now()
lastLog = time.Now()
)
if stack.CheckIfMultiDataBase() {
it = rawdb.NewKeyLengthIterator(db.StateStore().NewIterator(prefix, start), 32)
} else {
it = rawdb.NewKeyLengthIterator(db.NewIterator(prefix, start), 32)
}
for it.Next() {
count++
k := it.Key()
@@ -549,6 +553,13 @@ func dbStats(ctx *cli.Context) error {
defer db.Close()

showLeveldbStats(db)
if stack.CheckIfMultiDataBase() {
fmt.Println("show stats of state store")
showLeveldbStats(db.StateStore())
fmt.Println("show stats of block store")
showLeveldbStats(db.BlockStore())
}

return nil
}

@@ -562,13 +573,38 @@ func dbCompact(ctx *cli.Context) error {
log.Info("Stats before compaction")
showLeveldbStats(db)

if stack.CheckIfMultiDataBase() {
fmt.Println("show stats of state store")
showLeveldbStats(db.StateStore())
fmt.Println("show stats of block store")
showLeveldbStats(db.BlockStore())
}

log.Info("Triggering compaction")
if err := db.Compact(nil, nil); err != nil {
log.Info("Compact err", "error", err)
log.Error("Compact err", "error", err)
return err
}

if stack.CheckIfMultiDataBase() {
if err := db.StateStore().Compact(nil, nil); err != nil {
log.Error("Compact err", "error", err)
return err
}
if err := db.BlockStore().Compact(nil, nil); err != nil {
log.Error("Compact err", "error", err)
return err
}
}

log.Info("Stats after compaction")
showLeveldbStats(db)
if stack.CheckIfMultiDataBase() {
fmt.Println("show stats of state store after compaction")
showLeveldbStats(db.StateStore())
fmt.Println("show stats of block store after compaction")
showLeveldbStats(db.BlockStore())
}
return nil
}

@@ -588,8 +624,17 @@ func dbGet(ctx *cli.Context) error {
log.Info("Could not decode the key", "error", err)
return err
}
opDb := db
if stack.CheckIfMultiDataBase() {
keyType := rawdb.DataTypeByKey(key)
if keyType == rawdb.StateDataType {
opDb = db.StateStore()
} else if keyType == rawdb.BlockDataType {
opDb = db.BlockStore()
}
}

data, err := db.Get(key)
data, err := opDb.Get(key)
if err != nil {
log.Info("Get operation failed", "key", fmt.Sprintf("%#x", key), "error", err)
return err
@@ -606,8 +651,14 @@ func dbTrieGet(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()

db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
var db ethdb.Database
chaindb := utils.MakeChainDatabase(ctx, stack, true)
if chaindb.StateStore() != nil {
db = chaindb.StateStore()
} else {
db = chaindb
}
defer chaindb.Close()

scheme := ctx.String(utils.StateSchemeFlag.Name)
if scheme == "" {
@@ -673,8 +724,14 @@ func dbTrieDelete(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()

db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
var db ethdb.Database
chaindb := utils.MakeChainDatabase(ctx, stack, true)
if chaindb.StateStore() != nil {
db = chaindb.StateStore()
} else {
db = chaindb
}
defer chaindb.Close()

scheme := ctx.String(utils.StateSchemeFlag.Name)
if scheme == "" {
@@ -742,7 +799,17 @@ func dbDelete(ctx *cli.Context) error {
log.Error("Could not decode the key", "error", err)
return err
}
data, err := db.Get(key)
opDb := db
if stack.CheckIfMultiDataBase() {
keyType := rawdb.DataTypeByKey(key)
if keyType == rawdb.StateDataType {
opDb = db.StateStore()
} else if keyType == rawdb.BlockDataType {
opDb = db.BlockStore()
}
}

data, err := opDb.Get(key)
if err == nil {
fmt.Printf("Previous value: %#x\n", data)
}
@@ -780,11 +847,22 @@ func dbPut(ctx *cli.Context) error {
log.Error("Could not decode the value", "error", err)
return err
}
data, err = db.Get(key)

opDb := db
if stack.CheckIfMultiDataBase() {
keyType := rawdb.DataTypeByKey(key)
if keyType == rawdb.StateDataType {
opDb = db.StateStore()
} else if keyType == rawdb.BlockDataType {
opDb = db.BlockStore()
}
}

data, err = opDb.Get(key)
if err == nil {
fmt.Printf("Previous value: %#x\n", data)
}
return db.Put(key, value)
return opDb.Put(key, value)
}

// dbDumpTrie shows the key-value slots of a given storage trie
@@ -797,7 +875,7 @@ func dbDumpTrie(ctx *cli.Context) error {

db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
triedb := utils.MakeTrieDatabase(ctx, stack, db, false, true, false)
triedb := utils.MakeTrieDatabase(ctx, stack, db, false, true, false, false)
defer triedb.Close()

var (
@@ -875,7 +953,7 @@ func freezerInspect(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
ancient := stack.ResolveAncient("chaindata", ctx.String(utils.AncientFlag.Name))
stack.Close()
return rawdb.InspectFreezerTable(ancient, freezer, table, start, end)
return rawdb.InspectFreezerTable(ancient, freezer, table, start, end, stack.CheckIfMultiDataBase())
}

func importLDBdata(ctx *cli.Context) error {
@@ -1016,7 +1094,7 @@ func showMetaData(ctx *cli.Context) error {
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()

ancients, err := db.Ancients()
ancients, err := db.BlockStore().Ancients()
if err != nil {
fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
}
@@ -1061,7 +1139,7 @@ func hbss2pbss(ctx *cli.Context) error {
defer stack.Close()

db := utils.MakeChainDatabase(ctx, stack, false)
db.Sync()
db.BlockStore().Sync()
defer db.Close()

config := triedb.HashDefaults
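
The same selection pattern recurs in dbGet, dbDelete, and dbPut above: when the node runs with multiple databases, rawdb.DataTypeByKey classifies the key and the matching store (StateStore or BlockStore) replaces the default database for that operation. Below is a minimal, self-contained sketch of the idea; the map-backed stores and the single-letter prefix scheme are invented for illustration and are not the op-geth rawdb key layout.

package main

import (
	"bytes"
	"fmt"
)

// kvStore is a toy stand-in for an ethdb.Database.
type kvStore map[string][]byte

func (s kvStore) Put(key, val []byte) { s[string(key)] = val }
func (s kvStore) Get(key []byte) ([]byte, bool) {
	v, ok := s[string(key)]
	return v, ok
}

// dataTypeByKey mimics the role of rawdb.DataTypeByKey: map a key to the
// store that owns it. The prefixes here are made up for this demo.
func dataTypeByKey(key []byte) string {
	switch {
	case bytes.HasPrefix(key, []byte("A")): // pretend "A..." keys are state data
		return "state"
	case bytes.HasPrefix(key, []byte("h")), bytes.HasPrefix(key, []byte("b")): // headers / bodies
		return "block"
	default:
		return "chain"
	}
}

func main() {
	chain, state, block := kvStore{}, kvStore{}, kvStore{}

	// pick mirrors the opDb selection in dbGet/dbDelete/dbPut: default to the
	// chain database and switch to the state or block store by key type.
	pick := func(key []byte) kvStore {
		switch dataTypeByKey(key) {
		case "state":
			return state
		case "block":
			return block
		default:
			return chain
		}
	}

	key := []byte("h0001")
	pick(key).Put(key, []byte{0xbe, 0xef}) // routed to the block store
	v, ok := pick(key).Get(key)            // read goes to the same store
	fmt.Printf("value=%x found=%v (block store size=%d)\n", v, ok, len(block))
}
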
1 change: 1 addition & 0 deletions cmd/geth/main.go
@@ -122,6 +122,7 @@ var (
utils.CacheSnapshotFlag,
utils.CacheNoPrefetchFlag,
utils.CachePreimagesFlag,
utils.MultiDataBaseFlag,
utils.AllowInsecureNoTriesFlag,
utils.CacheLogSizeFlag,
utils.FDLimitFlag,
10 changes: 5 additions & 5 deletions cmd/geth/snapshot.go
@@ -244,7 +244,7 @@ func verifyState(ctx *cli.Context) error {
log.Error("Failed to load head block")
return errors.New("no head block")
}
triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false)
triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false, false)
defer triedb.Close()

snapConfig := snapshot.Config{
@@ -299,7 +299,7 @@ func traverseState(ctx *cli.Context) error {
chaindb := utils.MakeChainDatabase(ctx, stack, true)
defer chaindb.Close()

triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false)
triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false, false)
defer triedb.Close()

headBlock := rawdb.ReadHeadBlock(chaindb)
@@ -408,7 +408,7 @@ func traverseRawState(ctx *cli.Context) error {
chaindb := utils.MakeChainDatabase(ctx, stack, true)
defer chaindb.Close()

triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false)
triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false, false)
defer triedb.Close()

headBlock := rawdb.ReadHeadBlock(chaindb)
@@ -573,7 +573,7 @@ func dumpState(ctx *cli.Context) error {
return err
}
defer db.Close()
triedb := utils.MakeTrieDatabase(ctx, stack, db, false, true, false)
triedb := utils.MakeTrieDatabase(ctx, stack, db, false, true, false, false)
defer triedb.Close()

snapConfig := snapshot.Config{
@@ -655,7 +655,7 @@ func snapshotExportPreimages(ctx *cli.Context) error {
chaindb := utils.MakeChainDatabase(ctx, stack, true)
defer chaindb.Close()

triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false)
triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false, false)
defer triedb.Close()

var root common.Hash