From fa8d0e810c641c22d29e26d264d1b39becc48165 Mon Sep 17 00:00:00 2001
From: Sunny
Date: Mon, 21 Oct 2024 15:39:00 +0800
Subject: [PATCH] remove the ParallelLegacy

---
 cmd/geth/main.go                 |    1 -
 cmd/utils/flags.go               |   29 -
 core/blockchain.go               |   53 +-
 core/parallel_state_processor.go | 1236 -------------------
 core/state/parallel_statedb.go   | 1901 ------------------------------
 core/state/state_object.go       |   33 -
 core/state/statedb.go            |  420 +------
 core/state/statedb_test.go       |  376 ------
 core/state_processor.go          |    2 +-
 core/vm/interpreter.go           |    1 -
 eth/backend.go                   |    3 -
 eth/ethconfig/config.go          |    1 -
 12 files changed, 4 insertions(+), 4052 deletions(-)
 delete mode 100644 core/parallel_state_processor.go
 delete mode 100644 core/state/parallel_statedb.go

diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 9944682ba7..1e1df35afa 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -169,7 +169,6 @@ var (
 		utils.RollupComputePendingBlock,
 		utils.RollupHaltOnIncompatibleProtocolVersionFlag,
 		utils.RollupSuperchainUpgradesFlag,
-		utils.ParallelTxLegacyFlag,
 		utils.ParallelTxFlag,
 		utils.ParallelTxNumFlag,
 		utils.ParallelTxDAGFlag,
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 49eeb96669..345adc691d 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -29,7 +29,6 @@ import (
 	"net/http"
 	"os"
 	"path/filepath"
-	"runtime"
 	godebug "runtime/debug"
 	"strconv"
 	"strings"
@@ -1095,12 +1094,6 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server.
 		Category: flags.MetricsCategory,
 	}
 
-	ParallelTxLegacyFlag = &cli.BoolFlag{
-		Name:     "parallel-legacy",
-		Usage:    "Enable the experimental parallel transaction execution mode, only valid in full sync mode (default = false)",
-		Category: flags.VMCategory,
-	}
-
 	ParallelTxFlag = &cli.BoolFlag{
 		Name:     "parallel",
 		Usage:    "Enable the experimental parallel transaction execution mode, only valid in full sync mode (default = false)",
 		Category: flags.VMCategory,
 	}
@@ -2023,28 +2016,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 		cfg.EnablePreimageRecording = ctx.Bool(VMEnableDebugFlag.Name)
 	}
 
-	if ctx.IsSet(ParallelTxLegacyFlag.Name) {
-		cfg.ParallelTxLegacyMode = ctx.Bool(ParallelTxLegacyFlag.Name)
-		// The best parallel num will be tuned later; for now we do a simple setup here
-		numCpu := runtime.NumCPU()
-		var parallelNum int
-		if ctx.IsSet(ParallelTxNumFlag.Name) {
-			// Use the value set by "--parallel.num"; "--parallel.num 0" is not allowed and is bumped to 1
-			parallelNum = ctx.Int(ParallelTxNumFlag.Name)
-			if parallelNum < 1 {
-				parallelNum = 1
-			}
-		} else if numCpu == 1 {
-			parallelNum = 1 // single CPU core
-		} else {
-			// 1-2 cores for merge (with parallel KV check)
-			// 1-2 cores for others (bc optimizer, main)
-			// 1-2 cores for possible other concurrent routines
-			parallelNum = max(1, numCpu-6)
-		}
-		cfg.ParallelTxNum = parallelNum
-	}
-
 	if ctx.IsSet(ParallelTxFlag.Name) {
 		cfg.ParallelTxMode = ctx.Bool(ParallelTxFlag.Name)
 	}
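The deleted flags.go block above hard-codes the legacy slot-count heuristic. Extracted on its own it looks like the following; a minimal, runnable sketch where pickParallelNum is an illustrative name rather than anything in this tree, and which assumes Go 1.21+ for the max builtin (the deleted code itself relied on it):

package main

import (
	"fmt"
	"runtime"
)

// pickParallelNum mirrors the deleted heuristic: reserve roughly six
// cores for merging, KV checks, and other concurrent routines, and
// never go below one execution slot.
func pickParallelNum(requested int, requestedSet bool) int {
	if requestedSet {
		if requested < 1 {
			return 1 // "--parallel.num 0" was bumped to 1
		}
		return requested
	}
	numCpu := runtime.NumCPU()
	if numCpu == 1 {
		return 1
	}
	return max(1, numCpu-6)
}

func main() {
	fmt.Println(pickParallelNum(0, false))
}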
diff --git a/core/blockchain.go b/core/blockchain.go
index 8845c88425..ea31c9d0c4 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -531,11 +531,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
 		bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Root)
 	}
 
-	if bc.vmConfig.EnableParallelExecLegacy {
-		bc.CreateParallelProcessor(bc.vmConfig.ParallelTxNum)
-		bc.CreateSerialProcessor(chainConfig, bc, engine)
-		log.Info("Parallel V1 enabled", "parallelNum", bc.vmConfig.ParallelTxNum)
-	} else if bc.vmConfig.EnableParallelExec {
+	if bc.vmConfig.EnableParallelExec {
 		bc.processor = newPEVMProcessor(chainConfig, bc, engine)
 		log.Info("Parallel V2 enabled", "parallelNum", ParallelNum())
 	} else {
@@ -1911,20 +1907,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
 		statedb.StartPrefetcher("chain")
 		activeState = statedb
 
-		if bc.vmConfig.EnableParallelExecLegacy {
-			bc.parseTxDAG(block)
-			txsCount := block.Transactions().Len()
-			threshold := min(bc.vmConfig.ParallelTxNum/2+2, 4)
-			if bc.vmConfig.ParallelTxNum < 2 || txsCount < threshold || bc.isEmptyTxDAG() {
-				bc.UseSerialProcessor()
-				log.Debug("Disable Parallel Tx execution", "block", block.NumberU64(), "transactions", txsCount, "parallelTxNum", bc.vmConfig.ParallelTxNum)
-			} else {
-				bc.UseParallelProcessor()
-				log.Debug("Enable Parallel Tx execution", "block", block.NumberU64(), "transactions", txsCount, "parallelTxNum", bc.vmConfig.ParallelTxNum)
-			}
-		}
-
 		if bc.vmConfig.EnableParallelExec {
 			bc.parseTxDAG(block)
 		}
@@ -1951,10 +1933,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
 		// Process block using the parent state as reference point
 		pstart = time.Now()
 		receipts, logs, usedGas, err = bc.processor.Process(block, statedb, bc.vmConfig)
-		if err == FallbackToSerialProcessorErr {
-			bc.UseSerialProcessor()
-			receipts, logs, usedGas, err = bc.processor.Process(block, statedb, bc.vmConfig)
-		}
 		if err != nil {
 			bc.reportBlock(block, receipts, err)
 			followupInterrupt.Store(true)
@@ -1973,7 +1951,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
 		vtime := time.Since(vstart)
 		proctime := time.Since(start) // processing + validation
 
-		if bc.enableTxDAG && !bc.vmConfig.EnableParallelExecLegacy && !bc.vmConfig.EnableParallelExec {
+		if bc.enableTxDAG && !bc.vmConfig.EnableParallelExec {
 			// compare the input TxDAG when it is enabled in consensus
 			dag, err := statedb.ResolveTxDAG(len(block.Transactions()), []common.Address{block.Coinbase(), params.OptimismBaseFeeRecipient, params.OptimismL1FeeRecipient})
 			if err == nil {
@@ -2711,14 +2689,6 @@ func (bc *BlockChain) GetTrieFlushInterval() time.Duration {
 	return time.Duration(bc.flushInterval.Load())
 }
 
-func (bc *BlockChain) CreateParallelProcessor(parallelNum int) *BlockChain {
-	if bc.parallelProcessor == nil {
-		bc.parallelProcessor = newParallelStateProcessor(bc.Config(), bc, bc.engine, parallelNum)
-		bc.parallelExecution = true
-	}
-	return bc
-}
-
 func (bc *BlockChain) NoTries() bool {
 	return bc.stateCache.NoTries()
 }
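The insertChain hunk above also drops the serial-fallback retry that the legacy processor relied on. The shape of that retry, reduced to a standalone sketch (errFallback and the two process functions are placeholders, not this repo's API; the sketch uses errors.Is rather than the bare == comparison the old code used):

package main

import (
	"errors"
	"fmt"
)

// errFallback stands in for the deleted FallbackToSerialProcessorErr
// sentinel: the parallel processor returned it to ask insertChain to
// rerun the same block serially.
var errFallback = errors.New("fallback to serial processor")

func processParallel(block int) error { return errFallback }

func processSerial(block int) error { return nil }

func process(block int) error {
	err := processParallel(block)
	if errors.Is(err, errFallback) {
		// the removed code swapped bc.processor and retried once
		return processSerial(block)
	}
	return err
}

func main() { fmt.Println(process(42)) }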
@@ -2799,25 +2769,6 @@ func (bc *BlockChain) SetupTxDAGGeneration(output string, readFile bool) {
 	}()
 }
 
-func (bc *BlockChain) UseParallelProcessor() {
-	if bc.parallelProcessor != nil {
-		bc.parallelExecution = true
-		bc.processor = bc.parallelProcessor
-	} else {
-		log.Error("bc.ParallelProcessor is nil! fallback to serial processor!")
-		bc.UseSerialProcessor()
-	}
-}
-
-func (bc *BlockChain) UseSerialProcessor() {
-	if bc.serialProcessor != nil {
-		bc.parallelExecution = false
-		bc.processor = bc.serialProcessor
-	} else {
-		bc.CreateSerialProcessor(bc.chainConfig, bc, bc.engine)
-	}
-}
-
 type TxDAGOutputItem struct {
 	blockNumber uint64
 	txDAG       types.TxDAG
diff --git a/core/parallel_state_processor.go b/core/parallel_state_processor.go
deleted file mode 100644
index e61ffe0cb9..0000000000
--- a/core/parallel_state_processor.go
+++ /dev/null
@@ -1,1236 +0,0 @@
-package core
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"github.com/ethereum/go-ethereum/metrics"
-	"runtime"
-	"sync"
-	"sync/atomic"
-
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/consensus"
-	"github.com/ethereum/go-ethereum/consensus/misc"
-	"github.com/ethereum/go-ethereum/core/state"
-	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/core/vm"
-	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/log"
-	"github.com/ethereum/go-ethereum/params"
-)
-
-const (
-	parallelPrimarySlot = 0
-	parallelShadowSlot  = 1
-	stage2CheckNumber   = 30 // ConfirmStage2 will check this number of transactions, to avoid a too-busy stage2 check
-	stage2AheadNum      = 3  // enter ConfirmStage2 in advance to avoid waiting for a fat Tx
-)
-
-var (
-	FallbackToSerialProcessorErr = errors.New("fallback to serial processor")
-)
-
-type ResultHandleEnv struct {
-	statedb *state.StateDB
-	gp      *GasPool
-	txCount int
-}
-
-type ParallelStateProcessor struct {
-	StateProcessor
-	parallelNum           int          // leave a CPU to the dispatcher
-	slotState             []*SlotState // idle, or pending messages
-	allTxReqs             []*ParallelTxRequest
-	txResultChan          chan *ParallelTxResult // to notify the dispatcher that a tx is done
-	mergedTxIndex         atomic.Int32           // the latest finalized tx index
-	pendingConfirmResults *sync.Map              // a tx could be executed several times, with several results to check
-	unconfirmedResults    *sync.Map              // for stage2 confirm, since pendingConfirmResults can not be accessed in the stage2 loop
-	unconfirmedDBs        *sync.Map              // intermediate store of slotDBs that are not yet verified
-	slotDBsToRelease      *sync.Map
-	stopSlotChan          chan struct{}
-	stopConfirmChan       chan struct{}
-	debugConflictRedoNum  int
-
-	confirmStage2Chan     chan int
-	stopConfirmStage2Chan chan struct{}
-	txReqExecuteRecord    map[int]int
-	txReqExecuteCount     int
-	inConfirmStage2       bool
-	targetStage2Count     int
-	nextStage2TxIndex     int
-	delayGasFee           bool
-
-	commonTxs         []*types.Transaction
-	receipts          types.Receipts
-	error             error
-	resultMutex       sync.RWMutex
-	resultProcessChan chan *ResultHandleEnv
-	resultAppendChan  chan struct{}
-	parallelDBManager *state.ParallelDBManager
-}
-
-func newParallelStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consensus.Engine, parallelNum int) *ParallelStateProcessor {
-	processor := &ParallelStateProcessor{
-		StateProcessor: *NewStateProcessor(config, bc, engine),
-		parallelNum:    parallelNum,
-	}
-	processor.init()
-	return processor
-}
-
-type MergedTxInfo struct {
-	slotDB              *state.StateDB
-	StateObjectSuicided map[common.Address]struct{}
-	StateChangeSet      map[common.Address]state.StateKeys
-	BalanceChangeSet    map[common.Address]struct{}
-	CodeChangeSet       map[common.Address]struct{}
-	AddrStateChangeSet  map[common.Address]struct{}
-	txIndex             int
-}
-
-type SlotState struct {
-	pendingTxReqList  []*ParallelTxRequest
-	primaryWakeUpChan chan struct{}
-	shadowWakeUpChan  chan struct{}
-	primaryStopChan   chan struct{}
-	shadowStopChan    chan struct{}
-	activatedType     int32 // 0: primary slot, 1: shadow slot
-}
-
-type ParallelTxResult struct {
-	executedIndex int32 // records how many times the tx had been executed when this result was produced
-	slotIndex     int
-	txReq         *ParallelTxRequest
-	receipt       *types.Receipt
-	slotDB        *state.ParallelStateDB
-	gpSlot        *GasPool
-	evm           *vm.EVM
-	result        *ExecutionResult
-	originalNonce *uint64
-	err           error
-}
-
-type ParallelTxRequest struct {
-	txIndex         int
-	baseStateDB     *state.StateDB
-	staticSlotIndex int
-	tx              *types.Transaction
-	gasLimit        uint64
-	msg             *Message
-	block           *types.Block
-	vmConfig        vm.Config
-	usedGas         *uint64
-	curTxChan       chan int
-	runnable        int32 // 0: not runnable; 1: runnable, i.e. can be scheduled
-	executedNum     atomic.Int32
-	conflictIndex   atomic.Int32 // the conflicting mainDB index; the tx will not be executed before this index is merged
-	useDAG          bool
-}
-
-// init initializes and starts the execution goroutines
-func (p *ParallelStateProcessor) init() {
-	log.Info("Parallel execution mode is enabled", "Parallel Num", p.parallelNum,
-		"CPUNum", runtime.NumCPU())
-	p.txResultChan = make(chan *ParallelTxResult, 20000)
-	p.stopSlotChan = make(chan struct{}, 1)
-	p.stopConfirmChan = make(chan struct{}, 1)
-	p.stopConfirmStage2Chan = make(chan struct{}, 1)
-
-	p.resultProcessChan = make(chan *ResultHandleEnv, 1)
-	p.resultAppendChan = make(chan struct{}, 20000)
-
-	p.slotState = make([]*SlotState, p.parallelNum)
-
-	p.parallelDBManager = state.NewParallelDBManager(20000, state.NewEmptySlotDB)
-
-	quickMergeNum := 2 // p.parallelNum / 2
-	for i := 0; i < p.parallelNum-quickMergeNum; i++ {
-		p.slotState[i] = &SlotState{
-			primaryWakeUpChan: make(chan struct{}, 1),
-			shadowWakeUpChan:  make(chan struct{}, 1),
-			primaryStopChan:   make(chan struct{}, 1),
-			shadowStopChan:    make(chan struct{}, 1),
-		}
-		// start the primary slot's goroutine
-		go func(slotIndex int) {
-			p.runSlotLoop(slotIndex, parallelPrimarySlot) // this loop is permanently live
-		}(i)
-
-		// start the shadow slot.
-		// It is a backup of the primary slot to make sure a transaction can be redone ASAP,
-		// since the primary slot could be busy executing another transaction
-		go func(slotIndex int) {
-			p.runSlotLoop(slotIndex, parallelShadowSlot)
-		}(i)
-	}
-
-	for i := p.parallelNum - quickMergeNum; i < p.parallelNum; i++ {
-		// init a quick merge slot
-		p.slotState[i] = &SlotState{
-			primaryWakeUpChan: make(chan struct{}, 1),
-			shadowWakeUpChan:  make(chan struct{}, 1),
-			primaryStopChan:   make(chan struct{}, 1),
-			shadowStopChan:    make(chan struct{}, 1),
-		}
-		go func(slotIndex int) {
-			p.runQuickMergeSlotLoop(slotIndex, parallelPrimarySlot)
-		}(i)
-		go func(slotIndex int) {
-			p.runQuickMergeSlotLoop(slotIndex, parallelShadowSlot)
-		}(i)
-	}
-
-	p.confirmStage2Chan = make(chan int, 10)
-	go func() {
-		p.runConfirmStage2Loop()
-	}()
-
-	go func() {
-		p.handlePendingResultLoop()
-	}()
-}
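init wires every slot to a primary/shadow goroutine pair sharing one pending queue, with activatedType deciding which of the two should currently run. The switching trick can be shown in isolation; a sketch with illustrative names that uses a non-blocking select instead of the len(chan) == 0 probe the deleted switchSlot used (that probe can race between the check and the send):

package main

import (
	"fmt"
	"sync/atomic"
)

const (
	primary int32 = 0
	shadow  int32 = 1
)

// slot is a cut-down SlotState: two goroutines share one work queue and
// an activated flag that says which of them should be running.
type slot struct {
	activated int32
	primaryCh chan struct{}
	shadowCh  chan struct{}
}

// switchTo flips the active routine with a CAS and pokes its wakeup
// channel without ever blocking.
func (s *slot) switchTo() {
	if atomic.CompareAndSwapInt32(&s.activated, primary, shadow) {
		select {
		case s.shadowCh <- struct{}{}:
		default:
		}
	} else if atomic.CompareAndSwapInt32(&s.activated, shadow, primary) {
		select {
		case s.primaryCh <- struct{}{}:
		default:
		}
	}
}

func main() {
	s := &slot{primaryCh: make(chan struct{}, 1), shadowCh: make(chan struct{}, 1)}
	s.switchTo()
	fmt.Println(atomic.LoadInt32(&s.activated)) // 1: the shadow routine is now active
}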
-
-// resetState clears the slot state for each block.
-func (p *ParallelStateProcessor) resetState(txNum int, statedb *state.StateDB) {
-	if txNum == 0 {
-		return
-	}
-	p.mergedTxIndex.Store(-1)
-	p.debugConflictRedoNum = 0
-	p.inConfirmStage2 = false
-
-	statedb.PrepareForParallel()
-	p.allTxReqs = make([]*ParallelTxRequest, txNum)
-
-	for _, slot := range p.slotState {
-		slot.pendingTxReqList = make([]*ParallelTxRequest, 0)
-		slot.activatedType = parallelPrimarySlot
-	}
-	p.unconfirmedResults = new(sync.Map)
-	p.unconfirmedDBs = new(sync.Map)
-	p.slotDBsToRelease = new(sync.Map)
-	p.pendingConfirmResults = new(sync.Map)
-	p.txReqExecuteRecord = make(map[int]int, txNum)
-	p.txReqExecuteCount = 0
-	p.nextStage2TxIndex = 0
-}
-
-// Benefits of StaticDispatch:
-//
-// ** tries its best to put Txs with the same From() in the same slot
-// ** reduces IPC cost by dispatching in Units
-// ** makes sure the same From is in the same slot
-// ** tries to keep it balanced: queue to the most hungry slot for a new Address
-func (p *ParallelStateProcessor) doStaticDispatch(txReqs []*ParallelTxRequest) {
-	fromSlotMap := make(map[common.Address]int, 100)
-	toSlotMap := make(map[common.Address]int, 100)
-	for _, txReq := range txReqs {
-		var slotIndex = -1
-		if i, ok := fromSlotMap[txReq.msg.From]; ok {
-			// first: the same From goes to the same slot
-			slotIndex = i
-		} else if txReq.msg.To != nil {
-			// the To Address, with txIndex sorted, could be in a different slot.
-			if i, ok := toSlotMap[*txReq.msg.To]; ok {
-				slotIndex = i
-			}
-		}
-
-		// not found: dispatch to the most hungry slot
-		if slotIndex == -1 {
-			slotIndex = p.mostHungrySlot()
-		}
-		// update
-		fromSlotMap[txReq.msg.From] = slotIndex
-		if txReq.msg.To != nil {
-			toSlotMap[*txReq.msg.To] = slotIndex
-		}
-
-		slot := p.slotState[slotIndex]
-		txReq.staticSlotIndex = slotIndex // txReq is best executed in this slot
-		slot.pendingTxReqList = append(slot.pendingTxReqList, txReq)
-	}
-}
-
-func (p *ParallelStateProcessor) mostHungrySlot() int {
-	var (
-		workload  = len(p.slotState[0].pendingTxReqList)
-		slotIndex = 0
-	)
-	for i, slot := range p.slotState { // could start from index 1
-		if len(slot.pendingTxReqList) < workload {
-			slotIndex = i
-			workload = len(slot.pendingTxReqList)
-		}
-		// just return the first slot with 0 workload
-		if workload == 0 {
-			return slotIndex
-		}
-	}
-	return slotIndex
-}
-
-// hasConflict conducts the conflict check
-func (p *ParallelStateProcessor) hasConflict(txResult *ParallelTxResult, isStage2 bool) bool {
-	slotDB := txResult.slotDB
-	if txResult.err != nil {
-		log.Info("HasConflict due to err", "err", txResult.err)
-		return true
-	} else if slotDB.NeedsRedo() {
-		log.Info("HasConflict needsRedo")
-		// if there is any reason indicating this transaction needs to be redone, skip the conflict check
-		return true
-	} else {
-		// check whether the slot db's reads during execution are correct.
-		if !slotDB.IsParallelReadsValid(isStage2) {
-			return true
-		}
-	}
-	return false
-}
-
-func (p *ParallelStateProcessor) switchSlot(slotIndex int) {
-	slot := p.slotState[slotIndex]
-	if atomic.CompareAndSwapInt32(&slot.activatedType, parallelPrimarySlot, parallelShadowSlot) {
-		// switch from the primary to the shadow slot
-		if len(slot.shadowWakeUpChan) == 0 {
-			slot.shadowWakeUpChan <- struct{}{}
-		}
-	} else if atomic.CompareAndSwapInt32(&slot.activatedType, parallelShadowSlot, parallelPrimarySlot) {
-		// switch from the shadow to the primary slot
-		if len(slot.primaryWakeUpChan) == 0 {
-			slot.primaryWakeUpChan <- struct{}{}
-		}
-	}
-}
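The dispatch policy above is easier to see on a toy scale. A minimal sketch of the same idea (illustrative names, sender addresses reduced to strings):

package main

import "fmt"

// dispatch mirrors the deleted doStaticDispatch: txs from the same sender
// land in the same queue, and anything unmatched goes to the emptiest
// queue so no slot starves.
func dispatch(senders []string, slots int) [][]int {
	queues := make([][]int, slots)
	bySender := make(map[string]int)
	for txIndex, from := range senders {
		slot, ok := bySender[from]
		if !ok {
			slot = 0
			for i := range queues { // the most hungry slot
				if len(queues[i]) < len(queues[slot]) {
					slot = i
				}
			}
			bySender[from] = slot
		}
		queues[slot] = append(queues[slot], txIndex)
	}
	return queues
}

func main() {
	fmt.Println(dispatch([]string{"a", "b", "a", "c", "b"}, 2)) // [[0 2] [1 3 4]]
}

Keeping a sender's transactions in one slot matters because they are nonce-ordered: spreading them across slots would only manufacture conflicts.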
-
-// executeInSlot executes a tx with a thread-local slot.
-func (p *ParallelStateProcessor) executeInSlot(slotIndex int, txReq *ParallelTxRequest) *ParallelTxResult {
-	mIndex := p.mergedTxIndex.Load()
-	conflictIndex := txReq.conflictIndex.Load()
-	if mIndex < conflictIndex {
-		// The conflicting TX has not finished executing yet, skip.
-		atomic.CompareAndSwapInt32(&txReq.runnable, 0, 1)
-		return nil
-	}
-	execNum := txReq.executedNum.Add(1)
-	slotDB := state.NewSlotDB(txReq.baseStateDB, txReq.txIndex, int(mIndex), p.parallelDBManager, p.unconfirmedDBs, txReq.useDAG)
-	blockContext := NewEVMBlockContext(txReq.block.Header(), p.bc, nil, p.config, slotDB) // the blockContext can be shared within a block for efficiency
-	txContext := NewEVMTxContext(txReq.msg)
-	vmenv := vm.NewEVM(blockContext, txContext, slotDB, p.config, txReq.vmConfig)
-
-	rules := p.config.Rules(txReq.block.Number(), blockContext.Random != nil, blockContext.Time)
-	slotDB.Prepare(rules, txReq.msg.From, vmenv.Context.Coinbase, txReq.msg.To, vm.ActivePrecompiles(rules), txReq.msg.AccessList)
-
-	// gasLimit is not accurate, but it is ok for block import.
-	// each slot uses its own gas pool and will do the gas limit check later
-	gpSlot := new(GasPool).AddGas(txReq.gasLimit) // block.GasLimit()
-
-	on := txReq.tx.Nonce()
-	if txReq.msg.IsDepositTx && p.config.IsOptimismRegolith(vmenv.Context.Time) {
-		on = slotDB.GetNonce(txReq.msg.From)
-	}
-
-	slotDB.SetTxContext(txReq.tx.Hash(), txReq.txIndex)
-	evm, result, err := applyTransactionStageExecution(txReq.msg, gpSlot, slotDB, vmenv, p.delayGasFee)
-	txResult := ParallelTxResult{
-		executedIndex: execNum,
-		slotIndex:     slotIndex,
-		txReq:         txReq,
-		receipt:       nil,
-		slotDB:        slotDB,
-		err:           err,
-		gpSlot:        gpSlot,
-		evm:           evm,
-		result:        result,
-		originalNonce: &on,
-	}
-
-	if err == nil {
-		p.unconfirmedDBs.Store(txReq.txIndex, slotDB)
-	} else {
-		// the transaction failed at a check (nonce or balance), so it actually has not been executed yet.
-		// the error here can be either expected or unexpected:
-		// expected - the execution is correct and the error is a normal result
-		// unexpected - the execution incorrectly accessed the state because of parallelization.
-		// In both cases, rerun with the next version of the stateDB; it is wasteful and buggy to rerun with the
-		// same version of the stateDB that has already been marked as conflicting.
-		// Therefore, treat it as a conflict and rerun, leaving the result to the conflict check.
-		// Load conflictIndex, as it may have been updated by the conflict checker or other execution slots.
-		// use the old mIndex so that we can try the new one updated by another merging thread
-		// during execution.
-		conflictIndex = txReq.conflictIndex.Load()
-		if conflictIndex < mIndex {
-			if txReq.conflictIndex.CompareAndSwap(conflictIndex, mIndex) {
-				log.Debug(fmt.Sprintf("Update conflictIndex in execution because of error: %s, new conflictIndex: %d", err.Error(), conflictIndex))
-			}
-		}
-		atomic.CompareAndSwapInt32(&txReq.runnable, 0, 1)
-		// the error could be caused by an unconfirmed balance reference:
-		// the balance could be insufficient to pay the gas limit, which makes preCheck.buyGas() fail.
-		// a redo can solve it.
-		log.Debug("In slot execution error", "error", err,
-			"slotIndex", slotIndex, "txIndex", txReq.txIndex)
-	}
-	p.unconfirmedResults.Store(txReq.txIndex, &txResult)
-	return &txResult
-}
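The runnable/executedNum pair above implements optimistic, versioned execution: a request is claimed exactly once per round via CAS, and a failed attempt re-arms itself for a redo. A self-contained sketch of that protocol under hypothetical names:

package main

import (
	"fmt"
	"sync/atomic"
)

// req is a cut-down ParallelTxRequest: runnable gates scheduling, and
// executedNum versions the attempts so stale results can be discarded.
type req struct {
	runnable    int32
	executedNum atomic.Int32
}

// tryExecute claims the request via CAS, runs it, and re-arms runnable
// when the attempt must be retried.
func tryExecute(r *req, conflict bool) (version int32, ok bool) {
	if !atomic.CompareAndSwapInt32(&r.runnable, 1, 0) {
		return 0, false // someone else is already running it
	}
	v := r.executedNum.Add(1)
	if conflict {
		atomic.CompareAndSwapInt32(&r.runnable, 0, 1) // schedule a redo
		return v, false
	}
	return v, true
}

func main() {
	r := &req{runnable: 1}
	fmt.Println(tryExecute(r, true))  // first attempt conflicts: 1 false
	fmt.Println(tryExecute(r, false)) // the redo succeeds with version 2
}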
-
-// toConfirmTxIndex confirms serial TxResults with the same txIndex
-func (p *ParallelStateProcessor) toConfirmTxIndex(targetTxIndex int, isStage2 bool) *ParallelTxResult {
-	if isStage2 {
-		if targetTxIndex <= int(p.mergedTxIndex.Load())+1 {
-			// `p.mergedTxIndex+1` is the one to be merged;
-			// in stage2 we do a "likely" conflict check for those not yet their turn.
-			return nil
-		}
-	}
-
-	for {
-		// handle one targetTxIndex per loop iteration
-		var targetResult *ParallelTxResult
-		if isStage2 {
-			result, ok := p.unconfirmedResults.Load(targetTxIndex)
-			if !ok {
-				return nil
-			}
-			targetResult = result.(*ParallelTxResult)
-
-			// in stage 2, don't schedule a new redo if the TxReq is:
-			// a. runnable: it will be redone anyway
-			// b. running: the new result will be more reliable, so skip the check right now
-			if atomic.LoadInt32(&targetResult.txReq.runnable) == 1 {
-				return nil
-			}
-			if targetResult.executedIndex < targetResult.txReq.executedNum.Load() {
-				// skip an intermediate result that is not the latest.
-				return nil
-			}
-		} else {
-			// pop one result as the target result.
-			result, ok := p.pendingConfirmResults.LoadAndDelete(targetTxIndex)
-			if !ok {
-				return nil
-			}
-			targetResult = result.(*ParallelTxResult)
-		}
-
-		valid := p.toConfirmTxIndexResult(targetResult, isStage2)
-		if !valid {
-			staticSlotIndex := targetResult.txReq.staticSlotIndex
-			conflictBase := targetResult.slotDB.BaseTxIndex()
-			conflictIndex := targetResult.txReq.conflictIndex.Load()
-			if conflictIndex < int32(conflictBase) {
-				if targetResult.txReq.conflictIndex.CompareAndSwap(conflictIndex, int32(conflictBase)) {
-					log.Debug("Update conflict index", "conflictIndex", conflictIndex, "conflictBase", conflictBase)
-				}
-			}
-			if isStage2 {
-				atomic.CompareAndSwapInt32(&targetResult.txReq.runnable, 0, 1) // needs redo
-				p.debugConflictRedoNum++
-				// interrupt the slot's current routine and switch to the other routine
-				p.switchSlot(staticSlotIndex)
-				return nil
-			}
-
-			if _, ok := p.pendingConfirmResults.Load(targetTxIndex); !ok { // this is the last result to check, and it is not valid
-				// This means the tx has been executed more than blockTxCount times, so it exits with the error.
-				if targetResult.txReq.txIndex == int(p.mergedTxIndex.Load())+1 &&
-					targetResult.slotDB.BaseTxIndex() == int(p.mergedTxIndex.Load()) {
-					if targetResult.err != nil {
-						return targetResult
-					} else {
-						// abnormal exit with a conflict error; the parallel algorithm needs to be checked
-						targetResult.err = ErrParallelUnexpectedConflict
-						return targetResult
-					}
-				}
-				atomic.CompareAndSwapInt32(&targetResult.txReq.runnable, 0, 1) // needs redo
-				p.debugConflictRedoNum++
-				// interrupt its current routine and switch to the other routine
-				p.switchSlot(staticSlotIndex)
-				// reclaim the result.
-				p.slotDBsToRelease.Store(targetResult.slotDB, targetResult.slotDB)
-				return nil
-			}
-			continue
-		}
-		if isStage2 {
-			// likely valid, but not certain, so it can not be delivered
-			return nil
-		}
-		return targetResult
-	}
-}
-
-// toConfirmTxIndexResult confirms one txResult and returns true if the result is valid;
-// if it is in Stage 2, it is a likely result, not 100% certain
-func (p *ParallelStateProcessor) toConfirmTxIndexResult(txResult *ParallelTxResult, isStage2 bool) bool {
-	txReq := txResult.txReq
-	if p.hasConflict(txResult, isStage2) {
-		log.Info(fmt.Sprintf("HasConflict!! block: %d, txIndex: %d\n", txResult.txReq.block.NumberU64(), txResult.txReq.txIndex))
-		return false
-	}
-	if isStage2 { // not its turn
-		return true // likely valid; not certain, not finalized right now.
-	}
-
-	// goroutine-unsafe operations are handled from here on, for safety
-	gasConsumed := txReq.gasLimit - txResult.gpSlot.Gas()
-	if gasConsumed != txResult.result.UsedGas {
-		log.Error("gasConsumed != result.UsedGas mismatch",
-			"gasConsumed", gasConsumed, "result.UsedGas", txResult.result.UsedGas)
-	}
-
-	// ok, time to finalize; stage2 should not be parallel
-	txResult.receipt, txResult.err = applyTransactionStageFinalization(txResult.evm, txResult.result,
-		*txReq.msg, p.config, txResult.slotDB, txReq.block,
-		txReq.tx, txReq.usedGas, txResult.originalNonce)
-	return true
-}
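Stripped of the machinery, the validity test both stages rely on is read-set re-validation: a slot's result stands only if every value it read is still what the merged state holds. A minimal sketch with toy types:

package main

import "fmt"

// validate re-checks a slot's recorded reads against the now-merged main
// state, the core of the deleted hasConflict/IsParallelReadsValid check.
func validate(readSet, merged map[string]string) bool {
	for key, seen := range readSet {
		if merged[key] != seen {
			return false // a preceding tx changed this key: redo
		}
	}
	return true
}

func main() {
	merged := map[string]string{"0xabc/slot1": "2"}
	fmt.Println(validate(map[string]string{"0xabc/slot1": "1"}, merged)) // false: conflict
	fmt.Println(validate(map[string]string{"0xabc/slot1": "2"}, merged)) // true: mergeable
}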
-
-func (p *ParallelStateProcessor) runSlotLoop(slotIndex int, slotType int32) {
-	curSlot := p.slotState[slotIndex]
-	var wakeupChan chan struct{}
-	var stopChan chan struct{}
-
-	if slotType == parallelPrimarySlot {
-		wakeupChan = curSlot.primaryWakeUpChan
-		stopChan = curSlot.primaryStopChan
-	} else {
-		wakeupChan = curSlot.shadowWakeUpChan
-		stopChan = curSlot.shadowStopChan
-	}
-
-	lastStartPos := 0
-	for {
-		select {
-		case <-stopChan:
-			p.stopSlotChan <- struct{}{}
-			continue
-		case <-wakeupChan:
-		}
-
-		interrupted := false
-
-		for i := lastStartPos; i < len(curSlot.pendingTxReqList); i++ {
-			txReq := curSlot.pendingTxReqList[i]
-			if txReq.txIndex <= int(p.mergedTxIndex.Load()) {
-				continue
-			}
-			lastStartPos = i
-
-			if txReq.conflictIndex.Load() > p.mergedTxIndex.Load() {
-				break
-			}
-
-			if atomic.LoadInt32(&curSlot.activatedType) != slotType {
-				interrupted = true
-				break
-			}
-
-			// first try the next-to-be-merged req.
-			nextIdx := p.mergedTxIndex.Load() + 1
-			if nextIdx < int32(len(p.allTxReqs)) {
-				nextMergeReq := p.allTxReqs[nextIdx]
-				if nextMergeReq.runnable == 1 {
-					if atomic.CompareAndSwapInt32(&nextMergeReq.runnable, 1, 0) {
-						// execute.
-						res := p.executeInSlot(slotIndex, nextMergeReq)
-						if res != nil {
-							p.txResultChan <- res
-						}
-					}
-				}
-			}
-
-			if txReq.runnable == 1 {
-				// try the next req in loop sequence.
-				if !atomic.CompareAndSwapInt32(&txReq.runnable, 1, 0) {
-					continue
-				}
-				res := p.executeInSlot(slotIndex, txReq)
-				if res == nil {
-					continue
-				}
-				p.txResultChan <- res
-			}
-		}
-		// switched to the other slot.
-		if interrupted {
-			continue
-		}
-
-		// the txReqs in this slot have all been executed; try to steal one from another slot.
-		// as long as a TxReq is runnable, steal it and mark it as stolen
-		for j := int(p.mergedTxIndex.Load()) + 1; j < len(p.allTxReqs); j++ {
-			stealTxReq := p.allTxReqs[j]
-			if stealTxReq.txIndex <= int(p.mergedTxIndex.Load()) {
-				continue
-			}
-
-			if stealTxReq.conflictIndex.Load() > p.mergedTxIndex.Load() {
-				break
-			}
-
-			if atomic.LoadInt32(&curSlot.activatedType) != slotType {
-				interrupted = true
-				break
-			}
-
-			// first try the next-to-be-merged req.
-			nextIdx := p.mergedTxIndex.Load() + 1
-			if nextIdx < int32(len(p.allTxReqs)) {
-				nextMergeReq := p.allTxReqs[nextIdx]
-				if nextMergeReq.runnable == 1 {
-					if atomic.CompareAndSwapInt32(&nextMergeReq.runnable, 1, 0) {
-						// execute.
-						res := p.executeInSlot(slotIndex, nextMergeReq)
-						if res != nil {
-							p.txResultChan <- res
-						}
-					}
-				}
-			}
-
-			if stealTxReq.runnable == 1 {
-				if !atomic.CompareAndSwapInt32(&stealTxReq.runnable, 1, 0) {
-					continue
-				}
-				res := p.executeInSlot(slotIndex, stealTxReq)
-				if res == nil {
-					continue
-				}
-				p.txResultChan <- res
-			}
-		}
-	}
-}
-
-func (p *ParallelStateProcessor) runQuickMergeSlotLoop(slotIndex int, slotType int32) {
-	curSlot := p.slotState[slotIndex]
-	var wakeupChan chan struct{}
-	var stopChan chan struct{}
-
-	if slotType == parallelPrimarySlot {
-		wakeupChan = curSlot.primaryWakeUpChan
-		stopChan = curSlot.primaryStopChan
-	} else {
-		wakeupChan = curSlot.shadowWakeUpChan
-		stopChan = curSlot.shadowStopChan
-	}
-	for {
-		select {
-		case <-stopChan:
-			p.stopSlotChan <- struct{}{}
-			continue
-		case <-wakeupChan:
-		}
-
-		next := int(p.mergedTxIndex.Load()) + 1
-
-		executed := 5
-		for i := next; i < len(p.allTxReqs); i++ {
-			txReq := p.allTxReqs[next]
-			if executed == 0 {
-				break
-			}
-			if txReq.txIndex <= int(p.mergedTxIndex.Load()) {
-				continue
-			}
-			if txReq.conflictIndex.Load() > p.mergedTxIndex.Load() {
-				break
-			}
-			if txReq.runnable == 1 {
-				if !atomic.CompareAndSwapInt32(&txReq.runnable, 1, 0) {
-					continue
-				}
-				res := p.executeInSlot(slotIndex, txReq)
-				if res != nil {
-					executed--
-					p.txResultChan <- res
-				}
-			}
-		}
-	}
-}
-
-func (p *ParallelStateProcessor) runConfirmStage2Loop() {
-	for {
-		select {
-		case <-p.stopConfirmStage2Chan:
-			for len(p.confirmStage2Chan) > 0 {
-				<-p.confirmStage2Chan
-			}
-			p.stopSlotChan <- struct{}{}
-			continue
-		case <-p.confirmStage2Chan:
-			for len(p.confirmStage2Chan) > 0 {
-				<-p.confirmStage2Chan // drain the chan to get the latest merged txIndex
-			}
-		}
-		// stage 2: all txs have been executed at least once and their results have been received;
-		// run checks whenever the merge has advanced.
-		// a more aggressive tx result confirmation, even for those Txs not yet in turn
-		startTxIndex := int(p.mergedTxIndex.Load()) + 2 // stage 2 starts from the next target merge index
-		endTxIndex := startTxIndex + stage2CheckNumber
-		txSize := len(p.allTxReqs)
-		if endTxIndex > (txSize - 1) {
-			endTxIndex = txSize - 1
-		}
-		log.Debug("runConfirmStage2Loop", "startTxIndex", startTxIndex, "endTxIndex", endTxIndex)
-		for txIndex := startTxIndex; txIndex < endTxIndex; txIndex++ {
-			p.toConfirmTxIndex(txIndex, true)
-		}
-		// make sure all slots are woken up
-		for i := 0; i < p.parallelNum; i++ {
-			p.switchSlot(i)
-		}
-	}
-}
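The stealing half of runSlotLoop reduces to one idea: an idle slot scans the global request list from the merge frontier forward and CAS-claims the first runnable entry, so whoever is free always helps the next transaction that blocks the merge. In isolation:

package main

import (
	"fmt"
	"sync/atomic"
)

// steal scans from the merge frontier forward and claims the first
// runnable request, the order the deleted stealing loop used.
func steal(runnable []int32, mergedIndex int32) int {
	for j := int(mergedIndex) + 1; j < len(runnable); j++ {
		if atomic.CompareAndSwapInt32(&runnable[j], 1, 0) {
			return j
		}
	}
	return -1
}

func main() {
	flags := []int32{0, 0, 1, 1} // txs 0-1 claimed, 2-3 still runnable
	fmt.Println(steal(flags, 0)) // claims tx 2
	fmt.Println(steal(flags, 0)) // claims tx 3
	fmt.Println(steal(flags, 0)) // nothing left: -1
}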
-
-func (p *ParallelStateProcessor) handleTxResults() *ParallelTxResult {
-	confirmedResult := p.toConfirmTxIndex(int(p.mergedTxIndex.Load())+1, false)
-	if confirmedResult == nil {
-		return nil
-	}
-	// schedule stage 2 when a new Tx has been merged; schedule once and ASAP
-	// stage 2: all txs have been executed at least once and their results have been received;
-	// run checks when the main DB is advanced, i.e., a new Tx result has been merged.
-	if p.inConfirmStage2 && int(p.mergedTxIndex.Load()) >= p.nextStage2TxIndex {
-		p.nextStage2TxIndex = int(p.mergedTxIndex.Load()) + stage2CheckNumber
-		p.confirmStage2Chan <- int(p.mergedTxIndex.Load())
-	}
-	return confirmedResult
-}
-
-// confirmTxResults waits until the next Tx is executed, then merges its result into the main stateDB
-func (p *ParallelStateProcessor) confirmTxResults(statedb *state.StateDB, gp *GasPool) *ParallelTxResult {
-	result := p.handleTxResults()
-	if result == nil {
-		return nil
-	}
-	// ok, the tx result is valid and can be merged
-	if result.err != nil {
-		return result
-	}
-
-	if err := gp.SubGas(result.receipt.GasUsed); err != nil {
-		log.Error("gas limit reached", "block", result.txReq.block.Number(),
-			"txIndex", result.txReq.txIndex, "GasUsed", result.receipt.GasUsed, "gp.Gas", gp.Gas())
-	}
-
-	resultTxIndex := result.txReq.txIndex
-
-	var root []byte
-	header := result.txReq.block.Header()
-
-	isByzantium := p.config.IsByzantium(header.Number)
-	isEIP158 := p.config.IsEIP158(header.Number)
-	result.slotDB.FinaliseForParallel(isByzantium || isEIP158, statedb)
-
-	// merge the slotDB into the mainDB
-	statedb.MergeSlotDB(result.slotDB, result.receipt, resultTxIndex, result.result.delayFees)
-
-	delayGasFee := result.result.delayFees
-	// add the delayed gas fees
-	if delayGasFee != nil {
-		if delayGasFee.TipFee != nil {
-			statedb.AddBalance(delayGasFee.Coinbase, delayGasFee.TipFee)
-		}
-		if delayGasFee.BaseFee != nil {
-			statedb.AddBalance(params.OptimismBaseFeeRecipient, delayGasFee.BaseFee)
-		}
-		if delayGasFee.L1Fee != nil {
-			statedb.AddBalance(params.OptimismL1FeeRecipient, delayGasFee.L1Fee)
-		}
-	}
-
-	// Do IntermediateRoot after MergeSlotDB.
-	if !isByzantium {
-		root = statedb.IntermediateRoot(isEIP158).Bytes()
-	}
-	result.receipt.PostState = root
-
-	if resultTxIndex != int(p.mergedTxIndex.Load())+1 {
-		log.Error("ProcessParallel tx result out of order", "resultTxIndex", resultTxIndex,
-			"p.mergedTxIndex", p.mergedTxIndex.Load())
-	}
-	p.mergedTxIndex.Store(int32(resultTxIndex))
-
-	// trigger all slots to run the remaining conflicted txs
-	for _, slot := range p.slotState {
-		var wakeupChan chan struct{}
-		if slot.activatedType == parallelPrimarySlot {
-			wakeupChan = slot.primaryWakeUpChan
-		} else {
-			wakeupChan = slot.shadowWakeUpChan
-		}
-		select {
-		case wakeupChan <- struct{}{}:
-		default:
-		}
-	}
-	// schedule prefetch only once, when the unconfirmedResult is valid
-	if result.err == nil {
-		if _, ok := p.txReqExecuteRecord[resultTxIndex]; !ok {
-			p.txReqExecuteRecord[resultTxIndex] = 0
-			p.txReqExecuteCount++
-			statedb.AddrPrefetch(result.slotDB)
-			if !p.inConfirmStage2 && p.txReqExecuteCount == p.targetStage2Count {
-				p.inConfirmStage2 = true
-			}
-		}
-		p.txReqExecuteRecord[resultTxIndex]++
-	}
-	// after the merge, the slotDB is no longer accessible; reclaim the resource
-	p.slotDBsToRelease.Store(result.slotDB, result.slotDB)
-	return result
-}
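However the slots race, confirmation is strictly ordered: results are merged in tx-index order and each confirmed receipt's gas is deducted from the block gas pool exactly once. A toy stand-in (gasPool is illustrative, not core.GasPool):

package main

import (
	"errors"
	"fmt"
)

// gasPool is a toy stand-in for core.GasPool.
type gasPool struct{ gas uint64 }

func (g *gasPool) subGas(amount uint64) error {
	if g.gas < amount {
		return errors.New("gas limit reached")
	}
	g.gas -= amount
	return nil
}

func main() {
	gp := &gasPool{gas: 30_000_000}
	usedPerTx := []uint64{21000, 120000, 53000}
	mergedTxIndex := -1
	for i, used := range usedPerTx {
		if err := gp.subGas(used); err != nil {
			panic(err)
		}
		mergedTxIndex = i // only advance after the result is merged
	}
	fmt.Println(mergedTxIndex, gp.gas)
}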
-
-func (p *ParallelStateProcessor) doCleanUp() {
-	// 1. clean up all slots, primary and shadow, to make sure they are stopped
-	for _, slot := range p.slotState {
-		slot.primaryStopChan <- struct{}{}
-		slot.shadowStopChan <- struct{}{}
-		<-p.stopSlotChan
-		<-p.stopSlotChan
-	}
-	// 2. discard delayed txResults, if any
-	for {
-		if len(p.txResultChan) > 0 {
-			<-p.txResultChan
-			continue
-		}
-		break
-	}
-	// 3. make sure the confirmation routine is stopped
-	p.stopConfirmStage2Chan <- struct{}{}
-	<-p.stopSlotChan
-
-	p.unconfirmedResults = nil
-	p.unconfirmedDBs = nil
-	p.pendingConfirmResults = nil
-
-	go func() {
-		p.slotDBsToRelease.Range(func(key, value any) bool {
-			sdb := value.(*state.ParallelStateDB)
-			sdb.PutSyncPool(p.parallelDBManager)
-			return true
-		})
-	}()
-}
-
-// Process implements BEP-130 Parallel Transaction Execution
-func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, uint64, error) {
-	var (
-		usedGas = new(uint64)
-		header  = block.Header()
-		gp      = new(GasPool).AddGas(block.GasLimit())
-	)
-
-	// Mutate the block and state according to any hard-fork specs
-	if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 {
-		misc.ApplyDAOHardFork(statedb)
-	}
-	if p.config.PreContractForkBlock != nil && p.config.PreContractForkBlock.Cmp(block.Number()) == 0 {
-		misc.ApplyPreContractHardFork(statedb)
-	}
-
-	misc.EnsureCreate2Deployer(p.config, block.Time(), statedb)
-
-	allTxs := block.Transactions()
-	p.resetState(len(allTxs), statedb)
-
-	var (
-		// in parallel mode, the per-tx vmenv is created inside each slot
-		blockContext = NewEVMBlockContext(block.Header(), p.bc, nil, p.config, statedb)
-		vmenv        = vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg)
-		signer       = types.MakeSigner(p.bc.chainConfig, block.Number(), block.Time())
-	)
-
-	if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
-		ProcessBeaconBlockRoot(*beaconRoot, vmenv, statedb)
-	}
-	statedb.MarkFullProcessed()
-	txDAG := cfg.TxDAG
-
-	txNum := len(allTxs)
-	latestExcludedTx := -1
-	// Iterate over and process the individual transactions
-	p.commonTxs = make([]*types.Transaction, 0, txNum)
-	p.receipts = make([]*types.Receipt, 0, txNum)
-
-	parallelNum := p.parallelNum
-
-	if txNum > parallelNum*2 && txNum >= 4 {
-		var wg sync.WaitGroup
-		errChan := make(chan error)
-
-		begin := 0
-		// first, find the latestExcludedTx; on opBNB these are the first consecutive txs.
-		for idx := 0; idx < len(allTxs); idx++ {
-			if txDAG != nil && txDAG.TxDep(idx).CheckFlag(types.ExcludedTxFlag) {
-				if err := p.transferTxs(allTxs, idx, signer, block, statedb, cfg, usedGas, latestExcludedTx); err != nil {
-					return nil, nil, 0, err
-				}
-				latestExcludedTx = idx
-			} else {
-				begin = idx
-				break
-			}
-		}
-
-		// Create a cancelable context
-		ctx, cancel := context.WithCancel(context.Background())
-
-		// Create a pool of workers
-		transactionsPerWorker := (len(allTxs) - begin) / parallelNum
-
-		for i := 0; i < parallelNum; i++ {
-			wg.Add(1)
-			go func(start, end int, signer types.Signer, blk *types.Block, sdb *state.StateDB, cfg vm.Config, usedGas *uint64) {
-				defer wg.Done()
-				for j := start; j < end; j++ {
-					select {
-					case <-ctx.Done():
-						return // Exit the goroutine if the context is canceled
-					default:
-						if err := p.transferTxs(allTxs, j, signer, block, statedb, cfg, usedGas, latestExcludedTx); err != nil {
-							errChan <- err
-							cancel() // Cancel the context to stop the other goroutines
-							return
-						}
-					}
-				}
-			}(begin+i*transactionsPerWorker, begin+(i+1)*transactionsPerWorker, signer, block, statedb, cfg, usedGas)
-		}
-
-		// Distribute any remaining transactions
-		for i := begin + parallelNum*transactionsPerWorker; i < len(allTxs); i++ {
-			if err := p.transferTxs(allTxs, i, signer, block, statedb, cfg, usedGas, latestExcludedTx); err != nil {
-				errChan <- err
-				cancel() // Cancel the context to stop the other goroutines
-			}
-		}
-
-		// Wait for all workers to finish, then handle errors
-		go func() {
-			wg.Wait()
-			close(errChan)
-		}()
-
-		for err := range errChan {
-			return nil, nil, 0, err
-		}
-	} else {
-		for i, tx := range allTxs {
-			msg, err := TransactionToMessage(tx, signer, header.BaseFee)
-			if err != nil {
-				return nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err)
-			}
-
-			// find the latestDepTx from the TxDAG or latestExcludedTx
-			latestDepTx := -1
-			if dep := types.TxDependency(txDAG, i); len(dep) > 0 {
-				latestDepTx = int(dep[len(dep)-1])
-			}
-			if latestDepTx < latestExcludedTx {
-				latestDepTx = latestExcludedTx
-			}
-
-			// parallel start: wrap an exec message, which will be dispatched to a slot
-			txReq := &ParallelTxRequest{
-				txIndex:         i,
-				baseStateDB:     statedb,
-				staticSlotIndex: -1,
-				tx:              tx,
-				gasLimit:        block.GasLimit(), // gp.Gas().
-				msg:             msg,
-				block:           block,
-				vmConfig:        cfg,
-				usedGas:         usedGas,
-				curTxChan:       make(chan int, 1),
-				runnable:        1, // 0: not runnable, 1: runnable
-				useDAG:          txDAG != nil,
-			}
-			txReq.executedNum.Store(0)
-			txReq.conflictIndex.Store(-2)
-			if latestDepTx >= 0 {
-				txReq.conflictIndex.Store(int32(latestDepTx))
-			}
-			p.allTxReqs[i] = txReq
-			if txDAG != nil && txDAG.TxDep(i).CheckFlag(types.ExcludedTxFlag) {
-				latestExcludedTx = i
-			}
-		}
-	}
-	allTxCount := len(p.allTxReqs)
-	// set up the stage2 entry criteria
-	p.targetStage2Count = allTxCount
-	if p.targetStage2Count > 50 {
-		// usually the last Tx could be the bottleneck, and it could be very slow,
-		// so it is better to enter stage 2 a bit earlier
-		p.targetStage2Count = p.targetStage2Count - stage2AheadNum
-	}
-
-	p.delayGasFee = false
-	p.doStaticDispatch(p.allTxReqs)
-	if txDAG != nil && txDAG.DelayGasFeeDistribution() {
-		p.delayGasFee = true
-	}
-
-	// after the static dispatch, notify the slots to work.
-	for _, slot := range p.slotState {
-		slot.primaryWakeUpChan <- struct{}{}
-	}
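The message-conversion fan-out above is a generic bounded worker pool with fail-fast cancellation. The same shape, self-contained and with illustrative names (note the buffered error channel; the deleted version used an unbuffered one, which can leave workers blocked on send after the reader returns):

package main

import (
	"context"
	"fmt"
	"sync"
)

// convert fans a slice out over nWorkers, cancelling everyone as soon
// as one item fails.
func convert(items []int, nWorkers int, f func(int) error) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	errChan := make(chan error, len(items))
	var wg sync.WaitGroup
	per := len(items) / nWorkers
	for w := 0; w < nWorkers; w++ {
		wg.Add(1)
		go func(start, end int) {
			defer wg.Done()
			for i := start; i < end && ctx.Err() == nil; i++ {
				if err := f(items[i]); err != nil {
					errChan <- err
					cancel()
					return
				}
			}
		}(w*per, (w+1)*per)
	}
	for i := nWorkers * per; i < len(items); i++ { // the remainder
		if err := f(items[i]); err != nil {
			return err
		}
	}
	wg.Wait()
	close(errChan)
	return <-errChan // nil if the channel is empty and closed
}

func main() {
	err := convert([]int{1, 2, 3, 4, 5}, 2, func(int) error { return nil })
	fmt.Println(err)
}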
-
-	// kick off the result handler.
-	p.resultProcessChan <- &ResultHandleEnv{statedb: statedb, gp: gp, txCount: allTxCount}
-	for {
-		if int(p.mergedTxIndex.Load())+1 == allTxCount {
-			// check this ahead of the chan receive to avoid waiting on an empty block
-			break
-		}
-		unconfirmedResult := <-p.txResultChan
-		if unconfirmedResult.txReq == nil {
-			// all tx results are merged.
-			break
-		}
-
-		unconfirmedTxIndex := unconfirmedResult.txReq.txIndex
-		if unconfirmedTxIndex <= int(p.mergedTxIndex.Load()) {
-			log.Debug("drop merged txReq", "unconfirmedTxIndex", unconfirmedTxIndex, "p.mergedTxIndex", p.mergedTxIndex.Load())
-			continue
-		}
-		prevResult, ok := p.pendingConfirmResults.Load(unconfirmedTxIndex)
-		if !ok || prevResult.(*ParallelTxResult).slotDB.BaseTxIndex() < unconfirmedResult.slotDB.BaseTxIndex() {
-			p.pendingConfirmResults.Store(unconfirmedTxIndex, unconfirmedResult)
-			p.resultAppendChan <- struct{}{}
-		}
-	}
-
-	// clean up when the block is processed
-	p.doCleanUp()
-
-	if p.error != nil {
-		return nil, nil, 0, p.error
-	}
-
-	// len(commonTxs) could be 0, such as: https://bscscan.com/block/14580486
-	// all txs have been merged at this point; no need to acquire the lock on commonTxs
-	if p.mergedTxIndex.Load() >= 0 && p.debugConflictRedoNum > 0 {
-		log.Info("ProcessParallel tx all done", "block", header.Number, "usedGas", *usedGas,
-			"txNum", txNum,
-			"len(commonTxs)", len(p.commonTxs),
-			"conflictNum", p.debugConflictRedoNum,
-			"redoRate(%)", 100*(p.debugConflictRedoNum)/len(p.commonTxs),
-			"txDAG", txDAG != nil)
-	}
-	if metrics.EnabledExpensive {
-		parallelTxNumMeter.Mark(int64(len(p.commonTxs)))
-		parallelConflictTxNumMeter.Mark(int64(p.debugConflictRedoNum))
-	}
-
-	// Fail if Shanghai is not enabled and len(withdrawals) is non-zero.
-	withdrawals := block.Withdrawals()
-	if len(withdrawals) > 0 && !p.config.IsShanghai(block.Number(), block.Time()) {
-		return nil, nil, 0, errors.New("withdrawals before shanghai")
-	}
-	// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
-	p.engine.Finalize(p.bc, header, statedb, p.commonTxs, block.Uncles(), withdrawals)
-
-	var allLogs []*types.Log
-	for _, receipt := range p.receipts {
-		allLogs = append(allLogs, receipt.Logs...)
-	}
-	return p.receipts, allLogs, *usedGas, nil
-}
-
-func (p *ParallelStateProcessor) handlePendingResultLoop() {
-	var info *ResultHandleEnv
-	var stateDB *state.StateDB
-	var gp *GasPool
-	var txCount int
-	for {
-		select {
-		case info = <-p.resultProcessChan:
-			stateDB = info.statedb
-			gp = info.gp
-			txCount = info.txCount
-			log.Debug("handlePendingResult get Env", "stateDBTx", stateDB.TxIndex(), "gp", gp.String(), "txCount", txCount)
-		case <-p.resultAppendChan:
-		}
-
-		// if all are merged, notify the main routine and continue to wait for the next block.
-		if p.error != nil || p.mergedTxIndex.Load()+1 == int32(txCount) {
-			p.txResultChan <- &ParallelTxResult{txReq: nil, result: nil}
-			// clear the pending chan.
-			for len(p.resultAppendChan) > 0 {
-				<-p.resultAppendChan
-			}
-			continue
-		}
-		// busy waiting.
-		for {
-			nextTxIndex := int(p.mergedTxIndex.Load()) + 1
-			if p.error != nil || nextTxIndex == txCount {
-				p.txResultChan <- &ParallelTxResult{txReq: nil, result: nil}
-				// clear the pending chan.
-				for len(p.resultAppendChan) > 0 {
-					<-p.resultAppendChan
-				}
-				break
-			}
-			if _, ok := p.pendingConfirmResults.Load(nextTxIndex); !ok {
-				break
-			}
-			log.Debug("Start to check result", "TxIndex", int(nextTxIndex), "stateDBTx", stateDB.TxIndex(), "gp", gp.String())
-
-			result := p.confirmTxResults(stateDB, gp)
-			if result == nil {
-				break
-			} else {
-				log.Debug("in Confirm Loop - after confirmTxResults",
-					"mergedIndex", p.mergedTxIndex.Load(),
-					"confirmedIndex", result.txReq.txIndex,
-					"result.err", result.err)
-			}
-			p.resultMutex.Lock()
-			// update the tx result
-			if result.err != nil {
-				log.Error("ProcessParallel a failed tx", "resultSlotIndex", result.slotIndex,
-					"resultTxIndex", result.txReq.txIndex, "result.err", result.err)
-				p.error = fmt.Errorf("could not apply tx %d [%v]: %w", result.txReq.txIndex, result.txReq.tx.Hash().Hex(), result.err)
-				p.resultMutex.Unlock()
-				continue
-			}
-			p.commonTxs = append(p.commonTxs, result.txReq.tx)
-			p.receipts = append(p.receipts, result.receipt)
-			p.resultMutex.Unlock()
-		}
-	}
-}
-
-func (p *ParallelStateProcessor) transferTxs(txs types.Transactions, i int, signer types.Signer, block *types.Block, statedb *state.StateDB, cfg vm.Config, usedGas *uint64, latestExcludedTx int) error {
-	if p.allTxReqs[i] != nil {
-		return nil
-	}
-	tx := txs[i]
-	txDAG := cfg.TxDAG
-	msg, err := TransactionToMessage(tx, signer, block.Header().BaseFee)
-	if err != nil {
-		return fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err)
-	}
-
-	// find the latestDepTx from the TxDAG or latestExcludedTx
-	latestDepTx := -1
-	if dep := types.TxDependency(txDAG, i); len(dep) > 0 {
-		latestDepTx = int(dep[len(dep)-1])
-	}
-	if latestDepTx < latestExcludedTx {
-		latestDepTx = latestExcludedTx
-	}
-
-	// parallel start: wrap an exec message, which will be dispatched to a slot
-	txReq := &ParallelTxRequest{
-		txIndex:         i,
-		baseStateDB:     statedb,
-		staticSlotIndex: -1,
-		tx:              tx,
-		gasLimit:        block.GasLimit(), // gp.Gas().
-		msg:             msg,
-		block:           block,
-		vmConfig:        cfg,
-		usedGas:         usedGas,
-		curTxChan:       make(chan int, 1),
-		runnable:        1, // 0: not runnable, 1: runnable
-		useDAG:          txDAG != nil,
-	}
-	txReq.executedNum.Store(0)
-	txReq.conflictIndex.Store(-2)
-	if latestDepTx >= 0 {
-		txReq.conflictIndex.Store(int32(latestDepTx))
-	}
-	p.allTxReqs[i] = txReq
-	return nil
-}
-
-func applyTransactionStageExecution(msg *Message, gp *GasPool, statedb *state.ParallelStateDB, evm *vm.EVM, delayGasFee bool) (*vm.EVM, *ExecutionResult, error) {
-	// Create a new context to be used in the EVM environment.
-	txContext := NewEVMTxContext(msg)
-	evm.Reset(txContext, statedb)
-
-	// Apply the transaction to the current state (included in the env).
-	var (
-		result *ExecutionResult
-		err    error
-	)
-	if delayGasFee {
-		result, err = ApplyMessageDelayGasFee(evm, msg, gp)
-	} else {
-		result, err = ApplyMessage(evm, msg, gp)
-	}
-
-	if err != nil {
-		return nil, nil, err
-	}
-
-	return evm, result, err
-}
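The two helpers above and below split transaction application into a parallel-safe execution stage and a serial finalization stage, because finalization touches shared per-block state such as the cumulative gas counter. The split, reduced to a generic sketch:

package main

import "fmt"

// result carries what the parallel stage produced.
type result struct{ usedGas uint64 }

type receipt struct{ cumulativeGas uint64 }

// executeStage is safe to run concurrently: it only reads its own inputs.
func executeStage(gas uint64) result { return result{usedGas: gas} }

// finalizeStage mutates the shared counter, so results must arrive in
// transaction order, as in applyTransactionStageFinalization.
func finalizeStage(cumulative *uint64, r result) receipt {
	*cumulative += r.usedGas
	return receipt{cumulativeGas: *cumulative}
}

func main() {
	var cumulative uint64
	for _, g := range []uint64{21000, 42000} {
		r := executeStage(g) // in reality: inside a slot goroutine
		fmt.Println(finalizeStage(&cumulative, r))
	}
}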
-
-func applyTransactionStageFinalization(evm *vm.EVM, result *ExecutionResult, msg Message, config *params.ChainConfig, statedb *state.ParallelStateDB, block *types.Block, tx *types.Transaction, usedGas *uint64, nonce *uint64) (*types.Receipt, error) {
-	*usedGas += result.UsedGas
-	// Create a new receipt for the transaction, storing the intermediate root and gas used by the tx.
-	receipt := &types.Receipt{Type: tx.Type(), PostState: nil, CumulativeGasUsed: *usedGas}
-	if result.Failed() {
-		receipt.Status = types.ReceiptStatusFailed
-	} else {
-		receipt.Status = types.ReceiptStatusSuccessful
-	}
-	receipt.TxHash = tx.Hash()
-	receipt.GasUsed = result.UsedGas
-
-	if msg.IsDepositTx && config.IsOptimismRegolith(evm.Context.Time) {
-		// The actual nonce for deposit transactions is only recorded from Regolith onwards and
-		// otherwise must be nil.
-		receipt.DepositNonce = nonce
-		// The DepositReceiptVersion for deposit transactions is only recorded from Canyon onwards
-		// and otherwise must be nil.
-		if config.IsOptimismCanyon(evm.Context.Time) {
-			receipt.DepositReceiptVersion = new(uint64)
-			*receipt.DepositReceiptVersion = types.CanyonDepositReceiptVersion
-		}
-	}
-	if tx.Type() == types.BlobTxType {
-		receipt.BlobGasUsed = uint64(len(tx.BlobHashes()) * params.BlobTxBlobGasPerBlob)
-		receipt.BlobGasPrice = evm.Context.BlobBaseFee
-	}
-	// If the transaction created a contract, store the creation address in the receipt.
-	if msg.To == nil {
-		receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, *nonce)
-	}
-	// Set the receipt logs and create the bloom filter.
-	receipt.Logs = statedb.GetLogs(tx.Hash(), block.NumberU64(), block.Hash())
-	receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
-	receipt.BlockHash = block.Hash()
-	receipt.BlockNumber = block.Number()
-	receipt.TransactionIndex = uint(statedb.TxIndex())
-	return receipt, nil
-}
diff --git a/core/state/parallel_statedb.go b/core/state/parallel_statedb.go
deleted file mode 100644
index 6321dfb9d1..0000000000
--- a/core/state/parallel_statedb.go
+++ /dev/null
@@ -1,1901 +0,0 @@
-package state
-
-import (
-	"bytes"
-	"fmt"
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/log"
-	"github.com/holiman/uint256"
-	"runtime"
-	"sort"
-	"sync"
-)
-
-const defaultNumOfSlots = 5
-
-var parallelKvOnce sync.Once
-
-type ParallelKvCheckUnit struct {
-	addr common.Address
-	key  common.Hash
-	val  common.Hash
-}
-
-type ParallelKvCheckMessage struct {
-	slotDB   *ParallelStateDB
-	isStage2 bool
-	kvUnit   ParallelKvCheckUnit
-}
-
-var parallelKvCheckReqCh chan ParallelKvCheckMessage
-var parallelKvCheckResCh chan bool
-
-type ParallelStateDB struct {
-	StateDB
-}
-
-func (s *ParallelStateDB) GetRefund() uint64 {
-	return s.refund
-}
-
-func (s *ParallelStateDB) AddressInAccessList(addr common.Address) bool {
-	return s.accessList.ContainsAddress(addr)
-}
-
-func (s *ParallelStateDB) SlotInAccessList(addr common.Address, slot common.Hash) (addressOk bool, slotOk bool) {
-	return s.accessList.Contains(addr, slot)
-}
-
-func (s *ParallelStateDB) AddAddressToAccessList(addr common.Address) {
-	if s.accessList.AddAddress(addr) {
-		s.journal.append(accessListAddAccountChange{&addr})
-	}
-}
-
-func (s *ParallelStateDB) AddSlotToAccessList(addr common.Address, slot common.Hash) {
-	addrMod, slotMod := s.accessList.AddSlot(addr, slot)
-	if addrMod {
-		// In practice, this should not happen, since there is no way to enter the
-		// scope of 'address' without having the 'address' become already added
-		// to the access list (via call-variant, create, etc).
-		// Better safe than sorry, though
-		s.journal.append(accessListAddAccountChange{&addr})
-	}
-	if slotMod {
-		s.journal.append(accessListAddSlotChange{
-			address: &addr,
-			slot:    &slot,
-		})
-	}
-}
-
-func (s *ParallelStateDB) Snapshot() int {
-	id := s.nextRevisionId
-	s.nextRevisionId++
-	s.validRevisions = append(s.validRevisions, revision{id, s.journal.length()})
-	return id
-}
-
-func hasKvConflict(slotDB *ParallelStateDB, addr common.Address, key common.Hash, val common.Hash, isStage2 bool) bool {
-	mainDB := slotDB.parallel.baseStateDB
-
-	if isStage2 { // check the slotDB's unconfirmed DB list first
-		if slotDB.parallel.useDAG {
-			// DAG never reads from the unconfirmedDB, skip the check.
-			return false
-		}
-		if valUnconfirm, ok := slotDB.getKVFromUnconfirmedDB(addr, key); ok {
-			if !bytes.Equal(val.Bytes(), valUnconfirm.Bytes()) {
-				log.Debug("IsSlotDBReadsValid KV read is invalid in unconfirmed", "addr", addr,
-					"valSlot", val, "valUnconfirm", valUnconfirm,
-					"SlotIndex", slotDB.parallel.SlotIndex,
-					"txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex)
-				return true
-			}
-		}
-	}
-	valMain := slotDB.getStateFromMainNoUpdate(addr, key) // mainDB.GetStateNoUpdate(addr, key)
-
-	if !bytes.Equal(val.Bytes(), valMain.Bytes()) {
-		log.Debug("hasKvConflict is invalid", "addr", addr,
-			"key", key, "valSlot", val,
-			"valMain", valMain, "SlotIndex", slotDB.parallel.SlotIndex,
-			"txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex,
-			"mainDB.TxIndex", mainDB.TxIndex())
-		return true // a conflict; the caller's Range will be terminated.
-	}
-	return false
-}
-
-// StartKvCheckLoop starts several routines to do the conflict check
-func StartKvCheckLoop() {
-	parallelKvCheckReqCh = make(chan ParallelKvCheckMessage, 200)
-	parallelKvCheckResCh = make(chan bool, 10)
-	for i := 0; i < runtime.NumCPU(); i++ {
-		go func() {
-			for {
-				kvEle1 := <-parallelKvCheckReqCh
-				parallelKvCheckResCh <- hasKvConflict(kvEle1.slotDB, kvEle1.kvUnit.addr,
-					kvEle1.kvUnit.key, kvEle1.kvUnit.val, kvEle1.isStage2)
-			}
-		}()
-	}
-}
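StartKvCheckLoop is a plain fan-out: one checker goroutine per CPU draining a shared request channel. The same shape with toy state in place of the StateDB plumbing:

package main

import (
	"fmt"
	"runtime"
)

type kvCheck struct{ key, val string }

// startCheckers launches one checker per CPU reading from a shared
// request channel, as the deleted StartKvCheckLoop did.
func startCheckers(merged map[string]string, req chan kvCheck, res chan bool) {
	for i := 0; i < runtime.NumCPU(); i++ {
		go func() {
			for c := range req {
				res <- merged[c.key] == c.val
			}
		}()
	}
}

func main() {
	req := make(chan kvCheck, 200)
	res := make(chan bool, 10)
	startCheckers(map[string]string{"k": "v"}, req, res)
	req <- kvCheck{"k", "v"}
	fmt.Println(<-res) // true: the read is still valid
}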
-
-// NewSlotDB creates a new State DB based on the provided StateDB.
-// With parallel execution, each slot has its own StateDB.
-// This method must be called after the baseDB has called PrepareParallel().
-func NewSlotDB(db *StateDB, txIndex int, baseTxIndex int, manager *ParallelDBManager, unconfirmedDBs *sync.Map, useDAG bool) *ParallelStateDB {
-	slotDB := db.CopyForSlot(manager)
-	slotDB.txIndex = txIndex
-	slotDB.originalRoot = db.originalRoot
-	slotDB.parallel.baseStateDB = db
-	slotDB.parallel.baseTxIndex = baseTxIndex
-	slotDB.parallel.unconfirmedDBs = unconfirmedDBs
-	slotDB.parallel.useDAG = useDAG
-	return slotDB
-}
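The PutSyncPool method that follows exists because every block spawns thousands of slotDBs whose maps would otherwise become garbage; it clears each map in place and hands it back to a pool. The pattern in miniature, using sync.Pool directly (the real code goes through a ParallelDBManager):

package main

import (
	"fmt"
	"sync"
)

// mapPool recycles per-slot read/write-set maps instead of letting
// every block's slotDBs become garbage.
var mapPool = sync.Pool{
	New: func() any { return make(map[string]int, 5) },
}

func get() map[string]int { return mapPool.Get().(map[string]int) }

// put clears the map in place (keeping its capacity) before returning
// it to the pool, mirroring the delete-all loops below.
func put(m map[string]int) {
	for k := range m {
		delete(m, k)
	}
	mapPool.Put(m)
}

func main() {
	m := get()
	m["reads"] = 3
	put(m)
	fmt.Println(len(get())) // 0: a recycled map comes back empty
}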
-
-func (s *ParallelStateDB) PutSyncPool(parallelDBManager *ParallelDBManager) {
-	for key := range s.parallel.locatStateObjects {
-		delete(s.parallel.locatStateObjects, key)
-	}
-	addressToStateObjectsPool.Put(s.parallel.locatStateObjects)
-
-	for key := range s.parallel.codeReadsInSlot {
-		delete(s.parallel.codeReadsInSlot, key)
-	}
-	addressToBytesPool.Put(s.parallel.codeReadsInSlot)
-
-	for key := range s.parallel.codeHashReadsInSlot {
-		delete(s.parallel.codeHashReadsInSlot, key)
-	}
-	addressToHashPool.Put(s.parallel.codeHashReadsInSlot)
-
-	for key := range s.parallel.codeChangesInSlot {
-		delete(s.parallel.codeChangesInSlot, key)
-	}
-	addressToStructPool.Put(s.parallel.codeChangesInSlot)
-
-	for key := range s.parallel.kvChangesInSlot {
-		delete(s.parallel.kvChangesInSlot, key)
-	}
-	addressToStateKeysPool.Put(s.parallel.kvChangesInSlot)
-
-	for key := range s.parallel.kvReadsInSlot {
-		delete(s.parallel.kvReadsInSlot, key)
-	}
-	addressToStoragePool.Put(s.parallel.kvReadsInSlot)
-
-	for key := range s.parallel.balanceChangesInSlot {
-		delete(s.parallel.balanceChangesInSlot, key)
-	}
-	addressToStructPool.Put(s.parallel.balanceChangesInSlot)
-
-	for key := range s.parallel.balanceReadsInSlot {
-		delete(s.parallel.balanceReadsInSlot, key)
-	}
-	balancePool.Put(s.parallel.balanceReadsInSlot)
-
-	for key := range s.parallel.addrStateReadsInSlot {
-		delete(s.parallel.addrStateReadsInSlot, key)
-	}
-	addressToBoolPool.Put(s.parallel.addrStateReadsInSlot)
-
-	for key := range s.parallel.addrStateChangesInSlot {
-		delete(s.parallel.addrStateChangesInSlot, key)
-	}
-	addressToBoolPool.Put(s.parallel.addrStateChangesInSlot)
-
-	for key := range s.parallel.nonceChangesInSlot {
-		delete(s.parallel.nonceChangesInSlot, key)
-	}
-	addressToStructPool.Put(s.parallel.nonceChangesInSlot)
-
-	for key := range s.parallel.nonceReadsInSlot {
-		delete(s.parallel.nonceReadsInSlot, key)
-	}
-	addressToUintPool.Put(s.parallel.nonceReadsInSlot)
-
-	for key := range s.parallel.addrSnapDestructsReadsInSlot {
-		delete(s.parallel.addrSnapDestructsReadsInSlot, key)
-	}
-	addressToBoolPool.Put(s.parallel.addrSnapDestructsReadsInSlot)
-
-	for key := range s.parallel.dirtiedStateObjectsInSlot {
-		delete(s.parallel.dirtiedStateObjectsInSlot, key)
-	}
-	addressToStateObjectsPool.Put(s.parallel.dirtiedStateObjectsInSlot)
-
-	for key := range s.stateObjectsPending {
-		delete(s.stateObjectsPending, key)
-	}
-	addressToStructPool.Put(s.stateObjectsPending)
-
-	for key := range s.stateObjectsDirty {
-		delete(s.stateObjectsDirty, key)
-	}
-	addressToStructPool.Put(s.stateObjectsDirty)
-
-	for key := range s.logs {
-		delete(s.logs, key)
-	}
-	logsPool.Put(s.logs)
-
-	for key := range s.journal.dirties {
-		delete(s.journal.dirties, key)
-	}
-	s.journal.entries = s.journal.entries[:0]
-	journalPool.Put(s.journal)
-
-	for key := range s.snapDestructs {
-		delete(s.snapDestructs, key)
-	}
-	addressToStructPool.Put(s.snapDestructs)
-
-	for key := range s.parallel.createdObjectRecord {
-		delete(s.parallel.createdObjectRecord, key)
-	}
-	addressToStructPool.Put(s.parallel.createdObjectRecord)
-
-	s.reset()
-	parallelDBManager.reclaim(s)
-}
-
-// getStateDBBasePtr gets the pointer to the underlying StateDB.
-func (s *ParallelStateDB) getStateDBBasePtr() *StateDB {
-	return &s.StateDB
-}
-
-func (s *ParallelStateDB) SetSlotIndex(index int) {
-	s.parallel.SlotIndex = index
-}
-
-// getStateObject gets the state object from the parallel stateDB for journal revert.
-// for parallel execution, try to get the dirty StateObject in the slot first.
-func (s *ParallelStateDB) getStateObject(addr common.Address) *stateObject {
-	var object *stateObject
-	if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
-		if obj.deleted {
-			return nil
-		}
-		object = obj
-	} else {
-		object = s.getStateObjectNoSlot(addr)
-	}
-	return object
-}
-
-func (s *ParallelStateDB) storeStateObj(addr common.Address, stateObject *stateObject) {
-	// The object could be created in the SlotDB if it got the object from the DB and
-	// updated it in `s.parallel.locatStateObjects`
-	s.parallel.locatStateObjects[addr] = stateObject
-}
-
-func (s *ParallelStateDB) getStateObjectNoSlot(addr common.Address) *stateObject {
-	if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted {
-		return obj
-	}
-	return nil
-}
- s.setStateObjectsDestruct(prev.address, prev.origin) - if readFromDB { - // check nonSlot - s.snapParallelLock.RLock() - _, prevdestruct = s.snapDestructs[prev.address] - s.parallel.addrSnapDestructsReadsInSlot[addr] = prevdestruct - s.snapParallelLock.RUnlock() - } - if !prevdestruct { - s.snapParallelLock.Lock() - s.snapDestructs[prev.address] = struct{}{} - s.snapParallelLock.Unlock() - } - } - } - - newobj = newObject(s, s.isParallel, addr, nil) - newobj.setNonce(0) // sets the object to dirty - if prev == nil { - s.journal.append(createObjectChange{account: &addr}) - } else { - s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct}) - } - - s.parallel.addrStateChangesInSlot[addr] = true // the object is created - s.parallel.nonceChangesInSlot[addr] = struct{}{} - s.parallel.balanceChangesInSlot[addr] = struct{}{} - s.parallel.codeChangesInSlot[addr] = struct{}{} - // notice: all the KVs are cleared if any - s.parallel.kvChangesInSlot[addr] = make(StateKeys) - newobj.created = true - s.parallel.dirtiedStateObjectsInSlot[addr] = newobj - return newobj -} - -// getDeletedStateObject is similar to getStateObject, but instead of returning -// nil for a deleted state object, it returns the actual object with the deleted -// flag set. This is needed by the state journal to revert to the correct s- -// destructed object instead of wiping all knowledge about the state object. -func (s *ParallelStateDB) getDeletedStateObject(addr common.Address) *stateObject { - - // Prefer live objects if any is available - if obj, _ := s.getStateObjectFromStateObjects(addr); obj != nil { - return obj - } - - data, ok := s.getStateObjectFromSnapshotOrTrie(addr) - if !ok { - return nil - } - - // this is why we have to use a separate getDeletedStateObject for ParallelStateDB - // `s` has to be the ParallelStateDB - obj := newObject(s, s.isParallel, addr, data) - s.storeStateObj(addr, obj) - return obj -} - -// GetOrNewStateObject retrieves a state object or create a new state object if nil. -// dirtyInSlot -> Unconfirmed DB (if not DAG) -> main DB -> snapshot, no? create one -func (s *ParallelStateDB) GetOrNewStateObject(addr common.Address) *stateObject { - var object *stateObject - var ok bool - if object, ok = s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - return object - } - - // try unconfirmedDB - object, _ = s.getStateObjectFromUnconfirmedDB(addr) - if object != nil { - // object found in unconfirmedDB, check existence - if object.deleted || object.selfDestructed { - object = s.createObject(addr) - s.parallel.addrStateReadsInSlot[addr] = false - return object - } - } else { - object = s.getStateObjectNoSlot(addr) // try to get from base db - } - // not found, or found in NoSlot or found in unconfirmed. - exist := true - if object == nil || object.deleted { - object = s.createObject(addr) - exist = false - } - s.parallel.addrStateReadsInSlot[addr] = exist // true: exist, false: not exist - return object -} - -// Exist reports whether the given account address exists in the state. -// Notably this also returns true for suicided accounts. 
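GetOrNewStateObject above, like Exist and the other accessors that follow, resolves state through the same layered order: slot-local dirty objects first, then unconfirmed sibling transactions, then the main StateDB, recording whatever was observed so the merge phase can re-validate it. A minimal sketch of that layered read with memoization (toy types and names, not from this codebase):

package main

import "fmt"

// layeredReader resolves a value through three layers and memoizes the
// answer in a read record, which doubles as the conflict-check input.
type layeredReader struct {
	dirty       map[string]string // writes made by this transaction
	unconfirmed map[string]string // visible writes of earlier, unmerged txs
	committed   map[string]string // the main, committed state
	reads       map[string]string // everything this transaction observed
}

func (l *layeredReader) get(key string) string {
	if v, ok := l.dirty[key]; ok { // 1. own writes win
		return v
	}
	if v, ok := l.reads[key]; ok { // 2. already observed: stay consistent
		return v
	}
	v, ok := l.unconfirmed[key] // 3. nearest unconfirmed writer
	if !ok {
		v = l.committed[key] // 4. fall back to committed state
	}
	l.reads[key] = v // record the read for merge-time validation
	return v
}

func main() {
	l := &layeredReader{
		dirty:       map[string]string{},
		unconfirmed: map[string]string{"code@0xabc": "0x60ff"},
		committed:   map[string]string{"code@0xabc": "0x00"},
		reads:       map[string]string{},
	}
	fmt.Println(l.get("code@0xabc"))   // 0x60ff: the unconfirmed layer wins
	fmt.Println(l.reads["code@0xabc"]) // the observed value is now pinned
}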
-func (s *ParallelStateDB) Exist(addr common.Address) bool { - // 1.Try to get from dirty - if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - if obj.deleted { - log.Error("Exist in dirty, but marked as deleted or suicided", - "txIndex", s.txIndex, "baseTxIndex:", s.parallel.baseTxIndex) - return false - } - return true - } - // 2.Try to get from unconfirmed & main DB - // 2.1 Already read before - if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok { - return exist - } - - // 2.2 Try to get from unconfirmed DB if exist - if exist, ok := s.getAddrStateFromUnconfirmedDB(addr, false); ok { - s.parallel.addrStateReadsInSlot[addr] = exist // update and cache - return exist - } - - // 3.Try to get from main StateDB - exist := s.getStateObjectNoSlot(addr) != nil - s.parallel.addrStateReadsInSlot[addr] = exist // update and cache - return exist -} - -// Empty returns whether the state object is either non-existent -// or empty according to the EIP161 specification (balance = nonce = code = 0) -func (s *ParallelStateDB) Empty(addr common.Address) bool { - // 1.Try to get from dirty - if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - // dirty object is light copied and fixup on need, - // empty could be wrong, except it is created with this TX - if _, ok := s.parallel.addrStateChangesInSlot[addr]; ok { - return obj.empty() - } - // so we have to check it manually - // empty means: Nonce == 0 && Balance == 0 && CodeHash == emptyCodeHash - if s.GetBalance(addr).Sign() != 0 { // check balance first, since it is most likely not zero - return false - } - if s.GetNonce(addr) != 0 { - return false - } - codeHash := s.GetCodeHash(addr) - return bytes.Equal(codeHash.Bytes(), types.EmptyCodeHash.Bytes()) // code is empty, the object is empty - } - // 2.Try to get from unconfirmed & main DB - // 2.1 Already read before - if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok { - // exist means not empty - return !exist - } - // 2.2 Try to get from unconfirmed DB if exist - if exist, ok := s.getAddrStateFromUnconfirmedDB(addr, true); ok { - s.parallel.addrStateReadsInSlot[addr] = exist // update read cache - return !exist - } - // 2.3 Try to get from NoSlot. - so := s.getStateObjectNoSlot(addr) - exist := so != nil - empty := (!exist) || so.empty() - - s.parallel.addrStateReadsInSlot[addr] = exist // update read cache - return empty -} - -// GetBalance retrieves the balance from the given address or 0 if object not found -// GetFrom the dirty list => from unconfirmed DB => get from main stateDB -func (s *ParallelStateDB) GetBalance(addr common.Address) *uint256.Int { - var dirtyObj *stateObject - // 0. Test whether it is deleted in dirty. - if o, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - if o.deleted { - return common.U2560 - } - dirtyObj = o - } - - // 1.Try to get from dirty - if _, ok := s.parallel.balanceChangesInSlot[addr]; ok { - // on balance fixup, addr may not exist in dirtiedStateObjectsInSlot - // we intend to fixup balance based on unconfirmed DB or main DB - return dirtyObj.Balance() - } - // 2.Try to get from unconfirmed DB or main DB - // 2.1 Already read before - if balance, ok := s.parallel.balanceReadsInSlot[addr]; ok { - return balance - } - - balance := common.U2560 - // 2.2 Try to get from unconfirmed DB if exist - if blc := s.getBalanceFromUnconfirmedDB(addr); blc != nil { - balance = blc - } else { - // 3. 
Try to get from main StateObject - blc = common.U2560 - object := s.getStateObjectNoSlot(addr) - if object != nil { - blc = object.Balance() - } - balance = blc - } - if _, ok := s.parallel.balanceReadsInSlot[addr]; !ok { - s.parallel.balanceReadsInSlot[addr] = balance - } - - // fixup dirties - if dirtyObj != nil && dirtyObj.Balance() != balance { - dirtyObj.setBalance(balance) - } - - return balance -} - -func (s *ParallelStateDB) GetNonce(addr common.Address) uint64 { - var dirtyObj *stateObject - // 0. Test whether it is deleted in dirty. - if o, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - if o.deleted { - return 0 - } - dirtyObj = o - } - - // 1.Try to get from dirty - if _, ok := s.parallel.nonceChangesInSlot[addr]; ok { - // on nonce fixup, addr may not exist in dirtiedStateObjectsInSlot - // we intend to fixup nonce based on unconfirmed DB or main DB - return dirtyObj.Nonce() - } - // 2.Try to get from unconfirmed DB or main DB - // 2.1 Already read before - if nonce, ok := s.parallel.nonceReadsInSlot[addr]; ok { - return nonce - } - - var nonce uint64 = 0 - // 2.2 Try to get from unconfirmed DB if exist - if nc, ok := s.getNonceFromUnconfirmedDB(addr); ok { - nonce = nc - } else { - // 3.Try to get from main StateDB - nc = 0 - object := s.getStateObjectNoSlot(addr) - if object != nil { - nc = object.Nonce() - } - nonce = nc - } - if _, ok := s.parallel.nonceReadsInSlot[addr]; !ok { - s.parallel.nonceReadsInSlot[addr] = nonce - } - // fixup dirties - if dirtyObj != nil && dirtyObj.Nonce() < nonce { - dirtyObj.setNonce(nonce) - } - return nonce -} - -func (s *ParallelStateDB) GetCode(addr common.Address) []byte { - var dirtyObj *stateObject - // 0. Test whether it is deleted in dirty. - if o, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - if o.deleted { - return nil - } - dirtyObj = o - } else { - dirtyObj = nil - } - - // 1.Try to get from dirty - if _, ok := s.parallel.codeChangesInSlot[addr]; ok { - // on code fixup, addr may not exist in dirtiedStateObjectsInSlot - // we intend to fixup code based on unconfirmed DB or main DB - return dirtyObj.Code() - } - // 2.Try to get from unconfirmed DB or main DB - // 2.1 Already read before - if code, ok := s.parallel.codeReadsInSlot[addr]; ok { - return code - } - var code []byte - // 2.2 Try to get from unconfirmed DB if exist - if cd, ok := s.getCodeFromUnconfirmedDB(addr); ok { - code = cd - } else { - // 3. Try to get from main StateObject - object := s.getStateObjectNoSlot(addr) - if object != nil { - code = object.Code() - } - } - - if _, ok := s.parallel.codeReadsInSlot[addr]; !ok { - s.parallel.codeReadsInSlot[addr] = code - } - // fixup dirties - if dirtyObj != nil { - if dirtyObj.code == nil { - dirtyObj.code = code - } - if !bytes.Equal(dirtyObj.code, code) { - dirtyObj.code = code - } - } - return code -} - -func (s *ParallelStateDB) GetCodeSize(addr common.Address) int { - var dirtyObj *stateObject - // 0. Test whether it is deleted in dirty. 
- if o, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - if o.deleted { - return 0 - } - dirtyObj = o - } - // 1.Try to get from dirty - if _, ok := s.parallel.codeChangesInSlot[addr]; ok { - // on code fixup, addr may not exist in dirtiedStateObjectsInSlot - // we intend to fixup code based on unconfirmed DB or main DB - return dirtyObj.CodeSize() - } - // 2.Try to get from unconfirmed DB or main DB - // 2.1 Already read before - if code, ok := s.parallel.codeReadsInSlot[addr]; ok { - return len(code) // len(nil) is 0 too - } - - cs := 0 - var code []byte - // 2.2 Try to get from unconfirmed DB if exist - if cd, ok := s.getCodeFromUnconfirmedDB(addr); ok { - cs = len(cd) - code = cd - } else { - // 3. Try to get from main StateObject - var cc []byte - object := s.getStateObjectNoSlot(addr) - if object != nil { - // This is where we update the code from possible db.ContractCode if the original object.code is nil. - cc = object.Code() - cs = object.CodeSize() - } - code = cc - } - if _, ok := s.parallel.codeReadsInSlot[addr]; !ok { - s.parallel.codeReadsInSlot[addr] = code - } - // fixup dirties - if dirtyObj != nil { - if !bytes.Equal(dirtyObj.code, code) { - dirtyObj.code = code - } - } - return cs -} - -// GetCodeHash return: -// - common.Hash{}: the address does not exist -// - emptyCodeHash: the address exist, but code is empty -// - others: the address exist, and code is not empty -func (s *ParallelStateDB) GetCodeHash(addr common.Address) common.Hash { - var dirtyObj *stateObject - // 0. Test whether it is deleted in dirty. - if o, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - if o.deleted { - return common.Hash{} - } - dirtyObj = o - } - - // 1.Try to get from dirty - if _, ok := s.parallel.codeChangesInSlot[addr]; ok { - // on code fixup, addr may not exist in dirtiedStateObjectsInSlot - // we intend to fixup balance based on unconfirmed DB or main DB - return common.BytesToHash(dirtyObj.CodeHash()) - } - // 2.Try to get from unconfirmed DB or main DB - // 2.1 Already read before - if codeHash, ok := s.parallel.codeHashReadsInSlot[addr]; ok { - return codeHash - } - codeHash := common.Hash{} - // 2.2 Try to get from unconfirmed DB if exist - if cHash, ok := s.getCodeHashFromUnconfirmedDB(addr); ok { - codeHash = cHash - } else { - // 3. Try to get from main StateObject - object := s.getStateObjectNoSlot(addr) - - if object != nil { - codeHash = common.BytesToHash(object.CodeHash()) - } - } - if _, ok := s.parallel.codeHashReadsInSlot[addr]; !ok { - s.parallel.codeHashReadsInSlot[addr] = codeHash - } - - // fill slots in dirty if existed. - // A case for this: - // TX0: createAccount at addr 0x123, set code and codehash - // TX1: AddBalance - now an obj in dirty with empty codehash, and codeChangesInSlot is false (not changed) - // GetCodeHash - get from unconfirmedDB or mainDB, set codeHashReadsInSlot to the new val. - // SELFDESTRUCT - set codeChangesInSlot, but the obj in dirty is with Empty codehash. - // obj marked selfdestructed but not deleted. so CodeHash is not empty. - // GetCodeHash - since the codeChangesInslot is marked, get the object from dirty, and get the - // wrong 'empty' hash. - if dirtyObj != nil { - // found one - if dirtyObj.CodeHash() == nil || bytes.Equal(dirtyObj.CodeHash(), types.EmptyCodeHash.Bytes()) { - dirtyObj.data.CodeHash = codeHash.Bytes() - } - } - return codeHash -} - -// GetState retrieves a value from the given account's storage trie. 
-// For parallel mode wih, get from the state in order: -// -// -> self dirty, both Slot & MainProcessor -// -> pending of self: Slot on merge -// -> pending of unconfirmed DB -// -> pending of main StateDB -// -> origin -func (s *ParallelStateDB) GetState(addr common.Address, hash common.Hash) common.Hash { - var dirtyObj *stateObject - // 0. Test whether it is deleted in dirty. - if o, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - if o == nil || o.deleted { - return common.Hash{} - } - dirtyObj = o - } - // 1.Try to get from dirty - if exist, ok := s.parallel.addrStateChangesInSlot[addr]; ok { - if !exist { - // it should be able to get state from selfDestruct address within a Tx: - // e.g. within a transaction: call addr:selfDestruct -> get state: should be ok - log.Info("ParallelStateDB GetState suicided", "addr", addr, "hash", hash) - } else { - // It is possible that an object get created but not dirtied since there is no state set, such as recreate. - // In this case, simply return common.Hash{}. - // This is for corner case: - // B0: TX0 --> createAccount @addr1 -- merged into DB - // B1: Tx1 and Tx2 - // Tx1 account@addr1 selfDestruct -- unconfirmed - // Tx2 recreate account@addr2 -- executing - // Since any state change and suicide could record in s.parallel.addrStateChangeInSlot, it is save to simple - // return common.Hash{} for this case as the previous TX must has the object destructed. - // P.S. if the Tx2 both destruct and recreate the object, it will not fall into this logic, as the change - // will be recorded in dirtiedStateObjectsInSlot. - - // it could be suicided within this SlotDB? - // it should be able to get state from suicided address within a Tx: - // e.g. within a transaction: call addr:suicide -> get state: should be ok - - if dirtyObj == nil { - log.Error("ParallelStateDB GetState access untouched object after create, may check create2") - return common.Hash{} - } - return dirtyObj.GetState(hash) - } - } - - if keys, ok := s.parallel.kvChangesInSlot[addr]; ok { - if _, ok := keys[hash]; ok { - return dirtyObj.GetState(hash) - } - } - // 2.Try to get from unconfirmed DB or main DB - // 2.1 Already read before - if storage, ok := s.parallel.kvReadsInSlot[addr]; ok { - if val, ok := storage.GetValue(hash); ok { - return val - } - } - // 2.2 Object in dirty due to other changes, such as getBalance etc. - // load from dirty directly and the stateObject.GetState() will take care of the KvReadInSlot update. - // So there is no chance for create different objects with same address. (one in dirty and one from non-slot, and inconsistency) - if dirtyObj != nil { - return dirtyObj.GetState(hash) - } - - value := common.Hash{} - // 2.3 Try to get from unconfirmed DB if exist - if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok { - value = val - } else { - // 3.Get from main StateDB - object := s.getStateObjectNoSlot(addr) - val = common.Hash{} - if object != nil { - val = object.GetState(hash) - } - value = val - } - if s.parallel.kvReadsInSlot[addr] == nil { - s.parallel.kvReadsInSlot[addr] = newStorage(false) - } - if _, ok := s.parallel.kvReadsInSlot[addr].GetValue(hash); !ok { - s.parallel.kvReadsInSlot[addr].StoreValue(hash, value) // update cache - } - - return value -} - -// GetCommittedState retrieves a value from the given account's committed storage trie. -// So it should not access/update dirty, and not check delete of dirty objects. 
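GetState above caches storage reads per address in kvReadsInSlot, lazily creating the inner container on first touch; GetCommittedState below reuses the same cache. The lazy nested-cache shape on its own (toy types, first observation wins):

package main

import "fmt"

// kvCache memoizes storage reads: one inner map per address, created on
// first use, so untouched addresses cost nothing.
type kvCache map[string]map[string]string

func (c kvCache) record(addr, key, val string) {
	if c[addr] == nil {
		c[addr] = make(map[string]string)
	}
	if _, seen := c[addr][key]; !seen { // keep the first observed value
		c[addr][key] = val
	}
}

func (c kvCache) lookup(addr, key string) (string, bool) {
	v, ok := c[addr][key] // lookups through a nil inner map are safe in Go
	return v, ok
}

func main() {
	c := kvCache{}
	if _, ok := c.lookup("0xabc", "slot0"); !ok {
		c.record("0xabc", "slot0", "0x01") // fetched from a lower layer
	}
	v, _ := c.lookup("0xabc", "slot0")
	fmt.Println(v) // 0x01: subsequent reads stay consistent
}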
-func (s *ParallelStateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash { - - // 1.Try to get from unconfirmed DB or main DB - // KVs in unconfirmed DB can be seen as pending storage - // KVs in main DB are merged from SlotDB and has done finalise() on merge, can be seen as pending storage too. - // 1.1 Already read before - if storage, ok := s.parallel.kvReadsInSlot[addr]; ok { - if val, ok := storage.GetValue(hash); ok { - return val - } - } - value := common.Hash{} - // 1.2 Try to get from unconfirmed DB if exist - if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok { - value = val - } else { - // 2. Try to get from main DB - val = common.Hash{} - object := s.getStateObjectNoSlot(addr) - if object != nil { - val = object.GetCommittedState(hash) - } - value = val - } - if s.parallel.kvReadsInSlot[addr] == nil { - s.parallel.kvReadsInSlot[addr] = newStorage(false) - } - if _, ok := s.parallel.kvReadsInSlot[addr].GetValue(hash); !ok { - s.parallel.kvReadsInSlot[addr].StoreValue(hash, value) // update cache - } - return value -} - -func (s *ParallelStateDB) HasSelfDestructed(addr common.Address) bool { - // 1.Try to get from dirty - if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - if obj == nil || obj.deleted { - return false - } - return obj.selfDestructed - } - // 2.Try to get from unconfirmed - if exist, ok := s.getAddrStateFromUnconfirmedDB(addr, false); ok { - return !exist - } - - object := s.getDeletedStateObject(addr) - if object != nil { - return object.selfDestructed - } - return false -} - -// AddBalance adds amount to the account associated with addr. -func (s *ParallelStateDB) AddBalance(addr common.Address, amount *uint256.Int) { - // add balance will perform a read operation first - // if amount == 0, no balance change, but there is still an empty check. - object := s.GetOrNewStateObject(addr) - if object != nil { - if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := object.lightCopy(s) - // do balance fixup from the confirmed DB, it could be more reliable than main DB - balance := s.GetBalance(addr) // it will record the balance read operation - newStateObject.setBalance(balance) - newStateObject.AddBalance(amount) - s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - s.parallel.balanceChangesInSlot[addr] = struct{}{} - return - } - // already dirty, make sure the balance is fixed up since it could be previously dirtied by nonce or KV... - balance := s.GetBalance(addr) - if object.Balance().Cmp(balance) != 0 { - log.Warn("AddBalance in dirty, but balance has not do fixup", "txIndex", s.txIndex, "addr", addr, - "stateObject.Balance()", object.Balance(), "s.GetBalance(addr)", balance) - object.setBalance(balance) - } - - object.AddBalance(amount) - s.parallel.balanceChangesInSlot[addr] = struct{}{} - } -} - -// SubBalance subtracts amount from the account associated with addr. 
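AddBalance above, and SubBalance/SetBalance/SetNonce/SetCode below, all follow the slots' copy-on-first-write discipline: the shared base object is never mutated; a light copy is parked in dirtiedStateObjectsInSlot, its stale field refreshed from the freshest visible value (the "fixup"), and only then is the delta applied. The discipline in isolation (toy account type, illustrative only):

package main

import "fmt"

type account struct{ balance uint64 }

// slotDB mutates only private copies; shared base objects stay intact
// so other slots can keep reading them concurrently.
type slotDB struct {
	base  map[string]*account // shared, read-only during execution
	dirty map[string]*account // thread-local copies
}

// addBalance copies the account on first write (a "light copy"),
// then applies the delta to the private copy only.
// The sketch assumes addr already exists in base.
func (s *slotDB) addBalance(addr string, amount uint64) {
	obj, ok := s.dirty[addr]
	if !ok {
		cp := *s.base[addr] // copy, don't alias the shared object
		obj = &cp
		s.dirty[addr] = obj
	}
	obj.balance += amount
}

func main() {
	base := map[string]*account{"0xabc": {balance: 10}}
	s := &slotDB{base: base, dirty: map[string]*account{}}
	s.addBalance("0xabc", 5)
	fmt.Println(base["0xabc"].balance, s.dirty["0xabc"].balance) // 10 15
}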
-func (s *ParallelStateDB) SubBalance(addr common.Address, amount *uint256.Int) { - // unlike add, sub 0 balance will not touch empty object - object := s.GetOrNewStateObject(addr) - if object != nil { - if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := object.lightCopy(s) - // do balance fixup from the confirmed DB, it could be more reliable than main DB - balance := s.GetBalance(addr) - newStateObject.setBalance(balance) - newStateObject.SubBalance(amount) - s.parallel.balanceChangesInSlot[addr] = struct{}{} - s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - return - } - // already dirty, make sure the balance is fixed up since it could be previously dirtied by nonce or KV... - balance := s.GetBalance(addr) - if object.Balance().Cmp(balance) != 0 { - log.Warn("SubBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr, - "stateObject.Balance()", object.Balance(), "s.GetBalance(addr)", balance) - object.setBalance(balance) - } - object.SubBalance(amount) - s.parallel.balanceChangesInSlot[addr] = struct{}{} - } -} - -func (s *ParallelStateDB) SetBalance(addr common.Address, amount *uint256.Int) { - object := s.GetOrNewStateObject(addr) - if object != nil { - if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := object.lightCopy(s) - // update balance for revert, in case child contract is reverted, - // it should revert to the previous balance - balance := s.GetBalance(addr) - newStateObject.setBalance(balance) - newStateObject.SetBalance(amount) - s.parallel.balanceChangesInSlot[addr] = struct{}{} - s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - return - } - - balance := s.GetBalance(addr) - object.setBalance(balance) - object.SetBalance(amount) - s.parallel.balanceChangesInSlot[addr] = struct{}{} - } -} - -func (s *ParallelStateDB) SetNonce(addr common.Address, nonce uint64) { - object := s.GetOrNewStateObject(addr) - if object != nil { - if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := object.lightCopy(s) - noncePre := s.GetNonce(addr) - newStateObject.setNonce(noncePre) // nonce fixup - newStateObject.SetNonce(nonce) - s.parallel.nonceChangesInSlot[addr] = struct{}{} - s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - return - } - noncePre := s.GetNonce(addr) - object.setNonce(noncePre) // nonce fixup - object.SetNonce(nonce) - s.parallel.nonceChangesInSlot[addr] = struct{}{} - } -} - -func (s *ParallelStateDB) SetCode(addr common.Address, code []byte) { - object := s.GetOrNewStateObject(addr) - if object != nil { - codeHash := crypto.Keccak256Hash(code) - if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := object.lightCopy(s) - codePre := s.GetCode(addr) // code fixup - codeHashPre := crypto.Keccak256Hash(codePre) - newStateObject.setCode(codeHashPre, codePre) - newStateObject.SetCode(codeHash, code) - s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - s.parallel.codeChangesInSlot[addr] = struct{}{} - return - } - codePre := s.GetCode(addr) // code fixup - codeHashPre := crypto.Keccak256Hash(codePre) - object.setCode(codeHashPre, codePre) - object.SetCode(codeHash, code) - s.parallel.codeChangesInSlot[addr] = struct{}{} - } -} - -func (s *ParallelStateDB) SetState(addr common.Address, key, value common.Hash) { - object := s.GetOrNewStateObject(addr) // attention: if StateObject's lightCopy, its storage is only a part of the full storage, - if object != nil { - if s.parallel.baseTxIndex+1 == 
s.txIndex { - if s.GetState(addr, key) == value { - log.Debug("Skip set same state", "baseTxIndex", s.parallel.baseTxIndex, - "txIndex", s.txIndex, "addr", addr, - "key", key, "value", value) - return - } - } - - if s.parallel.kvChangesInSlot[addr] == nil { - s.parallel.kvChangesInSlot[addr] = make(StateKeys) - } - - if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := object.lightCopy(s) - newStateObject.SetState(key, value) - s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - s.parallel.addrStateChangesInSlot[addr] = true - return - } - // do State Update - object.SetState(key, value) - s.parallel.addrStateChangesInSlot[addr] = true - } -} - -// SelfDestruct marks the given account as suicided. -// This clears the account balance. -// -// The account's state object is still available until the state is committed, -// getStateObject will return a non-nil account after Suicide. -func (s *ParallelStateDB) SelfDestruct(addr common.Address) { - var object *stateObject - // 1.Try to get from dirty, it could be suicided inside of contract call - object = s.parallel.dirtiedStateObjectsInSlot[addr] - - if object != nil && object.deleted { - return - } - - if object == nil { - // 2.Try to get from unconfirmed, if deleted return false, since the address does not exist - if obj, ok := s.getStateObjectFromUnconfirmedDB(addr); ok { - object = obj - // Treat selfDestructed in unconfirmedDB as deleted since it will be finalised at merge phase. - deleted := object.deleted || object.selfDestructed - s.parallel.addrStateReadsInSlot[addr] = !deleted // true: exist, false: deleted - if deleted { - return - } - } - } - - if object == nil { - // 3.Try to get from main StateDB - object = s.getStateObjectNoSlot(addr) - if object == nil || object.deleted { - s.parallel.addrStateReadsInSlot[addr] = false // true: exist, false: deleted - return - } - s.parallel.addrStateReadsInSlot[addr] = true // true: exist, false: deleted - } - - s.journal.append(selfDestructChange{ - account: &addr, - prev: object.selfDestructed, // todo: must be false? - prevbalance: new(uint256.Int).Set(s.GetBalance(addr)), - }) - - if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := object.lightCopy(s) - newStateObject.markSelfdestructed() - newStateObject.setBalance(new(uint256.Int)) - s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - s.parallel.addrStateChangesInSlot[addr] = false - s.parallel.balanceChangesInSlot[addr] = struct{}{} - s.parallel.codeChangesInSlot[addr] = struct{}{} - return - } - - s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist anymore - s.parallel.balanceChangesInSlot[addr] = struct{}{} - s.parallel.codeChangesInSlot[addr] = struct{}{} - object.markSelfdestructed() - object.setBalance(new(uint256.Int)) -} - -func (s *ParallelStateDB) Selfdestruct6780(addr common.Address) { - object := s.getStateObject(addr) - if object == nil { - return - } - if object.created { - s.SelfDestruct(addr) - } -} - -// CreateAccount explicitly creates a state object. If a state object with the address -// already exists the balance is carried over to the new account. -// -// CreateAccount is called during the EVM CREATE operation. The situation might arise that -// a contract does the following: -// -// 1. sends funds to sha(account ++ (nonce + 1)) -// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1) -// -// Carrying over the balance ensures that Ether doesn't disappear. 
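The carry-over matters because an address can be funded before any code exists there: contract addresses are derived from sender and nonce (or a CREATE2 salt), so funds may arrive at a predictable address first. A toy illustration of the bookkeeping only (not EVM semantics):

package main

import "fmt"

type account struct {
	nonce   uint64
	code    []byte
	balance uint64
}

// createAccount installs a fresh object at addr but preserves any
// balance already sitting there, so pre-funded Ether does not vanish.
func createAccount(state map[string]*account, addr string) {
	var pre uint64
	if old, ok := state[addr]; ok {
		pre = old.balance
	}
	state[addr] = &account{balance: pre} // nonce and code start fresh
}

func main() {
	state := map[string]*account{}
	state["0xpredicted"] = &account{balance: 42} // funded before deployment
	createAccount(state, "0xpredicted")
	fmt.Println(state["0xpredicted"].balance) // 42: the balance survives
}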
-func (s *ParallelStateDB) CreateAccount(addr common.Address) { - // no matter it is got from dirty, unconfirmed or main DB - // if addr not exist, preBalance will be common.U2560, it is same as new(uint256.Int) which - // is the value newObject(), - preBalance := s.GetBalance(addr) - newObj := s.createObject(addr) - newObj.setBalance(new(uint256.Int).Set(preBalance)) // new uint256.Int for newObj - -} - -// RevertToSnapshot reverts all state changes made since the given revision. -func (s *ParallelStateDB) RevertToSnapshot(revid int) { - // Find the snapshot in the stack of valid snapshots. - idx := sort.Search(len(s.validRevisions), func(i int) bool { - return s.validRevisions[i].id >= revid - }) - if idx == len(s.validRevisions) || s.validRevisions[idx].id != revid { - panic(fmt.Errorf("revision id %v cannot be reverted", revid)) - } - snapshot := s.validRevisions[idx].journalIndex - - // Replay the journal to undo changes and remove invalidated snapshots - s.journal.revert(s, snapshot) - s.validRevisions = s.validRevisions[:idx] -} - -// AddRefund adds gas to the refund counter -// journal.append will use ParallelState for revert -func (s *ParallelStateDB) AddRefund(gas uint64) { - s.journal.append(refundChange{prev: s.refund}) - s.refund += gas -} - -// SubRefund removes gas from the refund counter. -// This method will panic if the refund counter goes below zero -func (s *ParallelStateDB) SubRefund(gas uint64) { - s.journal.append(refundChange{prev: s.refund}) - if gas > s.refund { - // we don't need to panic here if we read the wrong state in parallel mode - // we just need to redo this transaction - log.Info(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, s.refund), "tx", s.thash.String()) - s.parallel.needsRedo = true - return - } - s.refund -= gas -} - -// For Parallel Execution Mode, it can be seen as Penetrated Access: -// -// ------------------------------------------------------- -// | BaseTxIndex | Unconfirmed Txs... | Current TxIndex | -// ------------------------------------------------------- -// -// Access from the unconfirmed DB with range&priority: txIndex -1(previous tx) -> baseTxIndex + 1 -func (s *ParallelStateDB) getBalanceFromUnconfirmedDB(addr common.Address) *uint256.Int { - if s.parallel.useDAG { - // DAG never reads from unconfirmedDB, skip check. - return nil - } - for i := s.txIndex - 1; i >= 0 && i > s.BaseTxIndex(); i-- { - db_, ok := s.parallel.unconfirmedDBs.Load(i) - if !ok { - continue - } - db := db_.(*ParallelStateDB) - // 1.Refer the state of address, exist or not in dirtiedStateObjectsInSlot - balanceHit := false - if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { - balanceHit = true - } - if _, exist := db.parallel.balanceChangesInSlot[addr]; exist { - balanceHit = true - } - if !balanceHit { - continue - } - obj := db.parallel.dirtiedStateObjectsInSlot[addr] - balance := obj.Balance() - if obj.deleted { - balance = common.U2560 - } - return balance - } - return nil -} - -// Similar to getBalanceFromUnconfirmedDB -func (s *ParallelStateDB) getNonceFromUnconfirmedDB(addr common.Address) (uint64, bool) { - if s.parallel.useDAG { - // DAG never reads from unconfirmedDB, skip check. 
- return 0, false - } - - for i := s.txIndex - 1; i > s.BaseTxIndex(); i-- { - db_, ok := s.parallel.unconfirmedDBs.Load(i) - if !ok { - continue - } - db := db_.(*ParallelStateDB) - - nonceHit := false - if _, ok := db.parallel.addrStateChangesInSlot[addr]; ok { - nonceHit = true - } else if _, ok := db.parallel.nonceChangesInSlot[addr]; ok { - nonceHit = true - } - if !nonceHit { - // nonce refer not hit, try next unconfirmedDb - continue - } - // nonce hit, return the nonce - obj := db.parallel.dirtiedStateObjectsInSlot[addr] - if obj == nil { - log.Debug("Get nonce from UnconfirmedDB, changed but object not exist, ", - "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) - continue - } - // deleted object with nonce == 0 - if obj.deleted || obj.selfDestructed { - return 0, true - } - nonce := obj.Nonce() - return nonce, true - } - return 0, false -} - -// Similar to getBalanceFromUnconfirmedDB -// It is not only for code, but also codeHash and codeSize, we return the *stateObject for convenience. -func (s *ParallelStateDB) getCodeFromUnconfirmedDB(addr common.Address) ([]byte, bool) { - if s.parallel.useDAG { - // DAG never reads from unconfirmedDB, skip check. - return nil, false - } - for i := s.txIndex - 1; i > s.BaseTxIndex(); i-- { - db_, ok := s.parallel.unconfirmedDBs.Load(i) - if !ok { - continue - } - db := db_.(*ParallelStateDB) - - codeHit := false - if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { - codeHit = true - } - if _, exist := db.parallel.codeChangesInSlot[addr]; exist { - codeHit = true - } - if !codeHit { - // try next unconfirmedDb - continue - } - obj := db.parallel.dirtiedStateObjectsInSlot[addr] - if obj == nil { - log.Debug("Get code from UnconfirmedDB, changed but object not exist, ", - "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) - continue - } - if obj.deleted || obj.selfDestructed { - return nil, true - } - code := obj.Code() - return code, true - } - return nil, false -} - -// Similar to getCodeFromUnconfirmedDB -// but differ when address is deleted or not exist -func (s *ParallelStateDB) getCodeHashFromUnconfirmedDB(addr common.Address) (common.Hash, bool) { - if s.parallel.useDAG { - // DAG never reads from unconfirmedDB, skip check. - return common.Hash{}, false - } - for i := s.txIndex - 1; i > s.BaseTxIndex(); i-- { - db_, ok := s.parallel.unconfirmedDBs.Load(i) - if !ok { - continue - } - db := db_.(*ParallelStateDB) - - hashHit := false - if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { - hashHit = true - } - if _, exist := db.parallel.codeChangesInSlot[addr]; exist { - hashHit = true - } - if !hashHit { - // try next unconfirmedDb - continue - } - obj := db.parallel.dirtiedStateObjectsInSlot[addr] - if obj == nil { - log.Debug("Get codeHash from UnconfirmedDB, changed but object not exist, ", - "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) - continue - } - if obj.deleted || obj.selfDestructed { - return common.Hash{}, true - } - codeHash := common.BytesToHash(obj.CodeHash()) - return codeHash, true - } - return common.Hash{}, false -} - -// Similar to getCodeFromUnconfirmedDB -// It is for address state check of: Exist(), Empty() and HasSuicided() -// Since the unconfirmed DB should have done Finalise() with `deleteEmptyObjects = true` -// If the dirty address is empty or suicided, it will be marked as deleted, so we only need to return `deleted` or not. 
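getBalanceFromUnconfirmedDB, getNonceFromUnconfirmedDB, getCodeFromUnconfirmedDB and getCodeHashFromUnconfirmedDB above, plus the address-state and KV variants below, all walk the same window: from txIndex-1 down to (but excluding) baseTxIndex, taking the first unconfirmed transaction that touched the address, since later transactions shadow earlier ones. The scan in isolation, assuming a sync.Map keyed by transaction index (toy slot type):

package main

import (
	"fmt"
	"sync"
)

// slot is a stand-in for an unconfirmed per-transaction state database.
type slot struct {
	balanceChanges map[string]uint64 // addr -> new balance
}

// nearestBalance scans unconfirmed slots from txIndex-1 down to
// baseTxIndex+1 and returns the most recent write to addr, if any.
// Later transactions take priority, hence the descending loop.
func nearestBalance(unconfirmed *sync.Map, txIndex, baseTxIndex int, addr string) (uint64, bool) {
	for i := txIndex - 1; i > baseTxIndex; i-- {
		v, ok := unconfirmed.Load(i)
		if !ok {
			continue // slot i not published yet (or already merged)
		}
		if bal, hit := v.(*slot).balanceChanges[addr]; hit {
			return bal, true
		}
	}
	return 0, false // caller falls back to the main StateDB
}

func main() {
	var dbs sync.Map
	dbs.Store(3, &slot{balanceChanges: map[string]uint64{"0xabc": 7}})
	dbs.Store(5, &slot{balanceChanges: map[string]uint64{"0xabc": 9}})
	bal, ok := nearestBalance(&dbs, 6, 2, "0xabc")
	fmt.Println(bal, ok) // 9 true: tx5 shadows tx3
}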
-func (s *ParallelStateDB) getAddrStateFromUnconfirmedDB(addr common.Address, testEmpty bool) (bool, bool) { - if s.parallel.useDAG { - // DAG never reads from unconfirmedDB, skip check. - return false, false - } - // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) - for i := s.txIndex - 1; i > s.BaseTxIndex(); i-- { - db_, ok := s.parallel.unconfirmedDBs.Load(i) - if !ok { - continue - } - db := db_.(*ParallelStateDB) - if exist, ok := db.parallel.addrStateChangesInSlot[addr]; ok { - if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - log.Debug("Get addr State from UnconfirmedDB, changed but object not exist, ", - "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) - continue - } else { - if obj.selfDestructed || obj.deleted { - return false, true - } - if testEmpty && obj.empty() { - return false, true - } - } - return exist, true - } - } - return false, false -} - -func (s *ParallelStateDB) getKVFromUnconfirmedDB(addr common.Address, key common.Hash) (common.Hash, bool) { - if s.parallel.useDAG { - // DAG never reads from unconfirmedDB, skip check. - return common.Hash{}, false - } - // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) - for i := s.txIndex - 1; i > s.BaseTxIndex(); i-- { - db_, ok := s.parallel.unconfirmedDBs.Load(i) - if !ok { - continue - } - db := db_.(*ParallelStateDB) - if _, ok := db.parallel.kvChangesInSlot[addr]; ok { - obj := db.parallel.dirtiedStateObjectsInSlot[addr] - if obj.deleted || obj.selfDestructed { - return common.Hash{}, true - } - // The dirty object in unconfirmed DB will never be finalised and changed after execution. - // So no storageRecordsLock requried. - if val, exist := obj.dirtyStorage.GetValue(key); exist { - return val, true - } - } - } - return common.Hash{}, false -} - -func (s *ParallelStateDB) GetStateObjectFromUnconfirmedDB(addr common.Address) (*stateObject, bool) { - return s.getStateObjectFromUnconfirmedDB(addr) -} - -func (s *ParallelStateDB) getStateObjectFromUnconfirmedDB(addr common.Address) (*stateObject, bool) { - if s.parallel.useDAG { - // DAG never reads from unconfirmedDB, skip check. - return nil, false - } - // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) - for i := s.txIndex - 1; i > s.BaseTxIndex(); i-- { - db_, ok := s.parallel.unconfirmedDBs.Load(i) - if !ok { - continue - } - db := db_.(*ParallelStateDB) - if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { - return obj, true - } - } - return nil, false -} - -func (s *ParallelStateDB) getStateObjectFromMainDBNoUpdate(addr common.Address) *stateObject { - var mainObj *stateObject - - if m, ok := s.parallel.conflictCheckStateObjectCache.Load(addr); ok { - mainObj = m.(*stateObject) - return mainObj - } else { - mainDB := s.parallel.baseStateDB - mainObj = mainDB.getStateObjectNoUpdate(addr) - s.parallel.conflictCheckStateObjectCache.Store(addr, mainObj) - } - return mainObj -} - -// GetStateNoUpdate retrieves a value from the given account's storage trie, but do not update the db.stateObjects cache. 
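getStateObjectFromMainDBNoUpdate above, and getStateFromMainNoUpdate below, memoize main-DB reads in per-slot sync.Maps so each address (or storage key) is resolved at most once per conflict check. The load-or-compute-and-store shape on its own (illustrative value type; concurrent misses may both compute, which is harmless when the loader is deterministic for the duration of the check):

package main

import (
	"fmt"
	"sync"
)

// cachedLookup consults the cache first and only falls through to the
// expensive loader on a miss, storing the result for later callers.
func cachedLookup(cache *sync.Map, key string, load func(string) string) string {
	if v, ok := cache.Load(key); ok {
		return v.(string)
	}
	v := load(key)
	cache.Store(key, v)
	return v
}

func main() {
	var cache sync.Map
	calls := 0
	load := func(k string) string { calls++; return "obj:" + k }
	fmt.Println(cachedLookup(&cache, "0xabc", load)) // obj:0xabc
	fmt.Println(cachedLookup(&cache, "0xabc", load)) // served from cache
	fmt.Println(calls)                               // 1
}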
-func (s *ParallelStateDB) getStateFromMainNoUpdate(addr common.Address, key common.Hash) (ret common.Hash) { - - if kvPair, ok := s.parallel.conflictCheckKVReadCache.Load(addr); !ok { - s.parallel.conflictCheckKVReadCache.Store(addr, newStorage(true)) - } else { - st := kvPair.(*StorageSyncMap) - if val, ok := st.GetValue(key); ok { - return val - } - } - val := common.Hash{} - object := s.getStateObjectFromMainDBNoUpdate(addr) - if object != nil { - val = object.GetStateNoUpdate(key) - } - if kvPair, ok := s.parallel.conflictCheckKVReadCache.Load(addr); ok { - st := kvPair.(*StorageSyncMap) - st.StoreValue(key, val) - s.parallel.conflictCheckKVReadCache.Store(addr, st) - } - - return val -} - -// IsParallelReadsValid If stage2 is true, it is a likely conflict check, -// to detect these potential conflict results in advance and schedule redo ASAP. -func (slotDB *ParallelStateDB) IsParallelReadsValid(isStage2 bool) bool { - parallelKvOnce.Do(func() { - StartKvCheckLoop() - }) - - mainDB := slotDB.parallel.baseStateDB - // conservatively use kvRead size as the initial size. - slotDB.parallel.conflictCheckStateObjectCache = new(sync.Map) - slotDB.parallel.conflictCheckKVReadCache = new(sync.Map) - - if isStage2 && slotDB.txIndex < mainDB.TxIndex() { - // already merged, no need to check - return true - } - - // for nonce - for addr, nonceSlot := range slotDB.parallel.nonceReadsInSlot { - if isStage2 { // update slotDB's unconfirmed DB list and try - if slotDB.parallel.useDAG { - // DAG never reads from unconfirmedDB, skip check. - return true - } - if nonceUnconfirm, ok := slotDB.getNonceFromUnconfirmedDB(addr); ok { - if nonceSlot != nonceUnconfirm { - log.Debug("IsSlotDBReadsValid nonce read is invalid in unconfirmed", "addr", addr, - "nonceSlot", nonceSlot, "nonceUnconfirm", nonceUnconfirm, "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) - return false - } - } - } - var nonceMain uint64 = 0 - mainObj := slotDB.getStateObjectFromMainDBNoUpdate(addr) - if mainObj != nil { - nonceMain = mainObj.Nonce() - } - if nonceSlot != nonceMain { - log.Debug("IsSlotDBReadsValid nonce read is invalid", "addr", addr, - "nonceSlot", nonceSlot, "nonceMain", nonceMain, "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex, - "mainIndex", mainDB.txIndex) - - return false - } - } - // balance - for addr, balanceSlot := range slotDB.parallel.balanceReadsInSlot { - if isStage2 { // update slotDB's unconfirmed DB list and try - if slotDB.parallel.useDAG { - // DAG never reads from unconfirmedDB, skip check. 
- return true - } - if balanceUnconfirm := slotDB.getBalanceFromUnconfirmedDB(addr); balanceUnconfirm != nil { - if balanceSlot.Cmp(balanceUnconfirm) == 0 { - continue - } - return false - } - } - - balanceMain := common.U2560 - mainObj := slotDB.getStateObjectFromMainDBNoUpdate(addr) - - if mainObj != nil { - balanceMain = mainObj.Balance() - } - - if balanceSlot.Cmp(balanceMain) != 0 { - log.Debug("IsSlotDBReadsValid balance read is invalid", "addr", addr, - "balanceSlot", balanceSlot, "balanceMain", balanceMain, "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex, - "mainIndex", mainDB.txIndex) - return false - } - } - // check KV - var units []ParallelKvCheckUnit // todo: pre-allocate to make it faster - for addr, read := range slotDB.parallel.kvReadsInSlot { - read.Range(func(keySlot, valSlot interface{}) bool { - units = append(units, ParallelKvCheckUnit{addr, keySlot.(common.Hash), valSlot.(common.Hash)}) - return true - }) - } - readLen := len(units) - if readLen < 8 || isStage2 { - for _, unit := range units { - if hasKvConflict(slotDB, unit.addr, unit.key, unit.val, isStage2) { - return false - } - } - } else { - msgHandledNum := 0 - msgSendNum := 0 - for _, unit := range units { - for { // make sure the unit is consumed - consumed := false - select { - case conflict := <-parallelKvCheckResCh: - msgHandledNum++ - if conflict { - // make sure all request are handled or discarded - for { - if msgHandledNum == msgSendNum { - break - } - select { - case <-parallelKvCheckReqCh: - msgHandledNum++ - case <-parallelKvCheckResCh: - msgHandledNum++ - } - } - return false - } - case parallelKvCheckReqCh <- ParallelKvCheckMessage{slotDB, isStage2, unit}: - msgSendNum++ - consumed = true - } - if consumed { - break - } - } - } - for { - if msgHandledNum == readLen { - break - } - conflict := <-parallelKvCheckResCh - msgHandledNum++ - if conflict { - // make sure all request are handled or discarded - for { - if msgHandledNum == msgSendNum { - break - } - select { - case <-parallelKvCheckReqCh: - msgHandledNum++ - case <-parallelKvCheckResCh: - msgHandledNum++ - } - } - return false - } - } - } - - if isStage2 { // stage2 skip check code, or state, since they are likely unchanged. 
- return true - } - - // check code - for addr, codeSlot := range slotDB.parallel.codeReadsInSlot { - var codeMain []byte = nil - object := slotDB.getStateObjectFromMainDBNoUpdate(addr) - if object != nil { - codeMain = object.Code() - } - if !bytes.Equal(codeSlot, codeMain) { - log.Debug("IsSlotDBReadsValid code read is invalid", "addr", addr, - "len codeSlot", len(codeSlot), "len codeMain", len(codeMain), "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex, - "mainIndex", mainDB.txIndex) - return false - } - } - // check codeHash - for addr, codeHashSlot := range slotDB.parallel.codeHashReadsInSlot { - codeHashMain := common.Hash{} - object := slotDB.getStateObjectFromMainDBNoUpdate(addr) - if object != nil { - codeHashMain = common.BytesToHash(object.CodeHash()) - } - if !bytes.Equal(codeHashSlot.Bytes(), codeHashMain.Bytes()) { - log.Debug("IsSlotDBReadsValid codehash read is invalid", "addr", addr, - "codeHashSlot", codeHashSlot, "codeHashMain", codeHashMain, "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex, "mainIndex", mainDB.txIndex) - return false - } - } - // addr state check - for addr, stateSlot := range slotDB.parallel.addrStateReadsInSlot { - stateMain := false // addr not exist - if slotDB.getStateObjectFromMainDBNoUpdate(addr) != nil { - stateMain = true // addr exist in main DB - } - if stateSlot != stateMain { - log.Debug("IsSlotDBReadsValid addrState read invalid(true: exist, false: not exist)", - "addr", addr, "stateSlot", stateSlot, "stateMain", stateMain, - "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex, "mainIndex", mainDB.txIndex) - return false - } - } - // snapshot destructs check - for addr, destructRead := range slotDB.parallel.addrSnapDestructsReadsInSlot { - mainObj := mainDB.getDeletedStateObjectNoUpdate(addr) - if mainObj == nil { - log.Debug("IsSlotDBReadsValid snapshot destructs read invalid, address should exist", - "addr", addr, "destruct", destructRead, - "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) - return false - } - slotDB.snapParallelLock.RLock() // fixme: this lock is not needed - _, destructMain := mainDB.snapDestructs[addr] // addr not exist - slotDB.snapParallelLock.RUnlock() - if destructRead != destructMain && addr.Hex() != "0x0000000000000000000000000000000000000001" { - log.Debug("IsSlotDBReadsValid snapshot destructs read invalid", - "addr", addr, "destructRead", destructRead, "destructMain", destructMain, - "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex, - "mainIndex", mainDB.txIndex) - return false - } - } - return true -} - -// NeedsRedo returns true if there is any clear reason that we need to redo this transaction -func (s *ParallelStateDB) NeedsRedo() bool { - return s.parallel.needsRedo -} - -// FinaliseForParallel finalises the state by removing the destructed objects and clears -// the journal as well as the refunds. Finalise, however, will not push any updates -// into the tries just yet. Only IntermediateRoot or Commit will do that. -// It also handles the mainDB dirties for the first TX. 
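Stripped of the staged checks and the worker channels, IsParallelReadsValid above reduces to replaying the slot's recorded reads against the main database and failing on the first mismatch, which forces a redo of the speculative transaction. A compressed sketch of that core (hypothetical record shape):

package main

import "fmt"

// readSet records every value a speculative transaction observed.
type readSet struct {
	nonces   map[string]uint64
	balances map[string]uint64
}

// validate re-reads each recorded address from the committed state and
// reports the first mismatch; any mismatch means the speculative
// execution consumed stale data and must be re-executed.
func validate(r *readSet, mainNonce, mainBalance func(string) uint64) bool {
	for addr, n := range r.nonces {
		if mainNonce(addr) != n {
			return false
		}
	}
	for addr, b := range r.balances {
		if mainBalance(addr) != b {
			return false
		}
	}
	return true
}

func main() {
	committed := map[string]uint64{"0xabc": 1}
	r := &readSet{
		nonces:   map[string]uint64{"0xabc": 1},
		balances: map[string]uint64{},
	}
	nonceOf := func(a string) uint64 { return committed[a] }
	balOf := func(a string) uint64 { return 0 }
	fmt.Println(validate(r, nonceOf, balOf)) // true: reads still hold
	committed["0xabc"] = 2                   // a conflicting tx merged first
	fmt.Println(validate(r, nonceOf, balOf)) // false: schedule a redo
}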
-func (s *ParallelStateDB) FinaliseForParallel(deleteEmptyObjects bool, mainDB *StateDB) { - addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties)) - - if s.TxIndex() == 0 && len(mainDB.journal.dirties) > 0 { - mainDB.stateObjectDestructLock.Lock() - for addr, acc := range mainDB.stateObjectsDestructDirty { - mainDB.stateObjectsDestruct[addr] = acc - } - mainDB.stateObjectsDestructDirty = make(map[common.Address]*types.StateAccount) - mainDB.stateObjectDestructLock.Unlock() - for addr := range mainDB.journal.dirties { - var obj *stateObject - var exist bool - obj, exist = mainDB.getStateObjectFromStateObjects(addr) - if !exist { - continue - } - - if obj.selfDestructed || (deleteEmptyObjects && obj.empty()) { - - obj.deleted = true - - // We need to maintain account deletions explicitly (will remain - // set indefinitely). Note only the first occurred self-destruct - // event is tracked. - mainDB.stateObjectDestructLock.Lock() - if _, ok := mainDB.stateObjectsDestruct[obj.address]; !ok { - mainDB.stateObjectsDestruct[obj.address] = obj.origin - } - mainDB.stateObjectDestructLock.Unlock() - // Note, we can't do this only at the end of a block because multiple - // transactions within the same block might self destruct and then - // resurrect an account; but the snapshotter needs both events. - mainDB.AccountMux.Lock() - delete(mainDB.accounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect) - delete(mainDB.accountsOrigin, obj.address) // Clear out any previously updated account data (may be recreated via a resurrect) - mainDB.AccountMux.Unlock() - - mainDB.StorageMux.Lock() - delete(mainDB.storages, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect) - delete(mainDB.storagesOrigin, obj.address) // Clear out any previously updated storage data (may be recreated via a resurrect) - mainDB.StorageMux.Unlock() - } else { - obj.finalise(true) // Prefetch slots in the background - } - - obj.created = false - mainDB.stateObjectsPending[addr] = struct{}{} - mainDB.stateObjectsDirty[addr] = struct{}{} - - // At this point, also ship the address off to the prefetch. The prefetcher - // will start loading tries, and when the change is eventually committed, - // the commit-phase will be a lot faster - addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure - } - mainDB.clearJournalAndRefund() - } - - for addr := range s.journal.dirties { - var obj *stateObject - var exist bool - if s.parallel.isSlotDB { - obj = s.parallel.dirtiedStateObjectsInSlot[addr] - if obj != nil { - exist = true - } else { - log.Error("StateDB Finalise dirty addr not in dirtiedStateObjectsInSlot", - "addr", addr) - } - } else { - obj, exist = s.getStateObjectFromStateObjects(addr) - } - if !exist { - continue - } - - if obj.selfDestructed || (deleteEmptyObjects && obj.empty()) { - obj.deleted = true - // We need to maintain account deletions explicitly (will remain - // set indefinitely). Note only the first occurred self-destruct - // event is tracked. - // This is the thread local one, no need to acquire the stateObjectsDestructLock. - if _, ok := s.stateObjectsDestruct[obj.address]; !ok { - s.stateObjectsDestruct[obj.address] = obj.origin - } - - // Note, we can't do this only at the end of a block because multiple - // transactions within the same block might self destruct and then - // resurrect an account; but the snapshotter needs both events. 
- mainDB.AccountMux.Lock() - delete(mainDB.accounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect) - delete(mainDB.accountsOrigin, obj.address) // Clear out any previously updated account data (may be recreated via a resurrect) - mainDB.AccountMux.Unlock() - mainDB.StorageMux.Lock() - delete(mainDB.storages, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect) - delete(mainDB.storagesOrigin, obj.address) // Clear out any previously updated storage data (may be recreated via a resurrect) - mainDB.StorageMux.Unlock() - } else { - // 1.none parallel mode, we do obj.finalise(true) as normal - // 2.with parallel mode, we do obj.finalise(true) on dispatcher, not on slot routine - // obj.finalise(true) will clear its dirtyStorage, will make prefetch broken. - if !s.isParallel || !s.parallel.isSlotDB { - obj.finalise(true) // Prefetch slots in the background - } else { - // don't do finalise() here as to keep dirtyObjects unchanged in dirtyStorages, which avoid contention issue. - obj.fixUpOriginAndResetPendingStorage() - } - } - - if obj.created { - s.parallel.createdObjectRecord[addr] = struct{}{} - } - obj.created = false - - s.stateObjectsPending[addr] = struct{}{} - s.stateObjectsDirty[addr] = struct{}{} - - // At this point, also ship the address off to the prefetcher. The prefetcher - // will start loading tries, and when the change is eventually committed, - // the commit-phase will be a lot faster - addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure - } - - if mainDB.prefetcher != nil && len(addressesToPrefetch) > 0 { - mainDB.trieParallelLock.Lock() - mainDB.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch) - mainDB.trieParallelLock.Unlock() - } - // Invalidate journal because reverting across transactions is not allowed. 
- s.clearJournalAndRefund() -} - -func (s *ParallelStateDB) reset() { - - s.StateDB.db = nil - s.StateDB.prefetcher = nil - s.StateDB.trie = nil - s.StateDB.noTrie = false - s.StateDB.hasher = crypto.NewKeccakState() - s.StateDB.snaps = nil - s.StateDB.snap = nil - s.StateDB.snapParallelLock = sync.RWMutex{} - s.StateDB.trieParallelLock = sync.Mutex{} - s.StateDB.stateObjectDestructLock = sync.RWMutex{} - s.StateDB.snapDestructs = addressToStructPool.Get().(map[common.Address]struct{}) - s.StateDB.originalRoot = common.Hash{} - s.StateDB.expectedRoot = common.Hash{} - s.StateDB.stateRoot = common.Hash{} - s.StateDB.fullProcessed = false - s.StateDB.AccountMux = sync.Mutex{} - s.StateDB.StorageMux = sync.Mutex{} - s.StateDB.accounts = make(map[common.Hash][]byte) - s.StateDB.storages = make(map[common.Hash]map[common.Hash][]byte) - s.StateDB.accountsOrigin = make(map[common.Address][]byte) - s.StateDB.storagesOrigin = make(map[common.Address]map[common.Hash][]byte) - s.StateDB.stateObjects = make(map[common.Address]*stateObject) // replaced by parallel.stateObjects in parallel mode - s.StateDB.stateObjectsPending = addressToStructPool.Get().(map[common.Address]struct{}) - s.StateDB.stateObjectsDirty = addressToStructPool.Get().(map[common.Address]struct{}) - s.StateDB.stateObjectsDestruct = make(map[common.Address]*types.StateAccount) - s.StateDB.stateObjectsDestructDirty = make(map[common.Address]*types.StateAccount) - s.StateDB.dbErr = nil - s.StateDB.refund = 0 - s.StateDB.thash = common.Hash{} - s.StateDB.txIndex = 0 - s.StateDB.logs = logsPool.Get().(map[common.Hash][]*types.Log) - s.StateDB.logSize = 0 - s.StateDB.rwSet = nil - s.StateDB.mvStates = nil - s.StateDB.stat = nil - s.StateDB.preimages = nil - s.StateDB.accessList = nil - s.StateDB.transientStorage = nil - s.StateDB.journal = journalPool.Get().(*journal) - s.StateDB.validRevisions = nil - s.StateDB.nextRevisionId = 0 - s.StateDB.AccountReads = 0 - s.StateDB.AccountHashes = 0 - s.StateDB.AccountUpdates = 0 - s.StateDB.AccountCommits = 0 - s.StateDB.StorageReads = 0 - s.StateDB.StorageHashes = 0 - s.StateDB.StorageUpdates = 0 - s.StateDB.StorageCommits = 0 - s.StateDB.SnapshotAccountReads = 0 - s.StateDB.SnapshotStorageReads = 0 - s.StateDB.SnapshotCommits = 0 - s.StateDB.TrieDBCommits = 0 - s.StateDB.TrieCommits = 0 - s.StateDB.CodeCommits = 0 - s.StateDB.TxDAGGenerate = 0 - s.StateDB.AccountUpdated = 0 - s.StateDB.StorageUpdated = 0 - s.StateDB.AccountDeleted = 0 - s.StateDB.StorageDeleted = 0 - s.StateDB.isParallel = true - s.StateDB.parallel = ParallelState{} - s.StateDB.onCommit = nil - - s.parallel.isSlotDB = true - s.parallel.SlotIndex = -1 - s.parallel.stateObjects = nil - s.parallel.locatStateObjects = nil - s.parallel.baseStateDB = nil - s.parallel.baseTxIndex = -1 - s.parallel.dirtiedStateObjectsInSlot = addressToStateObjectsPool.Get().(map[common.Address]*stateObject) - s.parallel.unconfirmedDBs = nil - s.parallel.nonceChangesInSlot = addressToStructPool.Get().(map[common.Address]struct{}) - s.parallel.nonceReadsInSlot = addressToUintPool.Get().(map[common.Address]uint64) - s.parallel.balanceChangesInSlot = addressToStructPool.Get().(map[common.Address]struct{}) - s.parallel.balanceReadsInSlot = balancePool.Get().(map[common.Address]*uint256.Int) - s.parallel.locatStateObjects = addressToStateObjectsPool.Get().(map[common.Address]*stateObject) - s.parallel.codeReadsInSlot = addressToBytesPool.Get().(map[common.Address][]byte) - s.parallel.codeHashReadsInSlot = 
addressToHashPool.Get().(map[common.Address]common.Hash) - s.parallel.codeChangesInSlot = addressToStructPool.Get().(map[common.Address]struct{}) - s.parallel.kvChangesInSlot = addressToStateKeysPool.Get().(map[common.Address]StateKeys) - s.parallel.kvReadsInSlot = addressToStoragePool.Get().(map[common.Address]Storage) - s.parallel.addrStateReadsInSlot = addressToBoolPool.Get().(map[common.Address]bool) - s.parallel.addrStateChangesInSlot = addressToBoolPool.Get().(map[common.Address]bool) - s.parallel.addrSnapDestructsReadsInSlot = addressToBoolPool.Get().(map[common.Address]bool) - s.parallel.createdObjectRecord = addressToStructPool.Get().(map[common.Address]struct{}) - s.parallel.needsRedo = false - s.parallel.useDAG = false - s.parallel.conflictCheckStateObjectCache = nil - s.parallel.conflictCheckKVReadCache = nil -} diff --git a/core/state/state_object.go b/core/state/state_object.go index 561ee0fbef..a7a0aafb67 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -744,39 +744,6 @@ func (s *stateObject) setBalance(amount *uint256.Int) { // ReturnGas Return the gas back to the origin. Used by the Virtual machine or Closures func (s *stateObject) ReturnGas(gas *uint256.Int) {} -func (s *stateObject) lightCopy(db *ParallelStateDB) *stateObject { - object := newObject(db, s.isParallel, s.address, &s.data) - if s.trie != nil { - s.db.trieParallelLock.Lock() - object.trie = db.db.CopyTrie(s.trie) - s.db.trieParallelLock.Unlock() - } - object.code = s.code - object.selfDestructed = s.selfDestructed // should be false - object.dirtyCode = s.dirtyCode // it is not used in slot, but keep it is ok - object.deleted = s.deleted // should be false - - object.dirtyBalance = s.dirtyBalance - object.dirtyNonce = s.dirtyNonce - object.dirtyCodeHash = s.dirtyCodeHash - - // object generated by lightCopy() is supposed to be used in the slot. - // and the origin storage will be filled at GetState() etc. - // the dirty and pending will be recorded in the execution for new changes. - // so no need to do the copy. - // moreover, copy storage here is tricky, as the stateDB is changed concurrently with - // the slot execution, and the snap is updated only at Commit stage. - // so the origin may different between the time NOW and the time of merge, so the conflict check is vital to avoid - // the problem. fortunately, the KVRead will record this and compare it with mainDB. - - //object.dirtyStorage = s.dirtyStorage.Copy() - s.storageRecordsLock.RLock() - object.originStorage = s.originStorage.Copy() - object.pendingStorage = s.pendingStorage.Copy() - s.storageRecordsLock.RUnlock() - return object -} - // deepCopy happens only at global serial execution stage. // E.g. prepareForParallel and merge (copy slotObj to mainDB) // otherwise the origin/dirty/pending storages may cause incorrect issue. 
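PutSyncPool and reset earlier in this file, together with the journalPool/addressTo*Pool family removed from statedb.go below, all lean on one pattern: clear a map key-by-key (which keeps its allocated buckets) and return it to a sync.Pool, so the next slot DB reuses the allocation instead of growing a fresh map. The pattern in miniature (illustrative names):

package main

import (
	"fmt"
	"sync"
)

// mapPool hands out pre-sized maps so per-transaction slot databases
// avoid re-allocating (and re-growing) the same maps on every reuse.
var mapPool = sync.Pool{
	New: func() interface{} { return make(map[string]int, 16) },
}

// acquire returns a ready-to-use empty map from the pool.
func acquire() map[string]int {
	return mapPool.Get().(map[string]int)
}

// release clears the map in place (delete in a range loop keeps the
// allocated buckets) and puts it back for the next user.
func release(m map[string]int) {
	for k := range m {
		delete(m, k)
	}
	mapPool.Put(m)
}

func main() {
	m := acquire()
	m["balanceReads"] = 3
	release(m)                  // m is empty again but keeps its capacity
	fmt.Println(len(acquire())) // 0
}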
diff --git a/core/state/statedb.go b/core/state/statedb.go index c86134dcda..efecb52e58 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -18,7 +18,6 @@ package state import ( - "container/list" "errors" "fmt" "runtime" @@ -308,7 +307,7 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) stateObjectsPending: make(map[common.Address]struct{}), stateObjectsDirty: make(map[common.Address]struct{}), stateObjectsDestruct: make(map[common.Address]*types.StateAccount), - stateObjectsDestructDirty: make(map[common.Address]*types.StateAccount, defaultNumOfSlots), + stateObjectsDestructDirty: make(map[common.Address]*types.StateAccount), logs: make(map[common.Hash][]*types.Log), preimages: make(map[common.Hash][]byte), journal: newJournal(), @@ -1214,146 +1213,6 @@ func (s *StateDB) copyInternal(doPrefetch bool) *StateDB { return state } -var journalPool = sync.Pool{ - New: func() interface{} { - return &journal{ - dirties: make(map[common.Address]int, defaultNumOfSlots), - entries: make([]journalEntry, 0, defaultNumOfSlots), - } - }, -} - -var addressToStructPool = sync.Pool{ - New: func() interface{} { return make(map[common.Address]struct{}, defaultNumOfSlots) }, -} - -var addressToStateKeysPool = sync.Pool{ - New: func() interface{} { return make(map[common.Address]StateKeys, defaultNumOfSlots) }, -} - -var addressToStoragePool = sync.Pool{ - New: func() interface{} { return make(map[common.Address]Storage, defaultNumOfSlots) }, -} - -var addressToStateObjectsPool = sync.Pool{ - New: func() interface{} { return make(map[common.Address]*stateObject, defaultNumOfSlots) }, -} - -var balancePool = sync.Pool{ - New: func() interface{} { return make(map[common.Address]*uint256.Int, defaultNumOfSlots) }, -} - -var addressToHashPool = sync.Pool{ - New: func() interface{} { return make(map[common.Address]common.Hash, defaultNumOfSlots) }, -} - -var addressToBytesPool = sync.Pool{ - New: func() interface{} { return make(map[common.Address][]byte, defaultNumOfSlots) }, -} - -var addressToBoolPool = sync.Pool{ - New: func() interface{} { return make(map[common.Address]bool, defaultNumOfSlots) }, -} - -var addressToUintPool = sync.Pool{ - New: func() interface{} { return make(map[common.Address]uint64, defaultNumOfSlots) }, -} - -var logsPool = sync.Pool{ - New: func() interface{} { return make(map[common.Hash][]*types.Log, defaultNumOfSlots) }, -} - -func NewEmptySlotDB() *ParallelStateDB { - parallel := ParallelState{ - // The stateObjects in Parallel is thread-local. - // The base stateDB's stateObjects is thread-unsafe as it is not guarded by lock. - // The base stateDB's parallel.stateObjects is SyncMap and thread-safe. and no extra lock needed (TODO-dav). - // The base stateDB's parallel.stateObjects are updated by mergeSlotDB with Lock. - // The base stateDB's stateObject is read-only and never be updated once parallel execution happens. - // AND, presumably, the stateDB's stateObject is usually empty for real on-chain cases. - // Before execution, the slotDB should copy objects from base stateDB's parallel.stateObjects and stateObjects - // NOTICE: - // We are not reusing the base slot db's stateObjects although copy can be avoid. Because multiple thread - // access has lock check and there might be tricky bug such as thread1 handle tx0 at the same time with thread2 - // handle tx1, so what thread1's slotDB see in the s.parallel.stateObjects might be the middle result of Thread2. 
-func NewEmptySlotDB() *ParallelStateDB {
- parallel := ParallelState{
- // The stateObjects in Parallel is thread-local.
- // The base stateDB's stateObjects is thread-unsafe as it is not guarded by lock.
- // The base stateDB's parallel.stateObjects is SyncMap and thread-safe. and no extra lock needed (TODO-dav).
- // The base stateDB's parallel.stateObjects are updated by mergeSlotDB with Lock.
- // The base stateDB's stateObject is read-only and never be updated once parallel execution happens.
- // AND, presumably, the stateDB's stateObject is usually empty for real on-chain cases.
- // Before execution, the slotDB should copy objects from base stateDB's parallel.stateObjects and stateObjects
- // NOTICE:
- // We are not reusing the base slot db's stateObjects although copy can be avoid. Because multiple thread
- // access has lock check and there might be tricky bug such as thread1 handle tx0 at the same time with thread2
- // handle tx1, so what thread1's slotDB see in the s.parallel.stateObjects might be the middle result of Thread2.
- //
- // We are not do simple copy (lightweight pointer copy) as the stateObject can be accessed by different thread.
-
- stateObjects: nil, /* The parallel execution will not use this field, except the base DB */
- locatStateObjects: addressToStateObjectsPool.Get().(map[common.Address]*stateObject),
- codeReadsInSlot: addressToBytesPool.Get().(map[common.Address][]byte),
- codeHashReadsInSlot: addressToHashPool.Get().(map[common.Address]common.Hash),
- codeChangesInSlot: addressToStructPool.Get().(map[common.Address]struct{}),
- kvChangesInSlot: addressToStateKeysPool.Get().(map[common.Address]StateKeys),
- kvReadsInSlot: addressToStoragePool.Get().(map[common.Address]Storage),
- balanceChangesInSlot: addressToStructPool.Get().(map[common.Address]struct{}),
- balanceReadsInSlot: balancePool.Get().(map[common.Address]*uint256.Int),
- addrStateReadsInSlot: addressToBoolPool.Get().(map[common.Address]bool),
- addrStateChangesInSlot: addressToBoolPool.Get().(map[common.Address]bool),
- nonceChangesInSlot: addressToStructPool.Get().(map[common.Address]struct{}),
- nonceReadsInSlot: addressToUintPool.Get().(map[common.Address]uint64),
- addrSnapDestructsReadsInSlot: addressToBoolPool.Get().(map[common.Address]bool),
- isSlotDB: true,
- dirtiedStateObjectsInSlot: addressToStateObjectsPool.Get().(map[common.Address]*stateObject),
- createdObjectRecord: addressToStructPool.Get().(map[common.Address]struct{}),
- }
- state := &ParallelStateDB{
- StateDB: StateDB{
- db: nil,
- trie: nil, // Parallel StateDB may access the trie, but it takes no effect to the baseDB.
- accounts: make(map[common.Hash][]byte),
- storages: make(map[common.Hash]map[common.Hash][]byte),
- accountsOrigin: make(map[common.Address][]byte),
- storagesOrigin: make(map[common.Address]map[common.Hash][]byte),
- stateObjects: make(map[common.Address]*stateObject), // replaced by parallel.stateObjects in parallel mode
- stateObjectsPending: addressToStructPool.Get().(map[common.Address]struct{}),
- stateObjectsDirty: addressToStructPool.Get().(map[common.Address]struct{}),
- stateObjectsDestruct: make(map[common.Address]*types.StateAccount),
- refund: 0, // should be 0
- logs: logsPool.Get().(map[common.Hash][]*types.Log),
- logSize: 0,
- preimages: nil,
- journal: journalPool.Get().(*journal),
- hasher: crypto.NewKeccakState(),
- isParallel: true,
- parallel: parallel,
- },
- }
- state.snapDestructs = addressToStructPool.Get().(map[common.Address]struct{})
- return state
-}
-
-// CopyForSlot copy all the basic fields, initialize the memory ones
-func (s *StateDB) CopyForSlot(parallelDBManager *ParallelDBManager) *ParallelStateDB {
- state := parallelDBManager.allocate()
- state.db = s.db
- s.preimages = make(map[common.Hash][]byte, len(s.preimages))
-
- s.snapParallelLock.RLock()
- for k, v := range s.snapDestructs {
- state.snapDestructs[k] = v
- }
- s.snapParallelLock.RUnlock()
-
- if s.snaps != nil {
- state.snaps = s.snaps
- state.snap = s.snap
- }
-
- // Deep copy the state changes made in the scope of block
- // along with their original values.
- s.AccountMux.Lock()
- state.accounts = copySet(s.accounts)
- state.accountsOrigin = copySet(state.accountsOrigin)
- s.AccountMux.Unlock()
- s.StorageMux.Lock()
- state.storages = copy2DSet(s.storages)
- state.storagesOrigin = copy2DSet(state.storagesOrigin)
- s.StorageMux.Unlock()
-
- return state
-}
-
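The removed CopyForSlot snapshotted the shared account and storage sets while holding AccountMux/StorageMux, so a slot database never aliased the live block-level maps. A generic standalone sketch of that copy-under-lock step (the helpers below are my own illustration, not the repo's copySet/copy2DSet signatures):

// Sketch only: snapshot shared one- and two-level maps while the owning
// mutex excludes writers, so the copy never races with later updates.
package main

import (
	"fmt"
	"sync"
)

func copySet[K comparable, V any](src map[K]V) map[K]V {
	dst := make(map[K]V, len(src))
	for k, v := range src {
		dst[k] = v
	}
	return dst
}

func copy2DSet[K1, K2 comparable, V any](src map[K1]map[K2]V) map[K1]map[K2]V {
	dst := make(map[K1]map[K2]V, len(src))
	for k, inner := range src {
		dst[k] = copySet(inner)
	}
	return dst
}

func main() {
	var mu sync.Mutex
	storages := map[string]map[string]int{"acct": {"slot0": 1}}

	mu.Lock() // snapshot while writers are excluded
	snap := copy2DSet(storages)
	mu.Unlock()

	storages["acct"]["slot0"] = 2 // later writes do not reach the snapshot
	fmt.Println(snap["acct"]["slot0"], storages["acct"]["slot0"])
}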
 // Snapshot returns an identifier for the current revision of the state.
 func (s *StateDB) Snapshot() int {
 id := s.nextRevisionId
@@ -2435,280 +2294,3 @@ func (s *StateDB) PrepareForParallel() {
 }
 }
 }
-
-func (s *StateDB) AddrPrefetch(slotDb *ParallelStateDB) {
- addressesToPrefetch := make([][]byte, 0, len(slotDb.parallel.dirtiedStateObjectsInSlot))
- for addr, obj := range slotDb.parallel.dirtiedStateObjectsInSlot {
- addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure
- if obj.deleted {
- continue
- }
- obj.storageRecordsLock.RLock()
- // copied from obj.finalise(true)
- slotsToPrefetch := make([][]byte, 0, obj.dirtyStorage.Length())
- obj.dirtyStorage.Range(func(key, value interface{}) bool {
- originalValue, _ := obj.originStorage.GetValue(key.(common.Hash))
- if value.(common.Hash) != originalValue {
- originalKey := key.(common.Hash)
- slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(originalKey[:])) // Copy needed for closure
- }
- return true
- })
- obj.storageRecordsLock.RUnlock()
- if s.prefetcher != nil && len(slotsToPrefetch) > 0 {
- s.trieParallelLock.Lock()
- s.prefetcher.prefetch(obj.addrHash, obj.data.Root, obj.address, slotsToPrefetch)
- s.trieParallelLock.Unlock()
- }
- }
-
- if s.prefetcher != nil && len(addressesToPrefetch) > 0 {
- s.trieParallelLock.Lock()
- s.prefetcher.prefetch(common.Hash{}, s.originalRoot, emptyAddr, addressesToPrefetch)
- s.trieParallelLock.Unlock()
- }
-}
-
-// MergeSlotDB is for Parallel execution mode, when the transaction has been
-// finalized(dirty -> pending) on execution slot, the execution results should be
-// merged back to the main StateDB.
-func (s *StateDB) MergeSlotDB(slotDb *ParallelStateDB, slotReceipt *types.Receipt, txIndex int, fees *DelayedGasFee) *StateDB {
-
- for s.nextRevisionId < slotDb.nextRevisionId {
- if len(slotDb.validRevisions) > 0 {
- r := slotDb.validRevisions[s.nextRevisionId]
- s.validRevisions = append(s.validRevisions, r)
- }
- s.nextRevisionId++
- if len(slotDb.validRevisions) < s.nextRevisionId {
- continue
- }
- }
-
- // receipt.Logs use unified log index within a block
- // align slotDB's log index to the block stateDB's logSize
- for _, l := range slotReceipt.Logs {
- l.Index += s.logSize
- s.logs[s.thash] = append(s.logs[s.thash], l)
- }
-
- s.logSize += slotDb.logSize
-
- // only merge dirty objects
- addressesToPrefetch := make([][]byte, 0, len(slotDb.stateObjectsDirty))
-
- for addr := range slotDb.stateObjectsDirty {
- if _, exist := s.stateObjectsDirty[addr]; !exist {
- s.stateObjectsDirty[addr] = struct{}{}
- }
-
- // stateObjects: KV, balance, nonce...
- dirtyObj, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]
- if !ok {
- log.Error("parallel merge, but dirty object not exist!", "SlotIndex", slotDb.parallel.SlotIndex, "txIndex:", slotDb.txIndex, "addr", addr)
- continue
- }
- mainObj, exist := s.loadStateObj(addr)
- if !exist || mainObj.deleted {
- // addr not exist on main DB, the object is created in the merging tx.
- mainObj = dirtyObj.deepCopy(s)
- if !dirtyObj.deleted {
- mainObj.finalise(true)
- }
- s.storeStateObj(addr, mainObj)
-
- // fixme: should not delete, would cause unconfirmed DB incorrect?
- // delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership, fixme: shared read?
- if dirtyObj.deleted {
- // remove the addr from snapAccounts&snapStorage only when object is deleted.
- // "deleted" is not equal to "snapDestructs", since createObject() will add an addr for - // snapDestructs to destroy previous object, while it will keep the addr in snapAccounts & snapAccounts - s.AccountMux.Lock() - delete(s.accounts, dirtyObj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect) - delete(s.accountsOrigin, dirtyObj.address) // Clear out any previously updated account data (may be recreated via a resurrect) - s.AccountMux.Unlock() - s.StorageMux.Lock() - delete(s.storages, dirtyObj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect) - delete(s.storagesOrigin, dirtyObj.address) // Clear out any previously updated storage data (may be recreated via a resurrect) - s.StorageMux.Unlock() - } - } else { - // addr already in main DB, do merge: balance, KV, code, State(create, suicide) - // can not do copy or ownership transfer directly, since dirtyObj could have outdated - // data(maybe updated within the conflict window) - var newMainObj = mainObj // we don't need to copy the object since the storages are thread safe - if createdOrChanged, ok := slotDb.parallel.addrStateChangesInSlot[addr]; ok { - // there are 4 kinds of state change: - // 1.Suicide - // 2.Empty Delete - // 3.createObject - // a: AddBalance,SetState to a non-exist or deleted(suicide, empty delete) address. - // b: CreateAccount: like DAO the fork, regenerate an account carry its balance without KV - // 4. setState. - // For these state change - if createdOrChanged { - // Need to differentiate the case of createObject and setState, since the mainDB at this moment contains - // the latest update of the object, which cause the object.data.root newer then the dirtyObject. so - // the deepCopy() here can not be used for setState as it introduces issue that the pendingStorage - // may not empty until block validation. so the pendingStorage filled by the execution of previous txs - // in same block may get overwritten by deepCopy here, which causes issue in root calculation. - if _, created := s.parallel.createdObjectRecord[addr]; created { - newMainObj = dirtyObj.deepCopy(s) - } else { - // Merge the dirtyObject with mainObject - if _, balanced := slotDb.parallel.balanceChangesInSlot[addr]; balanced { - newMainObj.dirtyBalance = dirtyObj.dirtyBalance - newMainObj.data.Balance = dirtyObj.data.Balance - } - if _, coded := slotDb.parallel.codeChangesInSlot[addr]; coded { - newMainObj.code = dirtyObj.code - newMainObj.dirtyCodeHash = dirtyObj.dirtyCodeHash - newMainObj.data.CodeHash = dirtyObj.data.CodeHash - newMainObj.dirtyCode = true - } - if keys, stated := slotDb.parallel.kvChangesInSlot[addr]; stated { - newMainObj.MergeSlotObject(s.db, dirtyObj, keys) - } - if _, nonced := slotDb.parallel.nonceChangesInSlot[addr]; nonced { - // dirtyObj.Nonce() should not be less than newMainObj - newMainObj.data.Nonce = dirtyObj.data.Nonce - newMainObj.dirtyNonce = dirtyObj.dirtyNonce - } - newMainObj.deleted = dirtyObj.deleted - } - } else { - // The object is deleted in the TX. - newMainObj = dirtyObj.deepCopy(s) - } - - // All cases with addrStateChange set to true/false can be deleted. so handle it here. - if dirtyObj.deleted { - // remove the addr from snapAccounts&snapStorage only when object is deleted. 
- // "deleted" is not equal to "snapDestructs", since createObject() will add an addr for - // snapDestructs to destroy previous object, while it will keep the addr in snapAccounts & snapAccounts - s.AccountMux.Lock() - delete(s.accounts, dirtyObj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect) - delete(s.accountsOrigin, dirtyObj.address) // Clear out any previously updated account data (may be recreated via a resurrect) - s.AccountMux.Unlock() - s.StorageMux.Lock() - delete(s.storages, dirtyObj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect) - delete(s.storagesOrigin, dirtyObj.address) // Clear out any previously updated storage data (may be recreated via a resurrect) - s.StorageMux.Unlock() - } - } else { - // deepCopy a temporary *stateObject for safety, since slot could read the address, - // dispatch should avoid overwrite the StateObject directly otherwise, it could - // crash for: concurrent map iteration and map write - // As there is dirtyBalance, Nonce and codehash, we keep it to mainObj and leave the merging work - // to "mainObj.finalise()", just in case that newMainObj.delete == true and somewhere potentially - // access the Nonce, balance or codehash later. - if _, balanced := slotDb.parallel.balanceChangesInSlot[addr]; balanced { - newMainObj.dirtyBalance = dirtyObj.dirtyBalance - newMainObj.data.Balance = dirtyObj.data.Balance - } - if _, coded := slotDb.parallel.codeChangesInSlot[addr]; coded { - newMainObj.code = dirtyObj.code - newMainObj.dirtyCodeHash = dirtyObj.dirtyCodeHash - newMainObj.data.CodeHash = dirtyObj.data.CodeHash - newMainObj.dirtyCode = true - } - if keys, stated := slotDb.parallel.kvChangesInSlot[addr]; stated { - newMainObj.MergeSlotObject(s.db, dirtyObj, keys) - } - if _, nonced := slotDb.parallel.nonceChangesInSlot[addr]; nonced { - // dirtyObj.Nonce() should not be less than newMainObj - newMainObj.data.Nonce = dirtyObj.data.Nonce - newMainObj.dirtyNonce = dirtyObj.dirtyNonce - } - newMainObj.deleted = dirtyObj.deleted - } - if !newMainObj.deleted { - newMainObj.finalise(true) // true: prefetch on dispatcher - } - // update the object - s.storeStateObj(addr, newMainObj) - } - addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure - } - - if s.prefetcher != nil && len(addressesToPrefetch) > 0 { - s.trieParallelLock.Lock() - s.prefetcher.prefetch(common.Hash{}, s.originalRoot, emptyAddr, addressesToPrefetch) // prefetch for trie node of account - s.trieParallelLock.Unlock() - } - - for addr := range slotDb.stateObjectsPending { - if _, exist := s.stateObjectsPending[addr]; !exist { - s.stateObjectsPending[addr] = struct{}{} - } - } - - s.stateObjectDestructLock.Lock() - for addr := range slotDb.stateObjectsDestruct { - if acc, exist := s.stateObjectsDestruct[addr]; !exist { - s.stateObjectsDestruct[addr] = acc - } - } - s.stateObjectDestructLock.Unlock() - // slotDb.logs: logs will be kept in receipts, no need to do merge - for hash, preimage := range slotDb.preimages { - s.preimages[hash] = preimage - } - - if s.accessList != nil && slotDb.accessList != nil { - s.accessList.Append(slotDb.accessList) - } - - for k := range slotDb.snapDestructs { - s.snapParallelLock.Lock() - s.snapDestructs[k] = struct{}{} - s.snapParallelLock.Unlock() - } - - s.SetTxContext(slotDb.thash, slotDb.txIndex) - return s -} - -// NewParallelDBManager creates a new ParallelDBManager with the specified number of instance -func 
-// NewParallelDBManager creates a new ParallelDBManager with the specified number of instance
-func NewParallelDBManager(initialCount int, newFunc func() *ParallelStateDB) *ParallelDBManager {
- manager := &ParallelDBManager{
- pool: list.New(),
- mutex: sync.Mutex{},
- newFunc: newFunc,
- }
-
- for i := 0; i < initialCount; i++ {
- manager.pool.PushBack(newFunc())
- }
-
- return manager
-}
-
-// ParallelDBManager manages a pool of ParallelDB instances
-type ParallelDBManager struct {
- pool *list.List
- mutex sync.Mutex
- newFunc func() *ParallelStateDB // Function to create a new ParallelDB instance
-}
-
-// allocate acquires a ParallelStateDB instance from the pool
-// if the pool is empty, directly create a new one.
-func (m *ParallelDBManager) allocate() *ParallelStateDB {
- m.mutex.Lock()
- defer m.mutex.Unlock()
-
- if m.pool.Len() == 0 {
- return m.newFunc()
- }
-
- elem := m.pool.Front()
- m.pool.Remove(elem)
- ret := elem.Value.(*ParallelStateDB)
- return ret
-}
-
-func (m *ParallelDBManager) reclaim(s *ParallelStateDB) {
- m.mutex.Lock()
- defer m.mutex.Unlock()
- m.pool.PushBack(s)
-}
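The removed manager is a plain mutex-guarded free list: allocate pops from the front and falls back to the constructor when the list is drained, while reclaim pushes instances back. A standalone sketch of the same design with a placeholder element type:

// Sketch only: a container/list-backed object pool mirroring the deleted
// ParallelDBManager; slotDB here is a stand-in element type.
package main

import (
	"container/list"
	"fmt"
	"sync"
)

type slotDB struct{ id int }

type poolManager struct {
	mu      sync.Mutex
	free    *list.List
	newFunc func() *slotDB
}

func newPoolManager(initial int, newFunc func() *slotDB) *poolManager {
	m := &poolManager{free: list.New(), newFunc: newFunc}
	for i := 0; i < initial; i++ {
		m.free.PushBack(newFunc())
	}
	return m
}

func (m *poolManager) allocate() *slotDB {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.free.Len() == 0 {
		return m.newFunc() // pool drained: build a fresh instance
	}
	front := m.free.Front()
	m.free.Remove(front)
	return front.Value.(*slotDB)
}

func (m *poolManager) reclaim(db *slotDB) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.free.PushBack(db)
}

func main() {
	next := 0
	m := newPoolManager(1, func() *slotDB { next++; return &slotDB{id: next} })
	a := m.allocate() // served from the pre-built free list
	b := m.allocate() // list empty: the constructor runs again
	fmt.Println(a.id, b.id)
	m.reclaim(a)
}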
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index 0a759021d9..b7eae225ef 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -19,7 +19,6 @@ package state
 import (
 "bytes"
 "encoding/binary"
- "encoding/hex"
 "errors"
 "fmt"
 "math"
@@ -1197,378 +1196,3 @@ func TestDeleteStorage(t *testing.T) {
 t.Fatalf("difference found:\nfast: %v\nslow: %v\n", fastRes, slowRes)
 }
 }
-
-func TestSuicide(t *testing.T) {
- // Create an initial state with a few accounts
- db := rawdb.NewMemoryDatabase()
- state, _ := New(types.EmptyRootHash, NewDatabase(db), nil)
- unconfirmedDBs := new(sync.Map)
-
- state.PrepareForParallel()
- manager := NewParallelDBManager(1, NewEmptySlotDB)
- slotDb := NewSlotDB(state, 0, 0, manager, unconfirmedDBs, false)
-
- addr := common.BytesToAddress([]byte("so"))
- slotDb.SetBalance(addr, uint256.NewInt(1))
-
- slotDb.SelfDestruct(addr)
-
- if _, ok := slotDb.parallel.addrStateChangesInSlot[addr]; !ok {
- t.Fatalf("address should exist in addrStateChangesInSlot")
- }
-
- if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
- t.Fatalf("address should exist in dirtiedStateObjectsInSlot")
- }
-
- hasSuicide := slotDb.HasSelfDestructed(addr)
- if !hasSuicide {
- t.Fatalf("address should be suicided")
- }
-
- if _, ok := slotDb.parallel.addrStateReadsInSlot[addr]; !ok {
- t.Fatalf("address should exist in addrStateReadsInSlot")
- }
-}
-
-func TestSetAndGetState(t *testing.T) {
- memDb := rawdb.NewMemoryDatabase()
- db := NewDatabase(memDb)
- state, _ := New(types.EmptyRootHash, db, nil)
-
- addr := common.BytesToAddress([]byte("so"))
- state.SetBalance(addr, uint256.NewInt(1))
- unconfirmedDBs := new(sync.Map)
- state.PrepareForParallel()
- manager := NewParallelDBManager(1, NewEmptySlotDB)
- slotDb := NewSlotDB(state, 0, 0, manager, unconfirmedDBs, false)
- slotDb.SetState(addr, common.BytesToHash([]byte("test key")), common.BytesToHash([]byte("test store")))
-
- if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
- t.Fatalf("address should exist in dirtiedStateObjectsInSlot")
- }
-
- if _, ok := slotDb.parallel.addrStateChangesInSlot[addr]; !ok {
- t.Fatalf("address should exist in stateChangesInSlot")
- }
-
- oldValueRead := state.GetState(addr, common.BytesToHash([]byte("test key")))
- emptyHash := common.Hash{}
- if oldValueRead != emptyHash {
- t.Fatalf("value read in old state should be empty")
- }
-
- valueRead := slotDb.GetState(addr, common.BytesToHash([]byte("test key")))
- if valueRead != common.BytesToHash([]byte("test store")) {
- t.Fatalf("value read should be equal to the stored value")
- }
-
- if _, ok := slotDb.parallel.addrStateReadsInSlot[addr]; !ok {
- t.Fatalf("address should exist in stateReadsInSlot")
- }
-}
-
-func TestSetAndGetCode(t *testing.T) {
- memDb := rawdb.NewMemoryDatabase()
- db := NewDatabase(memDb)
- state, _ := New(common.Hash{}, db, nil)
-
- addr := common.BytesToAddress([]byte("so"))
- state.SetBalance(addr, uint256.NewInt(1))
- state.PrepareForParallel()
-
- unconfirmedDBs := new(sync.Map)
- manager := NewParallelDBManager(1, NewEmptySlotDB)
- slotDb := NewSlotDB(state, 0, 0, manager, unconfirmedDBs, false)
- if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; ok {
- t.Fatalf("address should not exist in dirtiedStateObjectsInSlot")
- }
-
- slotDb.SetCode(addr, []byte("test code"))
-
- if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
- t.Fatalf("address should exist in dirtiedStateObjectsInSlot")
- }
-
- if _, ok := slotDb.parallel.codeChangesInSlot[addr]; !ok {
- t.Fatalf("address should exist in codeChangesInSlot")
- }
-
- codeRead := slotDb.GetCode(addr)
- if string(codeRead) != "test code" {
- t.Fatalf("code read should be equal to the code stored")
- }
-
- if _, ok := slotDb.parallel.codeReadsInSlot[addr]; !ok {
- t.Fatalf("address should exist in codeReadsInSlot")
- }
-}
-
-func TestGetCodeSize(t *testing.T) {
- memDb := rawdb.NewMemoryDatabase()
- db := NewDatabase(memDb)
- state, _ := New(common.Hash{}, db, nil)
-
- addr := common.BytesToAddress([]byte("so"))
- state.SetBalance(addr, uint256.NewInt(1))
- state.PrepareForParallel()
-
- unconfirmedDBs := new(sync.Map)
- manager := NewParallelDBManager(1, NewEmptySlotDB)
- slotDb := NewSlotDB(state, 0, 0, manager, unconfirmedDBs, false)
- slotDb.SetCode(addr, []byte("test code"))
-
- codeSize := slotDb.GetCodeSize(addr)
- if codeSize != 9 {
- t.Fatalf("code size should be 9")
- }
-
- if _, ok := slotDb.parallel.codeReadsInSlot[addr]; !ok {
- t.Fatalf("address should exist in codeReadsInSlot")
- }
-}
-
-func TestGetCodeHash(t *testing.T) {
- memDb := rawdb.NewMemoryDatabase()
- db := NewDatabase(memDb)
- state, _ := New(common.Hash{}, db, nil)
-
- addr := common.BytesToAddress([]byte("so"))
- state.SetBalance(addr, uint256.NewInt(1))
- state.PrepareForParallel()
- unconfirmedDBs := new(sync.Map)
- manager := NewParallelDBManager(1, NewEmptySlotDB)
- slotDb := NewSlotDB(state, 0, 0, manager, unconfirmedDBs, false)
-
- slotDb.SetCode(addr, []byte("test code"))
-
- codeSize := slotDb.GetCodeHash(addr)
-
- if hex.EncodeToString(codeSize[:]) != "6e73fa02f7828b28608b078b007a4023fb40453c3e102b83828a3609a94d8cbb" {
- t.Fatalf("code hash should be 6e73fa02f7828b28608b078b007a4023fb40453c3e102b83828a3609a94d8cbb")
- }
- if _, ok := slotDb.parallel.codeReadsInSlot[addr]; !ok {
- t.Fatalf("address should exist in codeReadsInSlot")
- }
-}
-
-func TestSetNonce(t *testing.T) {
- memDb := rawdb.NewMemoryDatabase()
- db := NewDatabase(memDb)
- state, _ := New(common.Hash{}, db, nil)
-
- addr := common.BytesToAddress([]byte("so"))
- state.SetBalance(addr, uint256.NewInt(1))
- state.SetNonce(addr, 1)
- state.PrepareForParallel()
-
- unconfirmedDBs := new(sync.Map)
- manager := NewParallelDBManager(1, NewEmptySlotDB)
- slotDb := NewSlotDB(state, 0, 0, manager, unconfirmedDBs, false)
- slotDb.SetNonce(addr, 2)
-
- oldNonce := state.GetNonce(addr)
- if oldNonce != 1 {
- t.Fatalf("old nonce should be 1")
- }
-
- newNonce := slotDb.GetNonce(addr)
- if newNonce != 2 {
- t.Fatalf("new nonce should be 2")
- }
- if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
- t.Fatalf("address should exist in dirtiedStateObjectsInSlot")
- }
-}
should be 2") - } - if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - t.Fatalf("address should exist in dirtiedStateObjectsInSlot") - } -} - -func TestSetAndGetBalance(t *testing.T) { - memDb := rawdb.NewMemoryDatabase() - db := NewDatabase(memDb) - state, _ := New(common.Hash{}, db, nil) - - addr := testAddress - state.SetBalance(addr, uint256.NewInt(1)) - state.PrepareForParallel() - unconfirmedDBs := new(sync.Map) - manager := NewParallelDBManager(1, NewEmptySlotDB) - slotDb := NewSlotDB(state, 0, 0, manager, unconfirmedDBs, false) - - slotDb.SetBalance(addr, uint256.NewInt(2)) - - oldBalance := state.GetBalance(addr) - if oldBalance.Uint64() != 1 { - t.Fatalf("old balance should be 1") - } - - if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - t.Fatalf("address should exist in dirtiedStateObjectsInSlot") - } - - if _, ok := slotDb.parallel.balanceChangesInSlot[addr]; !ok { - t.Fatalf("address should exist in balanceChangesInSlot") - } - - newBalance := slotDb.GetBalance(addr) - if newBalance.Uint64() != 2 { - t.Fatalf("new nonce should be 2") - } - - if _, ok := slotDb.parallel.balanceReadsInSlot[addr]; !ok { - t.Fatalf("address should exist in balanceReadsInSlot") - } -} - -func TestSubBalance(t *testing.T) { - memDb := rawdb.NewMemoryDatabase() - db := NewDatabase(memDb) - state, _ := New(common.Hash{}, db, nil) - addr := testAddress - state.SetBalance(addr, uint256.NewInt(2)) - - state.PrepareForParallel() - unconfirmedDBs := new(sync.Map) - manager := NewParallelDBManager(1, NewEmptySlotDB) - slotDb := NewSlotDB(state, 0, 0, manager, unconfirmedDBs, false) - slotDb.SubBalance(addr, uint256.NewInt(1)) - - oldBalance := state.GetBalance(addr) - if oldBalance.Uint64() != 2 { - t.Fatalf("old balance should be 1") - } - - if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - t.Fatalf("address should exist in dirtiedStateObjectsInSlot") - } - - if _, ok := slotDb.parallel.balanceChangesInSlot[addr]; !ok { - t.Fatalf("address should exist in balanceChangesInSlot") - } - - if _, ok := slotDb.parallel.balanceReadsInSlot[addr]; !ok { - t.Fatalf("address should exist in balanceReadsInSlot") - } - - newBalance := slotDb.GetBalance(addr) - if newBalance.Uint64() != 1 { - t.Fatalf("new nonce should be 2") - } -} - -func TestAddBalance(t *testing.T) { - memDb := rawdb.NewMemoryDatabase() - db := NewDatabase(memDb) - state, _ := New(common.Hash{}, db, nil) - addr := testAddress - state.SetBalance(addr, uint256.NewInt(2)) - state.PrepareForParallel() - unconfirmedDBs := new(sync.Map) - manager := NewParallelDBManager(1, NewEmptySlotDB) - slotDb := NewSlotDB(state, 0, 0, manager, unconfirmedDBs, false) - slotDb.AddBalance(addr, uint256.NewInt(1)) - - oldBalance := state.GetBalance(addr) - if oldBalance.Uint64() != 2 { - t.Fatalf("old balance should be 1") - } - - if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - t.Fatalf("address should exist in dirtiedStateObjectsInSlot") - } - - if _, ok := slotDb.parallel.balanceChangesInSlot[addr]; !ok { - t.Fatalf("address should exist in balanceChangesInSlot") - } - - if _, ok := slotDb.parallel.balanceReadsInSlot[addr]; !ok { - t.Fatalf("address should exist in balanceReadsInSlot") - } - - newBalance := slotDb.GetBalance(addr) - if newBalance.Uint64() != 3 { - t.Fatalf("new nonce should be 2") - } -} - -func TestEmpty(t *testing.T) { - memDb := rawdb.NewMemoryDatabase() - db := NewDatabase(memDb) - state, _ := New(common.Hash{}, db, nil) - addr := testAddress - state.SetBalance(addr, 
- state.PrepareForParallel()
-
- unconfirmedDBs := new(sync.Map)
- manager := NewParallelDBManager(1, NewEmptySlotDB)
- slotDb := NewSlotDB(state, 0, 0, manager, unconfirmedDBs, false)
-
- empty := slotDb.Empty(addr)
- if empty {
- t.Fatalf("address should exist")
- }
-
- if _, ok := slotDb.parallel.addrStateReadsInSlot[addr]; !ok {
- t.Fatalf("address should exist in addrStateReadsInSlot")
- }
-}
-
-func TestExist(t *testing.T) {
- memDb := rawdb.NewMemoryDatabase()
- db := NewDatabase(memDb)
- state, _ := New(common.Hash{}, db, nil)
- addr := testAddress
- state.SetBalance(addr, uint256.NewInt(2))
- state.PrepareForParallel()
- unconfirmedDBs := new(sync.Map)
- manager := NewParallelDBManager(1, NewEmptySlotDB)
- slotDb := NewSlotDB(state, 0, 0, manager, unconfirmedDBs, false)
-
- exist := slotDb.Exist(addr)
- if !exist {
- t.Fatalf("address should exist")
- }
-
- if _, ok := slotDb.parallel.addrStateReadsInSlot[addr]; !ok {
- t.Fatalf("address should exist in addrStateReadsInSlot")
- }
-}
-
-func TestMergeSlotDB(t *testing.T) {
- memDb := rawdb.NewMemoryDatabase()
- db := NewDatabase(memDb)
- state, _ := New(common.Hash{}, db, nil)
- state.PrepareForParallel()
- unconfirmedDBs := new(sync.Map)
- manager := NewParallelDBManager(2, NewEmptySlotDB)
- oldSlotDb := NewSlotDB(state, 0, 0, manager, unconfirmedDBs, false)
-
- newSlotDb := NewSlotDB(state, 0, 0, manager, unconfirmedDBs, false)
-
- addr := testAddress
- newSlotDb.SetBalance(addr, uint256.NewInt(2))
- newSlotDb.SetState(addr, common.BytesToHash([]byte("test key")), common.BytesToHash([]byte("test store")))
- newSlotDb.SetCode(addr, []byte("test code"))
- newSlotDb.SelfDestruct(addr)
- newSlotDb.Finalise(true)
-
- changeList := oldSlotDb.MergeSlotDB(newSlotDb, &types.Receipt{}, 0, nil)
-
- if ok := changeList.getDeletedStateObject(addr); ok == nil || !ok.selfDestructed {
- t.Fatalf("address should exist in StateObjectSuicided")
- }
-
- if ok := changeList.getStateObject(addr); ok != nil {
- t.Fatalf("address should exist in StateChangeSet")
- }
-
- if ok := changeList.GetBalance(addr); ok != common.U2560 {
- t.Fatalf("address should exist in StateChangeSet")
- }
-
- if ok := changeList.GetCode(addr); ok != nil {
- t.Fatalf("address should exist in CodeChangeSet")
- }
-
- if ok := changeList.getStateObject(addr); ok != nil {
- t.Fatalf("address should exist in AddrStateChangeSet")
- }
-}
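The deleted tests repeat one scaffold per state operation; were equivalents ever reintroduced, the balance cases could collapse into a single table-driven test. A sketch against a trivial in-memory stand-in (not the real StateDB or slot-DB API):

// Sketch only: table-driven form of the repeated balance assertions.
package state_test

import "testing"

type fakeState map[string]uint64

func TestBalanceOps(t *testing.T) {
	cases := []struct {
		name string
		op   func(fakeState)
		want uint64
	}{
		{"set", func(s fakeState) { s["addr"] = 2 }, 2},
		{"add", func(s fakeState) { s["addr"] += 1 }, 3},
		{"sub", func(s fakeState) { s["addr"] -= 1 }, 1},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			s := fakeState{"addr": 2} // each case starts from balance 2
			tc.op(s)
			if got := s["addr"]; got != tc.want {
				t.Fatalf("got %d, want %d", got, tc.want)
			}
		})
	}
}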
diff --git a/core/state_processor.go b/core/state_processor.go
index 6cc91be944..df9d788707 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -98,7 +98,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
 ProcessBeaconBlockRoot(*beaconRoot, vmenv, statedb)
 }
 statedb.MarkFullProcessed()
- if p.bc.enableTxDAG && !p.bc.vmConfig.EnableParallelExecLegacy && !p.bc.vmConfig.EnableParallelExec {
+ if p.bc.enableTxDAG && !p.bc.vmConfig.EnableParallelExec {
 statedb.ResetMVStates(len(block.Transactions()))
 }
 // Iterate over and process the individual transactions
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index d8f56ff2a3..2278b81710 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -34,7 +34,6 @@ type Config struct {
 NoBaseFee bool // Forces the EIP-1559 baseFee to 0 (needed for 0 price calls)
 EnablePreimageRecording bool // Enables recording of SHA3/keccak preimages
 ExtraEips []int // Additional EIPS that are to be enabled
- EnableParallelExecLegacy bool // Whether to execute transaction in parallel mode when do full sync
 EnableParallelExec bool // Whether to execute transaction in parallel mode when do full sync
 ParallelTxNum int // Number of slot for transaction execution
 OptimismPrecompileOverrides PrecompileOverrides // Precompile overrides for Optimism
diff --git a/eth/backend.go b/eth/backend.go
index fa536dc985..a022178fa9 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -224,7 +224,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 var (
 vmConfig = vm.Config{
 EnablePreimageRecording: config.EnablePreimageRecording,
- EnableParallelExecLegacy: config.ParallelTxLegacyMode,
 EnableParallelExec: config.ParallelTxMode,
 ParallelTxNum: config.ParallelTxNum,
 EnableOpcodeOptimizations: config.EnableOpcodeOptimizing,
@@ -277,8 +276,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 if config.EnableParallelTxDAG {
 if config.ParallelTxMode {
 eth.blockchain.SetupTxDAGGeneration(config.ParallelTxDAGFile, config.ParallelTxMode)
- } else {
- eth.blockchain.SetupTxDAGGeneration(config.ParallelTxDAGFile, config.ParallelTxLegacyMode)
 }
 }
 if chainConfig := eth.blockchain.Config(); chainConfig.Optimism != nil { // config.Genesis.Config.ChainID cannot be used because it's based on CLI flags only, thus default to mainnet L1
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index 1b6f1f345b..9e1ef6cbab 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -219,7 +219,6 @@ type Config struct {
 RollupDisableTxPoolAdmission bool
 RollupHaltOnIncompatibleProtocolVersion string
- ParallelTxLegacyMode bool // Whether to execute transaction in parallel mode when do full sync
 ParallelTxMode bool // Whether to execute transaction in parallel mode when do full sync
 ParallelTxNum int // Number of slot for transaction execution
 EnableOpcodeOptimizing bool