From a6ce37c45c5b2dde6b19c61e725b964ab78ac45c Mon Sep 17 00:00:00 2001 From: galaio Date: Tue, 30 Jul 2024 21:21:22 +0800 Subject: [PATCH 01/42] txdag: the initial the TxDAG commit; txdag: opt rw record flag; txdag: fix some broken UTs; txdag: opt some logic; --- cmd/geth/main.go | 1 + cmd/utils/flags.go | 10 + core/blockchain.go | 18 +- core/state/journal.go | 2 +- core/state/state_object.go | 84 +++++- core/state/statedb.go | 328 +++++++++++++++----- core/state/statedb_fuzz_test.go | 2 +- core/state_processor.go | 16 + core/state_transition.go | 7 + core/types/dag.go | 455 ++++++++++++++++++++++++++++ core/types/dag_test.go | 174 +++++++++++ core/types/mvstates.go | 517 ++++++++++++++++++++++++++++++++ core/types/mvstates_test.go | 241 +++++++++++++++ core/vm/interface.go | 3 + eth/backend.go | 3 + eth/ethconfig/config.go | 1 + miner/miner.go | 3 +- miner/worker.go | 13 + 18 files changed, 1797 insertions(+), 81 deletions(-) create mode 100644 core/types/dag.go create mode 100644 core/types/dag_test.go create mode 100644 core/types/mvstates.go create mode 100644 core/types/mvstates_test.go diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 2e55edcaf3..b6ccb08433 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -170,6 +170,7 @@ var ( utils.RollupComputePendingBlock, utils.RollupHaltOnIncompatibleProtocolVersionFlag, utils.RollupSuperchainUpgradesFlag, + utils.ParallelTxDAGFlag, configFileFlag, utils.LogDebugFlag, utils.LogBacktraceAtFlag, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index e3a4a42f21..b96207dd2b 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -1099,6 +1099,12 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server. Category: flags.MetricsCategory, } + ParallelTxDAGFlag = &cli.BoolFlag{ + Name: "parallel.txdag", + Usage: "Enable the experimental parallel TxDAG generation (default = false)", + Category: flags.VMCategory, + } + VMOpcodeOptimizeFlag = &cli.BoolFlag{ Name: "vm.opcode.optimize", Usage: "enable opcode optimization", @@ -1989,6 +1995,10 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { cfg.EnablePreimageRecording = ctx.Bool(VMEnableDebugFlag.Name) } + if ctx.IsSet(ParallelTxDAGFlag.Name) { + cfg.EnableParallelTxDAG = ctx.Bool(ParallelTxDAGFlag.Name) + } + if ctx.IsSet(VMOpcodeOptimizeFlag.Name) { cfg.EnableOpcodeOptimizing = ctx.Bool(VMOpcodeOptimizeFlag.Name) if cfg.EnableOpcodeOptimizing { diff --git a/core/blockchain.go b/core/blockchain.go index 511d4db8a9..4e5e5893ce 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -92,6 +92,8 @@ var ( triedbCommitExternalTimer = metrics.NewRegisteredTimer("chain/triedb/commit/external", nil) innerExecutionTimer = metrics.NewRegisteredTimer("chain/inner/execution", nil) + txDAGGenerateTimer = metrics.NewRegisteredTimer("chain/block/txdag/gen", nil) + blockGasUsedGauge = metrics.NewRegisteredGauge("chain/block/gas/used", nil) mgaspsGauge = metrics.NewRegisteredGauge("chain/mgas/ps", nil) @@ -298,6 +300,9 @@ type BlockChain struct { processor Processor // Block transaction processor interface forker *ForkChoice vmConfig vm.Config + + // parallel EVM related + enableTxDAG bool } // NewBlockChain returns a fully initialised block chain using information @@ -1987,8 +1992,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete(in validation) accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete(in 
validation) storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete(in validation) - blockExecutionTimer.Update(ptime) // The time spent on block execution - blockValidationTimer.Update(vtime) // The time spent on block validation + txDAGGenerateTimer.Update(statedb.TxDAGGenerate) + blockExecutionTimer.Update(ptime) // The time spent on block execution + blockValidationTimer.Update(vtime) // The time spent on block validation innerExecutionTimer.Update(DebugInnerExecutionDuration) @@ -2758,3 +2764,11 @@ func createDelFn(bc *BlockChain) func(db ethdb.KeyValueWriter, hash common.Hash, func (bc *BlockChain) HeaderChainForceSetHead(headNumber uint64) { bc.hc.SetHead(headNumber, nil, createDelFn(bc)) } + +func (bc *BlockChain) TxDAGEnabled() bool { + return bc.enableTxDAG +} + +func (bc *BlockChain) SetupTxDAGGeneration() { + bc.enableTxDAG = true +} diff --git a/core/state/journal.go b/core/state/journal.go index 6cdc1fc868..a0e4b2dd53 100644 --- a/core/state/journal.go +++ b/core/state/journal.go @@ -163,7 +163,7 @@ func (ch createObjectChange) dirtied() *common.Address { func (ch resetObjectChange) revert(s *StateDB) { s.setStateObject(ch.prev) if !ch.prevdestruct { - delete(s.stateObjectsDestruct, ch.prev.address) + s.removeStateObjectsDestruct(ch.prev.address) } if ch.prevAccount != nil { s.accounts[ch.prev.addrHash] = ch.prevAccount diff --git a/core/state/state_object.go b/core/state/state_object.go index 8696557845..b432603ffe 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -20,6 +20,7 @@ import ( "bytes" "fmt" "io" + "slices" "sync" "time" @@ -70,6 +71,11 @@ type stateObject struct { origin *types.StateAccount // Account original data without any change applied, nil means it was not existent data types.StateAccount // Account data with all mutations applied in the scope of block + // dirty account state + dirtyBalance *uint256.Int + dirtyNonce *uint64 + dirtyCodeHash []byte + // Write caches. trie Trie // storage trie, which becomes non-nil on first access code Code // contract bytecode, which gets set when code is loaded @@ -96,7 +102,7 @@ type stateObject struct { // empty returns whether the account is considered empty. func (s *stateObject) empty() bool { - return s.data.Nonce == 0 && s.data.Balance.IsZero() && bytes.Equal(s.data.CodeHash, types.EmptyCodeHash.Bytes()) + return s.Nonce() == 0 && s.Balance().IsZero() && bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) } // newObject creates a state object. @@ -108,7 +114,7 @@ func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *s if acct == nil { acct = types.NewEmptyStateAccount() } - return &stateObject{ + s := &stateObject{ db: db, address: address, addrHash: crypto.Keccak256Hash(address[:]), @@ -119,6 +125,15 @@ func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *s dirtyStorage: make(Storage), created: created, } + + // dirty data when create a new account + if created { + s.dirtyBalance = new(uint256.Int).Set(acct.Balance) + s.dirtyNonce = new(uint64) + *s.dirtyNonce = acct.Nonce + s.dirtyCodeHash = acct.CodeHash + } + return s } // EncodeRLP implements rlp.Encoder. @@ -188,7 +203,7 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { // 1) resurrect happened, and new slot values were set -- those should // have been handles via pendingStorage above. 
// 2) we don't have new values, and can deliver empty response back
-	if _, destructed := s.db.stateObjectsDestruct[s.address]; destructed {
+	if _, destructed := s.db.getStateObjectsDestruct(s.address); destructed {
 		return common.Hash{}
 	}
 	// If no live objects are available, attempt to use snapshots
@@ -263,6 +278,19 @@ func (s *stateObject) finalise(prefetch bool) {
 			slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure
 		}
 	}
+
+	if s.dirtyNonce != nil {
+		s.data.Nonce = *s.dirtyNonce
+		s.dirtyNonce = nil
+	}
+	if s.dirtyBalance != nil {
+		s.data.Balance = s.dirtyBalance
+		s.dirtyBalance = nil
+	}
+	if s.dirtyCodeHash != nil {
+		s.data.CodeHash = s.dirtyCodeHash
+		s.dirtyCodeHash = nil
+	}
 	if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash {
 		s.db.prefetcher.prefetch(s.addrHash, s.data.Root, s.address, slotsToPrefetch)
 	}
@@ -271,6 +299,27 @@ func (s *stateObject) finalise(prefetch bool) {
 	}
 }

+func (s *stateObject) finaliseRWSet() {
+	for key, value := range s.dirtyStorage {
+		// there can be unclean dirtyStorage left by previously reverted txs that skipped finalise,
+		// so add a new rule: if the value has not changed, skip it
+		if value == s.GetCommittedState(key) {
+			continue
+		}
+		s.db.RecordWrite(types.StorageStateKey(s.address, key), value)
+	}
+
+	if s.dirtyNonce != nil && *s.dirtyNonce != s.data.Nonce {
+		s.db.RecordWrite(types.AccountStateKey(s.address, types.AccountNonce), *s.dirtyNonce)
+	}
+	if s.dirtyBalance != nil && s.dirtyBalance.Cmp(s.data.Balance) != 0 {
+		s.db.RecordWrite(types.AccountStateKey(s.address, types.AccountBalance), new(uint256.Int).Set(s.dirtyBalance))
+	}
+	if s.dirtyCodeHash != nil && !slices.Equal(s.dirtyCodeHash, s.data.CodeHash) {
+		s.db.RecordWrite(types.AccountStateKey(s.address, types.AccountCodeHash), s.dirtyCodeHash)
+	}
+}
+
 // updateTrie is responsible for persisting cached storage changes into the
 // object's storage trie. In case the storage trie is not yet loaded, this
 // function will load the trie automatically.
If any issues arise during the @@ -463,13 +512,13 @@ func (s *stateObject) SubBalance(amount *uint256.Int) { func (s *stateObject) SetBalance(amount *uint256.Int) { s.db.journal.append(balanceChange{ account: &s.address, - prev: new(uint256.Int).Set(s.data.Balance), + prev: new(uint256.Int).Set(s.Balance()), }) s.setBalance(amount) } func (s *stateObject) setBalance(amount *uint256.Int) { - s.data.Balance = amount + s.dirtyBalance = amount } func (s *stateObject) deepCopy(db *StateDB) *stateObject { @@ -490,6 +539,16 @@ func (s *stateObject) deepCopy(db *StateDB) *stateObject { obj.selfDestructed = s.selfDestructed obj.dirtyCode = s.dirtyCode obj.deleted = s.deleted + if s.dirtyBalance != nil { + obj.dirtyBalance = new(uint256.Int).Set(s.dirtyBalance) + } + if s.dirtyNonce != nil { + obj.dirtyNonce = new(uint64) + *obj.dirtyNonce = *s.dirtyNonce + } + if s.dirtyCodeHash != nil { + obj.dirtyCodeHash = s.dirtyCodeHash + } return obj } @@ -547,7 +606,7 @@ func (s *stateObject) SetCode(codeHash common.Hash, code []byte) { func (s *stateObject) setCode(codeHash common.Hash, code []byte) { s.code = code - s.data.CodeHash = codeHash[:] + s.dirtyCodeHash = codeHash[:] s.dirtyCode = true compiler.GenOrLoadOptimizedCode(codeHash, s.code) } @@ -555,24 +614,33 @@ func (s *stateObject) setCode(codeHash common.Hash, code []byte) { func (s *stateObject) SetNonce(nonce uint64) { s.db.journal.append(nonceChange{ account: &s.address, - prev: s.data.Nonce, + prev: s.Nonce(), }) s.setNonce(nonce) } func (s *stateObject) setNonce(nonce uint64) { - s.data.Nonce = nonce + s.dirtyNonce = &nonce } func (s *stateObject) CodeHash() []byte { + if len(s.dirtyCodeHash) > 0 { + return s.dirtyCodeHash + } return s.data.CodeHash } func (s *stateObject) Balance() *uint256.Int { + if s.dirtyBalance != nil { + return s.dirtyBalance + } return s.data.Balance } func (s *stateObject) Nonce() uint64 { + if s.dirtyNonce != nil { + return *s.dirtyNonce + } return s.data.Nonce } diff --git a/core/state/statedb.go b/core/state/statedb.go index f5464eb23c..83c3fe94c6 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -18,6 +18,7 @@ package state import ( + "errors" "fmt" "runtime" "sort" @@ -90,10 +91,11 @@ type StateDB struct { // This map holds 'live' objects, which will get modified while processing // a state transition. - stateObjects map[common.Address]*stateObject - stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie - stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution - stateObjectsDestruct map[common.Address]*types.StateAccount // State objects destructed in the block along with its previous value + stateObjects map[common.Address]*stateObject + stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie + stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution + stateObjectsDestruct map[common.Address]*types.StateAccount // State objects destructed in the block along with its previous value + stateObjectsDestructDirty map[common.Address]*types.StateAccount // DB error. // State objects are used by the consensus core and VM which are @@ -113,6 +115,11 @@ type StateDB struct { logs map[common.Hash][]*types.Log logSize uint + // parallel EVM related + rwSet *types.RWSet + mvStates *types.MVStates + stat *types.ExeStat + // Preimages occurred seen by VM in the scope of block. 
preimages map[common.Hash][]byte @@ -143,6 +150,7 @@ type StateDB struct { TrieDBCommits time.Duration TrieCommits time.Duration CodeCommits time.Duration + TxDAGGenerate time.Duration AccountUpdated int StorageUpdated int @@ -160,24 +168,25 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) return nil, err } sdb := &StateDB{ - db: db, - trie: tr, - originalRoot: root, - snaps: snaps, - accounts: make(map[common.Hash][]byte), - storages: make(map[common.Hash]map[common.Hash][]byte), - accountsOrigin: make(map[common.Address][]byte), - storagesOrigin: make(map[common.Address]map[common.Hash][]byte), - stateObjects: make(map[common.Address]*stateObject), - stateObjectsPending: make(map[common.Address]struct{}), - stateObjectsDirty: make(map[common.Address]struct{}), - stateObjectsDestruct: make(map[common.Address]*types.StateAccount), - logs: make(map[common.Hash][]*types.Log), - preimages: make(map[common.Hash][]byte), - journal: newJournal(), - accessList: newAccessList(), - transientStorage: newTransientStorage(), - hasher: crypto.NewKeccakState(), + db: db, + trie: tr, + originalRoot: root, + snaps: snaps, + accounts: make(map[common.Hash][]byte), + storages: make(map[common.Hash]map[common.Hash][]byte), + accountsOrigin: make(map[common.Address][]byte), + storagesOrigin: make(map[common.Address]map[common.Hash][]byte), + stateObjects: make(map[common.Address]*stateObject), + stateObjectsPending: make(map[common.Address]struct{}), + stateObjectsDirty: make(map[common.Address]struct{}), + stateObjectsDestruct: make(map[common.Address]*types.StateAccount), + stateObjectsDestructDirty: make(map[common.Address]*types.StateAccount), + logs: make(map[common.Hash][]*types.Log), + preimages: make(map[common.Hash][]byte), + journal: newJournal(), + accessList: newAccessList(), + transientStorage: newTransientStorage(), + hasher: crypto.NewKeccakState(), } if sdb.snaps != nil { sdb.snap = sdb.snaps.Snapshot(root) @@ -189,24 +198,25 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) // NewStateDBByTrie creates a new state db by a given trie. 
func NewStateDBByTrie(tr Trie, db Database, snaps *snapshot.Tree) (*StateDB, error) { sdb := &StateDB{ - db: db, - trie: tr, - originalRoot: tr.Hash(), - snaps: snaps, - accounts: make(map[common.Hash][]byte), - storages: make(map[common.Hash]map[common.Hash][]byte), - accountsOrigin: make(map[common.Address][]byte), - storagesOrigin: make(map[common.Address]map[common.Hash][]byte), - stateObjects: make(map[common.Address]*stateObject), - stateObjectsPending: make(map[common.Address]struct{}), - stateObjectsDirty: make(map[common.Address]struct{}), - stateObjectsDestruct: make(map[common.Address]*types.StateAccount), - logs: make(map[common.Hash][]*types.Log), - preimages: make(map[common.Hash][]byte), - journal: newJournal(), - accessList: newAccessList(), - transientStorage: newTransientStorage(), - hasher: crypto.NewKeccakState(), + db: db, + trie: tr, + originalRoot: tr.Hash(), + snaps: snaps, + accounts: make(map[common.Hash][]byte), + storages: make(map[common.Hash]map[common.Hash][]byte), + accountsOrigin: make(map[common.Address][]byte), + storagesOrigin: make(map[common.Address]map[common.Hash][]byte), + stateObjects: make(map[common.Address]*stateObject), + stateObjectsPending: make(map[common.Address]struct{}), + stateObjectsDirty: make(map[common.Address]struct{}), + stateObjectsDestruct: make(map[common.Address]*types.StateAccount), + stateObjectsDestructDirty: make(map[common.Address]*types.StateAccount), + logs: make(map[common.Hash][]*types.Log), + preimages: make(map[common.Hash][]byte), + journal: newJournal(), + accessList: newAccessList(), + transientStorage: newTransientStorage(), + hasher: crypto.NewKeccakState(), } if sdb.snaps != nil { sdb.snap = sdb.snaps.Snapshot(tr.Hash()) @@ -336,7 +346,10 @@ func (s *StateDB) Empty(addr common.Address) bool { } // GetBalance retrieves the balance from the given address or 0 if object not found -func (s *StateDB) GetBalance(addr common.Address) *uint256.Int { +func (s *StateDB) GetBalance(addr common.Address) (ret *uint256.Int) { + defer func() { + s.RecordRead(types.AccountStateKey(addr, types.AccountBalance), ret) + }() stateObject := s.getStateObject(addr) if stateObject != nil { return stateObject.Balance() @@ -345,7 +358,10 @@ func (s *StateDB) GetBalance(addr common.Address) *uint256.Int { } // GetNonce retrieves the nonce from the given address or 0 if object not found -func (s *StateDB) GetNonce(addr common.Address) uint64 { +func (s *StateDB) GetNonce(addr common.Address) (ret uint64) { + defer func() { + s.RecordRead(types.AccountStateKey(addr, types.AccountNonce), ret) + }() stateObject := s.getStateObject(addr) if stateObject != nil { return stateObject.Nonce() @@ -370,6 +386,9 @@ func (s *StateDB) TxIndex() int { } func (s *StateDB) GetCode(addr common.Address) []byte { + defer func() { + s.RecordRead(types.AccountStateKey(addr, types.AccountCodeHash), s.GetCodeHash(addr)) + }() stateObject := s.getStateObject(addr) if stateObject != nil { return stateObject.Code() @@ -378,6 +397,9 @@ func (s *StateDB) GetCode(addr common.Address) []byte { } func (s *StateDB) GetCodeSize(addr common.Address) int { + defer func() { + s.RecordRead(types.AccountStateKey(addr, types.AccountCodeHash), s.GetCodeHash(addr)) + }() stateObject := s.getStateObject(addr) if stateObject != nil { return stateObject.CodeSize() @@ -385,7 +407,10 @@ func (s *StateDB) GetCodeSize(addr common.Address) int { return 0 } -func (s *StateDB) GetCodeHash(addr common.Address) common.Hash { +func (s *StateDB) GetCodeHash(addr common.Address) (ret 
common.Hash) { + defer func() { + s.RecordRead(types.AccountStateKey(addr, types.AccountCodeHash), ret.Bytes()) + }() stateObject := s.getStateObject(addr) if stateObject != nil { return common.BytesToHash(stateObject.CodeHash()) @@ -394,7 +419,10 @@ func (s *StateDB) GetCodeHash(addr common.Address) common.Hash { } // GetState retrieves a value from the given account's storage trie. -func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { +func (s *StateDB) GetState(addr common.Address, hash common.Hash) (ret common.Hash) { + defer func() { + s.RecordRead(types.StorageStateKey(addr, hash), ret) + }() stateObject := s.getStateObject(addr) if stateObject != nil { return stateObject.GetState(hash) @@ -403,7 +431,10 @@ func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { } // GetCommittedState retrieves a value from the given account's committed storage trie. -func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash { +func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) (ret common.Hash) { + defer func() { + s.RecordRead(types.StorageStateKey(addr, hash), ret) + }() stateObject := s.getStateObject(addr) if stateObject != nil { return stateObject.GetCommittedState(hash) @@ -432,16 +463,22 @@ func (s *StateDB) HasSelfDestructed(addr common.Address) bool { func (s *StateDB) AddBalance(addr common.Address, amount *uint256.Int) { stateObject := s.getOrNewStateObject(addr) if stateObject != nil { + s.RecordRead(types.AccountStateKey(addr, types.AccountBalance), stateObject.Balance()) stateObject.AddBalance(amount) + return } + s.RecordRead(types.AccountStateKey(addr, types.AccountBalance), common.Big0) } // SubBalance subtracts amount from the account associated with addr. func (s *StateDB) SubBalance(addr common.Address, amount *uint256.Int) { stateObject := s.getOrNewStateObject(addr) if stateObject != nil { + s.RecordRead(types.AccountStateKey(addr, types.AccountBalance), stateObject.Balance()) stateObject.SubBalance(amount) + return } + s.RecordRead(types.AccountStateKey(addr, types.AccountBalance), common.Big0) } func (s *StateDB) SetBalance(addr common.Address, amount *uint256.Int) { @@ -484,8 +521,8 @@ func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common // // TODO(rjl493456442) this function should only be supported by 'unwritable' // state and all mutations made should all be discarded afterwards. - if _, ok := s.stateObjectsDestruct[addr]; !ok { - s.stateObjectsDestruct[addr] = nil + if _, ok := s.getStateObjectsDestruct(addr); !ok { + s.setStateObjectsDestruct(addr, nil) } stateObject := s.getOrNewStateObject(addr) for k, v := range storage { @@ -509,7 +546,7 @@ func (s *StateDB) SelfDestruct(addr common.Address) { prevbalance: new(uint256.Int).Set(stateObject.Balance()), }) stateObject.markSelfdestructed() - stateObject.data.Balance = new(uint256.Int) + stateObject.setBalance(new(uint256.Int)) } func (s *StateDB) Selfdestruct6780(addr common.Address) { @@ -621,6 +658,7 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject { // flag set. This is needed by the state journal to revert to the correct s- // destructed object instead of wiping all knowledge about the state object. 
func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject { + s.RecordRead(types.AccountStateKey(addr, types.AccountSelf), struct{}{}) // Prefer live objects if any is available if obj := s.stateObjects[addr]; obj != nil { return obj @@ -698,9 +736,9 @@ func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) // account and storage data should be cleared as well. Note, it must // be done here, otherwise the destruction event of "original account" // will be lost. - _, prevdestruct := s.stateObjectsDestruct[prev.address] + _, prevdestruct := s.getStateObjectsDestruct(prev.address) if !prevdestruct { - s.stateObjectsDestruct[prev.address] = prev.origin + s.setStateObjectsDestruct(prev.address, prev.origin) } // There may be some cached account/storage data already since IntermediateRoot // will be called for each transaction before byzantium fork which will always @@ -741,32 +779,42 @@ func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) func (s *StateDB) CreateAccount(addr common.Address) { newObj, prev := s.createObject(addr) if prev != nil { - newObj.setBalance(prev.data.Balance) + newObj.setBalance(prev.Balance()) } } +// CopyWithMvStates will copy state with MVStates +func (s *StateDB) CopyWithMvStates() *StateDB { + state := s.Copy() + if s.mvStates != nil { + state.mvStates = s.mvStates + } + return state +} + // Copy creates a deep, independent copy of the state. // Snapshots of the copied state cannot be applied to the copy. func (s *StateDB) Copy() *StateDB { // Copy all the basic fields, initialize the memory ones state := &StateDB{ - db: s.db, - trie: s.db.CopyTrie(s.trie), - originalRoot: s.originalRoot, - accounts: make(map[common.Hash][]byte), - storages: make(map[common.Hash]map[common.Hash][]byte), - accountsOrigin: make(map[common.Address][]byte), - storagesOrigin: make(map[common.Address]map[common.Hash][]byte), - stateObjects: make(map[common.Address]*stateObject, len(s.journal.dirties)), - stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)), - stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)), - stateObjectsDestruct: make(map[common.Address]*types.StateAccount, len(s.stateObjectsDestruct)), - refund: s.refund, - logs: make(map[common.Hash][]*types.Log, len(s.logs)), - logSize: s.logSize, - preimages: make(map[common.Hash][]byte, len(s.preimages)), - journal: newJournal(), - hasher: crypto.NewKeccakState(), + db: s.db, + trie: s.db.CopyTrie(s.trie), + originalRoot: s.originalRoot, + accounts: make(map[common.Hash][]byte), + storages: make(map[common.Hash]map[common.Hash][]byte), + accountsOrigin: make(map[common.Address][]byte), + storagesOrigin: make(map[common.Address]map[common.Hash][]byte), + stateObjects: make(map[common.Address]*stateObject, len(s.journal.dirties)), + stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)), + stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)), + stateObjectsDestruct: make(map[common.Address]*types.StateAccount, len(s.stateObjectsDestruct)), + stateObjectsDestructDirty: make(map[common.Address]*types.StateAccount, len(s.stateObjectsDestructDirty)), + refund: s.refund, + logs: make(map[common.Hash][]*types.Log, len(s.logs)), + logSize: s.logSize, + preimages: make(map[common.Hash][]byte, len(s.preimages)), + journal: newJournal(), + hasher: crypto.NewKeccakState(), // In order for the block producer to be able to use and make additions // to the snapshot tree, 
we need to copy that as well. Otherwise, any @@ -811,6 +859,9 @@ func (s *StateDB) Copy() *StateDB { for addr, value := range s.stateObjectsDestruct { state.stateObjectsDestruct[addr] = value } + for addr, value := range s.stateObjectsDestructDirty { + state.stateObjectsDestructDirty[addr] = value + } // Deep copy the state changes made in the scope of block // along with their original values. state.accounts = copySet(s.accounts) @@ -883,6 +934,12 @@ func (s *StateDB) GetRefund() uint64 { // into the tries just yet. Only IntermediateRoot or Commit will do that. func (s *StateDB) Finalise(deleteEmptyObjects bool) { addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties)) + + // finalise stateObjectsDestruct + for addr, acc := range s.stateObjectsDestructDirty { + s.stateObjectsDestruct[addr] = acc + } + s.stateObjectsDestructDirty = make(map[common.Address]*types.StateAccount) for addr := range s.journal.dirties { obj, exist := s.stateObjects[addr] if !exist { @@ -1632,6 +1689,141 @@ func (s *StateDB) GetSnap() snapshot.Snapshot { return s.snap } +func (s *StateDB) BeforeTxTransition() { + log.Debug("BeforeTxTransition", "mvStates", s.mvStates == nil, "rwSet", s.rwSet == nil) + if s.mvStates == nil { + return + } + s.rwSet = types.NewRWSet(types.StateVersion{ + TxIndex: s.txIndex, + }) +} + +func (s *StateDB) BeginTxStat(index int) { + if s.mvStates == nil { + return + } + if metrics.EnabledExpensive { + s.stat = types.NewExeStat(index).Begin() + } +} + +func (s *StateDB) StopTxStat(usedGas uint64) { + if s.mvStates == nil { + return + } + // record stat first + if metrics.EnabledExpensive && s.stat != nil { + s.stat.Done().WithGas(usedGas).WithRead(len(s.rwSet.ReadSet())) + } +} + +func (s *StateDB) RecordRead(key types.RWKey, val interface{}) { + if s.rwSet == nil || s.rwSet.RWRecordDone() { + return + } + s.rwSet.RecordRead(key, types.StateVersion{ + TxIndex: -1, + }, val) +} + +func (s *StateDB) RecordWrite(key types.RWKey, val interface{}) { + if s.rwSet == nil || s.rwSet.RWRecordDone() { + return + } + s.rwSet.RecordWrite(key, val) +} + +func (s *StateDB) ResetMVStates(txCount int) { + s.mvStates = types.NewMVStates(txCount) + s.rwSet = nil +} + +func (s *StateDB) FinaliseRWSet() error { + if s.rwSet == nil || s.rwSet.RWRecordDone() { + return nil + } + if metrics.EnabledExpensive { + defer func(start time.Time) { + s.TxDAGGenerate += time.Since(start) + }(time.Now()) + } + ver := types.StateVersion{ + TxIndex: s.txIndex, + } + if ver != s.rwSet.Version() { + return errors.New("you finalize a wrong ver of RWSet") + } + + // finalise stateObjectsDestruct + for addr := range s.stateObjectsDestructDirty { + s.RecordWrite(types.AccountStateKey(addr, types.AccountSuicide), struct{}{}) + } + for addr := range s.journal.dirties { + obj, exist := s.stateObjects[addr] + if !exist { + continue + } + if obj.selfDestructed || obj.empty() { + // We need to maintain account deletions explicitly (will remain + // set indefinitely). Note only the first occurred self-destruct + // event is tracked. 
+ if _, ok := s.stateObjectsDestruct[obj.address]; !ok { + log.Debug("FinaliseRWSet find Destruct", "tx", s.txIndex, "addr", addr, "selfDestructed", obj.selfDestructed) + s.RecordWrite(types.AccountStateKey(addr, types.AccountSuicide), struct{}{}) + } + } else { + // finalise account & storages + obj.finaliseRWSet() + } + } + + s.rwSet.SetRWRecordDone() + return s.mvStates.FulfillRWSet(s.rwSet, s.stat) +} + +func (s *StateDB) getStateObjectsDestruct(addr common.Address) (*types.StateAccount, bool) { + if acc, ok := s.stateObjectsDestructDirty[addr]; ok { + return acc, ok + } + acc, ok := s.stateObjectsDestruct[addr] + return acc, ok +} + +func (s *StateDB) setStateObjectsDestruct(addr common.Address, acc *types.StateAccount) { + s.stateObjectsDestructDirty[addr] = acc +} + +func (s *StateDB) removeStateObjectsDestruct(addr common.Address) { + delete(s.stateObjectsDestructDirty, addr) +} + +func (s *StateDB) ResolveTxDAG(gasFeeReceivers []common.Address) (types.TxDAG, map[int]*types.ExeStat) { + if s.mvStates == nil { + return types.NewEmptyTxDAG(), nil + } + if metrics.EnabledExpensive { + defer func(start time.Time) { + s.TxDAGGenerate += time.Since(start) + }(time.Now()) + } + + return s.mvStates.ResolveTxDAG(gasFeeReceivers), s.mvStates.Stats() +} + +func (s *StateDB) MVStates() *types.MVStates { + return s.mvStates +} + +func (s *StateDB) RecordSystemTxRWSet(index int) { + if s.mvStates == nil { + return + } + s.mvStates.FulfillRWSet(types.NewRWSet(types.StateVersion{ + TxIndex: index, + }).WithSerialFlag(), types.NewExeStat(index).WithSerialFlag()) +} + // copySet returns a deep-copied set. func copySet[k comparable](set map[k][]byte) map[k][]byte { copied := make(map[k][]byte, len(set)) diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go index b416bcf1f3..fd02f4c0cc 100644 --- a/core/state/statedb_fuzz_test.go +++ b/core/state/statedb_fuzz_test.go @@ -265,7 +265,7 @@ func (test *stateTest) verifyAccountCreation(next common.Hash, db *triedb.Databa return err } if len(oBlob) != 0 { - return fmt.Errorf("unexpected account in old trie, %x", addrHash) + return fmt.Errorf("unexpected account in old trie, %v", addr) } if len(nBlob) == 0 { return fmt.Errorf("missing account in new trie, %x", addrHash) diff --git a/core/state_processor.go b/core/state_processor.go index c9df98536c..9e300b5ddc 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -22,6 +22,8 @@ import ( "math/big" "time" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/misc" @@ -90,8 +92,12 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg ProcessBeaconBlockRoot(*beaconRoot, vmenv, statedb) } statedb.MarkFullProcessed() + if p.bc.enableTxDAG { + statedb.ResetMVStates(len(block.Transactions())) + } // Iterate over and process the individual transactions for i, tx := range block.Transactions() { + statedb.BeginTxStat(i) start := time.Now() msg, err := TransactionToMessage(tx, signer, header.BaseFee) if err != nil { @@ -108,6 +114,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg if metrics.EnabledExpensive { processTxTimer.UpdateSince(start) } + statedb.StopTxStat(receipt.GasUsed) } // Fail if Shanghai not enabled and len(withdrawals) is non-zero. 
withdrawals := block.Withdrawals() @@ -117,6 +124,15 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg // Finalize the block, applying any consensus engine specific extras (e.g. block rewards) p.engine.Finalize(p.bc, header, statedb, block.Transactions(), block.Uncles(), withdrawals) + if p.bc.enableTxDAG { + // compare input TxDAG when it enable in consensus + dag, extraStats := statedb.ResolveTxDAG([]common.Address{context.Coinbase, params.OptimismBaseFeeRecipient, params.OptimismL1FeeRecipient}) + // TODO(galaio): check TxDAG correctness? + log.Debug("Process TxDAG result", "block", block.NumberU64(), "txDAG", dag) + if metrics.EnabledExpensive { + types.EvaluateTxDAGPerformance(dag, extraStats) + } + } return receipts, allLogs, *usedGas, nil } diff --git a/core/state_transition.go b/core/state_transition.go index a23a26468e..0814c85256 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -443,6 +443,8 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { } func (st *StateTransition) innerTransitionDb() (*ExecutionResult, error) { + // start record rw set in here + st.state.BeforeTxTransition() // First check this message satisfies all consensus rules before // applying the message. The rules include these clauses // @@ -534,6 +536,11 @@ func (st *StateTransition) innerTransitionDb() (*ExecutionResult, error) { ReturnData: ret, }, nil } + // stop record rw set in here, skip gas fee distribution + if err := st.state.FinaliseRWSet(); err != nil { + return nil, err + } + // Note for deposit tx there is no ETH refunded for unused gas, but that's taken care of by the fact that gasPrice // is always 0 for deposit tx. So calling refundGas will ensure the gasUsed accounting is correct without actually // changing the sender's balance diff --git a/core/types/dag.go b/core/types/dag.go new file mode 100644 index 0000000000..801ec476a2 --- /dev/null +++ b/core/types/dag.go @@ -0,0 +1,455 @@ +package types + +import ( + "bytes" + "errors" + "fmt" + "strings" + "time" + + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/rlp" + "golang.org/x/exp/slices" +) + +// TxDAGType Used to extend TxDAG and customize a new DAG structure +const ( + EmptyTxDAGType byte = iota + PlainTxDAGType +) + +type TxDAG interface { + // Type return TxDAG type + Type() byte + + // Inner return inner instance + Inner() interface{} + + // DelayGasDistribution check if delay the distribution of GasFee + DelayGasDistribution() bool + + // TxDep query TxDeps from TxDAG + TxDep(int) TxDep + + // TxCount return tx count + TxCount() int + + // SetTxDep at the last one + SetTxDep(int, TxDep) error +} + +func EncodeTxDAG(dag TxDAG) ([]byte, error) { + if dag == nil { + return nil, errors.New("input nil TxDAG") + } + var buf bytes.Buffer + buf.WriteByte(dag.Type()) + if err := rlp.Encode(&buf, dag.Inner()); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func DecodeTxDAG(enc []byte) (TxDAG, error) { + if len(enc) <= 1 { + return nil, errors.New("too short TxDAG bytes") + } + + switch enc[0] { + case EmptyTxDAGType: + return NewEmptyTxDAG(), nil + case PlainTxDAGType: + dag := new(PlainTxDAG) + if err := rlp.DecodeBytes(enc[1:], dag); err != nil { + return nil, err + } + return dag, nil + default: + return nil, errors.New("unsupported TxDAG bytes") + } +} + +// EmptyTxDAG indicate that execute txs in sequence +// It means no transactions or need timely distribute transaction fees +// it only keep partial serial 
execution when tx cannot delay the distribution or just execute txs in sequence +type EmptyTxDAG struct { +} + +func NewEmptyTxDAG() TxDAG { + return &EmptyTxDAG{} +} + +func (d *EmptyTxDAG) Type() byte { + return EmptyTxDAGType +} + +func (d *EmptyTxDAG) Inner() interface{} { + return d +} + +func (d *EmptyTxDAG) DelayGasDistribution() bool { + return false +} + +func (d *EmptyTxDAG) TxDep(int) TxDep { + return TxDep{ + Relation: 1, + TxIndexes: nil, + } +} + +func (d *EmptyTxDAG) TxCount() int { + return 0 +} + +func (d *EmptyTxDAG) SetTxDep(int, TxDep) error { + return nil +} + +func (d *EmptyTxDAG) String() string { + return "None" +} + +// PlainTxDAG indicate how to use the dependency of txs, and delay the distribution of GasFee +type PlainTxDAG struct { + // Tx Dependency List, the list index is equal to TxIndex + TxDeps []TxDep +} + +func (d *PlainTxDAG) Type() byte { + return PlainTxDAGType +} + +func (d *PlainTxDAG) Inner() interface{} { + return d +} + +func (d *PlainTxDAG) DelayGasDistribution() bool { + return true +} + +func (d *PlainTxDAG) TxDep(i int) TxDep { + return d.TxDeps[i] +} + +func (d *PlainTxDAG) TxCount() int { + return len(d.TxDeps) +} + +func (d *PlainTxDAG) SetTxDep(i int, dep TxDep) error { + if i < 0 || i > len(d.TxDeps) { + return fmt.Errorf("SetTxDep with wrong index: %d", i) + } + if i < len(d.TxDeps) { + d.TxDeps[i] = dep + return nil + } + d.TxDeps = append(d.TxDeps, dep) + return nil +} + +func NewPlainTxDAG(txLen int) *PlainTxDAG { + return &PlainTxDAG{ + TxDeps: make([]TxDep, txLen), + } +} + +func (d *PlainTxDAG) String() string { + builder := strings.Builder{} + exePaths := travelTxDAGExecutionPaths(d) + for _, path := range exePaths { + builder.WriteString(fmt.Sprintf("%v\n", path)) + } + return builder.String() +} + +func (d *PlainTxDAG) Size() int { + enc, err := EncodeTxDAG(d) + if err != nil { + return 0 + } + return len(enc) +} + +// MergeTxDAGExecutionPaths will merge duplicate tx path for scheduling parallel. +// Any tx cannot exist in >= 2 paths. 
+func MergeTxDAGExecutionPaths(d TxDAG) [][]uint64 { + mergeMap := make(map[uint64][]uint64, d.TxCount()) + txMap := make(map[uint64]uint64, d.TxCount()) + for i := d.TxCount() - 1; i >= 0; i-- { + index, merge := uint64(i), uint64(i) + deps := d.TxDep(i).TxIndexes + if oldIdx, exist := findTxPathIndex(deps, index, txMap); exist { + merge = oldIdx + } + for _, tx := range deps { + txMap[tx] = merge + } + txMap[index] = merge + } + + // result by index order + for f, t := range txMap { + if mergeMap[t] == nil { + mergeMap[t] = make([]uint64, 0) + } + mergeMap[t] = append(mergeMap[t], f) + } + mergePaths := make([][]uint64, 0, len(mergeMap)) + for i := 0; i < d.TxCount(); i++ { + path, ok := mergeMap[uint64(i)] + if !ok { + continue + } + slices.Sort(path) + mergePaths = append(mergePaths, path) + } + + return mergePaths +} + +func findTxPathIndex(path []uint64, cur uint64, txMap map[uint64]uint64) (uint64, bool) { + if old, ok := txMap[cur]; ok { + return old, true + } + + for _, index := range path { + if old, ok := txMap[index]; ok { + return old, true + } + } + + return 0, false +} + +// travelTxDAGExecutionPaths will print all tx execution path +func travelTxDAGExecutionPaths(d TxDAG) [][]uint64 { + txCount := d.TxCount() + deps := make([]TxDep, txCount) + for i := 0; i < txCount; i++ { + dep := d.TxDep(i) + if dep.Relation == 0 { + deps[i] = dep + continue + } + // recover to relation 0 + for j := 0; j < i; j++ { + if !dep.Exist(j) { + deps[i].AppendDep(j) + } + } + } + + exePaths := make([][]uint64, 0) + // travel tx deps with BFS + for i := uint64(0); i < uint64(txCount); i++ { + exePaths = append(exePaths, travelTxDAGTargetPath(deps, i)) + } + return exePaths +} + +// TxDep store the current tx dependency relation with other txs +type TxDep struct { + // It describes the Relation with below txs + // 0: this tx depends on below txs + // 1: this transaction does not depend on below txs, all other previous txs depend on + Relation uint8 + TxIndexes []uint64 +} + +func (d *TxDep) AppendDep(i int) { + d.TxIndexes = append(d.TxIndexes, uint64(i)) +} + +func (d *TxDep) Exist(i int) bool { + for _, index := range d.TxIndexes { + if index == uint64(i) { + return true + } + } + + return false +} + +func (d *TxDep) Count() int { + return len(d.TxIndexes) +} + +func (d *TxDep) Last() int { + if d.Count() == 0 { + return -1 + } + return int(d.TxIndexes[len(d.TxIndexes)-1]) +} + +var ( + longestTimeTimer = metrics.NewRegisteredTimer("dag/longesttime", nil) + longestGasTimer = metrics.NewRegisteredTimer("dag/longestgas", nil) + serialTimeTimer = metrics.NewRegisteredTimer("dag/serialtime", nil) + totalTxMeter = metrics.NewRegisteredMeter("dag/txcnt", nil) + totalNoDepMeter = metrics.NewRegisteredMeter("dag/nodepcnt", nil) + total2DepMeter = metrics.NewRegisteredMeter("dag/2depcnt", nil) + total4DepMeter = metrics.NewRegisteredMeter("dag/4depcnt", nil) + total8DepMeter = metrics.NewRegisteredMeter("dag/8depcnt", nil) + total16DepMeter = metrics.NewRegisteredMeter("dag/16depcnt", nil) + total32DepMeter = metrics.NewRegisteredMeter("dag/32depcnt", nil) +) + +func EvaluateTxDAGPerformance(dag TxDAG, stats map[int]*ExeStat) { + if len(stats) != dag.TxCount() || dag.TxCount() == 0 { + return + } + paths := travelTxDAGExecutionPaths(dag) + // Attention: this is based on best schedule, it will reduce a lot by executing previous txs in parallel + // It assumes that there is no parallel thread limit + txCount := dag.TxCount() + var ( + maxGasIndex int + maxGas uint64 + maxTimeIndex int + maxTime 
time.Duration + txTimes = make([]time.Duration, txCount) + txGases = make([]uint64, txCount) + txReads = make([]int, txCount) + noDepCnt int + ) + + totalTxMeter.Mark(int64(txCount)) + for i, path := range paths { + if stats[i].mustSerial { + continue + } + if len(path) <= 1 { + noDepCnt++ + totalNoDepMeter.Mark(1) + } + if len(path) <= 3 { + total2DepMeter.Mark(1) + } + if len(path) <= 5 { + total4DepMeter.Mark(1) + } + if len(path) <= 9 { + total8DepMeter.Mark(1) + } + if len(path) <= 17 { + total16DepMeter.Mark(1) + } + if len(path) <= 33 { + total32DepMeter.Mark(1) + } + + // find the biggest cost time from dependency txs + for j := 0; j < len(path)-1; j++ { + prev := path[j] + if txTimes[prev] > txTimes[i] { + txTimes[i] = txTimes[prev] + } + if txGases[prev] > txGases[i] { + txGases[i] = txGases[prev] + } + if txReads[prev] > txReads[i] { + txReads[i] = txReads[prev] + } + } + txTimes[i] += stats[i].costTime + txGases[i] += stats[i].usedGas + txReads[i] += stats[i].readCount + + // try to find max gas + if txGases[i] > maxGas { + maxGas = txGases[i] + maxGasIndex = i + } + if txTimes[i] > maxTime { + maxTime = txTimes[i] + maxTimeIndex = i + } + } + + longestTimeTimer.Update(txTimes[maxTimeIndex]) + longestGasTimer.Update(txTimes[maxGasIndex]) + // serial path + var ( + sTime time.Duration + sGas uint64 + sRead int + sPath []int + ) + for i, stat := range stats { + if stat.mustSerial { + continue + } + sPath = append(sPath, i) + sTime += stat.costTime + sGas += stat.usedGas + sRead += stat.readCount + } + serialTimeTimer.Update(sTime) +} + +// travelTxDAGTargetPath will print target execution path +func travelTxDAGTargetPath(deps []TxDep, from uint64) []uint64 { + queue := make([]uint64, 0, len(deps)) + path := make([]uint64, 0, len(deps)) + + queue = append(queue, from) + path = append(path, from) + for len(queue) > 0 { + next := make([]uint64, 0, len(deps)) + for _, i := range queue { + for _, dep := range deps[i].TxIndexes { + if !slices.Contains(path, dep) { + path = append(path, dep) + next = append(next, dep) + } + } + } + queue = next + } + slices.Sort(path) + return path +} + +// ExeStat records tx execution info +type ExeStat struct { + txIndex int + usedGas uint64 + readCount int + startTime time.Time + costTime time.Duration + + // some flags + mustSerial bool +} + +func NewExeStat(txIndex int) *ExeStat { + return &ExeStat{ + txIndex: txIndex, + } +} + +func (s *ExeStat) Begin() *ExeStat { + s.startTime = time.Now() + return s +} + +func (s *ExeStat) Done() *ExeStat { + s.costTime = time.Since(s.startTime) + return s +} + +func (s *ExeStat) WithSerialFlag() *ExeStat { + s.mustSerial = true + return s +} + +func (s *ExeStat) WithGas(gas uint64) *ExeStat { + s.usedGas = gas + return s +} + +func (s *ExeStat) WithRead(rc int) *ExeStat { + s.readCount = rc + return s +} diff --git a/core/types/dag_test.go b/core/types/dag_test.go new file mode 100644 index 0000000000..1c0b334a8d --- /dev/null +++ b/core/types/dag_test.go @@ -0,0 +1,174 @@ +package types + +import ( + "testing" + "time" + + "github.com/cometbft/cometbft/libs/rand" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +var ( + mockAddr = common.HexToAddress("0x482bA86399ab6Dcbe54071f8d22258688B4509b1") + mockHash = common.HexToHash("0xdc13f8d7bdb8ec4de02cd4a50a1aa2ab73ec8814e0cdb550341623be3dd8ab7a") +) + +func TestTxDAG(t *testing.T) { + dag := mockSimpleDAG() + require.NoError(t, dag.SetTxDep(9, TxDep{ + Relation: 1, + TxIndexes: nil, + })) + require.NoError(t, 
dag.SetTxDep(10, TxDep{ + Relation: 1, + TxIndexes: nil, + })) + require.Error(t, dag.SetTxDep(12, TxDep{ + Relation: 1, + TxIndexes: nil, + })) + dag = NewEmptyTxDAG() + require.NoError(t, dag.SetTxDep(0, TxDep{ + Relation: 1, + TxIndexes: nil, + })) + require.NoError(t, dag.SetTxDep(11, TxDep{ + Relation: 1, + TxIndexes: nil, + })) +} + +func TestTxDAG_SetTxDep(t *testing.T) { + dag := mockSimpleDAG() + t.Log(dag) + dag = mockSystemTxDAG() + t.Log(dag) +} + +func TestEvaluateTxDAG(t *testing.T) { + dag := mockSystemTxDAG() + stats := make(map[int]*ExeStat, dag.TxCount()) + for i := 0; i < dag.TxCount(); i++ { + stats[i] = NewExeStat(i).WithGas(uint64(i)).WithRead(i) + stats[i].costTime = time.Duration(i) + if dag.TxDep(i).Relation == 1 { + stats[i].WithSerialFlag() + } + } + EvaluateTxDAGPerformance(dag, stats) +} + +func TestMergeTxDAGExecutionPaths_Simple(t *testing.T) { + paths := MergeTxDAGExecutionPaths(mockSimpleDAG()) + require.Equal(t, [][]uint64{ + {0, 3, 4}, + {1, 2, 5, 6, 7}, + {8, 9}, + }, paths) +} + +func TestMergeTxDAGExecutionPaths_Random(t *testing.T) { + dag := mockRandomDAG(10000) + paths := MergeTxDAGExecutionPaths(dag) + txMap := make(map[uint64]uint64, dag.TxCount()) + for _, path := range paths { + for _, index := range path { + old, ok := txMap[index] + require.False(t, ok, index, path, old) + txMap[index] = path[0] + } + } + require.Equal(t, dag.TxCount(), len(txMap)) +} + +func BenchmarkMergeTxDAGExecutionPaths(b *testing.B) { + dag := mockRandomDAG(100000) + for i := 0; i < b.N; i++ { + MergeTxDAGExecutionPaths(dag) + } +} + +func mockSimpleDAG() TxDAG { + dag := NewPlainTxDAG(10) + dag.TxDeps[0].TxIndexes = []uint64{} + dag.TxDeps[1].TxIndexes = []uint64{} + dag.TxDeps[2].TxIndexes = []uint64{} + dag.TxDeps[3].TxIndexes = []uint64{0} + dag.TxDeps[4].TxIndexes = []uint64{0} + dag.TxDeps[5].TxIndexes = []uint64{1, 2} + dag.TxDeps[6].TxIndexes = []uint64{2, 5} + dag.TxDeps[7].TxIndexes = []uint64{6} + dag.TxDeps[8].TxIndexes = []uint64{} + dag.TxDeps[9].TxIndexes = []uint64{8} + return dag +} + +func mockRandomDAG(txLen int) TxDAG { + dag := NewPlainTxDAG(txLen) + for i := 0; i < txLen; i++ { + var deps []uint64 + if i == 0 || rand.Bool() { + dag.TxDeps[i].TxIndexes = deps + continue + } + depCnt := rand.Int()%i + 1 + for j := 0; j < depCnt; j++ { + var dep uint64 + if j > 0 && deps[j-1]+1 == uint64(i) { + break + } + if j > 0 { + dep = uint64(rand.Int())%(uint64(i)-deps[j-1]-1) + deps[j-1] + 1 + } else { + dep = uint64(rand.Int() % i) + } + deps = append(deps, dep) + } + dag.TxDeps[i].TxIndexes = deps + } + return dag +} + +func mockSystemTxDAG() TxDAG { + dag := NewPlainTxDAG(12) + dag.TxDeps[0].TxIndexes = []uint64{} + dag.TxDeps[1].TxIndexes = []uint64{} + dag.TxDeps[2].TxIndexes = []uint64{} + dag.TxDeps[3].TxIndexes = []uint64{0} + dag.TxDeps[4].TxIndexes = []uint64{0} + dag.TxDeps[5].TxIndexes = []uint64{1, 2} + dag.TxDeps[6].TxIndexes = []uint64{2, 5} + dag.TxDeps[7].TxIndexes = []uint64{6} + dag.TxDeps[8].TxIndexes = []uint64{} + dag.TxDeps[9].TxIndexes = []uint64{8} + dag.TxDeps[10] = TxDep{ + Relation: 1, + TxIndexes: []uint64{}, + } + dag.TxDeps[11] = TxDep{ + Relation: 1, + TxIndexes: []uint64{}, + } + return dag +} + +func TestTxDAG_Encode_Decode(t *testing.T) { + expected := TxDAG(&EmptyTxDAG{}) + enc, err := EncodeTxDAG(expected) + require.NoError(t, err) + actual, err := DecodeTxDAG(enc) + require.NoError(t, err) + require.Equal(t, expected, actual) + + expected = mockSimpleDAG() + enc, err = EncodeTxDAG(expected) + require.NoError(t, err) + 
actual, err = DecodeTxDAG(enc) + require.NoError(t, err) + require.Equal(t, expected, actual) + enc[0] = 2 + _, err = DecodeTxDAG(enc) + require.Error(t, err) +} diff --git a/core/types/mvstates.go b/core/types/mvstates.go new file mode 100644 index 0000000000..85d3886470 --- /dev/null +++ b/core/types/mvstates.go @@ -0,0 +1,517 @@ +package types + +import ( + "encoding/hex" + "errors" + "fmt" + "strings" + "sync" + + "github.com/ethereum/go-ethereum/metrics" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/holiman/uint256" + "golang.org/x/exp/slices" +) + +const ( + AccountStatePrefix = 'a' + StorageStatePrefix = 's' +) + +type RWKey [1 + common.AddressLength + common.HashLength]byte + +type AccountState byte + +const ( + AccountSelf AccountState = iota + AccountNonce + AccountBalance + AccountCodeHash + AccountSuicide +) + +func AccountStateKey(account common.Address, state AccountState) RWKey { + var key RWKey + key[0] = AccountStatePrefix + copy(key[1:], account.Bytes()) + key[1+common.AddressLength] = byte(state) + return key +} + +func StorageStateKey(account common.Address, state common.Hash) RWKey { + var key RWKey + key[0] = StorageStatePrefix + copy(key[1:], account.Bytes()) + copy(key[1+common.AddressLength:], state.Bytes()) + return key +} + +func (key *RWKey) IsAccountState() (bool, AccountState) { + return AccountStatePrefix == key[0], AccountState(key[1+common.AddressLength]) +} + +func (key *RWKey) IsAccountSelf() bool { + ok, s := key.IsAccountState() + if !ok { + return false + } + return s == AccountSelf +} + +func (key *RWKey) IsAccountSuicide() bool { + ok, s := key.IsAccountState() + if !ok { + return false + } + return s == AccountSuicide +} + +func (key *RWKey) ToAccountSelf() RWKey { + return AccountStateKey(key.Addr(), AccountSelf) +} + +func (key *RWKey) IsStorageState() bool { + return StorageStatePrefix == key[0] +} + +func (key *RWKey) String() string { + return hex.EncodeToString(key[:]) +} + +func (key *RWKey) Addr() common.Address { + return common.BytesToAddress(key[1 : 1+common.AddressLength]) +} + +// StateVersion record specific TxIndex & TxIncarnation +// if TxIndex equals to -1, it means the state read from DB. 
+type StateVersion struct { + TxIndex int + // Tx incarnation used for multi ver state + TxIncarnation int +} + +// RWSet record all read & write set in txs +// Attention: this is not a concurrent safety structure +type RWSet struct { + ver StateVersion + readSet map[RWKey]*RWItem + writeSet map[RWKey]*RWItem + + rwRecordDone bool + mustSerial bool +} + +func NewRWSet(ver StateVersion) *RWSet { + return &RWSet{ + ver: ver, + readSet: make(map[RWKey]*RWItem), + writeSet: make(map[RWKey]*RWItem), + } +} + +func (s *RWSet) RecordRead(key RWKey, ver StateVersion, val interface{}) { + // only record the first read version + if _, exist := s.readSet[key]; exist { + return + } + s.readSet[key] = &RWItem{ + Ver: ver, + Val: val, + } +} + +func (s *RWSet) RecordWrite(key RWKey, val interface{}) { + wr, exist := s.writeSet[key] + if !exist { + s.writeSet[key] = &RWItem{ + Ver: s.ver, + Val: val, + } + return + } + wr.Val = val +} + +func (s *RWSet) Version() StateVersion { + return s.ver +} + +func (s *RWSet) ReadSet() map[RWKey]*RWItem { + return s.readSet +} + +func (s *RWSet) WriteSet() map[RWKey]*RWItem { + return s.writeSet +} + +func (s *RWSet) WithSerialFlag() *RWSet { + s.mustSerial = true + return s +} + +func (s *RWSet) RWRecordDone() bool { + return s.rwRecordDone +} + +func (s *RWSet) SetRWRecordDone() { + s.rwRecordDone = true +} + +func (s *RWSet) String() string { + builder := strings.Builder{} + builder.WriteString(fmt.Sprintf("tx: %v, inc: %v\nreadSet: [", s.ver.TxIndex, s.ver.TxIncarnation)) + i := 0 + for key, _ := range s.readSet { + if i > 0 { + builder.WriteString(fmt.Sprintf(", %v", key.String())) + continue + } + builder.WriteString(fmt.Sprintf("%v", key.String())) + i++ + } + builder.WriteString("]\nwriteSet: [") + i = 0 + for key, _ := range s.writeSet { + if i > 0 { + builder.WriteString(fmt.Sprintf(", %v", key.String())) + continue + } + builder.WriteString(fmt.Sprintf("%v", key.String())) + i++ + } + builder.WriteString("]\n") + return builder.String() +} + +// isEqualRWVal compare state +func isEqualRWVal(key RWKey, src interface{}, compared interface{}) bool { + if ok, state := key.IsAccountState(); ok { + switch state { + case AccountBalance: + if src != nil && compared != nil { + return equalUint256(src.(*uint256.Int), compared.(*uint256.Int)) + } + return src == compared + case AccountNonce: + return src.(uint64) == compared.(uint64) + case AccountCodeHash: + if src != nil && compared != nil { + return slices.Equal(src.([]byte), compared.([]byte)) + } + return src == compared + } + return false + } + + if src != nil && compared != nil { + return src.(common.Hash) == compared.(common.Hash) + } + return src == compared +} + +func equalUint256(s, c *uint256.Int) bool { + if s != nil && c != nil { + return s.Eq(c) + } + + return s == c +} + +type RWItem struct { + Ver StateVersion + Val interface{} +} + +func NewRWItem(ver StateVersion, val interface{}) *RWItem { + return &RWItem{ + Ver: ver, + Val: val, + } +} + +func (w *RWItem) TxIndex() int { + return w.Ver.TxIndex +} + +func (w *RWItem) TxIncarnation() int { + return w.Ver.TxIncarnation +} + +type PendingWrites struct { + list []*RWItem +} + +func NewPendingWrites() *PendingWrites { + return &PendingWrites{ + list: make([]*RWItem, 0), + } +} + +func (w *PendingWrites) Append(pw *RWItem) { + if i, found := w.SearchTxIndex(pw.TxIndex()); found { + w.list[i] = pw + return + } + + w.list = append(w.list, pw) + for i := len(w.list) - 1; i > 0; i-- { + if w.list[i].TxIndex() > w.list[i-1].TxIndex() { + break + } + 
w.list[i-1], w.list[i] = w.list[i], w.list[i-1] + } +} + +func (w *PendingWrites) SearchTxIndex(txIndex int) (int, bool) { + n := len(w.list) + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) + // i ≤ h < j + if w.list[h].TxIndex() < txIndex { + i = h + 1 + } else { + j = h + } + } + return i, i < n && w.list[i].TxIndex() == txIndex +} + +func (w *PendingWrites) FindLastWrite(txIndex int) *RWItem { + var i, _ = w.SearchTxIndex(txIndex) + for j := i - 1; j >= 0; j-- { + if w.list[j].TxIndex() < txIndex { + return w.list[j] + } + } + + return nil +} + +type MVStates struct { + rwSets map[int]*RWSet + pendingWriteSet map[RWKey]*PendingWrites + nextFinaliseIndex int + + // dependency map cache for generating TxDAG + // depsCache[i].exist(j) means j->i, and i > j + depsCache map[int]TxDepMap + + // execution stat infos + stats map[int]*ExeStat + lock sync.RWMutex +} + +func NewMVStates(txCount int) *MVStates { + return &MVStates{ + rwSets: make(map[int]*RWSet, txCount), + pendingWriteSet: make(map[RWKey]*PendingWrites, txCount*8), + depsCache: make(map[int]TxDepMap, txCount), + stats: make(map[int]*ExeStat, txCount), + } +} + +func (s *MVStates) RWSets() map[int]*RWSet { + s.lock.RLock() + defer s.lock.RUnlock() + return s.rwSets +} + +func (s *MVStates) Stats() map[int]*ExeStat { + s.lock.RLock() + defer s.lock.RUnlock() + return s.stats +} + +func (s *MVStates) RWSet(index int) *RWSet { + s.lock.RLock() + defer s.lock.RUnlock() + if index >= len(s.rwSets) { + return nil + } + return s.rwSets[index] +} + +// ReadState read state from MVStates +func (s *MVStates) ReadState(txIndex int, key RWKey) *RWItem { + s.lock.RLock() + defer s.lock.RUnlock() + + wset, ok := s.pendingWriteSet[key] + if !ok { + return nil + } + return wset.FindLastWrite(txIndex) +} + +// FulfillRWSet it can execute as async, and rwSet & stat must guarantee read-only +// try to generate TxDAG, when fulfill RWSet +func (s *MVStates) FulfillRWSet(rwSet *RWSet, stat *ExeStat) error { + log.Debug("FulfillRWSet", "total", len(s.rwSets), "cur", rwSet.ver.TxIndex, "reads", len(rwSet.readSet), "writes", len(rwSet.writeSet)) + s.lock.Lock() + defer s.lock.Unlock() + index := rwSet.ver.TxIndex + if index < s.nextFinaliseIndex { + return errors.New("fulfill a finalized RWSet") + } + if stat != nil { + if stat.txIndex != index { + return errors.New("wrong execution stat") + } + s.stats[index] = stat + } + + if metrics.EnabledExpensive { + for k := range rwSet.writeSet { + // this action is only for testing, it runs when enable expensive metrics. + checkRWSetInconsistent(index, k, rwSet.readSet, rwSet.writeSet) + } + } + s.resolveDepsCache(index, rwSet) + s.rwSets[index] = rwSet + return nil +} + +// Finalise it will put target write set into pending writes. 
+func (s *MVStates) Finalise(index int) error { + log.Debug("Finalise", "total", len(s.rwSets), "index", index) + s.lock.Lock() + defer s.lock.Unlock() + + rwSet := s.rwSets[index] + if rwSet == nil { + return fmt.Errorf("finalise a non-exist RWSet, index: %d", index) + } + + if index != s.nextFinaliseIndex { + return fmt.Errorf("finalise in wrong order, next: %d, input: %d", s.nextFinaliseIndex, index) + } + + // append to pending write set + for k, v := range rwSet.writeSet { + if _, exist := s.pendingWriteSet[k]; !exist { + s.pendingWriteSet[k] = NewPendingWrites() + } + s.pendingWriteSet[k].Append(v) + } + s.nextFinaliseIndex++ + return nil +} + +func (s *MVStates) resolveDepsCache(index int, rwSet *RWSet) { + // analysis dep, if the previous transaction is not executed/validated, re-analysis is required + s.depsCache[index] = NewTxDeps(0) + for prev := 0; prev < index; prev++ { + // if there are some parallel execution or system txs, it will fulfill in advance + // it's ok, and try re-generate later + if _, ok := s.rwSets[prev]; !ok { + continue + } + // check if there has written op before i + if checkDependency(s.rwSets[prev].writeSet, rwSet.readSet) { + s.depsCache[index].add(prev) + // clear redundancy deps compared with prev + for dep := range s.depsCache[index] { + if s.depsCache[prev].exist(dep) { + s.depsCache[index].remove(dep) + } + } + } + } +} + +func checkRWSetInconsistent(index int, k RWKey, readSet map[RWKey]*RWItem, writeSet map[RWKey]*RWItem) bool { + var ( + readOk bool + writeOk bool + r *RWItem + ) + + if k.IsAccountSuicide() { + _, readOk = readSet[k.ToAccountSelf()] + } else { + _, readOk = readSet[k] + } + + r, writeOk = writeSet[k] + if readOk != writeOk { + // check if it's correct? read nil, write non-nil + log.Warn("checkRWSetInconsistent find inconsistent", "tx", index, "k", k.String(), "read", readOk, "write", writeOk, "val", r.Val) + return true + } + + return false +} + +// ResolveTxDAG generate TxDAG from RWSets +func (s *MVStates) ResolveTxDAG(gasFeeReceivers []common.Address) TxDAG { + rwSets := s.RWSets() + txDAG := NewPlainTxDAG(len(rwSets)) + for i := len(rwSets) - 1; i >= 0; i-- { + // check if there are RW with gas fee receiver for gas delay calculation + for _, addr := range gasFeeReceivers { + if _, ok := rwSets[i].readSet[AccountStateKey(addr, AccountSelf)]; ok { + return NewEmptyTxDAG() + } + } + txDAG.TxDeps[i].TxIndexes = []uint64{} + if rwSets[i].mustSerial { + txDAG.TxDeps[i].Relation = 1 + continue + } + if s.depsCache[i] == nil { + s.resolveDepsCache(i, rwSets[i]) + } + txDAG.TxDeps[i].TxIndexes = s.depsCache[i].toArray() + } + + return txDAG +} + +func checkDependency(writeSet map[RWKey]*RWItem, readSet map[RWKey]*RWItem) bool { + // check tx dependency, only check key, skip version + for k, _ := range writeSet { + // check suicide, add read address flag, it only for check suicide quickly, and cannot for other scenarios. 
+ if k.IsAccountSuicide() { + if _, ok := readSet[k.ToAccountSelf()]; ok { + return true + } + continue + } + if _, ok := readSet[k]; ok { + return true + } + } + + return false +} + +type TxDepMap map[int]struct{} + +func NewTxDeps(cap int) TxDepMap { + return make(map[int]struct{}, cap) +} + +func (m TxDepMap) add(index int) { + m[index] = struct{}{} +} + +func (m TxDepMap) exist(index int) bool { + _, ok := m[index] + return ok +} + +func (m TxDepMap) toArray() []uint64 { + ret := make([]uint64, 0, len(m)) + for index := range m { + ret = append(ret, uint64(index)) + } + slices.Sort(ret) + return ret +} + +func (m TxDepMap) remove(index int) { + delete(m, index) +} diff --git a/core/types/mvstates_test.go b/core/types/mvstates_test.go new file mode 100644 index 0000000000..9d4422bf1f --- /dev/null +++ b/core/types/mvstates_test.go @@ -0,0 +1,241 @@ +package types + +import ( + "testing" + + "github.com/holiman/uint256" + "github.com/stretchr/testify/require" +) + +func TestMVStates_BasicUsage(t *testing.T) { + ms := NewMVStates(0) + require.NoError(t, ms.FulfillRWSet(mockRWSetWithVal(0, []interface{}{"0x00", 0}, []interface{}{"0x00", 0}), nil)) + require.Nil(t, ms.ReadState(0, str2key("0x00"))) + require.NoError(t, ms.Finalise(0)) + require.Error(t, ms.Finalise(0)) + require.Error(t, ms.FulfillRWSet(mockRWSetWithVal(0, nil, nil), nil)) + require.Nil(t, ms.ReadState(0, str2key("0x00"))) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 0}, 0), ms.ReadState(1, str2key("0x00"))) + + require.NoError(t, ms.FulfillRWSet(mockRWSetWithVal(1, []interface{}{"0x01", 1}, []interface{}{"0x01", 1}), nil)) + require.Nil(t, ms.ReadState(1, str2key("0x01"))) + require.NoError(t, ms.Finalise(1)) + require.Nil(t, ms.ReadState(0, str2key("0x01"))) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 1}, 1), ms.ReadState(2, str2key("0x01"))) + + require.NoError(t, ms.FulfillRWSet(mockRWSetWithVal(2, []interface{}{"0x02", 2, "0x01", 1}, []interface{}{"0x01", 2, "0x02", 2}), nil)) + require.NoError(t, ms.Finalise(2)) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 1}, 1), ms.ReadState(2, str2key("0x01"))) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 2}, 2), ms.ReadState(3, str2key("0x01"))) + + require.NoError(t, ms.FulfillRWSet(mockRWSetWithVal(3, []interface{}{"0x03", 3, "0x00", 0, "0x01", 2}, []interface{}{"0x00", 3, "0x01", 3, "0x03", 3}), nil)) + require.Nil(t, ms.ReadState(3, str2key("0x03"))) + require.NoError(t, ms.Finalise(3)) + require.Nil(t, ms.ReadState(0, str2key("0x01"))) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 1}, 1), ms.ReadState(2, str2key("0x01"))) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 2}, 2), ms.ReadState(3, str2key("0x01"))) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 3}, 3), ms.ReadState(4, str2key("0x01"))) + require.Nil(t, ms.ReadState(0, str2key("0x00"))) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 3}, 3), ms.ReadState(5, str2key("0x00"))) +} + +func TestSimpleMVStates2TxDAG(t *testing.T) { + ms := NewMVStates(10) + + ms.rwSets[0] = mockRWSet(0, []string{"0x00"}, []string{"0x00"}) + ms.rwSets[1] = mockRWSet(1, []string{"0x01"}, []string{"0x01"}) + ms.rwSets[2] = mockRWSet(2, []string{"0x02"}, []string{"0x02"}) + ms.rwSets[3] = mockRWSet(3, []string{"0x00", "0x03"}, []string{"0x03"}) + ms.rwSets[4] = mockRWSet(4, []string{"0x00", "0x04"}, []string{"0x04"}) + ms.rwSets[5] = mockRWSet(5, []string{"0x01", "0x02", "0x05"}, []string{"0x05"}) + ms.rwSets[6] = mockRWSet(6, []string{"0x02", "0x05", "0x06"}, []string{"0x06"}) + ms.rwSets[7] = 
mockRWSet(7, []string{"0x06", "0x07"}, []string{"0x07"}) + ms.rwSets[8] = mockRWSet(8, []string{"0x08"}, []string{"0x08"}) + ms.rwSets[9] = mockRWSet(9, []string{"0x08", "0x09"}, []string{"0x09"}) + + dag := ms.ResolveTxDAG(nil) + require.Equal(t, mockSimpleDAG(), dag) + t.Log(dag) +} + +func TestSystemTxMVStates2TxDAG(t *testing.T) { + ms := NewMVStates(12) + + ms.rwSets[0] = mockRWSet(0, []string{"0x00"}, []string{"0x00"}) + ms.rwSets[1] = mockRWSet(1, []string{"0x01"}, []string{"0x01"}) + ms.rwSets[2] = mockRWSet(2, []string{"0x02"}, []string{"0x02"}) + ms.rwSets[3] = mockRWSet(3, []string{"0x00", "0x03"}, []string{"0x03"}) + ms.rwSets[4] = mockRWSet(4, []string{"0x00", "0x04"}, []string{"0x04"}) + ms.rwSets[5] = mockRWSet(5, []string{"0x01", "0x02", "0x05"}, []string{"0x05"}) + ms.rwSets[6] = mockRWSet(6, []string{"0x02", "0x05", "0x06"}, []string{"0x06"}) + ms.rwSets[7] = mockRWSet(7, []string{"0x06", "0x07"}, []string{"0x07"}) + ms.rwSets[8] = mockRWSet(8, []string{"0x08"}, []string{"0x08"}) + ms.rwSets[9] = mockRWSet(9, []string{"0x08", "0x09"}, []string{"0x09"}) + ms.rwSets[10] = mockRWSet(10, []string{"0x10"}, []string{"0x10"}).WithSerialFlag() + ms.rwSets[11] = mockRWSet(11, []string{"0x11"}, []string{"0x11"}).WithSerialFlag() + + dag := ms.ResolveTxDAG(nil) + require.Equal(t, mockSystemTxDAG(), dag) + t.Log(dag) +} + +func TestIsEqualRWVal(t *testing.T) { + tests := []struct { + key RWKey + src interface{} + compared interface{} + isEqual bool + }{ + { + key: AccountStateKey(mockAddr, AccountNonce), + src: uint64(0), + compared: uint64(0), + isEqual: true, + }, + { + key: AccountStateKey(mockAddr, AccountNonce), + src: uint64(0), + compared: uint64(1), + isEqual: false, + }, + { + key: AccountStateKey(mockAddr, AccountBalance), + src: new(uint256.Int).SetUint64(1), + compared: new(uint256.Int).SetUint64(1), + isEqual: true, + }, + { + key: AccountStateKey(mockAddr, AccountBalance), + src: nil, + compared: new(uint256.Int).SetUint64(1), + isEqual: false, + }, + { + key: AccountStateKey(mockAddr, AccountBalance), + src: (*uint256.Int)(nil), + compared: new(uint256.Int).SetUint64(1), + isEqual: false, + }, + { + key: AccountStateKey(mockAddr, AccountBalance), + src: (*uint256.Int)(nil), + compared: (*uint256.Int)(nil), + isEqual: true, + }, + { + key: AccountStateKey(mockAddr, AccountCodeHash), + src: []byte{1}, + compared: []byte{1}, + isEqual: true, + }, + { + key: AccountStateKey(mockAddr, AccountCodeHash), + src: nil, + compared: []byte{1}, + isEqual: false, + }, + { + key: AccountStateKey(mockAddr, AccountCodeHash), + src: ([]byte)(nil), + compared: []byte{1}, + isEqual: false, + }, + { + key: AccountStateKey(mockAddr, AccountCodeHash), + src: ([]byte)(nil), + compared: ([]byte)(nil), + isEqual: true, + }, + { + key: AccountStateKey(mockAddr, AccountSuicide), + src: struct{}{}, + compared: struct{}{}, + isEqual: false, + }, + { + key: AccountStateKey(mockAddr, AccountSuicide), + src: nil, + compared: struct{}{}, + isEqual: false, + }, + { + key: StorageStateKey(mockAddr, mockHash), + src: mockHash, + compared: mockHash, + isEqual: true, + }, + { + key: StorageStateKey(mockAddr, mockHash), + src: nil, + compared: mockHash, + isEqual: false, + }, + } + + for i, item := range tests { + require.Equal(t, item.isEqual, isEqualRWVal(item.key, item.src, item.compared), i) + } +} + +func mockRWSet(index int, read []string, write []string) *RWSet { + ver := StateVersion{ + TxIndex: index, + } + set := NewRWSet(ver) + for _, k := range read { + set.readSet[str2key(k)] = &RWItem{ + Ver: 
ver, + Val: struct{}{}, + } + } + for _, k := range write { + set.writeSet[str2key(k)] = &RWItem{ + Ver: ver, + Val: struct{}{}, + } + } + + return set +} + +func mockRWSetWithVal(index int, read []interface{}, write []interface{}) *RWSet { + ver := StateVersion{ + TxIndex: index, + } + set := NewRWSet(ver) + + if len(read)%2 != 0 { + panic("wrong read size") + } + if len(write)%2 != 0 { + panic("wrong write size") + } + + for i := 0; i < len(read); { + set.readSet[str2key(read[i].(string))] = &RWItem{ + Ver: StateVersion{ + TxIndex: index - 1, + }, + Val: read[i+1], + } + i += 2 + } + for i := 0; i < len(write); { + set.writeSet[str2key(write[i].(string))] = &RWItem{ + Ver: ver, + Val: write[i+1], + } + i += 2 + } + + return set +} + +func str2key(k string) RWKey { + key := RWKey{} + if len(k) > len(key) { + k = k[:len(key)] + } + copy(key[:], k) + return key +} diff --git a/core/vm/interface.go b/core/vm/interface.go index 25bfa06720..a0ded99fce 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -79,6 +79,9 @@ type StateDB interface { AddLog(*types.Log) AddPreimage(common.Hash, []byte) + + BeforeTxTransition() + FinaliseRWSet() error } // CallContext provides a basic interface for the EVM calling conventions. The EVM diff --git a/eth/backend.go b/eth/backend.go index d96c20ce0f..ff5926a187 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -283,6 +283,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if err != nil { return nil, err } + if config.EnableParallelTxDAG { + eth.blockchain.SetupTxDAGGeneration() + } if chainConfig := eth.blockchain.Config(); chainConfig.Optimism != nil { // config.Genesis.Config.ChainID cannot be used because it's based on CLI flags only, thus default to mainnet L1 config.NetworkId = chainConfig.ChainID.Uint64() // optimism defaults eth network ID to chain ID eth.networkID = config.NetworkId diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 383641ffc3..3ce1fa3df5 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -219,6 +219,7 @@ type Config struct { RollupHaltOnIncompatibleProtocolVersion string EnableOpcodeOptimizing bool + EnableParallelTxDAG bool } // CreateConsensusEngine creates a consensus engine for the given chain config. 
diff --git a/miner/miner.go b/miner/miner.go index 53755ad632..1f407851f4 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -63,7 +63,8 @@ var ( snapshotAccountReadTimer = metrics.NewRegisteredTimer("miner/snapshot/account/reads", nil) snapshotStorageReadTimer = metrics.NewRegisteredTimer("miner/snapshot/storage/reads", nil) - waitPayloadTimer = metrics.NewRegisteredTimer("miner/wait/payload", nil) + waitPayloadTimer = metrics.NewRegisteredTimer("miner/wait/payload", nil) + txDAGGenerateTimer = metrics.NewRegisteredTimer("miner/txdag/gen", nil) isBuildBlockInterruptCounter = metrics.NewRegisteredCounter("miner/build/interrupt", nil) ) diff --git a/miner/worker.go b/miner/worker.go index b8df413238..41c3d254de 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1190,6 +1190,9 @@ func (w *worker) fillTransactions(interrupt *atomic.Int32, env *environment) err w.mu.RUnlock() start := time.Now() + if w.chain.TxDAGEnabled() { + env.state.ResetMVStates(0) + } // Retrieve the pending transactions pre-filtered by the 1559/4844 dynamic fees filter := txpool.PendingFilter{ MinTip: tip, @@ -1344,6 +1347,15 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { return &newPayloadResult{err: fmt.Errorf("empty block root")} } + // TODO(galaio): fulfill TxDAG to mined block + //if w.chain.TxDAGEnabled() && w.chainConfig.Optimism != nil { + // txDAG, _ := work.state.ResolveTxDAG([]common.Address{work.coinbase, params.OptimismBaseFeeRecipient, params.OptimismL1FeeRecipient}) + // rawTxDAG, err := types.EncodeTxDAG(txDAG) + // if err != nil { + // return &newPayloadResult{err: err} + // } + //} + assembleBlockTimer.UpdateSince(start) log.Debug("assembleBlockTimer", "duration", common.PrettyDuration(time.Since(start)), "parentHash", genParams.parentHash) @@ -1355,6 +1367,7 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { storageUpdateTimer.Update(work.state.StorageUpdates) // Storage updates are complete(in FinalizeAndAssemble) accountHashTimer.Update(work.state.AccountHashes) // Account hashes are complete(in FinalizeAndAssemble) storageHashTimer.Update(work.state.StorageHashes) // Storage hashes are complete(in FinalizeAndAssemble) + txDAGGenerateTimer.Update(work.state.TxDAGGenerate) innerExecutionTimer.Update(core.DebugInnerExecutionDuration) From 22d858def1329591b6c31aed303521e98d0a2287 Mon Sep 17 00:00:00 2001 From: galaio Date: Fri, 2 Aug 2024 15:12:11 +0800 Subject: [PATCH 02/42] txdag: opt txdag encoding; evm: fix failed tx rwSet collecting; --- core/state/statedb.go | 12 +- core/state_processor.go | 14 ++- core/state_transition.go | 20 ++- core/types/dag.go | 77 ++++++++---- core/types/dag_test.go | 234 +++++++++++++++++++++++++++++++----- core/types/mvstates.go | 37 ++++-- core/types/mvstates_test.go | 32 ++++- core/vm/interface.go | 2 + 8 files changed, 343 insertions(+), 85 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index 83c3fe94c6..2506c56a29 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1798,7 +1798,7 @@ func (s *StateDB) removeStateObjectsDestruct(addr common.Address) { delete(s.stateObjectsDestructDirty, addr) } -func (s *StateDB) ResolveTxDAG(gasFeeReceivers []common.Address) (types.TxDAG, map[int]*types.ExeStat) { +func (s *StateDB) ResolveTxDAG(txCnt int, gasFeeReceivers []common.Address) (types.TxDAG, error) { if s.mvStates == nil { return types.NewEmptyTxDAG(), nil } @@ -1808,7 +1808,15 @@ func (s *StateDB) ResolveTxDAG(gasFeeReceivers []common.Address) (types.TxDAG, m 
}(time.Now()) } - return s.mvStates.ResolveTxDAG(gasFeeReceivers), s.mvStates.Stats() + return s.mvStates.ResolveTxDAG(txCnt, gasFeeReceivers) +} + +func (s *StateDB) ResolveStats() map[int]*types.ExeStat { + if s.mvStates == nil { + return nil + } + + return s.mvStates.Stats() } func (s *StateDB) MVStates() *types.MVStates { diff --git a/core/state_processor.go b/core/state_processor.go index 9e300b5ddc..201e3e9e6f 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -126,11 +126,15 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg if p.bc.enableTxDAG { // compare input TxDAG when it enable in consensus - dag, extraStats := statedb.ResolveTxDAG([]common.Address{context.Coinbase, params.OptimismBaseFeeRecipient, params.OptimismL1FeeRecipient}) - // TODO(galaio): check TxDAG correctness? - log.Debug("Process TxDAG result", "block", block.NumberU64(), "txDAG", dag) - if metrics.EnabledExpensive { - types.EvaluateTxDAGPerformance(dag, extraStats) + dag, err := statedb.ResolveTxDAG(len(block.Transactions()), []common.Address{context.Coinbase, params.OptimismBaseFeeRecipient, params.OptimismL1FeeRecipient}) + if err == nil { + // TODO(galaio): check TxDAG correctness? + log.Debug("Process TxDAG result", "block", block.NumberU64(), "txDAG", dag) + if metrics.EnabledExpensive { + types.EvaluateTxDAGPerformance(dag, statedb.ResolveStats()) + } + } else { + log.Error("ResolveTxDAG err", "block", block.NumberU64(), "tx", len(block.Transactions()), "err", err) } } return receipts, allLogs, *usedGas, nil diff --git a/core/state_transition.go b/core/state_transition.go index 0814c85256..6fa1a74944 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -27,6 +27,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/holiman/uint256" ) @@ -432,6 +433,10 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { if st.msg.IsSystemTx && !st.evm.ChainConfig().IsRegolith(st.evm.Context.Time) { gasUsed = 0 } + // just record error tx here + if ferr := st.state.FinaliseRWSet(); ferr != nil { + log.Error("finalise error deposit tx rwSet fail", "block", st.evm.Context.BlockNumber, "tx", st.evm.StateDB.TxIndex()) + } result = &ExecutionResult{ UsedGas: gasUsed, Err: fmt.Errorf("failed deposit: %w", err), @@ -439,6 +444,12 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { } err = nil } + if err != nil { + // just record error tx here + if ferr := st.state.FinaliseRWSet(); ferr != nil { + log.Error("finalise error tx rwSet fail", "block", st.evm.Context.BlockNumber, "tx", st.evm.StateDB.TxIndex()) + } + } return result, err } @@ -521,6 +532,11 @@ func (st *StateTransition) innerTransitionDb() (*ExecutionResult, error) { } DebugInnerExecutionDuration += time.Since(start) + // stop record rw set in here, skip gas fee distribution + if ferr := st.state.FinaliseRWSet(); ferr != nil { + log.Error("finalise tx rwSet fail", "block", st.evm.Context.BlockNumber, "tx", st.evm.StateDB.TxIndex()) + } + // if deposit: skip refunds, skip tipping coinbase // Regolith changes this behaviour to report the actual gasUsed instead of always reporting all gas used. 
if st.msg.IsDepositTx && !rules.IsOptimismRegolith { @@ -536,10 +552,6 @@ func (st *StateTransition) innerTransitionDb() (*ExecutionResult, error) { ReturnData: ret, }, nil } - // stop record rw set in here, skip gas fee distribution - if err := st.state.FinaliseRWSet(); err != nil { - return nil, err - } // Note for deposit tx there is no ETH refunded for unused gas, but that's taken care of by the fact that gasPrice // is always 0 for deposit tx. So calling refundGas will ensure the gasUsed accounting is correct without actually diff --git a/core/types/dag.go b/core/types/dag.go index 801ec476a2..f5ec17735f 100644 --- a/core/types/dag.go +++ b/core/types/dag.go @@ -18,6 +18,11 @@ const ( PlainTxDAGType ) +var ( + TxDAGRelation0 uint8 = 0 + TxDAGRelation1 uint8 = 1 +) + type TxDAG interface { // Type return TxDAG type Type() byte @@ -93,8 +98,8 @@ func (d *EmptyTxDAG) DelayGasDistribution() bool { func (d *EmptyTxDAG) TxDep(int) TxDep { return TxDep{ - Relation: 1, TxIndexes: nil, + Relation: &TxDAGRelation1, } } @@ -156,9 +161,12 @@ func NewPlainTxDAG(txLen int) *PlainTxDAG { func (d *PlainTxDAG) String() string { builder := strings.Builder{} - exePaths := travelTxDAGExecutionPaths(d) - for _, path := range exePaths { - builder.WriteString(fmt.Sprintf("%v\n", path)) + for _, txDep := range d.TxDeps { + if txDep.Relation == nil || txDep.RelationEqual(TxDAGRelation0) { + builder.WriteString(fmt.Sprintf("%v\n", txDep.TxIndexes)) + continue + } + builder.WriteString(fmt.Sprintf("%d: %v\n", *txDep.Relation, txDep.TxIndexes)) } return builder.String() } @@ -174,11 +182,12 @@ func (d *PlainTxDAG) Size() int { // MergeTxDAGExecutionPaths will merge duplicate tx path for scheduling parallel. // Any tx cannot exist in >= 2 paths. func MergeTxDAGExecutionPaths(d TxDAG) [][]uint64 { - mergeMap := make(map[uint64][]uint64, d.TxCount()) - txMap := make(map[uint64]uint64, d.TxCount()) - for i := d.TxCount() - 1; i >= 0; i-- { + nd := convert2PlainTxDAGWithRelation0(d) + mergeMap := make(map[uint64][]uint64, nd.TxCount()) + txMap := make(map[uint64]uint64, nd.TxCount()) + for i := nd.TxCount() - 1; i >= 0; i-- { index, merge := uint64(i), uint64(i) - deps := d.TxDep(i).TxIndexes + deps := nd.TxDep(i).TxIndexes if oldIdx, exist := findTxPathIndex(deps, index, txMap); exist { merge = oldIdx } @@ -196,7 +205,7 @@ func MergeTxDAGExecutionPaths(d TxDAG) [][]uint64 { mergeMap[t] = append(mergeMap[t], f) } mergePaths := make([][]uint64, 0, len(mergeMap)) - for i := 0; i < d.TxCount(); i++ { + for i := 0; i < nd.TxCount(); i++ { path, ok := mergeMap[uint64(i)] if !ok { continue @@ -224,37 +233,46 @@ func findTxPathIndex(path []uint64, cur uint64, txMap map[uint64]uint64) (uint64 // travelTxDAGExecutionPaths will print all tx execution path func travelTxDAGExecutionPaths(d TxDAG) [][]uint64 { - txCount := d.TxCount() - deps := make([]TxDep, txCount) - for i := 0; i < txCount; i++ { + nd := convert2PlainTxDAGWithRelation0(d) + + exePaths := make([][]uint64, 0) + // travel tx deps with BFS + for i := uint64(0); i < uint64(nd.TxCount()); i++ { + exePaths = append(exePaths, travelTxDAGTargetPath(nd.TxDeps, i)) + } + return exePaths +} + +func convert2PlainTxDAGWithRelation0(d TxDAG) *PlainTxDAG { + if d.TxCount() == 0 { + return NewPlainTxDAG(0) + } + nd := NewPlainTxDAG(d.TxCount()) + for i := 0; i < d.TxCount(); i++ { dep := d.TxDep(i) - if dep.Relation == 0 { - deps[i] = dep + if dep.RelationEqual(TxDAGRelation0) { + nd.SetTxDep(i, dep) continue } + np := TxDep{} // recover to relation 0 for j := 0; j < i; j++ { 
- if !dep.Exist(j) { - deps[i].AppendDep(j) + if !dep.Exist(j) && j != i { + np.AppendDep(j) } } + nd.SetTxDep(i, np) } - - exePaths := make([][]uint64, 0) - // travel tx deps with BFS - for i := uint64(0); i < uint64(txCount); i++ { - exePaths = append(exePaths, travelTxDAGTargetPath(deps, i)) - } - return exePaths + return nd } // TxDep store the current tx dependency relation with other txs type TxDep struct { + TxIndexes []uint64 // It describes the Relation with below txs - // 0: this tx depends on below txs + // 0: this tx depends on below txs, it can be ignored and not be encoded in rlp encoder. // 1: this transaction does not depend on below txs, all other previous txs depend on - Relation uint8 - TxIndexes []uint64 + Relation *uint8 `rlp:"optional"` } func (d *TxDep) AppendDep(i int) { @@ -282,6 +300,13 @@ func (d *TxDep) Last() int { return int(d.TxIndexes[len(d.TxIndexes)-1]) } +func (d *TxDep) RelationEqual(rel uint8) bool { + if d.Relation == nil { + return TxDAGRelation0 == rel + } + return *d.Relation == rel +} + var ( longestTimeTimer = metrics.NewRegisteredTimer("dag/longesttime", nil) longestGasTimer = metrics.NewRegisteredTimer("dag/longestgas", nil) diff --git a/core/types/dag_test.go b/core/types/dag_test.go index 1c0b334a8d..ead8b1cda5 100644 --- a/core/types/dag_test.go +++ b/core/types/dag_test.go @@ -1,6 +1,7 @@ package types import ( + "encoding/hex" "testing" "time" @@ -15,32 +16,32 @@ var ( mockHash = common.HexToHash("0xdc13f8d7bdb8ec4de02cd4a50a1aa2ab73ec8814e0cdb550341623be3dd8ab7a") ) -func TestTxDAG(t *testing.T) { +func TestTxDAG_SetTxDep(t *testing.T) { dag := mockSimpleDAG() require.NoError(t, dag.SetTxDep(9, TxDep{ - Relation: 1, + Relation: &TxDAGRelation1, TxIndexes: nil, })) require.NoError(t, dag.SetTxDep(10, TxDep{ - Relation: 1, + Relation: &TxDAGRelation1, TxIndexes: nil, })) require.Error(t, dag.SetTxDep(12, TxDep{ - Relation: 1, + Relation: &TxDAGRelation1, TxIndexes: nil, })) dag = NewEmptyTxDAG() require.NoError(t, dag.SetTxDep(0, TxDep{ - Relation: 1, + Relation: &TxDAGRelation1, TxIndexes: nil, })) require.NoError(t, dag.SetTxDep(11, TxDep{ - Relation: 1, + Relation: &TxDAGRelation1, TxIndexes: nil, })) } -func TestTxDAG_SetTxDep(t *testing.T) { +func TestTxDAG(t *testing.T) { dag := mockSimpleDAG() t.Log(dag) dag = mockSystemTxDAG() @@ -53,7 +54,8 @@ func TestEvaluateTxDAG(t *testing.T) { for i := 0; i < dag.TxCount(); i++ { stats[i] = NewExeStat(i).WithGas(uint64(i)).WithRead(i) stats[i].costTime = time.Duration(i) - if dag.TxDep(i).Relation == 1 { + txDep := dag.TxDep(i) + if txDep.RelationEqual(TxDAGRelation1) { stats[i].WithSerialFlag() } } @@ -61,12 +63,36 @@ func TestEvaluateTxDAG(t *testing.T) { } func TestMergeTxDAGExecutionPaths_Simple(t *testing.T) { - paths := MergeTxDAGExecutionPaths(mockSimpleDAG()) - require.Equal(t, [][]uint64{ - {0, 3, 4}, - {1, 2, 5, 6, 7}, - {8, 9}, - }, paths) + tests := []struct { + d TxDAG + expect [][]uint64 + }{ + { + d: mockSimpleDAG(), + expect: [][]uint64{ + {0, 3, 4}, + {1, 2, 5, 6, 7}, + {8, 9}, + }, + }, + { + d: mockSimpleDAGWithLargeDeps(), + expect: [][]uint64{ + {5, 6}, + {0, 1, 2, 3, 4, 7, 8, 9}, + }, + }, + { + d: mockSystemTxDAGWithLargeDeps(), + expect: [][]uint64{ + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, + }, + }, + } + for i, item := range tests { + paths := MergeTxDAGExecutionPaths(item.d) + require.Equal(t, item.expect, paths, i) + } } func TestMergeTxDAGExecutionPaths_Random(t *testing.T) { @@ -105,10 +131,29 @@ func mockSimpleDAG() TxDAG { return dag } +func 
mockSimpleDAGWithLargeDeps() TxDAG { + dag := NewPlainTxDAG(10) + dag.TxDeps[0].TxIndexes = []uint64{} + dag.TxDeps[1].TxIndexes = []uint64{} + dag.TxDeps[2].TxIndexes = []uint64{} + dag.TxDeps[3].TxIndexes = []uint64{0} + dag.TxDeps[4].TxIndexes = []uint64{0} + dag.TxDeps[5].TxIndexes = []uint64{} + dag.TxDeps[6].TxIndexes = []uint64{5} + dag.TxDeps[7].TxIndexes = []uint64{2, 4} + dag.TxDeps[8].TxIndexes = []uint64{} + //dag.TxDeps[9].TxIndexes = []uint64{0, 1, 3, 4, 8} + dag.TxDeps[9] = TxDep{ + Relation: &TxDAGRelation1, + TxIndexes: []uint64{2, 5, 6, 7}, + } + return dag +} + func mockRandomDAG(txLen int) TxDAG { dag := NewPlainTxDAG(txLen) for i := 0; i < txLen; i++ { - var deps []uint64 + deps := make([]uint64, 0) if i == 0 || rand.Bool() { dag.TxDeps[i].TxIndexes = deps continue @@ -144,31 +189,154 @@ func mockSystemTxDAG() TxDAG { dag.TxDeps[8].TxIndexes = []uint64{} dag.TxDeps[9].TxIndexes = []uint64{8} dag.TxDeps[10] = TxDep{ - Relation: 1, + Relation: &TxDAGRelation1, TxIndexes: []uint64{}, } dag.TxDeps[11] = TxDep{ - Relation: 1, + Relation: &TxDAGRelation1, + TxIndexes: []uint64{}, + } + return dag +} + +func mockSystemTxDAG2() TxDAG { + dag := NewPlainTxDAG(12) + dag.TxDeps[0] = TxDep{ + Relation: &TxDAGRelation0, + TxIndexes: []uint64{}, + } + dag.TxDeps[1] = TxDep{ + Relation: &TxDAGRelation0, + TxIndexes: []uint64{}, + } + dag.TxDeps[2] = TxDep{ + Relation: &TxDAGRelation0, + TxIndexes: []uint64{}, + } + dag.TxDeps[3] = TxDep{ + Relation: &TxDAGRelation0, + TxIndexes: []uint64{0}, + } + dag.TxDeps[4] = TxDep{ + Relation: &TxDAGRelation0, + TxIndexes: []uint64{0}, + } + dag.TxDeps[5] = TxDep{ + Relation: &TxDAGRelation0, + TxIndexes: []uint64{1, 2}, + } + dag.TxDeps[6] = TxDep{ + Relation: &TxDAGRelation0, + TxIndexes: []uint64{2, 5}, + } + dag.TxDeps[7] = TxDep{ + Relation: &TxDAGRelation0, + TxIndexes: []uint64{6}, + } + dag.TxDeps[8] = TxDep{ + Relation: &TxDAGRelation0, + TxIndexes: []uint64{}, + } + dag.TxDeps[9] = TxDep{ + Relation: &TxDAGRelation0, + TxIndexes: []uint64{8}, + } + dag.TxDeps[10] = TxDep{ + Relation: &TxDAGRelation1, + TxIndexes: []uint64{}, + } + dag.TxDeps[11] = TxDep{ + Relation: &TxDAGRelation1, + TxIndexes: []uint64{}, + } + return dag +} + +func mockSystemTxDAGWithLargeDeps() TxDAG { + dag := NewPlainTxDAG(12) + dag.TxDeps[0].TxIndexes = []uint64{} + dag.TxDeps[1].TxIndexes = []uint64{} + dag.TxDeps[2].TxIndexes = []uint64{} + dag.TxDeps[3].TxIndexes = []uint64{0} + dag.TxDeps[4].TxIndexes = []uint64{0} + dag.TxDeps[5].TxIndexes = []uint64{1, 2} + dag.TxDeps[6].TxIndexes = []uint64{2, 5} + dag.TxDeps[7].TxIndexes = []uint64{0, 1, 3, 5, 6} + dag.TxDeps[8].TxIndexes = []uint64{} + //dag.TxDeps[9].TxIndexes = []uint64{0, 1, 2, 3, 4, 8} + dag.TxDeps[9] = TxDep{ + Relation: &TxDAGRelation1, + TxIndexes: []uint64{5, 6, 7, 10, 11}, + } + dag.TxDeps[10] = TxDep{ + Relation: &TxDAGRelation1, + TxIndexes: []uint64{}, + } + dag.TxDeps[11] = TxDep{ + Relation: &TxDAGRelation1, TxIndexes: []uint64{}, } return dag } func TestTxDAG_Encode_Decode(t *testing.T) { - expected := TxDAG(&EmptyTxDAG{}) - enc, err := EncodeTxDAG(expected) - require.NoError(t, err) - actual, err := DecodeTxDAG(enc) - require.NoError(t, err) - require.Equal(t, expected, actual) - - expected = mockSimpleDAG() - enc, err = EncodeTxDAG(expected) - require.NoError(t, err) - actual, err = DecodeTxDAG(enc) - require.NoError(t, err) - require.Equal(t, expected, actual) - enc[0] = 2 - _, err = DecodeTxDAG(enc) - require.Error(t, err) + tests := []struct { + expect TxDAG + }{ + { + expect: 
TxDAG(&EmptyTxDAG{}), + }, + { + expect: mockSimpleDAG(), + }, + { + expect: mockRandomDAG(100), + }, + { + expect: mockSystemTxDAG(), + }, + { + expect: mockSystemTxDAG2(), + }, + { + expect: mockSystemTxDAGWithLargeDeps(), + }, + } + for i, item := range tests { + enc, err := EncodeTxDAG(item.expect) + t.Log(hex.EncodeToString(enc)) + require.NoError(t, err, i) + actual, err := DecodeTxDAG(enc) + require.NoError(t, err, i) + require.Equal(t, item.expect, actual, i) + if i%2 == 0 { + enc[0] = 2 + _, err = DecodeTxDAG(enc) + require.Error(t, err) + } + } +} + +func TestDecodeTxDAG(t *testing.T) { + tests := []struct { + enc string + err bool + }{ + {"00c0", false}, + {"01dddcc1c0c1c0c1c0c2c180c2c180c3c20102c3c20205c2c106c1c0c2c108", false}, + {"01e3e2c1c0c1c0c1c0c2c180c2c180c3c20102c3c20205c2c106c1c0c2c108c2c001c2c001", false}, + {"0132e212", true}, + {"01dfdec280c0c280c0c380c101c380c102c380c103c380c104c380c105c380c106", true}, + } + for i, item := range tests { + enc, err := hex.DecodeString(item.enc) + require.NoError(t, err, i) + txDAG, err := DecodeTxDAG(enc) + if item.err { + require.Error(t, err, i) + continue + } + require.NoError(t, err, i) + t.Log(txDAG) + } } diff --git a/core/types/mvstates.go b/core/types/mvstates.go index 85d3886470..bfcaf13613 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -446,28 +446,43 @@ func checkRWSetInconsistent(index int, k RWKey, readSet map[RWKey]*RWItem, write } // ResolveTxDAG generate TxDAG from RWSets -func (s *MVStates) ResolveTxDAG(gasFeeReceivers []common.Address) TxDAG { - rwSets := s.RWSets() - txDAG := NewPlainTxDAG(len(rwSets)) - for i := len(rwSets) - 1; i >= 0; i-- { +func (s *MVStates) ResolveTxDAG(txCnt int, gasFeeReceivers []common.Address) (TxDAG, error) { + s.lock.RLock() + defer s.lock.RUnlock() + if len(s.rwSets) != txCnt { + return nil, fmt.Errorf("wrong rwSet count, expect: %v, actual: %v", txCnt, len(s.rwSets)) + } + txDAG := NewPlainTxDAG(len(s.rwSets)) + for i := txCnt - 1; i >= 0; i-- { // check if there are RW with gas fee receiver for gas delay calculation for _, addr := range gasFeeReceivers { - if _, ok := rwSets[i].readSet[AccountStateKey(addr, AccountSelf)]; ok { - return NewEmptyTxDAG() + if _, ok := s.rwSets[i].readSet[AccountStateKey(addr, AccountSelf)]; ok { + return NewEmptyTxDAG(), nil } } txDAG.TxDeps[i].TxIndexes = []uint64{} - if rwSets[i].mustSerial { - txDAG.TxDeps[i].Relation = 1 + if s.rwSets[i].mustSerial { + txDAG.TxDeps[i].Relation = &TxDAGRelation1 continue } if s.depsCache[i] == nil { - s.resolveDepsCache(i, rwSets[i]) + s.resolveDepsCache(i, s.rwSets[i]) + } + deps := s.depsCache[i].toArray() + if len(deps) <= (txCnt-1)/2 { + txDAG.TxDeps[i].TxIndexes = deps + continue + } + // if tx deps larger than half of txs, then convert to relation1 + txDAG.TxDeps[i].Relation = &TxDAGRelation1 + for j := uint64(0); j < uint64(txCnt); j++ { + if !slices.Contains(deps, j) && j != uint64(i) { + txDAG.TxDeps[i].TxIndexes = append(txDAG.TxDeps[i].TxIndexes, j) + } } - txDAG.TxDeps[i].TxIndexes = s.depsCache[i].toArray() } - return txDAG + return txDAG, nil } func checkDependency(writeSet map[RWKey]*RWItem, readSet map[RWKey]*RWItem) bool { diff --git a/core/types/mvstates_test.go b/core/types/mvstates_test.go index 9d4422bf1f..9dcd83a6ba 100644 --- a/core/types/mvstates_test.go +++ b/core/types/mvstates_test.go @@ -39,7 +39,7 @@ func TestMVStates_BasicUsage(t *testing.T) { require.Equal(t, NewRWItem(StateVersion{TxIndex: 3}, 3), ms.ReadState(5, str2key("0x00"))) } -func 
TestSimpleMVStates2TxDAG(t *testing.T) { +func TestMVStates_SimpleResolveTxDAG(t *testing.T) { ms := NewMVStates(10) ms.rwSets[0] = mockRWSet(0, []string{"0x00"}, []string{"0x00"}) @@ -53,12 +53,13 @@ func TestSimpleMVStates2TxDAG(t *testing.T) { ms.rwSets[8] = mockRWSet(8, []string{"0x08"}, []string{"0x08"}) ms.rwSets[9] = mockRWSet(9, []string{"0x08", "0x09"}, []string{"0x09"}) - dag := ms.ResolveTxDAG(nil) + dag, err := ms.ResolveTxDAG(10, nil) + require.NoError(t, err) require.Equal(t, mockSimpleDAG(), dag) t.Log(dag) } -func TestSystemTxMVStates2TxDAG(t *testing.T) { +func TestMVStates_SystemTxResolveTxDAG(t *testing.T) { ms := NewMVStates(12) ms.rwSets[0] = mockRWSet(0, []string{"0x00"}, []string{"0x00"}) @@ -74,11 +75,34 @@ func TestSystemTxMVStates2TxDAG(t *testing.T) { ms.rwSets[10] = mockRWSet(10, []string{"0x10"}, []string{"0x10"}).WithSerialFlag() ms.rwSets[11] = mockRWSet(11, []string{"0x11"}, []string{"0x11"}).WithSerialFlag() - dag := ms.ResolveTxDAG(nil) + dag, err := ms.ResolveTxDAG(12, nil) + require.NoError(t, err) require.Equal(t, mockSystemTxDAG(), dag) t.Log(dag) } +func TestMVStates_SystemTxWithLargeDepsResolveTxDAG(t *testing.T) { + ms := NewMVStates(12) + + ms.rwSets[0] = mockRWSet(0, []string{"0x00"}, []string{"0x00"}) + ms.rwSets[1] = mockRWSet(1, []string{"0x01"}, []string{"0x01"}) + ms.rwSets[2] = mockRWSet(2, []string{"0x02"}, []string{"0x02"}) + ms.rwSets[3] = mockRWSet(3, []string{"0x00", "0x03"}, []string{"0x03"}) + ms.rwSets[4] = mockRWSet(4, []string{"0x00", "0x04"}, []string{"0x04"}) + ms.rwSets[5] = mockRWSet(5, []string{"0x01", "0x02", "0x05"}, []string{"0x05"}) + ms.rwSets[6] = mockRWSet(6, []string{"0x02", "0x05", "0x06"}, []string{"0x06"}) + ms.rwSets[7] = mockRWSet(7, []string{"0x00", "0x01", "0x03", "0x05", "0x06", "0x07"}, []string{"0x07"}) + ms.rwSets[8] = mockRWSet(8, []string{"0x08"}, []string{"0x08"}) + ms.rwSets[9] = mockRWSet(9, []string{"0x00", "0x01", "0x02", "0x03", "0x04", "0x08", "0x09"}, []string{"0x09"}) + ms.rwSets[10] = mockRWSet(10, []string{"0x10"}, []string{"0x10"}).WithSerialFlag() + ms.rwSets[11] = mockRWSet(11, []string{"0x11"}, []string{"0x11"}).WithSerialFlag() + + dag, err := ms.ResolveTxDAG(12, nil) + require.NoError(t, err) + require.Equal(t, mockSystemTxDAGWithLargeDeps(), dag) + t.Log(dag) +} + func TestIsEqualRWVal(t *testing.T) { tests := []struct { key RWKey diff --git a/core/vm/interface.go b/core/vm/interface.go index a0ded99fce..0242db0138 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -80,6 +80,8 @@ type StateDB interface { AddLog(*types.Log) AddPreimage(common.Hash, []byte) + TxIndex() int + BeforeTxTransition() FinaliseRWSet() error } From 71f4525a66a75d188c34c8944c8dc79540285841 Mon Sep 17 00:00:00 2001 From: galaio Date: Fri, 23 Aug 2024 17:22:24 +0800 Subject: [PATCH 03/42] mvstates: fix oom issue when mining is enabled; mvstates: opt async dep generation; mvstates: opt resolve dep logic; mvstates: fix async dep gen deadlock issue; miner: support record sysytem tx rwset; miner: opt txdag enable checking; txdag: fix system tx finalise issue; mvstate: using pending writes to accelerate txdag generation; txdag: test snappy compress ratio; txdag: add more bench tests; txdag: reduce mem alloc and async resolve tx dependency; txdag: add excluded flag; mvstates: generate txdag with excluded flag; txdag: support multi flags, and supported in pevm; txdag: opt TxDAG rwset collecting & generating; txdag: opt txdag encoding, reduce rlp size --- core/blockchain.go | 17 +- core/state/statedb.go | 33 ++-- 
core/state_processor.go | 20 +-- core/state_transition.go | 12 +- core/types/dag.go | 307 ++++++++++++++++++++++++++++-------- core/types/dag_test.go | 291 +++++++++++++++++++++------------- core/types/mvstates.go | 217 +++++++++++++++++++------ core/types/mvstates_test.go | 268 ++++++++++++++++++++++++++----- core/vm/interface.go | 1 + miner/worker.go | 21 ++- 10 files changed, 887 insertions(+), 300 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 4e5e5893ce..71abfd8673 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1983,6 +1983,20 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) vtime := time.Since(vstart) proctime := time.Since(start) // processing + validation + if bc.enableTxDAG { + // compare input TxDAG when it enable in consensus + dag, err := statedb.ResolveTxDAG(len(block.Transactions()), []common.Address{block.Coinbase(), params.OptimismBaseFeeRecipient, params.OptimismL1FeeRecipient}) + if err == nil { + // TODO(galaio): check TxDAG correctness? + log.Debug("Process TxDAG result", "block", block.NumberU64(), "txDAG", dag) + if metrics.EnabledExpensive { + go types.EvaluateTxDAGPerformance(dag, statedb.ResolveStats()) + } + } else { + log.Error("ResolveTxDAG err", "block", block.NumberU64(), "tx", len(block.Transactions()), "err", err) + } + } + // Update the metrics touched during block processing and validation accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing) storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing) @@ -2765,10 +2779,11 @@ func (bc *BlockChain) HeaderChainForceSetHead(headNumber uint64) { bc.hc.SetHead(headNumber, nil, createDelFn(bc)) } -func (bc *BlockChain) TxDAGEnabled() bool { +func (bc *BlockChain) TxDAGEnabledWhenMine() bool { return bc.enableTxDAG } func (bc *BlockChain) SetupTxDAGGeneration() { + log.Info("node enable TxDAG feature") bc.enableTxDAG = true } diff --git a/core/state/statedb.go b/core/state/statedb.go index 2506c56a29..23bacbfafd 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1714,12 +1714,16 @@ func (s *StateDB) StopTxStat(usedGas uint64) { } // record stat first if metrics.EnabledExpensive && s.stat != nil { - s.stat.Done().WithGas(usedGas).WithRead(len(s.rwSet.ReadSet())) + s.stat.Done().WithGas(usedGas) + rwSet := s.mvStates.RWSet(s.txIndex) + if rwSet != nil { + s.stat.WithRead(len(rwSet.ReadSet())) + } } } func (s *StateDB) RecordRead(key types.RWKey, val interface{}) { - if s.rwSet == nil || s.rwSet.RWRecordDone() { + if s.rwSet == nil { return } s.rwSet.RecordRead(key, types.StateVersion{ @@ -1728,21 +1732,26 @@ func (s *StateDB) RecordRead(key types.RWKey, val interface{}) { } func (s *StateDB) RecordWrite(key types.RWKey, val interface{}) { - if s.rwSet == nil || s.rwSet.RWRecordDone() { + if s.rwSet == nil { return } s.rwSet.RecordWrite(key, val) } func (s *StateDB) ResetMVStates(txCount int) { - s.mvStates = types.NewMVStates(txCount) + if s.mvStates != nil { + s.mvStates.Stop() + } + s.mvStates = types.NewMVStates(txCount).EnableAsyncDepGen() s.rwSet = nil } func (s *StateDB) FinaliseRWSet() error { - if s.rwSet == nil || s.rwSet.RWRecordDone() { + if s.rwSet == nil { return nil } + rwSet := s.rwSet + stat := s.stat if metrics.EnabledExpensive { defer func(start time.Time) { s.TxDAGGenerate += time.Since(start) @@ -1751,7 +1760,7 @@ func (s *StateDB) FinaliseRWSet() error { ver := types.StateVersion{ TxIndex: s.txIndex, } - if ver != s.rwSet.Version() { + if 
ver != rwSet.Version() { return errors.New("you finalize a wrong ver of RWSet") } @@ -1778,8 +1787,13 @@ func (s *StateDB) FinaliseRWSet() error { } } - s.rwSet.SetRWRecordDone() - return s.mvStates.FulfillRWSet(s.rwSet, s.stat) + // reset stateDB + s.rwSet = nil + if err := s.mvStates.FulfillRWSet(rwSet, stat); err != nil { + return err + } + // just Finalise rwSet in serial execution + return s.mvStates.Finalise(s.txIndex) } func (s *StateDB) getStateObjectsDestruct(addr common.Address) (*types.StateAccount, bool) { @@ -1829,7 +1843,8 @@ func (s *StateDB) RecordSystemTxRWSet(index int) { } s.mvStates.FulfillRWSet(types.NewRWSet(types.StateVersion{ TxIndex: index, - }).WithSerialFlag(), types.NewExeStat(index).WithSerialFlag()) + }).WithExcludedTxFlag(), types.NewExeStat(index).WithExcludedTxFlag()) + s.mvStates.Finalise(index) } // copySet returns a deep-copied set. diff --git a/core/state_processor.go b/core/state_processor.go index 201e3e9e6f..3ecf4644dd 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -22,8 +22,6 @@ import ( "math/big" "time" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/misc" @@ -109,6 +107,10 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg return nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err) } + // if systemTx or depositTx, tag it + if tx.IsSystemTx() || tx.IsDepositTx() { + statedb.RecordSystemTxRWSet(i) + } receipts = append(receipts, receipt) allLogs = append(allLogs, receipt.Logs...) if metrics.EnabledExpensive { @@ -123,20 +125,6 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg } // Finalize the block, applying any consensus engine specific extras (e.g. block rewards) p.engine.Finalize(p.bc, header, statedb, block.Transactions(), block.Uncles(), withdrawals) - - if p.bc.enableTxDAG { - // compare input TxDAG when it enable in consensus - dag, err := statedb.ResolveTxDAG(len(block.Transactions()), []common.Address{context.Coinbase, params.OptimismBaseFeeRecipient, params.OptimismL1FeeRecipient}) - if err == nil { - // TODO(galaio): check TxDAG correctness? - log.Debug("Process TxDAG result", "block", block.NumberU64(), "txDAG", dag) - if metrics.EnabledExpensive { - types.EvaluateTxDAGPerformance(dag, statedb.ResolveStats()) - } - } else { - log.Error("ResolveTxDAG err", "block", block.NumberU64(), "tx", len(block.Transactions()), "err", err) - } - } return receipts, allLogs, *usedGas, nil } diff --git a/core/state_transition.go b/core/state_transition.go index 6fa1a74944..3e7aeaa084 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -409,6 +409,10 @@ func (st *StateTransition) preCheck() error { // However if any consensus issue encountered, return the error directly with // nil evm execution result. 
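+// When TxDAG recording is enabled, TransitionDb brackets execution with BeforeTxTransition
+// (skipped for system and deposit txs) and FinaliseRWSet, so that a read/write set is still
+// captured when the transaction fails.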
func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { + // start record rw set in here + if !st.msg.IsSystemTx && !st.msg.IsDepositTx { + st.state.BeforeTxTransition() + } if mint := st.msg.Mint; mint != nil { mintU256, overflow := uint256.FromBig(mint) if overflow { @@ -435,7 +439,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { } // just record error tx here if ferr := st.state.FinaliseRWSet(); ferr != nil { - log.Error("finalise error deposit tx rwSet fail", "block", st.evm.Context.BlockNumber, "tx", st.evm.StateDB.TxIndex()) + log.Error("finalise error deposit tx rwSet fail", "block", st.evm.Context.BlockNumber, "tx", st.evm.StateDB.TxIndex(), "err", ferr) } result = &ExecutionResult{ UsedGas: gasUsed, @@ -447,15 +451,13 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { if err != nil { // just record error tx here if ferr := st.state.FinaliseRWSet(); ferr != nil { - log.Error("finalise error tx rwSet fail", "block", st.evm.Context.BlockNumber, "tx", st.evm.StateDB.TxIndex()) + log.Error("finalise error tx rwSet fail", "block", st.evm.Context.BlockNumber, "tx", st.evm.StateDB.TxIndex(), "err", ferr) } } return result, err } func (st *StateTransition) innerTransitionDb() (*ExecutionResult, error) { - // start record rw set in here - st.state.BeforeTxTransition() // First check this message satisfies all consensus rules before // applying the message. The rules include these clauses // @@ -534,7 +536,7 @@ func (st *StateTransition) innerTransitionDb() (*ExecutionResult, error) { // stop record rw set in here, skip gas fee distribution if ferr := st.state.FinaliseRWSet(); ferr != nil { - log.Error("finalise tx rwSet fail", "block", st.evm.Context.BlockNumber, "tx", st.evm.StateDB.TxIndex()) + log.Error("finalise tx rwSet fail", "block", st.evm.Context.BlockNumber, "tx", st.evm.StateDB.TxIndex(), "err", ferr) } // if deposit: skip refunds, skip tipping coinbase diff --git a/core/types/dag.go b/core/types/dag.go index f5ec17735f..e46f7bb897 100644 --- a/core/types/dag.go +++ b/core/types/dag.go @@ -7,11 +7,41 @@ import ( "strings" "time" + "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rlp" "golang.org/x/exp/slices" ) +const TxDAGAbiJson = ` +[ + { + "type": "function", + "name": "setTxDAG", + "inputs": [ + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + } +] +` + +var TxDAGABI abi.ABI + +func init() { + var err error + // must be able to register the TxDAGABI + TxDAGABI, err = abi.JSON(strings.NewReader(TxDAGAbiJson)) + if err != nil { + panic(err) + } +} + // TxDAGType Used to extend TxDAG and customize a new DAG structure const ( EmptyTxDAGType byte = iota @@ -19,8 +49,13 @@ const ( ) var ( - TxDAGRelation0 uint8 = 0 - TxDAGRelation1 uint8 = 1 + // NonDependentRelFlag indicates that the txs described is non-dependent + // and is used to reduce storage when there are a large number of dependencies. + NonDependentRelFlag uint8 = 0x01 + // ExcludedTxFlag indicates that the tx is excluded from TxDAG, user should execute them in sequence. + // These excluded transactions should be consecutive in the head or tail. 
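+	// For example, system and deposit transactions are tagged with this flag via
+	// RecordSystemTxRWSet and are always replayed serially.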
+ ExcludedTxFlag uint8 = 0x02 + TxDepFlagMask = NonDependentRelFlag | ExcludedTxFlag ) type TxDAG interface { @@ -30,11 +65,11 @@ type TxDAG interface { // Inner return inner instance Inner() interface{} - // DelayGasDistribution check if delay the distribution of GasFee - DelayGasDistribution() bool + // DelayGasFeeDistribution check if delay the distribution of GasFee + DelayGasFeeDistribution() bool // TxDep query TxDeps from TxDAG - TxDep(int) TxDep + TxDep(int) *TxDep // TxCount return tx count TxCount() int @@ -43,6 +78,37 @@ type TxDAG interface { SetTxDep(int, TxDep) error } +func DecodeTxDAGCalldata(data []byte) (TxDAG, error) { + // trim the method id before unpack + if len(data) < 4 { + return nil, fmt.Errorf("invalid txDAG calldata, len(data)=%d", len(data)) + } + calldata, err := TxDAGABI.Methods["setTxDAG"].Inputs.Unpack(data[4:]) + if err != nil { + return nil, fmt.Errorf("failed to call abi unpack, err: %v", err) + } + if len(calldata) <= 0 { + return nil, fmt.Errorf("invalid txDAG calldata, len(calldata)=%d", len(calldata)) + } + data, ok := calldata[0].([]byte) + if !ok { + return nil, fmt.Errorf("invalid txDAG calldata parameter") + } + return DecodeTxDAG(data) +} + +func EncodeTxDAGCalldata(dag TxDAG) ([]byte, error) { + data, err := EncodeTxDAG(dag) + if err != nil { + return nil, fmt.Errorf("failed to encode txDAG, err: %v", err) + } + data, err = TxDAGABI.Pack("setTxDAG", data) + if err != nil { + return nil, fmt.Errorf("failed to call abi pack, err: %v", err) + } + return data, nil +} + func EncodeTxDAG(dag TxDAG) ([]byte, error) { if dag == nil { return nil, errors.New("input nil TxDAG") @@ -74,6 +140,76 @@ func DecodeTxDAG(enc []byte) (TxDAG, error) { } } +func ValidateTxDAG(d TxDAG, txCnt int) error { + if d == nil { + return nil + } + + switch d.Type() { + case EmptyTxDAGType: + return nil + case PlainTxDAGType: + return ValidatePlainTxDAG(d, txCnt) + default: + return fmt.Errorf("unsupported TxDAG type: %v", d.Type()) + } +} + +func ValidatePlainTxDAG(d TxDAG, txCnt int) error { + if d.TxCount() != txCnt { + return fmt.Errorf("PlainTxDAG contains wrong txs count, expect: %v, actual: %v", txCnt, d.TxCount()) + } + for i := 0; i < txCnt; i++ { + dep := d.TxDep(i) + if dep == nil { + return fmt.Errorf("PlainTxDAG contains nil txdep, tx: %v", i) + } + for j, tx := range dep.TxIndexes { + if tx >= uint64(i) || tx >= uint64(txCnt) { + return fmt.Errorf("PlainTxDAG contains the exceed range dependency, tx: %v", i) + } + if j > 0 && dep.TxIndexes[j] <= dep.TxIndexes[j-1] { + return fmt.Errorf("PlainTxDAG contains unordered dependency, tx: %v", i) + } + } + if dep.Flags != nil && *dep.Flags & ^TxDepFlagMask > 0 { + return fmt.Errorf("PlainTxDAG contains unknown flags, flags: %v", *dep.Flags) + } + } + return nil +} + +// GetTxDAG return TxDAG bytes from block if there is any, or return nil if not exist +// the txDAG is stored in the calldata of the last transaction of the block +func GetTxDAG(block *Block) (TxDAG, error) { + txs := block.Transactions() + if txs.Len() <= 0 { + return nil, fmt.Errorf("no txdag found") + } + // get data from the last tx + return DecodeTxDAGCalldata(txs[txs.Len()-1].Data()) +} + +func TxDependency(d TxDAG, i int) []uint64 { + if d == nil || i < 0 || i >= d.TxCount() { + return []uint64{} + } + dep := d.TxDep(i) + if dep.CheckFlag(ExcludedTxFlag) { + return []uint64{} + } + if dep.CheckFlag(NonDependentRelFlag) { + txs := make([]uint64, 0, d.TxCount()-dep.Count()) + for j := 0; j < i; j++ { + if !dep.Exist(j) && j != i { + txs = append(txs, 
uint64(j)) + } + } + return txs + } + return dep.TxIndexes +} + // EmptyTxDAG indicate that execute txs in sequence // It means no transactions or need timely distribute transaction fees // it only keep partial serial execution when tx cannot delay the distribution or just execute txs in sequence @@ -92,15 +228,17 @@ func (d *EmptyTxDAG) Inner() interface{} { return d } -func (d *EmptyTxDAG) DelayGasDistribution() bool { +func (d *EmptyTxDAG) DelayGasFeeDistribution() bool { return false } -func (d *EmptyTxDAG) TxDep(int) TxDep { - return TxDep{ +func (d *EmptyTxDAG) TxDep(int) *TxDep { + dep := TxDep{ TxIndexes: nil, - Relation: &TxDAGRelation1, + Flags: new(uint8), } + dep.SetFlag(NonDependentRelFlag) + return &dep } func (d *EmptyTxDAG) TxCount() int { @@ -112,7 +250,7 @@ func (d *EmptyTxDAG) SetTxDep(int, TxDep) error { } func (d *EmptyTxDAG) String() string { - return "None" + return "EmptyTxDAG" } // PlainTxDAG indicate how to use the dependency of txs, and delay the distribution of GasFee @@ -129,12 +267,12 @@ func (d *PlainTxDAG) Inner() interface{} { return d } -func (d *PlainTxDAG) DelayGasDistribution() bool { +func (d *PlainTxDAG) DelayGasFeeDistribution() bool { return true } -func (d *PlainTxDAG) TxDep(i int) TxDep { - return d.TxDeps[i] +func (d *PlainTxDAG) TxDep(i int) *TxDep { + return &d.TxDeps[i] } func (d *PlainTxDAG) TxCount() int { @@ -162,11 +300,11 @@ func NewPlainTxDAG(txLen int) *PlainTxDAG { func (d *PlainTxDAG) String() string { builder := strings.Builder{} for _, txDep := range d.TxDeps { - if txDep.Relation == nil || txDep.RelationEqual(TxDAGRelation0) { - builder.WriteString(fmt.Sprintf("%v\n", txDep.TxIndexes)) + if txDep.Flags != nil { + builder.WriteString(fmt.Sprintf("%v|%v\n", txDep.TxIndexes, *txDep.Flags)) continue } - builder.WriteString(fmt.Sprintf("%d: %v\n", *txDep.Relation, txDep.TxIndexes)) + builder.WriteString(fmt.Sprintf("%v\n", txDep.TxIndexes)) } return builder.String() } @@ -181,13 +319,17 @@ func (d *PlainTxDAG) Size() int { // MergeTxDAGExecutionPaths will merge duplicate tx path for scheduling parallel. // Any tx cannot exist in >= 2 paths. 
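+// For example, with the dependencies used by mockSimpleDAG (txs 3 and 4 depend on 0, txs 5-7
+// chain off 1 and 2, tx 9 depends on 8), the merged paths are {0, 3, 4}, {1, 2, 5, 6, 7} and {8, 9}.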
-func MergeTxDAGExecutionPaths(d TxDAG) [][]uint64 { - nd := convert2PlainTxDAGWithRelation0(d) - mergeMap := make(map[uint64][]uint64, nd.TxCount()) - txMap := make(map[uint64]uint64, nd.TxCount()) - for i := nd.TxCount() - 1; i >= 0; i-- { +func MergeTxDAGExecutionPaths(d TxDAG, from, to uint64) ([][]uint64, error) { + if from > to || to >= uint64(d.TxCount()) { + return nil, fmt.Errorf("input wrong from: %v, to: %v, txCnt:%v", from, to, d.TxCount()) + } + mergeMap := make(map[uint64][]uint64, d.TxCount()) + txMap := make(map[uint64]uint64, d.TxCount()) + for i := int(to); i >= int(from); i-- { index, merge := uint64(i), uint64(i) - deps := nd.TxDep(i).TxIndexes + deps := TxDependency(d, i) + // drop the out range txs + deps = depExcludeTxRange(deps, from, to) if oldIdx, exist := findTxPathIndex(deps, index, txMap); exist { merge = oldIdx } @@ -202,11 +344,14 @@ func MergeTxDAGExecutionPaths(d TxDAG) [][]uint64 { if mergeMap[t] == nil { mergeMap[t] = make([]uint64, 0) } + if f < from || f > to { + continue + } mergeMap[t] = append(mergeMap[t], f) } mergePaths := make([][]uint64, 0, len(mergeMap)) - for i := 0; i < nd.TxCount(); i++ { - path, ok := mergeMap[uint64(i)] + for i := from; i <= to; i++ { + path, ok := mergeMap[i] if !ok { continue } @@ -214,7 +359,25 @@ func MergeTxDAGExecutionPaths(d TxDAG) [][]uint64 { mergePaths = append(mergePaths, path) } - return mergePaths + return mergePaths, nil +} + +// depExcludeTxRange drop all from~to items, and deps is ordered. +func depExcludeTxRange(deps []uint64, from uint64, to uint64) []uint64 { + if len(deps) == 0 { + return deps + } + start, end := 0, len(deps)-1 + for start < len(deps) && deps[start] < from { + start++ + } + for end >= 0 && deps[end] > to { + end-- + } + if start > end { + return nil + } + return deps[start : end+1] } func findTxPathIndex(path []uint64, cur uint64, txMap map[uint64]uint64) (uint64, bool) { @@ -233,46 +396,33 @@ func findTxPathIndex(path []uint64, cur uint64, txMap map[uint64]uint64) (uint64 // travelTxDAGExecutionPaths will print all tx execution path func travelTxDAGExecutionPaths(d TxDAG) [][]uint64 { - nd := convert2PlainTxDAGWithRelation0(d) - exePaths := make([][]uint64, 0) // travel tx deps with BFS - for i := uint64(0); i < uint64(nd.TxCount()); i++ { - exePaths = append(exePaths, travelTxDAGTargetPath(nd.TxDeps, i)) + for i := uint64(0); i < uint64(d.TxCount()); i++ { + exePaths = append(exePaths, travelTxDAGTargetPath(d, i)) } return exePaths } -func convert2PlainTxDAGWithRelation0(d TxDAG) *PlainTxDAG { - if d.TxCount() == 0 { - return NewPlainTxDAG(0) - } - nd := NewPlainTxDAG(d.TxCount()) - for i := 0; i < d.TxCount(); i++ { - dep := d.TxDep(i) - if dep.RelationEqual(TxDAGRelation0) { - nd.SetTxDep(i, dep) - continue - } - np := TxDep{} - // recover to relation 0 - for j := 0; j < i; j++ { - if !dep.Exist(j) && j != i { - np.AppendDep(j) - } - } - nd.SetTxDep(i, np) - } - return nd -} - // TxDep store the current tx dependency relation with other txs type TxDep struct { TxIndexes []uint64 - // It describes the Relation with below txs - // 0: this tx depends on below txs, it can be ignored and not be encoded in rlp encoder. - // 1: this transaction does not depend on below txs, all other previous txs depend on - Relation *uint8 `rlp:"optional"` + // Flags may has multi flag meaning, ref NonDependentRelFlag, ExcludedTxFlag. 
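+	// With NonDependentRelFlag set, TxIndexes lists the txs this one does NOT depend on, which
+	// keeps the RLP encoding small when a tx depends on most earlier txs (see TxDependency and
+	// ResolveTxDAG for the conversion).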
+ Flags *uint8 `rlp:"optional"` +} + +func NewTxDep(indexes []uint64, flags ...uint8) TxDep { + dep := TxDep{ + TxIndexes: indexes, + } + if len(flags) == 0 { + return dep + } + dep.Flags = new(uint8) + for _, flag := range flags { + dep.SetFlag(flag) + } + return dep } func (d *TxDep) AppendDep(i int) { @@ -300,11 +450,26 @@ func (d *TxDep) Last() int { return int(d.TxIndexes[len(d.TxIndexes)-1]) } -func (d *TxDep) RelationEqual(rel uint8) bool { - if d.Relation == nil { - return TxDAGRelation0 == rel +func (d *TxDep) CheckFlag(flag uint8) bool { + var flags uint8 + if d.Flags != nil { + flags = *d.Flags + } + return flags&flag == flag +} + +func (d *TxDep) SetFlag(flag uint8) { + if d.Flags == nil { + d.Flags = new(uint8) + } + *d.Flags |= flag +} + +func (d *TxDep) ClearFlag(flag uint8) { + if d.Flags == nil { + return } - return *d.Relation == rel + *d.Flags &= ^flag } var ( @@ -341,7 +506,7 @@ func EvaluateTxDAGPerformance(dag TxDAG, stats map[int]*ExeStat) { totalTxMeter.Mark(int64(txCount)) for i, path := range paths { - if stats[i].mustSerial { + if stats[i].excludedTx { continue } if len(path) <= 1 { @@ -402,7 +567,7 @@ func EvaluateTxDAGPerformance(dag TxDAG, stats map[int]*ExeStat) { sPath []int ) for i, stat := range stats { - if stat.mustSerial { + if stat.excludedTx { continue } sPath = append(sPath, i) @@ -414,16 +579,18 @@ func EvaluateTxDAGPerformance(dag TxDAG, stats map[int]*ExeStat) { } // travelTxDAGTargetPath will print target execution path -func travelTxDAGTargetPath(deps []TxDep, from uint64) []uint64 { - queue := make([]uint64, 0, len(deps)) - path := make([]uint64, 0, len(deps)) +func travelTxDAGTargetPath(d TxDAG, from uint64) []uint64 { + var ( + queue []uint64 + path []uint64 + ) queue = append(queue, from) path = append(path, from) for len(queue) > 0 { - next := make([]uint64, 0, len(deps)) + var next []uint64 for _, i := range queue { - for _, dep := range deps[i].TxIndexes { + for _, dep := range TxDependency(d, int(i)) { if !slices.Contains(path, dep) { path = append(path, dep) next = append(next, dep) @@ -445,7 +612,7 @@ type ExeStat struct { costTime time.Duration // some flags - mustSerial bool + excludedTx bool } func NewExeStat(txIndex int) *ExeStat { @@ -464,8 +631,8 @@ func (s *ExeStat) Done() *ExeStat { return s } -func (s *ExeStat) WithSerialFlag() *ExeStat { - s.mustSerial = true +func (s *ExeStat) WithExcludedTxFlag() *ExeStat { + s.excludedTx = true return s } diff --git a/core/types/dag_test.go b/core/types/dag_test.go index ead8b1cda5..cc50f2e5db 100644 --- a/core/types/dag_test.go +++ b/core/types/dag_test.go @@ -5,9 +5,12 @@ import ( "testing" "time" + "github.com/golang/snappy" + "github.com/cometbft/cometbft/libs/rand" "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -16,29 +19,26 @@ var ( mockHash = common.HexToHash("0xdc13f8d7bdb8ec4de02cd4a50a1aa2ab73ec8814e0cdb550341623be3dd8ab7a") ) +func TestEncodeTxDAGCalldata(t *testing.T) { + tg := mockSimpleDAG() + data, err := EncodeTxDAGCalldata(tg) + assert.Equal(t, nil, err) + tg, err = DecodeTxDAGCalldata(data) + assert.Equal(t, nil, err) + assert.Equal(t, true, tg.TxCount() > 0) + + _, err = DecodeTxDAGCalldata(nil) + assert.NotEqual(t, nil, err) +} + func TestTxDAG_SetTxDep(t *testing.T) { dag := mockSimpleDAG() - require.NoError(t, dag.SetTxDep(9, TxDep{ - Relation: &TxDAGRelation1, - TxIndexes: nil, - })) - require.NoError(t, dag.SetTxDep(10, TxDep{ - Relation: &TxDAGRelation1, - TxIndexes: nil, - })) - 
require.Error(t, dag.SetTxDep(12, TxDep{ - Relation: &TxDAGRelation1, - TxIndexes: nil, - })) + require.NoError(t, dag.SetTxDep(9, NewTxDep(nil, NonDependentRelFlag))) + require.NoError(t, dag.SetTxDep(10, NewTxDep(nil, NonDependentRelFlag))) + require.Error(t, dag.SetTxDep(12, NewTxDep(nil, NonDependentRelFlag))) dag = NewEmptyTxDAG() - require.NoError(t, dag.SetTxDep(0, TxDep{ - Relation: &TxDAGRelation1, - TxIndexes: nil, - })) - require.NoError(t, dag.SetTxDep(11, TxDep{ - Relation: &TxDAGRelation1, - TxIndexes: nil, - })) + require.NoError(t, dag.SetTxDep(0, NewTxDep(nil, NonDependentRelFlag))) + require.NoError(t, dag.SetTxDep(11, NewTxDep(nil, NonDependentRelFlag))) } func TestTxDAG(t *testing.T) { @@ -55,8 +55,8 @@ func TestEvaluateTxDAG(t *testing.T) { stats[i] = NewExeStat(i).WithGas(uint64(i)).WithRead(i) stats[i].costTime = time.Duration(i) txDep := dag.TxDep(i) - if txDep.RelationEqual(TxDAGRelation1) { - stats[i].WithSerialFlag() + if txDep.CheckFlag(NonDependentRelFlag) { + stats[i].WithExcludedTxFlag() } } EvaluateTxDAGPerformance(dag, stats) @@ -65,10 +65,14 @@ func TestEvaluateTxDAG(t *testing.T) { func TestMergeTxDAGExecutionPaths_Simple(t *testing.T) { tests := []struct { d TxDAG + from uint64 + to uint64 expect [][]uint64 }{ { - d: mockSimpleDAG(), + d: mockSimpleDAG(), + from: 0, + to: 9, expect: [][]uint64{ {0, 3, 4}, {1, 2, 5, 6, 7}, @@ -76,28 +80,63 @@ func TestMergeTxDAGExecutionPaths_Simple(t *testing.T) { }, }, { - d: mockSimpleDAGWithLargeDeps(), + d: mockSimpleDAG(), + from: 1, + to: 1, + expect: [][]uint64{ + {1}, + }, + }, + { + d: mockSimpleDAGWithLargeDeps(), + from: 0, + to: 9, expect: [][]uint64{ {5, 6}, {0, 1, 2, 3, 4, 7, 8, 9}, }, }, { - d: mockSystemTxDAGWithLargeDeps(), + d: mockSystemTxDAGWithLargeDeps(), + from: 0, + to: 11, + expect: [][]uint64{ + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + {10}, + {11}, + }, + }, + { + d: mockSimpleDAGWithLargeDeps(), + from: 5, + to: 8, + expect: [][]uint64{ + {5, 6}, + {7}, + {8}, + }, + }, + { + d: mockSimpleDAGWithLargeDeps(), + from: 5, + to: 9, expect: [][]uint64{ - {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, + {5, 6}, + {7}, + {8, 9}, }, }, } for i, item := range tests { - paths := MergeTxDAGExecutionPaths(item.d) + paths, err := MergeTxDAGExecutionPaths(item.d, item.from, item.to) + require.NoError(t, err) require.Equal(t, item.expect, paths, i) } } func TestMergeTxDAGExecutionPaths_Random(t *testing.T) { dag := mockRandomDAG(10000) - paths := MergeTxDAGExecutionPaths(dag) + paths, _ := MergeTxDAGExecutionPaths(dag, 0, uint64(dag.TxCount()-1)) txMap := make(map[uint64]uint64, dag.TxCount()) for _, path := range paths { for _, index := range path { @@ -109,10 +148,33 @@ func TestMergeTxDAGExecutionPaths_Random(t *testing.T) { require.Equal(t, dag.TxCount(), len(txMap)) } +func TestTxDAG_Compression(t *testing.T) { + dag := mockRandomDAG(10000) + enc, err := EncodeTxDAG(dag) + require.NoError(t, err) + encoded := snappy.Encode(nil, enc) + t.Log("enc", len(enc), "compressed", len(encoded), "ratio", 1-(float64(len(encoded))/float64(len(enc)))) +} + func BenchmarkMergeTxDAGExecutionPaths(b *testing.B) { dag := mockRandomDAG(100000) for i := 0; i < b.N; i++ { - MergeTxDAGExecutionPaths(dag) + MergeTxDAGExecutionPaths(dag, 0, uint64(dag.TxCount()-1)) + } +} + +func BenchmarkTxDAG_Encode(b *testing.B) { + dag := mockRandomDAG(10000) + for i := 0; i < b.N; i++ { + EncodeTxDAG(dag) + } +} + +func BenchmarkTxDAG_Decode(b *testing.B) { + dag := mockRandomDAG(10000) + enc, _ := EncodeTxDAG(dag) + for i := 0; i < b.N; i++ { + 
DecodeTxDAG(enc) } } @@ -124,7 +186,7 @@ func mockSimpleDAG() TxDAG { dag.TxDeps[3].TxIndexes = []uint64{0} dag.TxDeps[4].TxIndexes = []uint64{0} dag.TxDeps[5].TxIndexes = []uint64{1, 2} - dag.TxDeps[6].TxIndexes = []uint64{2, 5} + dag.TxDeps[6].TxIndexes = []uint64{5} dag.TxDeps[7].TxIndexes = []uint64{6} dag.TxDeps[8].TxIndexes = []uint64{} dag.TxDeps[9].TxIndexes = []uint64{8} @@ -143,10 +205,7 @@ func mockSimpleDAGWithLargeDeps() TxDAG { dag.TxDeps[7].TxIndexes = []uint64{2, 4} dag.TxDeps[8].TxIndexes = []uint64{} //dag.TxDeps[9].TxIndexes = []uint64{0, 1, 3, 4, 8} - dag.TxDeps[9] = TxDep{ - Relation: &TxDAGRelation1, - TxIndexes: []uint64{2, 5, 6, 7}, - } + dag.TxDeps[9] = NewTxDep([]uint64{2, 5, 6, 7}, NonDependentRelFlag) return dag } @@ -184,71 +243,29 @@ func mockSystemTxDAG() TxDAG { dag.TxDeps[3].TxIndexes = []uint64{0} dag.TxDeps[4].TxIndexes = []uint64{0} dag.TxDeps[5].TxIndexes = []uint64{1, 2} - dag.TxDeps[6].TxIndexes = []uint64{2, 5} + dag.TxDeps[6].TxIndexes = []uint64{5} dag.TxDeps[7].TxIndexes = []uint64{6} dag.TxDeps[8].TxIndexes = []uint64{} dag.TxDeps[9].TxIndexes = []uint64{8} - dag.TxDeps[10] = TxDep{ - Relation: &TxDAGRelation1, - TxIndexes: []uint64{}, - } - dag.TxDeps[11] = TxDep{ - Relation: &TxDAGRelation1, - TxIndexes: []uint64{}, - } + dag.TxDeps[10] = NewTxDep([]uint64{}, ExcludedTxFlag) + dag.TxDeps[11] = NewTxDep([]uint64{}, ExcludedTxFlag) return dag } func mockSystemTxDAG2() TxDAG { dag := NewPlainTxDAG(12) - dag.TxDeps[0] = TxDep{ - Relation: &TxDAGRelation0, - TxIndexes: []uint64{}, - } - dag.TxDeps[1] = TxDep{ - Relation: &TxDAGRelation0, - TxIndexes: []uint64{}, - } - dag.TxDeps[2] = TxDep{ - Relation: &TxDAGRelation0, - TxIndexes: []uint64{}, - } - dag.TxDeps[3] = TxDep{ - Relation: &TxDAGRelation0, - TxIndexes: []uint64{0}, - } - dag.TxDeps[4] = TxDep{ - Relation: &TxDAGRelation0, - TxIndexes: []uint64{0}, - } - dag.TxDeps[5] = TxDep{ - Relation: &TxDAGRelation0, - TxIndexes: []uint64{1, 2}, - } - dag.TxDeps[6] = TxDep{ - Relation: &TxDAGRelation0, - TxIndexes: []uint64{2, 5}, - } - dag.TxDeps[7] = TxDep{ - Relation: &TxDAGRelation0, - TxIndexes: []uint64{6}, - } - dag.TxDeps[8] = TxDep{ - Relation: &TxDAGRelation0, - TxIndexes: []uint64{}, - } - dag.TxDeps[9] = TxDep{ - Relation: &TxDAGRelation0, - TxIndexes: []uint64{8}, - } - dag.TxDeps[10] = TxDep{ - Relation: &TxDAGRelation1, - TxIndexes: []uint64{}, - } - dag.TxDeps[11] = TxDep{ - Relation: &TxDAGRelation1, - TxIndexes: []uint64{}, - } + dag.TxDeps[0] = NewTxDep([]uint64{}) + dag.TxDeps[1] = NewTxDep([]uint64{}) + dag.TxDeps[2] = NewTxDep([]uint64{}) + dag.TxDeps[3] = NewTxDep([]uint64{0}) + dag.TxDeps[4] = NewTxDep([]uint64{0}) + dag.TxDeps[5] = NewTxDep([]uint64{1, 2}) + dag.TxDeps[6] = NewTxDep([]uint64{5}) + dag.TxDeps[7] = NewTxDep([]uint64{6}) + dag.TxDeps[8] = NewTxDep([]uint64{}) + dag.TxDeps[9] = NewTxDep([]uint64{8}) + dag.TxDeps[10] = NewTxDep([]uint64{}, NonDependentRelFlag) + dag.TxDeps[11] = NewTxDep([]uint64{}, NonDependentRelFlag) return dag } @@ -260,22 +277,13 @@ func mockSystemTxDAGWithLargeDeps() TxDAG { dag.TxDeps[3].TxIndexes = []uint64{0} dag.TxDeps[4].TxIndexes = []uint64{0} dag.TxDeps[5].TxIndexes = []uint64{1, 2} - dag.TxDeps[6].TxIndexes = []uint64{2, 5} - dag.TxDeps[7].TxIndexes = []uint64{0, 1, 3, 5, 6} + dag.TxDeps[6].TxIndexes = []uint64{5} + dag.TxDeps[7].TxIndexes = []uint64{3} dag.TxDeps[8].TxIndexes = []uint64{} - //dag.TxDeps[9].TxIndexes = []uint64{0, 1, 2, 3, 4, 8} - dag.TxDeps[9] = TxDep{ - Relation: &TxDAGRelation1, - TxIndexes: []uint64{5, 6, 7, 10, 
11}, - } - dag.TxDeps[10] = TxDep{ - Relation: &TxDAGRelation1, - TxIndexes: []uint64{}, - } - dag.TxDeps[11] = TxDep{ - Relation: &TxDAGRelation1, - TxIndexes: []uint64{}, - } + //dag.TxDeps[9].TxIndexes = []uint64{0, 1, 2, 6, 7, 8} + dag.TxDeps[9] = NewTxDep([]uint64{3, 4, 5, 10, 11}, NonDependentRelFlag) + dag.TxDeps[10] = NewTxDep([]uint64{}, ExcludedTxFlag) + dag.TxDeps[11] = NewTxDep([]uint64{}, ExcludedTxFlag) return dag } @@ -327,6 +335,7 @@ func TestDecodeTxDAG(t *testing.T) { {"01e3e2c1c0c1c0c1c0c2c180c2c180c3c20102c3c20205c2c106c1c0c2c108c2c001c2c001", false}, {"0132e212", true}, {"01dfdec280c0c280c0c380c101c380c102c380c103c380c104c380c105c380c106", true}, + {"01cdccc280c0c280c0c280c0c280c0", true}, } for i, item := range tests { enc, err := hex.DecodeString(item.enc) @@ -340,3 +349,73 @@ func TestDecodeTxDAG(t *testing.T) { t.Log(txDAG) } } + +func TestTxDep_Flags(t *testing.T) { + dep := NewTxDep(nil) + dep.ClearFlag(NonDependentRelFlag) + dep.SetFlag(NonDependentRelFlag) + dep.SetFlag(ExcludedTxFlag) + compared := NewTxDep(nil, NonDependentRelFlag, ExcludedTxFlag) + require.Equal(t, dep, compared) + require.Equal(t, NonDependentRelFlag|ExcludedTxFlag, *dep.Flags) + dep.ClearFlag(ExcludedTxFlag) + require.Equal(t, NonDependentRelFlag, *dep.Flags) + require.True(t, dep.CheckFlag(NonDependentRelFlag)) + require.False(t, dep.CheckFlag(ExcludedTxFlag)) +} + +func TestDepExcludeTxRange(t *testing.T) { + tests := []struct { + src []uint64 + from uint64 + to uint64 + expect []uint64 + }{ + { + src: nil, + from: 0, + to: 4, + expect: nil, + }, + { + src: []uint64{}, + from: 0, + to: 4, + expect: []uint64{}, + }, + { + src: []uint64{0, 1, 2, 3, 4}, + from: 4, + to: 4, + expect: []uint64{4}, + }, + { + src: []uint64{0, 1, 2, 3, 4}, + from: 1, + to: 3, + expect: []uint64{1, 2, 3}, + }, + { + src: []uint64{0, 1, 2, 3, 4}, + from: 5, + to: 6, + expect: nil, + }, + { + src: []uint64{2, 3, 4}, + from: 0, + to: 1, + expect: nil, + }, + { + src: []uint64{0, 1, 2, 3, 4}, + from: 0, + to: 4, + expect: []uint64{0, 1, 2, 3, 4}, + }, + } + + for i, item := range tests { + require.Equal(t, item.expect, depExcludeTxRange(item.src, item.from, item.to), i) + } +} diff --git a/core/types/mvstates.go b/core/types/mvstates.go index bfcaf13613..4637b71d2f 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -6,8 +6,7 @@ import ( "fmt" "strings" "sync" - - "github.com/ethereum/go-ethereum/metrics" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -32,6 +31,10 @@ const ( AccountSuicide ) +const ( + asyncDepGenChanSize = 10000 +) + func AccountStateKey(account common.Address, state AccountState) RWKey { var key RWKey key[0] = AccountStatePrefix @@ -99,15 +102,16 @@ type RWSet struct { readSet map[RWKey]*RWItem writeSet map[RWKey]*RWItem + // some flags rwRecordDone bool - mustSerial bool + excludedTx bool } func NewRWSet(ver StateVersion) *RWSet { return &RWSet{ ver: ver, - readSet: make(map[RWKey]*RWItem), - writeSet: make(map[RWKey]*RWItem), + readSet: make(map[RWKey]*RWItem, 64), + writeSet: make(map[RWKey]*RWItem, 32), } } @@ -146,19 +150,11 @@ func (s *RWSet) WriteSet() map[RWKey]*RWItem { return s.writeSet } -func (s *RWSet) WithSerialFlag() *RWSet { - s.mustSerial = true +func (s *RWSet) WithExcludedTxFlag() *RWSet { + s.excludedTx = true return s } -func (s *RWSet) RWRecordDone() bool { - return s.rwRecordDone -} - -func (s *RWSet) SetRWRecordDone() { - s.rwRecordDone = true -} - func (s *RWSet) String() string { builder := strings.Builder{} 
builder.WriteString(fmt.Sprintf("tx: %v, inc: %v\nreadSet: [", s.ver.TxIndex, s.ver.TxIncarnation)) @@ -245,7 +241,7 @@ type PendingWrites struct { func NewPendingWrites() *PendingWrites { return &PendingWrites{ - list: make([]*RWItem, 0), + list: make([]*RWItem, 0, 8), } } @@ -290,14 +286,31 @@ func (w *PendingWrites) FindLastWrite(txIndex int) *RWItem { return nil } +func (w *PendingWrites) FindPrevWrites(txIndex int) []*RWItem { + var i, _ = w.SearchTxIndex(txIndex) + for j := i - 1; j >= 0; j-- { + if w.list[j].TxIndex() < txIndex { + return w.list[:j+1] + } + } + + return nil +} + type MVStates struct { rwSets map[int]*RWSet pendingWriteSet map[RWKey]*PendingWrites nextFinaliseIndex int // dependency map cache for generating TxDAG - // depsCache[i].exist(j) means j->i, and i > j - depsCache map[int]TxDepMap + // depMapCache[i].exist(j) means j->i, and i > j + depMapCache map[int]TxDepMap + depsCache map[int][]uint64 + + // async dep analysis + depsGenChan chan int + stopChan chan struct{} + asyncRunning bool // execution stat infos stats map[int]*ExeStat @@ -308,11 +321,56 @@ func NewMVStates(txCount int) *MVStates { return &MVStates{ rwSets: make(map[int]*RWSet, txCount), pendingWriteSet: make(map[RWKey]*PendingWrites, txCount*8), - depsCache: make(map[int]TxDepMap, txCount), + depMapCache: make(map[int]TxDepMap, txCount), + depsCache: make(map[int][]uint64, txCount), stats: make(map[int]*ExeStat, txCount), } } +func (s *MVStates) EnableAsyncDepGen() *MVStates { + s.lock.Lock() + defer s.lock.Unlock() + s.depsGenChan = make(chan int, asyncDepGenChanSize) + s.stopChan = make(chan struct{}) + s.asyncRunning = true + go s.asyncDepGenLoop() + return s +} + +func (s *MVStates) Stop() error { + s.lock.Lock() + defer s.lock.Unlock() + s.stopAsyncDepGen() + return nil +} + +func (s *MVStates) stopAsyncDepGen() { + if !s.asyncRunning { + return + } + s.asyncRunning = false + if s.stopChan != nil { + close(s.stopChan) + } +} + +func (s *MVStates) asyncDepGenLoop() { + timeout := time.After(3 * time.Second) + for { + select { + case tx := <-s.depsGenChan: + s.lock.Lock() + s.resolveDepsCacheByWrites(tx, s.rwSets[tx]) + s.lock.Unlock() + case <-s.stopChan: + return + case <-timeout: + log.Warn("asyncDepGenLoop exit by timeout") + return + } + } +} + func (s *MVStates) RWSets() map[int]*RWSet { s.lock.RLock() defer s.lock.RUnlock() @@ -363,13 +421,12 @@ func (s *MVStates) FulfillRWSet(rwSet *RWSet, stat *ExeStat) error { s.stats[index] = stat } - if metrics.EnabledExpensive { - for k := range rwSet.writeSet { - // this action is only for testing, it runs when enable expensive metrics. - checkRWSetInconsistent(index, k, rwSet.readSet, rwSet.writeSet) - } - } - s.resolveDepsCache(index, rwSet) + //if metrics.EnabledExpensive { + // for k := range rwSet.writeSet { + // // this action is only for testing, it runs when enable expensive metrics. 
+ // checkRWSetInconsistent(index, k, rwSet.readSet, rwSet.writeSet) + // } + //} s.rwSets[index] = rwSet return nil } @@ -378,14 +435,15 @@ func (s *MVStates) FulfillRWSet(rwSet *RWSet, stat *ExeStat) error { func (s *MVStates) Finalise(index int) error { log.Debug("Finalise", "total", len(s.rwSets), "index", index) s.lock.Lock() - defer s.lock.Unlock() rwSet := s.rwSets[index] if rwSet == nil { + s.lock.Unlock() return fmt.Errorf("finalise a non-exist RWSet, index: %d", index) } if index != s.nextFinaliseIndex { + s.lock.Unlock() return fmt.Errorf("finalise in wrong order, next: %d, input: %d", s.nextFinaliseIndex, index) } @@ -397,29 +455,96 @@ func (s *MVStates) Finalise(index int) error { s.pendingWriteSet[k].Append(v) } s.nextFinaliseIndex++ + s.lock.Unlock() + // async resolve dependency, but non-block action + if s.asyncRunning && s.depsGenChan != nil { + s.depsGenChan <- index + } return nil } +func (s *MVStates) resolveDepsCacheByWrites(index int, rwSet *RWSet) { + // analysis dep, if the previous transaction is not executed/validated, re-analysis is required + s.depMapCache[index] = NewTxDeps(8) + if rwSet.excludedTx { + return + } + seen := make(map[int]struct{}, 8) + // check tx dependency, only check key, skip version + if len(s.pendingWriteSet) > len(rwSet.readSet) { + for key := range rwSet.readSet { + // check self destruct + if key.IsAccountSelf() { + key = AccountStateKey(key.Addr(), AccountSuicide) + } + writes := s.pendingWriteSet[key] + if writes == nil { + continue + } + items := writes.FindPrevWrites(index) + for _, item := range items { + seen[item.TxIndex()] = struct{}{} + } + } + } else { + for k, w := range s.pendingWriteSet { + // check suicide, add read address flag, it only for check suicide quickly, and cannot for other scenarios. 
+ if k.IsAccountSuicide() { + k = k.ToAccountSelf() + } + if _, ok := rwSet.readSet[k]; !ok { + continue + } + items := w.FindPrevWrites(index) + for _, item := range items { + seen[item.TxIndex()] = struct{}{} + } + } + } + for prev := 0; prev < index; prev++ { + if _, ok := seen[prev]; !ok { + continue + } + s.depMapCache[index].add(prev) + // clear redundancy deps compared with prev + for dep := range s.depMapCache[index] { + if s.depMapCache[prev].exist(dep) { + s.depMapCache[index].remove(dep) + } + } + } + s.depsCache[index] = s.depMapCache[index].toArray() +} + func (s *MVStates) resolveDepsCache(index int, rwSet *RWSet) { // analysis dep, if the previous transaction is not executed/validated, re-analysis is required - s.depsCache[index] = NewTxDeps(0) + s.depMapCache[index] = NewTxDeps(0) + if rwSet.excludedTx { + return + } for prev := 0; prev < index; prev++ { // if there are some parallel execution or system txs, it will fulfill in advance // it's ok, and try re-generate later - if _, ok := s.rwSets[prev]; !ok { + prevSet, ok := s.rwSets[prev] + if !ok { + continue + } + // if prev tx is tagged ExcludedTxFlag, just skip the check + if prevSet.excludedTx { continue } // check if there has written op before i - if checkDependency(s.rwSets[prev].writeSet, rwSet.readSet) { - s.depsCache[index].add(prev) + if checkDependency(prevSet.writeSet, rwSet.readSet) { + s.depMapCache[index].add(prev) // clear redundancy deps compared with prev - for dep := range s.depsCache[index] { - if s.depsCache[prev].exist(dep) { - s.depsCache[index].remove(dep) + for dep := range s.depMapCache[index] { + if s.depMapCache[prev].exist(dep) { + s.depMapCache[index].remove(dep) } } } } + s.depsCache[index] = s.depMapCache[index].toArray() } func checkRWSetInconsistent(index int, k RWKey, readSet map[RWKey]*RWItem, writeSet map[RWKey]*RWItem) bool { @@ -447,13 +572,17 @@ func checkRWSetInconsistent(index int, k RWKey, readSet map[RWKey]*RWItem, write // ResolveTxDAG generate TxDAG from RWSets func (s *MVStates) ResolveTxDAG(txCnt int, gasFeeReceivers []common.Address) (TxDAG, error) { - s.lock.RLock() - defer s.lock.RUnlock() + s.lock.Lock() + defer s.lock.Unlock() if len(s.rwSets) != txCnt { return nil, fmt.Errorf("wrong rwSet count, expect: %v, actual: %v", txCnt, len(s.rwSets)) } + if txCnt != s.nextFinaliseIndex { + return nil, fmt.Errorf("resolve in wrong order, next: %d, input: %d", s.nextFinaliseIndex, txCnt) + } + s.stopAsyncDepGen() txDAG := NewPlainTxDAG(len(s.rwSets)) - for i := txCnt - 1; i >= 0; i-- { + for i := 0; i < txCnt; i++ { // check if there are RW with gas fee receiver for gas delay calculation for _, addr := range gasFeeReceivers { if _, ok := s.rwSets[i].readSet[AccountStateKey(addr, AccountSelf)]; ok { @@ -461,20 +590,20 @@ func (s *MVStates) ResolveTxDAG(txCnt int, gasFeeReceivers []common.Address) (Tx } } txDAG.TxDeps[i].TxIndexes = []uint64{} - if s.rwSets[i].mustSerial { - txDAG.TxDeps[i].Relation = &TxDAGRelation1 + if s.rwSets[i].excludedTx { + txDAG.TxDeps[i].SetFlag(ExcludedTxFlag) continue } - if s.depsCache[i] == nil { - s.resolveDepsCache(i, s.rwSets[i]) + if s.depMapCache[i] == nil { + s.resolveDepsCacheByWrites(i, s.rwSets[i]) } - deps := s.depsCache[i].toArray() + deps := s.depsCache[i] if len(deps) <= (txCnt-1)/2 { txDAG.TxDeps[i].TxIndexes = deps continue } - // if tx deps larger than half of txs, then convert to relation1 - txDAG.TxDeps[i].Relation = &TxDAGRelation1 + // if tx deps larger than half of txs, then convert with NonDependentRelFlag + 
txDAG.TxDeps[i].SetFlag(NonDependentRelFlag) for j := uint64(0); j < uint64(txCnt); j++ { if !slices.Contains(deps, j) && j != uint64(i) { txDAG.TxDeps[i].TxIndexes = append(txDAG.TxDeps[i].TxIndexes, j) diff --git a/core/types/mvstates_test.go b/core/types/mvstates_test.go index 9dcd83a6ba..7a0e16db8c 100644 --- a/core/types/mvstates_test.go +++ b/core/types/mvstates_test.go @@ -1,12 +1,21 @@ package types import ( + "bytes" + "compress/gzip" + "fmt" "testing" + "time" + + "github.com/cometbft/cometbft/libs/rand" + "github.com/golang/snappy" "github.com/holiman/uint256" "github.com/stretchr/testify/require" ) +const mockRWSetSize = 5000 + func TestMVStates_BasicUsage(t *testing.T) { ms := NewMVStates(0) require.NoError(t, ms.FulfillRWSet(mockRWSetWithVal(0, []interface{}{"0x00", 0}, []interface{}{"0x00", 0}), nil)) @@ -41,39 +50,179 @@ func TestMVStates_BasicUsage(t *testing.T) { func TestMVStates_SimpleResolveTxDAG(t *testing.T) { ms := NewMVStates(10) + finaliseRWSets(t, ms, []*RWSet{ + mockRWSet(0, []string{"0x00"}, []string{"0x00"}), + mockRWSet(1, []string{"0x01"}, []string{"0x01"}), + mockRWSet(2, []string{"0x02"}, []string{"0x02"}), + mockRWSet(3, []string{"0x00", "0x03"}, []string{"0x03"}), + mockRWSet(4, []string{"0x00", "0x04"}, []string{"0x04"}), + mockRWSet(5, []string{"0x01", "0x02", "0x05"}, []string{"0x05"}), + mockRWSet(6, []string{"0x02", "0x05", "0x06"}, []string{"0x06"}), + mockRWSet(7, []string{"0x06", "0x07"}, []string{"0x07"}), + mockRWSet(8, []string{"0x08"}, []string{"0x08"}), + mockRWSet(9, []string{"0x08", "0x09"}, []string{"0x09"}), + }) + + dag, err := ms.ResolveTxDAG(10, nil) + require.NoError(t, err) + require.Equal(t, mockSimpleDAG(), dag) + t.Log(dag) +} - ms.rwSets[0] = mockRWSet(0, []string{"0x00"}, []string{"0x00"}) - ms.rwSets[1] = mockRWSet(1, []string{"0x01"}, []string{"0x01"}) - ms.rwSets[2] = mockRWSet(2, []string{"0x02"}, []string{"0x02"}) - ms.rwSets[3] = mockRWSet(3, []string{"0x00", "0x03"}, []string{"0x03"}) - ms.rwSets[4] = mockRWSet(4, []string{"0x00", "0x04"}, []string{"0x04"}) - ms.rwSets[5] = mockRWSet(5, []string{"0x01", "0x02", "0x05"}, []string{"0x05"}) - ms.rwSets[6] = mockRWSet(6, []string{"0x02", "0x05", "0x06"}, []string{"0x06"}) - ms.rwSets[7] = mockRWSet(7, []string{"0x06", "0x07"}, []string{"0x07"}) - ms.rwSets[8] = mockRWSet(8, []string{"0x08"}, []string{"0x08"}) - ms.rwSets[9] = mockRWSet(9, []string{"0x08", "0x09"}, []string{"0x09"}) +func TestMVStates_AsyncDepGen_SimpleResolveTxDAG(t *testing.T) { + ms := NewMVStates(10).EnableAsyncDepGen() + finaliseRWSets(t, ms, []*RWSet{ + mockRWSet(0, []string{"0x00"}, []string{"0x00"}), + mockRWSet(1, []string{"0x01"}, []string{"0x01"}), + mockRWSet(2, []string{"0x02"}, []string{"0x02"}), + mockRWSet(3, []string{"0x00", "0x03"}, []string{"0x03"}), + mockRWSet(4, []string{"0x00", "0x04"}, []string{"0x04"}), + mockRWSet(5, []string{"0x01", "0x02", "0x05"}, []string{"0x05"}), + mockRWSet(6, []string{"0x02", "0x05", "0x06"}, []string{"0x06"}), + mockRWSet(7, []string{"0x06", "0x07"}, []string{"0x07"}), + mockRWSet(8, []string{"0x08"}, []string{"0x08"}), + mockRWSet(9, []string{"0x08", "0x09"}, []string{"0x09"}), + }) + time.Sleep(10 * time.Millisecond) dag, err := ms.ResolveTxDAG(10, nil) require.NoError(t, err) + time.Sleep(100 * time.Millisecond) + require.NoError(t, ms.Stop()) require.Equal(t, mockSimpleDAG(), dag) t.Log(dag) } +func TestMVStates_ResolveTxDAG_Async(t *testing.T) { + txCnt := 10000 + rwSets := mockRandomRWSet(txCnt) + ms1 := NewMVStates(txCnt).EnableAsyncDepGen() + for i 
:= 0; i < txCnt; i++ { + require.NoError(t, ms1.FulfillRWSet(rwSets[i], nil)) + require.NoError(t, ms1.Finalise(i)) + } + time.Sleep(100 * time.Millisecond) + _, err := ms1.ResolveTxDAG(txCnt, nil) + require.NoError(t, err) +} + +func TestMVStates_ResolveTxDAG_Compare(t *testing.T) { + txCnt := 3000 + rwSets := mockRandomRWSet(txCnt) + ms1 := NewMVStates(txCnt) + ms2 := NewMVStates(txCnt) + for i, rwSet := range rwSets { + ms1.rwSets[i] = rwSet + ms2.rwSets[i] = rwSet + require.NoError(t, ms2.Finalise(i)) + } + + d1 := resolveTxDAGInMVStates(ms1) + d2 := resolveTxDAGByWritesInMVStates(ms2) + require.Equal(t, d1.(*PlainTxDAG).String(), d2.(*PlainTxDAG).String()) +} + +func TestMVStates_TxDAG_Compression(t *testing.T) { + txCnt := 10000 + rwSets := mockRandomRWSet(txCnt) + ms1 := NewMVStates(txCnt) + for i, rwSet := range rwSets { + ms1.rwSets[i] = rwSet + ms1.Finalise(i) + } + dag := resolveTxDAGByWritesInMVStates(ms1) + enc, err := EncodeTxDAG(dag) + require.NoError(t, err) + + // snappy compression + start := time.Now() + encoded := snappy.Encode(nil, enc) + t.Log("snappy", "enc", len(enc), "compressed", len(encoded), + "ratio", 1-(float64(len(encoded))/float64(len(enc))), + "time", float64(time.Since(start).Microseconds())/1000) + + // gzip compression + start = time.Now() + var buf bytes.Buffer + zw := gzip.NewWriter(&buf) + _, err = zw.Write(enc) + require.NoError(t, err) + err = zw.Close() + require.NoError(t, err) + encoded = buf.Bytes() + t.Log("gzip", "enc", len(enc), "compressed", len(encoded), + "ratio", 1-(float64(len(encoded))/float64(len(enc))), + "time", float64(time.Since(start).Microseconds())/1000) +} + +func BenchmarkResolveTxDAGInMVStates(b *testing.B) { + rwSets := mockRandomRWSet(mockRWSetSize) + ms1 := NewMVStates(mockRWSetSize) + for i, rwSet := range rwSets { + ms1.rwSets[i] = rwSet + } + for i := 0; i < b.N; i++ { + resolveTxDAGInMVStates(ms1) + } +} + +func BenchmarkResolveTxDAGByWritesInMVStates(b *testing.B) { + rwSets := mockRandomRWSet(mockRWSetSize) + ms1 := NewMVStates(mockRWSetSize) + for i, rwSet := range rwSets { + ms1.rwSets[i] = rwSet + ms1.Finalise(i) + } + for i := 0; i < b.N; i++ { + resolveTxDAGByWritesInMVStates(ms1) + } +} + +func BenchmarkMVStates_Finalise(b *testing.B) { + rwSets := mockRandomRWSet(mockRWSetSize) + ms1 := NewMVStates(mockRWSetSize) + for i := 0; i < b.N; i++ { + for k, rwSet := range rwSets { + ms1.rwSets[k] = rwSet + ms1.Finalise(k) + } + } +} + +func resolveTxDAGInMVStates(s *MVStates) TxDAG { + txDAG := NewPlainTxDAG(len(s.rwSets)) + for i := 0; i < len(s.rwSets); i++ { + s.resolveDepsCache(i, s.rwSets[i]) + txDAG.TxDeps[i].TxIndexes = s.depsCache[i] + } + return txDAG +} + +func resolveTxDAGByWritesInMVStates(s *MVStates) TxDAG { + txDAG := NewPlainTxDAG(len(s.rwSets)) + for i := 0; i < len(s.rwSets); i++ { + s.resolveDepsCacheByWrites(i, s.rwSets[i]) + txDAG.TxDeps[i].TxIndexes = s.depsCache[i] + } + return txDAG +} + func TestMVStates_SystemTxResolveTxDAG(t *testing.T) { ms := NewMVStates(12) - - ms.rwSets[0] = mockRWSet(0, []string{"0x00"}, []string{"0x00"}) - ms.rwSets[1] = mockRWSet(1, []string{"0x01"}, []string{"0x01"}) - ms.rwSets[2] = mockRWSet(2, []string{"0x02"}, []string{"0x02"}) - ms.rwSets[3] = mockRWSet(3, []string{"0x00", "0x03"}, []string{"0x03"}) - ms.rwSets[4] = mockRWSet(4, []string{"0x00", "0x04"}, []string{"0x04"}) - ms.rwSets[5] = mockRWSet(5, []string{"0x01", "0x02", "0x05"}, []string{"0x05"}) - ms.rwSets[6] = mockRWSet(6, []string{"0x02", "0x05", "0x06"}, []string{"0x06"}) - ms.rwSets[7] = 
mockRWSet(7, []string{"0x06", "0x07"}, []string{"0x07"}) - ms.rwSets[8] = mockRWSet(8, []string{"0x08"}, []string{"0x08"}) - ms.rwSets[9] = mockRWSet(9, []string{"0x08", "0x09"}, []string{"0x09"}) - ms.rwSets[10] = mockRWSet(10, []string{"0x10"}, []string{"0x10"}).WithSerialFlag() - ms.rwSets[11] = mockRWSet(11, []string{"0x11"}, []string{"0x11"}).WithSerialFlag() + finaliseRWSets(t, ms, []*RWSet{ + mockRWSet(0, []string{"0x00"}, []string{"0x00"}), + mockRWSet(1, []string{"0x01"}, []string{"0x01"}), + mockRWSet(2, []string{"0x02"}, []string{"0x02"}), + mockRWSet(3, []string{"0x00", "0x03"}, []string{"0x03"}), + mockRWSet(4, []string{"0x00", "0x04"}, []string{"0x04"}), + mockRWSet(5, []string{"0x01", "0x02", "0x05"}, []string{"0x05"}), + mockRWSet(6, []string{"0x02", "0x05", "0x06"}, []string{"0x06"}), + mockRWSet(7, []string{"0x06", "0x07"}, []string{"0x07"}), + mockRWSet(8, []string{"0x08"}, []string{"0x08"}), + mockRWSet(9, []string{"0x08", "0x09"}, []string{"0x09"}), + mockRWSet(10, []string{"0x10"}, []string{"0x10"}).WithExcludedTxFlag(), + mockRWSet(11, []string{"0x11"}, []string{"0x11"}).WithExcludedTxFlag(), + }) dag, err := ms.ResolveTxDAG(12, nil) require.NoError(t, err) @@ -83,20 +232,20 @@ func TestMVStates_SystemTxResolveTxDAG(t *testing.T) { func TestMVStates_SystemTxWithLargeDepsResolveTxDAG(t *testing.T) { ms := NewMVStates(12) - - ms.rwSets[0] = mockRWSet(0, []string{"0x00"}, []string{"0x00"}) - ms.rwSets[1] = mockRWSet(1, []string{"0x01"}, []string{"0x01"}) - ms.rwSets[2] = mockRWSet(2, []string{"0x02"}, []string{"0x02"}) - ms.rwSets[3] = mockRWSet(3, []string{"0x00", "0x03"}, []string{"0x03"}) - ms.rwSets[4] = mockRWSet(4, []string{"0x00", "0x04"}, []string{"0x04"}) - ms.rwSets[5] = mockRWSet(5, []string{"0x01", "0x02", "0x05"}, []string{"0x05"}) - ms.rwSets[6] = mockRWSet(6, []string{"0x02", "0x05", "0x06"}, []string{"0x06"}) - ms.rwSets[7] = mockRWSet(7, []string{"0x00", "0x01", "0x03", "0x05", "0x06", "0x07"}, []string{"0x07"}) - ms.rwSets[8] = mockRWSet(8, []string{"0x08"}, []string{"0x08"}) - ms.rwSets[9] = mockRWSet(9, []string{"0x00", "0x01", "0x02", "0x03", "0x04", "0x08", "0x09"}, []string{"0x09"}) - ms.rwSets[10] = mockRWSet(10, []string{"0x10"}, []string{"0x10"}).WithSerialFlag() - ms.rwSets[11] = mockRWSet(11, []string{"0x11"}, []string{"0x11"}).WithSerialFlag() - + finaliseRWSets(t, ms, []*RWSet{ + mockRWSet(0, []string{"0x00"}, []string{"0x00"}), + mockRWSet(1, []string{"0x01"}, []string{"0x01"}), + mockRWSet(2, []string{"0x02"}, []string{"0x02"}), + mockRWSet(3, []string{"0x00", "0x03"}, []string{"0x03"}), + mockRWSet(4, []string{"0x00", "0x04"}, []string{"0x04"}), + mockRWSet(5, []string{"0x01", "0x02", "0x05"}, []string{"0x05"}), + mockRWSet(6, []string{"0x02", "0x05", "0x06"}, []string{"0x06"}), + mockRWSet(7, []string{"0x00", "0x03", "0x07"}, []string{"0x07"}), + mockRWSet(8, []string{"0x08"}, []string{"0x08"}), + mockRWSet(9, []string{"0x00", "0x01", "0x02", "0x06", "0x07", "0x08", "0x09"}, []string{"0x09"}), + mockRWSet(10, []string{"0x10"}, []string{"0x10"}).WithExcludedTxFlag(), + mockRWSet(11, []string{"0x11"}, []string{"0x11"}).WithExcludedTxFlag(), + }) dag, err := ms.ResolveTxDAG(12, nil) require.NoError(t, err) require.Equal(t, mockSystemTxDAGWithLargeDeps(), dag) @@ -255,6 +404,51 @@ func mockRWSetWithVal(index int, read []interface{}, write []interface{}) *RWSet return set } +func mockRandomRWSet(count int) []*RWSet { + var ret []*RWSet + for i := 0; i < count; i++ { + read := []string{fmt.Sprintf("0x%d", i)} + write := 
[]string{fmt.Sprintf("0x%d", i)} + if i != 0 && rand.Bool() { + depCnt := rand.Int()%i + 1 + last := 0 + for j := 0; j < depCnt; j++ { + num, ok := randInRange(last, i) + if !ok { + break + } + read = append(read, fmt.Sprintf("0x%d", num)) + last = num + } + } + // random read + for j := 0; j < 20; j++ { + read = append(read, fmt.Sprintf("rr-%d-%d", j, rand.Int())) + } + for j := 0; j < 5; j++ { + read = append(read, fmt.Sprintf("rw-%d-%d", j, rand.Int())) + } + // random write + s := mockRWSet(i, read, write) + ret = append(ret, s) + } + return ret +} + +func finaliseRWSets(t *testing.T, mv *MVStates, rwSets []*RWSet) { + for i, rwSet := range rwSets { + require.NoError(t, mv.FulfillRWSet(rwSet, nil)) + require.NoError(t, mv.Finalise(i)) + } +} + +func randInRange(i, j int) (int, bool) { + if i >= j { + return 0, false + } + return rand.Int()%(j-i) + i, true +} + func str2key(k string) RWKey { key := RWKey{} if len(k) > len(key) { diff --git a/core/vm/interface.go b/core/vm/interface.go index 0242db0138..49de25e803 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -82,6 +82,7 @@ type StateDB interface { TxIndex() int + // parallel DAG related BeforeTxTransition() FinaliseRWSet() error } diff --git a/miner/worker.go b/miner/worker.go index 41c3d254de..43b9b6f454 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1190,9 +1190,6 @@ func (w *worker) fillTransactions(interrupt *atomic.Int32, env *environment) err w.mu.RUnlock() start := time.Now() - if w.chain.TxDAGEnabled() { - env.state.ResetMVStates(0) - } // Retrieve the pending transactions pre-filtered by the 1559/4844 dynamic fees filter := txpool.PendingFilter{ MinTip: tip, @@ -1277,6 +1274,9 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { misc.EnsureCreate2Deployer(w.chainConfig, work.header.Time, work.state) start := time.Now() + if w.chain.TxDAGEnabledWhenMine() { + work.state.ResetMVStates(0) + } for _, tx := range genParams.txs { from, _ := types.Sender(work.signer, tx) work.state.SetTxContext(tx.Hash(), work.tcount) @@ -1284,6 +1284,9 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { if err != nil { return &newPayloadResult{err: fmt.Errorf("failed to force-include tx: %s type: %d sender: %s nonce: %d, err: %w", tx.Hash(), tx.Type(), from, tx.Nonce(), err)} } + if tx.IsSystemTx() || tx.IsDepositTx() { + work.state.RecordSystemTxRWSet(work.tcount) + } work.tcount++ } commitDepositTxsTimer.UpdateSince(start) @@ -1337,6 +1340,9 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { if intr := genParams.interrupt; intr != nil && genParams.isUpdate && intr.Load() != commitInterruptNone { return &newPayloadResult{err: errInterruptedUpdate} } + // TODO(galaio): fulfill TxDAG to mined block + //if w.chain.TxDAGEnabledWhenMine() { + //} start = time.Now() block, err := w.engine.FinalizeAndAssemble(w.chain, work.header, work.state, work.txs, nil, work.receipts, genParams.withdrawals) @@ -1347,15 +1353,6 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { return &newPayloadResult{err: fmt.Errorf("empty block root")} } - // TODO(galaio): fulfill TxDAG to mined block - //if w.chain.TxDAGEnabled() && w.chainConfig.Optimism != nil { - // txDAG, _ := work.state.ResolveTxDAG([]common.Address{work.coinbase, params.OptimismBaseFeeRecipient, params.OptimismL1FeeRecipient}) - // rawTxDAG, err := types.EncodeTxDAG(txDAG) - // if err != nil { - // return &newPayloadResult{err: err} - // } - //} - 
assembleBlockTimer.UpdateSince(start) log.Debug("assembleBlockTimer", "duration", common.PrettyDuration(time.Since(start)), "parentHash", genParams.parentHash) From a70fe85f1294a748453235ccf8daa6b4a6a57412 Mon Sep 17 00:00:00 2001 From: andyzhang2023 <147463846+andyzhang2023@users.noreply.github.com> Date: Tue, 13 Aug 2024 21:06:16 +0800 Subject: [PATCH 04/42] txDAG transfer (#28) * txDAG transfer * encode/decode txDAG data with ABI * set txDAG receiver to a special address --------- Co-authored-by: andyzhang2023 merge conflict fix conflict --- cmd/geth/main.go | 1 + cmd/utils/flags.go | 14 ++++++++ miner/miner.go | 3 ++ miner/worker.go | 81 ++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 99 insertions(+) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index b6ccb08433..784be6d7f3 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -171,6 +171,7 @@ var ( utils.RollupHaltOnIncompatibleProtocolVersionFlag, utils.RollupSuperchainUpgradesFlag, utils.ParallelTxDAGFlag, + utils.ParallelTxDAGSenderPrivFlag, configFileFlag, utils.LogDebugFlag, utils.LogBacktraceAtFlag, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index b96207dd2b..ac46716bc6 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -1110,6 +1110,13 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server. Usage: "enable opcode optimization", Category: flags.VMCategory, } + + ParallelTxDAGSenderPrivFlag = &cli.StringFlag{ + Name: "parallel.txdagsenderpriv", + Usage: "private key of the sender who sends the TxDAG transactions", + Value: "", + Category: flags.VMCategory, + } ) var ( @@ -1999,6 +2006,13 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { cfg.EnableParallelTxDAG = ctx.Bool(ParallelTxDAGFlag.Name) } + if ctx.IsSet(ParallelTxDAGSenderPrivFlag.Name) { + priHex := ctx.String(ParallelTxDAGSenderPrivFlag.Name) + if cfg.Miner.ParallelTxDAGSenderPriv, err = crypto.HexToECDSA(priHex); err != nil { + Fatalf("Failed to parse txdag private key of %s, err: %v", ParallelTxDAGSenderPrivFlag.Name, err) + } + } + if ctx.IsSet(VMOpcodeOptimizeFlag.Name) { cfg.EnableOpcodeOptimizing = ctx.Bool(VMOpcodeOptimizeFlag.Name) if cfg.EnableOpcodeOptimizing { diff --git a/miner/miner.go b/miner/miner.go index 1f407851f4..08c60c186e 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -19,6 +19,7 @@ package miner import ( "context" + "crypto/ecdsa" "errors" "fmt" "math/big" @@ -109,6 +110,8 @@ type Config struct { EffectiveGasCeil uint64 // if non-zero, a gas ceiling to apply independent of the header's gaslimit value Mev MevConfig // Mev configuration + + ParallelTxDAGSenderPriv *ecdsa.PrivateKey // The private key for the parallel tx DAG sender } // DefaultConfig contains default settings for miner. 
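Context for the worker changes that follow: this commit carries the TxDAG inside an ordinary transaction whose calldata is the ABI-encoded DAG and whose recipient is the dedicated DefaultTxDAGAddress (0xda90...0000). The sketch below is illustrative only, not part of the patch; it assumes the core/types helpers introduced earlier in this series (NewPlainTxDAG, NewTxDep, ExcludedTxFlag, EncodeTxDAGCalldata, DecodeTxDAGCalldata) and shows the encode/decode roundtrip a block builder and a validator would perform.

    // Sketch only: exercises the calldata helpers added by this series.
    // Error handling is abbreviated; names follow the patch, not upstream geth.
    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/core/types"
    )

    func main() {
        // Build a tiny DAG: tx1 depends on tx0, tx2 is flagged as an excluded (system) tx.
        dag := types.NewPlainTxDAG(3)
        dag.TxDeps[0] = types.NewTxDep([]uint64{})
        dag.TxDeps[1] = types.NewTxDep([]uint64{0})
        dag.TxDeps[2] = types.NewTxDep([]uint64{}, types.ExcludedTxFlag)

        // ABI-encode into calldata; the miner attaches this as the data of a
        // zero-value transaction addressed to DefaultTxDAGAddress (see worker.go below).
        data, err := types.EncodeTxDAGCalldata(dag)
        if err != nil {
            panic(err)
        }

        // A consumer decodes the same payload back into a TxDAG.
        decoded, err := types.DecodeTxDAGCalldata(data)
        if err != nil {
            panic(err)
        }
        fmt.Println("tx count:", decoded.TxCount())
    }
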
diff --git a/miner/worker.go b/miner/worker.go index 43b9b6f454..dbfd255f0b 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -18,6 +18,7 @@ package miner import ( "context" + "crypto/ecdsa" "errors" "fmt" "math/big" @@ -40,6 +41,7 @@ import ( "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth/tracers" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" @@ -102,6 +104,10 @@ var ( txErrReplayMeter = metrics.NewRegisteredMeter("miner/tx/replay", nil) ) +var ( + DefaultTxDAGAddress = common.HexToAddress("0xda90000000000000000000000000000000000000") +) + // environment is the worker's current environment and holds all // information of the sealing block generation. type environment struct { @@ -916,10 +922,32 @@ func (w *worker) commitTransactions(env *environment, plainTxs, blobTxs *transac } var coalescedLogs []*types.Log + //append the tx DAG transaction to the block + appendTxDAG := func() { + // whether enable TxDAG + if !w.chain.TxDAGEnabledWhenMine() { + return + } + // TODO this is a placeholder for the tx DAG data that will be generated by the stateDB + txForDAG, err := w.generateDAGTx(env.state, env.signer, env.tcount, env.coinbase) + if err != nil { + log.Warn("failed to generate DAG tx", "err", err) + return + } + logs, err := w.commitTransaction(env, txForDAG) + if err != nil { + log.Warn("failed to commit DAG tx", "err", err) + return + } + coalescedLogs = append(coalescedLogs, logs...) + env.tcount++ + } + for { // Check interruption signal and abort building if it's fired. if interrupt != nil { if signal := interrupt.Load(); signal != commitInterruptNone { + appendTxDAG() return signalToErr(signal) } } @@ -1018,6 +1046,7 @@ func (w *worker) commitTransactions(env *environment, plainTxs, blobTxs *transac txErrUnknownMeter.Mark(1) } } + appendTxDAG() if !w.isRunning() && len(coalescedLogs) > 0 { // We don't push the pendingLogsEvent while we are sealing. The reason is that // when we are sealing, the worker will regenerate a sealing block every 3 seconds. 
@@ -1036,6 +1065,58 @@ func (w *worker) commitTransactions(env *environment, plainTxs, blobTxs *transac return nil } +// generateDAGTx generates a DAG transaction for the block +func (w *worker) generateDAGTx(statedb *state.StateDB, signer types.Signer, txIndex int, coinbase common.Address) (*types.Transaction, error) { + if statedb == nil { + return nil, fmt.Errorf("failed to get state db, env.state=nil") + } + + if signer == nil { + return nil, fmt.Errorf("current signer is nil") + } + + //privateKey, err := crypto.HexToECDSA(privateKeyHex) + sender := w.config.ParallelTxDAGSenderPriv + receiver := DefaultTxDAGAddress + if sender == nil { + return nil, fmt.Errorf("missing sender private key") + } + + // get txDAG data from the stateDB + txDAG, err := statedb.ResolveTxDAG(txIndex, []common.Address{coinbase, params.OptimismBaseFeeRecipient, params.OptimismL1FeeRecipient}) + if txDAG == nil { + return nil, err + } + // txIndex is the index of this txDAG transaction + txDAG.SetTxDep(txIndex, types.TxDep{Flags: &types.NonDependentRelFlag}) + + publicKey := sender.Public() + publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey) + if !ok { + return nil, fmt.Errorf("error casting public key to ECDSA") + } + fromAddress := crypto.PubkeyToAddress(*publicKeyECDSA) + + // get nonce from the + nonce := statedb.GetNonce(fromAddress) + + data, err := types.EncodeTxDAGCalldata(txDAG) + if err != nil { + return nil, fmt.Errorf("failed to encode txDAG, err: %v", err) + } + + // Create the transaction + tx := types.NewTransaction(nonce, receiver, big.NewInt(0), 21100, big.NewInt(0), data) + + // Sign the transaction with the private key + signedTx, err := types.SignTx(tx, signer, sender) + if err != nil { + return nil, fmt.Errorf("failed to sign transaction, err: %v", err) + } + + return signedTx, nil +} + // generateParams wraps various of settings for generating sealing task. type generateParams struct { timestamp uint64 // The timestamp for sealing task From 5f9c6197cb358971383da17978a6a226a585a465 Mon Sep 17 00:00:00 2001 From: andyzhang2023 Date: Tue, 27 Aug 2024 16:18:10 +0800 Subject: [PATCH 05/42] fix bug: only generate DAG tx after both 'remote' and 'local' transactions are executed --- miner/worker.go | 46 +++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/miner/worker.go b/miner/worker.go index dbfd255f0b..0461ca2363 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -922,32 +922,10 @@ func (w *worker) commitTransactions(env *environment, plainTxs, blobTxs *transac } var coalescedLogs []*types.Log - //append the tx DAG transaction to the block - appendTxDAG := func() { - // whether enable TxDAG - if !w.chain.TxDAGEnabledWhenMine() { - return - } - // TODO this is a placeholder for the tx DAG data that will be generated by the stateDB - txForDAG, err := w.generateDAGTx(env.state, env.signer, env.tcount, env.coinbase) - if err != nil { - log.Warn("failed to generate DAG tx", "err", err) - return - } - logs, err := w.commitTransaction(env, txForDAG) - if err != nil { - log.Warn("failed to commit DAG tx", "err", err) - return - } - coalescedLogs = append(coalescedLogs, logs...) - env.tcount++ - } - for { // Check interruption signal and abort building if it's fired. 
if interrupt != nil { if signal := interrupt.Load(); signal != commitInterruptNone { - appendTxDAG() return signalToErr(signal) } } @@ -1046,7 +1024,6 @@ func (w *worker) commitTransactions(env *environment, plainTxs, blobTxs *transac txErrUnknownMeter.Mark(1) } } - appendTxDAG() if !w.isRunning() && len(coalescedLogs) > 0 { // We don't push the pendingLogsEvent while we are sealing. The reason is that // when we are sealing, the worker will regenerate a sealing block every 3 seconds. @@ -1304,6 +1281,27 @@ func (w *worker) fillTransactions(interrupt *atomic.Int32, env *environment) err localBlobTxs[account] = txs } } + + // generate and append DAG tx + appendTxDAG := func() { + // whether enable TxDAG + if !w.chain.TxDAGEnabledWhenMine() { + return + } + // TODO this is a placeholder for the tx DAG data that will be generated by the stateDB + txForDAG, err := w.generateDAGTx(env.state, env.signer, env.tcount, env.coinbase) + if err != nil { + log.Warn("failed to generate DAG tx", "err", err) + return + } + _, err = w.commitTransaction(env, txForDAG) + if err != nil { + log.Warn("failed to commit DAG tx", "err", err) + return + } + env.tcount++ + } + // Fill the block with all available pending transactions. start = time.Now() if len(localPlainTxs) > 0 || len(localBlobTxs) > 0 { @@ -1322,6 +1320,8 @@ func (w *worker) fillTransactions(interrupt *atomic.Int32, env *environment) err return err } } + // append a DAG tx at the end of the block + appendTxDAG() commitTxpoolTxsTimer.UpdateSince(start) log.Debug("commitTxpoolTxsTimer", "duration", common.PrettyDuration(time.Since(start)), "hash", env.header.Hash()) return nil From 560af7170be2bf7240debae124a0e4b13aff5e56 Mon Sep 17 00:00:00 2001 From: andyzhang2023 Date: Tue, 27 Aug 2024 16:21:45 +0800 Subject: [PATCH 06/42] fix bug: only generate DAG tx after both 'remote' and 'local' transactions are executed --- miner/worker.go | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/miner/worker.go b/miner/worker.go index 0461ca2363..76a9924cfd 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1042,6 +1042,26 @@ func (w *worker) commitTransactions(env *environment, plainTxs, blobTxs *transac return nil } +// generate and append DAG tx +func (w *worker) appendTxDAG(env *environment) { + // whether enable TxDAG + if !w.chain.TxDAGEnabledWhenMine() { + return + } + // TODO this is a placeholder for the tx DAG data that will be generated by the stateDB + txForDAG, err := w.generateDAGTx(env.state, env.signer, env.tcount, env.coinbase) + if err != nil { + log.Warn("failed to generate DAG tx", "err", err) + return + } + _, err = w.commitTransaction(env, txForDAG) + if err != nil { + log.Warn("failed to commit DAG tx", "err", err) + return + } + env.tcount++ +} + // generateDAGTx generates a DAG transaction for the block func (w *worker) generateDAGTx(statedb *state.StateDB, signer types.Signer, txIndex int, coinbase common.Address) (*types.Transaction, error) { if statedb == nil { @@ -1282,26 +1302,6 @@ func (w *worker) fillTransactions(interrupt *atomic.Int32, env *environment) err } } - // generate and append DAG tx - appendTxDAG := func() { - // whether enable TxDAG - if !w.chain.TxDAGEnabledWhenMine() { - return - } - // TODO this is a placeholder for the tx DAG data that will be generated by the stateDB - txForDAG, err := w.generateDAGTx(env.state, env.signer, env.tcount, env.coinbase) - if err != nil { - log.Warn("failed to generate DAG tx", "err", err) - return - } - _, err = 
w.commitTransaction(env, txForDAG) - if err != nil { - log.Warn("failed to commit DAG tx", "err", err) - return - } - env.tcount++ - } - // Fill the block with all available pending transactions. start = time.Now() if len(localPlainTxs) > 0 || len(localBlobTxs) > 0 { @@ -1321,7 +1321,7 @@ func (w *worker) fillTransactions(interrupt *atomic.Int32, env *environment) err } } // append a DAG tx at the end of the block - appendTxDAG() + w.appendTxDAG(env) commitTxpoolTxsTimer.UpdateSince(start) log.Debug("commitTxpoolTxsTimer", "duration", common.PrettyDuration(time.Since(start)), "hash", env.header.Hash()) return nil From 63c802ae9a02c831c53f67c4b52ba4d5358eeab0 Mon Sep 17 00:00:00 2001 From: andyzhang2023 Date: Thu, 29 Aug 2024 15:32:03 +0800 Subject: [PATCH 07/42] fix: append dag tx to the end of bundle txs --- miner/worker.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/miner/worker.go b/miner/worker.go index 76a9924cfd..1f072cef7b 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1320,8 +1320,6 @@ func (w *worker) fillTransactions(interrupt *atomic.Int32, env *environment) err return err } } - // append a DAG tx at the end of the block - w.appendTxDAG(env) commitTxpoolTxsTimer.UpdateSince(start) log.Debug("commitTxpoolTxsTimer", "duration", common.PrettyDuration(time.Since(start)), "hash", env.header.Hash()) return nil @@ -1400,6 +1398,8 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { }() err := w.fillTransactionsAndBundles(interrupt, work) wg.Wait() + // append a DAG tx at the end of the block + w.appendTxDAG(work) timer.Stop() // don't need timeout interruption any more if errors.Is(err, errFillBundleInterrupted) { log.Warn("fill bundles is interrupted, discard", "err", err) @@ -1407,6 +1407,8 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { } } else { err := w.fillTransactions(interrupt, work) + // append a DAG tx at the end of the block + w.appendTxDAG(work) timer.Stop() // don't need timeout interruption any more if errors.Is(err, errBlockInterruptedByTimeout) { log.Warn("Block building is interrupted", "allowance", common.PrettyDuration(w.newpayloadTimeout), "parentHash", genParams.parentHash) From 7ef7ba877c135cad6a64baa0f103825315985fee Mon Sep 17 00:00:00 2001 From: andyzhang2023 Date: Thu, 29 Aug 2024 16:06:51 +0800 Subject: [PATCH 08/42] fix: copy with mv state --- miner/worker.go | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/miner/worker.go b/miner/worker.go index 1f072cef7b..b9254d8051 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -130,7 +130,7 @@ type environment struct { func (env *environment) copy() *environment { cpy := &environment{ signer: env.signer, - state: env.state.Copy(), + state: env.state.CopyWithMvStates(), tcount: env.tcount, coinbase: env.coinbase, header: types.CopyHeader(env.header), @@ -1398,8 +1398,6 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { }() err := w.fillTransactionsAndBundles(interrupt, work) wg.Wait() - // append a DAG tx at the end of the block - w.appendTxDAG(work) timer.Stop() // don't need timeout interruption any more if errors.Is(err, errFillBundleInterrupted) { log.Warn("fill bundles is interrupted, discard", "err", err) @@ -1407,8 +1405,6 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { } } else { err := w.fillTransactions(interrupt, work) - // append a DAG tx at the end of the block - w.appendTxDAG(work) timer.Stop() // don't need timeout 
interruption any more if errors.Is(err, errBlockInterruptedByTimeout) { log.Warn("Block building is interrupted", "allowance", common.PrettyDuration(w.newpayloadTimeout), "parentHash", genParams.parentHash) @@ -1423,9 +1419,10 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { if intr := genParams.interrupt; intr != nil && genParams.isUpdate && intr.Load() != commitInterruptNone { return &newPayloadResult{err: errInterruptedUpdate} } - // TODO(galaio): fulfill TxDAG to mined block - //if w.chain.TxDAGEnabledWhenMine() { - //} + if w.chain.TxDAGEnabledWhenMine() { + // append a DAG tx at the end of the block + w.appendTxDAG(work) + } start = time.Now() block, err := w.engine.FinalizeAndAssemble(w.chain, work.header, work.state, work.txs, nil, work.receipts, genParams.withdrawals) From d11a90b03b42e4961b79ab59be99d23d64ad32ea Mon Sep 17 00:00:00 2001 From: galaio Date: Thu, 29 Aug 2024 14:38:59 +0800 Subject: [PATCH 09/42] mvstates: abandon RWKey, using origin key to record rwset; mvstates: support txdepMap to produce txdag; mvststes: using struct in rwset; --- core/state/state_object.go | 8 +- core/state/statedb.go | 65 ++-- core/types/mvstates.go | 589 +++++++++++++++++++++--------------- core/types/mvstates_test.go | 299 +++++++++++------- 4 files changed, 580 insertions(+), 381 deletions(-) diff --git a/core/state/state_object.go b/core/state/state_object.go index b432603ffe..d1af1f2a58 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -306,17 +306,17 @@ func (s *stateObject) finaliseRWSet() { if value == s.GetCommittedState(key) { continue } - s.db.RecordWrite(types.StorageStateKey(s.address, key), value) + s.db.RecordStorageWrite(s.address, key, value) } if s.dirtyNonce != nil && *s.dirtyNonce != s.data.Nonce { - s.db.RecordWrite(types.AccountStateKey(s.address, types.AccountNonce), *s.dirtyNonce) + s.db.RecordAccountWrite(s.address, types.AccountNonce, *s.dirtyNonce) } if s.dirtyBalance != nil && s.dirtyBalance.Cmp(s.data.Balance) != 0 { - s.db.RecordWrite(types.AccountStateKey(s.address, types.AccountBalance), new(uint256.Int).Set(s.dirtyBalance)) + s.db.RecordAccountWrite(s.address, types.AccountBalance, new(uint256.Int).Set(s.dirtyBalance)) } if s.dirtyCodeHash != nil && !slices.Equal(s.dirtyCodeHash, s.data.CodeHash) { - s.db.RecordWrite(types.AccountStateKey(s.address, types.AccountCodeHash), s.dirtyCodeHash) + s.db.RecordAccountWrite(s.address, types.AccountCodeHash, s.dirtyCodeHash) } } diff --git a/core/state/statedb.go b/core/state/statedb.go index 23bacbfafd..dcbc4ccaf2 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -348,7 +348,7 @@ func (s *StateDB) Empty(addr common.Address) bool { // GetBalance retrieves the balance from the given address or 0 if object not found func (s *StateDB) GetBalance(addr common.Address) (ret *uint256.Int) { defer func() { - s.RecordRead(types.AccountStateKey(addr, types.AccountBalance), ret) + s.RecordAccountRead(addr, types.AccountBalance, ret) }() stateObject := s.getStateObject(addr) if stateObject != nil { @@ -360,7 +360,7 @@ func (s *StateDB) GetBalance(addr common.Address) (ret *uint256.Int) { // GetNonce retrieves the nonce from the given address or 0 if object not found func (s *StateDB) GetNonce(addr common.Address) (ret uint64) { defer func() { - s.RecordRead(types.AccountStateKey(addr, types.AccountNonce), ret) + s.RecordAccountRead(addr, types.AccountNonce, ret) }() stateObject := s.getStateObject(addr) if stateObject != nil { @@ -387,7 +387,7 @@ func (s 
*StateDB) TxIndex() int { func (s *StateDB) GetCode(addr common.Address) []byte { defer func() { - s.RecordRead(types.AccountStateKey(addr, types.AccountCodeHash), s.GetCodeHash(addr)) + s.RecordAccountRead(addr, types.AccountCodeHash, s.GetCodeHash(addr)) }() stateObject := s.getStateObject(addr) if stateObject != nil { @@ -398,7 +398,7 @@ func (s *StateDB) GetCode(addr common.Address) []byte { func (s *StateDB) GetCodeSize(addr common.Address) int { defer func() { - s.RecordRead(types.AccountStateKey(addr, types.AccountCodeHash), s.GetCodeHash(addr)) + s.RecordAccountRead(addr, types.AccountCodeHash, s.GetCodeHash(addr)) }() stateObject := s.getStateObject(addr) if stateObject != nil { @@ -409,7 +409,7 @@ func (s *StateDB) GetCodeSize(addr common.Address) int { func (s *StateDB) GetCodeHash(addr common.Address) (ret common.Hash) { defer func() { - s.RecordRead(types.AccountStateKey(addr, types.AccountCodeHash), ret.Bytes()) + s.RecordAccountRead(addr, types.AccountCodeHash, ret.Bytes()) }() stateObject := s.getStateObject(addr) if stateObject != nil { @@ -421,7 +421,7 @@ func (s *StateDB) GetCodeHash(addr common.Address) (ret common.Hash) { // GetState retrieves a value from the given account's storage trie. func (s *StateDB) GetState(addr common.Address, hash common.Hash) (ret common.Hash) { defer func() { - s.RecordRead(types.StorageStateKey(addr, hash), ret) + s.RecordStorageRead(addr, hash, ret) }() stateObject := s.getStateObject(addr) if stateObject != nil { @@ -433,7 +433,7 @@ func (s *StateDB) GetState(addr common.Address, hash common.Hash) (ret common.Ha // GetCommittedState retrieves a value from the given account's committed storage trie. func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) (ret common.Hash) { defer func() { - s.RecordRead(types.StorageStateKey(addr, hash), ret) + s.RecordStorageRead(addr, hash, ret) }() stateObject := s.getStateObject(addr) if stateObject != nil { @@ -463,22 +463,22 @@ func (s *StateDB) HasSelfDestructed(addr common.Address) bool { func (s *StateDB) AddBalance(addr common.Address, amount *uint256.Int) { stateObject := s.getOrNewStateObject(addr) if stateObject != nil { - s.RecordRead(types.AccountStateKey(addr, types.AccountBalance), stateObject.Balance()) + s.RecordAccountRead(addr, types.AccountBalance, stateObject.Balance()) stateObject.AddBalance(amount) return } - s.RecordRead(types.AccountStateKey(addr, types.AccountBalance), common.Big0) + s.RecordAccountRead(addr, types.AccountBalance, common.Big0) } // SubBalance subtracts amount from the account associated with addr. func (s *StateDB) SubBalance(addr common.Address, amount *uint256.Int) { stateObject := s.getOrNewStateObject(addr) if stateObject != nil { - s.RecordRead(types.AccountStateKey(addr, types.AccountBalance), stateObject.Balance()) + s.RecordAccountRead(addr, types.AccountBalance, stateObject.Balance()) stateObject.SubBalance(amount) return } - s.RecordRead(types.AccountStateKey(addr, types.AccountBalance), common.Big0) + s.RecordAccountRead(addr, types.AccountBalance, common.Big0) } func (s *StateDB) SetBalance(addr common.Address, amount *uint256.Int) { @@ -658,7 +658,7 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject { // flag set. This is needed by the state journal to revert to the correct s- // destructed object instead of wiping all knowledge about the state object. 
func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject { - s.RecordRead(types.AccountStateKey(addr, types.AccountSelf), struct{}{}) + s.RecordAccountRead(addr, types.AccountSelf, struct{}{}) // Prefer live objects if any is available if obj := s.stateObjects[addr]; obj != nil { return obj @@ -1717,32 +1717,49 @@ func (s *StateDB) StopTxStat(usedGas uint64) { s.stat.Done().WithGas(usedGas) rwSet := s.mvStates.RWSet(s.txIndex) if rwSet != nil { - s.stat.WithRead(len(rwSet.ReadSet())) + ar, sr := rwSet.ReadSet() + s.stat.WithRead(len(ar) + len(sr)) } } } -func (s *StateDB) RecordRead(key types.RWKey, val interface{}) { +func (s *StateDB) RecordAccountRead(addr common.Address, state types.AccountState, val interface{}) { if s.rwSet == nil { return } - s.rwSet.RecordRead(key, types.StateVersion{ + s.rwSet.RecordAccountRead(addr, state, types.StateVersion{ TxIndex: -1, }, val) } -func (s *StateDB) RecordWrite(key types.RWKey, val interface{}) { +func (s *StateDB) RecordStorageRead(addr common.Address, slot common.Hash, val interface{}) { if s.rwSet == nil { return } - s.rwSet.RecordWrite(key, val) + s.rwSet.RecordStorageRead(addr, slot, types.StateVersion{ + TxIndex: -1, + }, val) +} + +func (s *StateDB) RecordAccountWrite(addr common.Address, state types.AccountState, val interface{}) { + if s.rwSet == nil { + return + } + s.rwSet.RecordAccountWrite(addr, state, val) +} + +func (s *StateDB) RecordStorageWrite(addr common.Address, slot common.Hash, val interface{}) { + if s.rwSet == nil { + return + } + s.rwSet.RecordStorageWrite(addr, slot, val) } func (s *StateDB) ResetMVStates(txCount int) { if s.mvStates != nil { s.mvStates.Stop() } - s.mvStates = types.NewMVStates(txCount).EnableAsyncDepGen() + s.mvStates = types.NewMVStates(txCount).EnableAsyncGen() s.rwSet = nil } @@ -1766,7 +1783,7 @@ func (s *StateDB) FinaliseRWSet() error { // finalise stateObjectsDestruct for addr := range s.stateObjectsDestructDirty { - s.RecordWrite(types.AccountStateKey(addr, types.AccountSuicide), struct{}{}) + s.RecordAccountWrite(addr, types.AccountSuicide, struct{}{}) } for addr := range s.journal.dirties { obj, exist := s.stateObjects[addr] @@ -1778,8 +1795,7 @@ func (s *StateDB) FinaliseRWSet() error { // set indefinitely). Note only the first occurred self-destruct // event is tracked. if _, ok := s.stateObjectsDestruct[obj.address]; !ok { - log.Debug("FinaliseRWSet find Destruct", "tx", s.txIndex, "addr", addr, "selfDestructed", obj.selfDestructed) - s.RecordWrite(types.AccountStateKey(addr, types.AccountSuicide), struct{}{}) + s.RecordAccountWrite(addr, types.AccountSuicide, struct{}{}) } } else { // finalise account & storages @@ -1793,7 +1809,8 @@ func (s *StateDB) FinaliseRWSet() error { return err } // just Finalise rwSet in serial execution - return s.mvStates.Finalise(s.txIndex) + s.mvStates.AsyncFinalise(s.txIndex) + return nil } func (s *StateDB) getStateObjectsDestruct(addr common.Address) (*types.StateAccount, bool) { @@ -1844,7 +1861,9 @@ func (s *StateDB) RecordSystemTxRWSet(index int) { s.mvStates.FulfillRWSet(types.NewRWSet(types.StateVersion{ TxIndex: index, }).WithExcludedTxFlag(), types.NewExeStat(index).WithExcludedTxFlag()) - s.mvStates.Finalise(index) + if err := s.mvStates.Finalise(index); err != nil { + log.Error("MVStates SystemTx Finalise err", "err", err) + } } // copySet returns a deep-copied set. 
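Taken together, the StateDB hooks above imply a per-block lifecycle for serial execution: reset the MVStates container, record account/storage reads and writes through the new accessors while each transaction runs, finalise each transaction's RWSet in index order, then resolve the TxDAG once every index is finalised. The sketch below is a rough illustration of that call order only, assuming the StateDB surface added in this series (ResetMVStates, FinaliseRWSet, ResolveTxDAG) and eliding the actual EVM execution, which in the patch lives in core/state_processor.go and miner/worker.go.

    // Sketch only: intended call order as implied by these diffs.
    package example

    import (
        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/state"
        "github.com/ethereum/go-ethereum/core/types"
    )

    func buildTxDAGSerially(statedb *state.StateDB, txs types.Transactions, gasFeeReceivers []common.Address) (types.TxDAG, error) {
        // Fresh multi-version state container for this block; in the patched
        // code this also starts the async dependency generator.
        statedb.ResetMVStates(len(txs))

        for i, tx := range txs {
            statedb.SetTxContext(tx.Hash(), i)
            // ... execute the transaction serially here (ApplyTransaction) ...

            // Flush the recorded read/write set for tx i into MVStates so the
            // dependency analysis can consume it.
            if err := statedb.FinaliseRWSet(); err != nil {
                return nil, err
            }
        }

        // Resolve dependencies into a TxDAG; gas fee receivers trigger the
        // gas-delay special case handled in MVStates.ResolveTxDAG.
        return statedb.ResolveTxDAG(len(txs), gasFeeReceivers)
    }
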
diff --git a/core/types/mvstates.go b/core/types/mvstates.go index 4637b71d2f..c6601c45b3 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -1,12 +1,10 @@ package types import ( - "encoding/hex" "errors" "fmt" "strings" "sync" - "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -14,79 +12,20 @@ import ( "golang.org/x/exp/slices" ) -const ( - AccountStatePrefix = 'a' - StorageStatePrefix = 's' -) - -type RWKey [1 + common.AddressLength + common.HashLength]byte - type AccountState byte -const ( - AccountSelf AccountState = iota - AccountNonce - AccountBalance - AccountCodeHash - AccountSuicide +var ( + AccountSelf AccountState = 0x01 + AccountNonce AccountState = 0x02 + AccountBalance AccountState = 0x04 + AccountCodeHash AccountState = 0x08 + AccountSuicide AccountState = 0x10 ) const ( - asyncDepGenChanSize = 10000 + asyncDepGenChanSize = 1000 ) -func AccountStateKey(account common.Address, state AccountState) RWKey { - var key RWKey - key[0] = AccountStatePrefix - copy(key[1:], account.Bytes()) - key[1+common.AddressLength] = byte(state) - return key -} - -func StorageStateKey(account common.Address, state common.Hash) RWKey { - var key RWKey - key[0] = StorageStatePrefix - copy(key[1:], account.Bytes()) - copy(key[1+common.AddressLength:], state.Bytes()) - return key -} - -func (key *RWKey) IsAccountState() (bool, AccountState) { - return AccountStatePrefix == key[0], AccountState(key[1+common.AddressLength]) -} - -func (key *RWKey) IsAccountSelf() bool { - ok, s := key.IsAccountState() - if !ok { - return false - } - return s == AccountSelf -} - -func (key *RWKey) IsAccountSuicide() bool { - ok, s := key.IsAccountState() - if !ok { - return false - } - return s == AccountSuicide -} - -func (key *RWKey) ToAccountSelf() RWKey { - return AccountStateKey(key.Addr(), AccountSelf) -} - -func (key *RWKey) IsStorageState() bool { - return StorageStatePrefix == key[0] -} - -func (key *RWKey) String() string { - return hex.EncodeToString(key[:]) -} - -func (key *RWKey) Addr() common.Address { - return common.BytesToAddress(key[1 : 1+common.AddressLength]) -} - // StateVersion record specific TxIndex & TxIncarnation // if TxIndex equals to -1, it means the state read from DB. 
type StateVersion struct { @@ -98,9 +37,11 @@ type StateVersion struct { // RWSet record all read & write set in txs // Attention: this is not a concurrent safety structure type RWSet struct { - ver StateVersion - readSet map[RWKey]*RWItem - writeSet map[RWKey]*RWItem + ver StateVersion + accReadSet map[common.Address]map[AccountState]RWItem + accWriteSet map[common.Address]map[AccountState]RWItem + slotReadSet map[common.Address]map[common.Hash]RWItem + slotWriteSet map[common.Address]map[common.Hash]RWItem // some flags rwRecordDone bool @@ -109,45 +50,112 @@ type RWSet struct { func NewRWSet(ver StateVersion) *RWSet { return &RWSet{ - ver: ver, - readSet: make(map[RWKey]*RWItem, 64), - writeSet: make(map[RWKey]*RWItem, 32), + ver: ver, + accReadSet: make(map[common.Address]map[AccountState]RWItem), + accWriteSet: make(map[common.Address]map[AccountState]RWItem), + slotReadSet: make(map[common.Address]map[common.Hash]RWItem), + slotWriteSet: make(map[common.Address]map[common.Hash]RWItem), } } -func (s *RWSet) RecordRead(key RWKey, ver StateVersion, val interface{}) { +func (s *RWSet) RecordAccountRead(addr common.Address, state AccountState, ver StateVersion, val interface{}) { // only record the first read version - if _, exist := s.readSet[key]; exist { + sub, ok := s.accReadSet[addr] + if !ok { + s.accReadSet[addr] = make(map[AccountState]RWItem) + s.accReadSet[addr][state] = RWItem{ + Ver: ver, + Val: val, + } return } - s.readSet[key] = &RWItem{ + if _, ok = sub[state]; ok { + return + } + s.accReadSet[addr][state] = RWItem{ Ver: ver, Val: val, } } -func (s *RWSet) RecordWrite(key RWKey, val interface{}) { - wr, exist := s.writeSet[key] - if !exist { - s.writeSet[key] = &RWItem{ - Ver: s.ver, +func (s *RWSet) RecordStorageRead(addr common.Address, slot common.Hash, ver StateVersion, val interface{}) { + // only record the first read version + sub, ok := s.slotReadSet[addr] + if !ok { + s.slotReadSet[addr] = make(map[common.Hash]RWItem) + s.slotReadSet[addr][slot] = RWItem{ + Ver: ver, Val: val, } return } - wr.Val = val + if _, ok = sub[slot]; ok { + return + } + s.slotReadSet[addr][slot] = RWItem{ + Ver: ver, + Val: val, + } +} + +func (s *RWSet) RecordAccountWrite(addr common.Address, state AccountState, val interface{}) { + _, ok := s.accWriteSet[addr] + if !ok { + s.accWriteSet[addr] = make(map[AccountState]RWItem) + } + s.accWriteSet[addr][state] = RWItem{ + Ver: s.ver, + Val: val, + } +} + +func (s *RWSet) RecordStorageWrite(addr common.Address, slot common.Hash, val interface{}) { + _, ok := s.slotWriteSet[addr] + if !ok { + s.slotWriteSet[addr] = make(map[common.Hash]RWItem) + } + s.slotWriteSet[addr][slot] = RWItem{ + Ver: s.ver, + Val: val, + } +} + +func (s *RWSet) queryAccReadItem(addr common.Address, state AccountState) *RWItem { + sub, ok := s.accReadSet[addr] + if !ok { + return nil + } + + ret, ok := sub[state] + if !ok { + return nil + } + return &ret +} + +func (s *RWSet) querySlotReadItem(addr common.Address, slot common.Hash) *RWItem { + sub, ok := s.slotReadSet[addr] + if !ok { + return nil + } + + ret, ok := sub[slot] + if !ok { + return nil + } + return &ret } func (s *RWSet) Version() StateVersion { return s.ver } -func (s *RWSet) ReadSet() map[RWKey]*RWItem { - return s.readSet +func (s *RWSet) ReadSet() (map[common.Address]map[AccountState]RWItem, map[common.Address]map[common.Hash]RWItem) { + return s.accReadSet, s.slotReadSet } -func (s *RWSet) WriteSet() map[RWKey]*RWItem { - return s.writeSet +func (s *RWSet) WriteSet() 
(map[common.Address]map[AccountState]RWItem, map[common.Address]map[common.Hash]RWItem) { + return s.accWriteSet, s.slotWriteSet } func (s *RWSet) WithExcludedTxFlag() *RWSet { @@ -159,7 +167,15 @@ func (s *RWSet) String() string { builder := strings.Builder{} builder.WriteString(fmt.Sprintf("tx: %v, inc: %v\nreadSet: [", s.ver.TxIndex, s.ver.TxIncarnation)) i := 0 - for key, _ := range s.readSet { + for key, _ := range s.accReadSet { + if i > 0 { + builder.WriteString(fmt.Sprintf(", %v", key.String())) + continue + } + builder.WriteString(fmt.Sprintf("%v", key.String())) + i++ + } + for key, _ := range s.slotReadSet { if i > 0 { builder.WriteString(fmt.Sprintf(", %v", key.String())) continue @@ -169,7 +185,15 @@ func (s *RWSet) String() string { } builder.WriteString("]\nwriteSet: [") i = 0 - for key, _ := range s.writeSet { + for key, _ := range s.accWriteSet { + if i > 0 { + builder.WriteString(fmt.Sprintf(", %v", key.String())) + continue + } + builder.WriteString(fmt.Sprintf("%v", key.String())) + i++ + } + for key, _ := range s.slotWriteSet { if i > 0 { builder.WriteString(fmt.Sprintf(", %v", key.String())) continue @@ -182,9 +206,9 @@ func (s *RWSet) String() string { } // isEqualRWVal compare state -func isEqualRWVal(key RWKey, src interface{}, compared interface{}) bool { - if ok, state := key.IsAccountState(); ok { - switch state { +func isEqualRWVal(accState *AccountState, src interface{}, compared interface{}) bool { + if accState != nil { + switch *accState { case AccountBalance: if src != nil && compared != nil { return equalUint256(src.(*uint256.Int), compared.(*uint256.Int)) @@ -241,7 +265,7 @@ type PendingWrites struct { func NewPendingWrites() *PendingWrites { return &PendingWrites{ - list: make([]*RWItem, 0, 8), + list: make([]*RWItem, 0), } } @@ -298,19 +322,19 @@ func (w *PendingWrites) FindPrevWrites(txIndex int) []*RWItem { } type MVStates struct { - rwSets map[int]*RWSet - pendingWriteSet map[RWKey]*PendingWrites - nextFinaliseIndex int + rwSets map[int]*RWSet + pendingAccWriteSet map[common.Address]map[AccountState]*PendingWrites + pendingSlotWriteSet map[common.Address]map[common.Hash]*PendingWrites + nextFinaliseIndex int // dependency map cache for generating TxDAG // depMapCache[i].exist(j) means j->i, and i > j - depMapCache map[int]TxDepMap - depsCache map[int][]uint64 + txDepCache []TxDepMaker // async dep analysis - depsGenChan chan int - stopChan chan struct{} - asyncRunning bool + asyncGenChan chan int + asyncStopChan chan struct{} + asyncRunning bool // execution stat infos stats map[int]*ExeStat @@ -319,53 +343,53 @@ type MVStates struct { func NewMVStates(txCount int) *MVStates { return &MVStates{ - rwSets: make(map[int]*RWSet, txCount), - pendingWriteSet: make(map[RWKey]*PendingWrites, txCount*8), - depMapCache: make(map[int]TxDepMap, txCount), - depsCache: make(map[int][]uint64, txCount), - stats: make(map[int]*ExeStat, txCount), + rwSets: make(map[int]*RWSet, txCount), + pendingAccWriteSet: make(map[common.Address]map[AccountState]*PendingWrites, txCount*8), + pendingSlotWriteSet: make(map[common.Address]map[common.Hash]*PendingWrites, txCount*8), + txDepCache: make([]TxDepMaker, 0, txCount), + stats: make(map[int]*ExeStat, txCount), } } -func (s *MVStates) EnableAsyncDepGen() *MVStates { +func (s *MVStates) EnableAsyncGen() *MVStates { s.lock.Lock() defer s.lock.Unlock() - s.depsGenChan = make(chan int, asyncDepGenChanSize) - s.stopChan = make(chan struct{}) + chanSize := asyncDepGenChanSize + if len(s.rwSets) > 0 && len(s.rwSets) < 
asyncDepGenChanSize { + chanSize = len(s.rwSets) + } + s.asyncGenChan = make(chan int, chanSize) + s.asyncStopChan = make(chan struct{}) s.asyncRunning = true - go s.asyncDepGenLoop() + go s.asyncGenLoop() return s } func (s *MVStates) Stop() error { s.lock.Lock() defer s.lock.Unlock() - s.stopAsyncDepGen() + s.stopAsyncGen() return nil } -func (s *MVStates) stopAsyncDepGen() { +func (s *MVStates) stopAsyncGen() { if !s.asyncRunning { return } s.asyncRunning = false - if s.stopChan != nil { - close(s.stopChan) + if s.asyncStopChan != nil { + close(s.asyncStopChan) } } -func (s *MVStates) asyncDepGenLoop() { - timeout := time.After(3 * time.Second) +func (s *MVStates) asyncGenLoop() { for { select { - case tx := <-s.depsGenChan: - s.lock.Lock() - s.resolveDepsCacheByWrites(tx, s.rwSets[tx]) - s.lock.Unlock() - case <-s.stopChan: - return - case <-timeout: - log.Warn("asyncDepGenLoop exit by timeout") + case tx := <-s.asyncGenChan: + if err := s.Finalise(tx); err != nil { + log.Error("async MVStates Finalise err", "err", err) + } + case <-s.asyncStopChan: return } } @@ -392,12 +416,32 @@ func (s *MVStates) RWSet(index int) *RWSet { return s.rwSets[index] } -// ReadState read state from MVStates -func (s *MVStates) ReadState(txIndex int, key RWKey) *RWItem { +// ReadAccState read state from MVStates +func (s *MVStates) ReadAccState(txIndex int, addr common.Address, state AccountState) *RWItem { + s.lock.RLock() + defer s.lock.RUnlock() + + sub, ok := s.pendingAccWriteSet[addr] + if !ok { + return nil + } + wset, ok := sub[state] + if !ok { + return nil + } + return wset.FindLastWrite(txIndex) +} + +// ReadSlotState read state from MVStates +func (s *MVStates) ReadSlotState(txIndex int, addr common.Address, slot common.Hash) *RWItem { s.lock.RLock() defer s.lock.RUnlock() - wset, ok := s.pendingWriteSet[key] + sub, ok := s.pendingSlotWriteSet[addr] + if !ok { + return nil + } + wset, ok := sub[slot] if !ok { return nil } @@ -407,7 +451,6 @@ func (s *MVStates) ReadState(txIndex int, key RWKey) *RWItem { // FulfillRWSet it can execute as async, and rwSet & stat must guarantee read-only // try to generate TxDAG, when fulfill RWSet func (s *MVStates) FulfillRWSet(rwSet *RWSet, stat *ExeStat) error { - log.Debug("FulfillRWSet", "total", len(s.rwSets), "cur", rwSet.ver.TxIndex, "reads", len(rwSet.readSet), "writes", len(rwSet.writeSet)) s.lock.Lock() defer s.lock.Unlock() index := rwSet.ver.TxIndex @@ -420,106 +463,141 @@ func (s *MVStates) FulfillRWSet(rwSet *RWSet, stat *ExeStat) error { } s.stats[index] = stat } - - //if metrics.EnabledExpensive { - // for k := range rwSet.writeSet { - // // this action is only for testing, it runs when enable expensive metrics. - // checkRWSetInconsistent(index, k, rwSet.readSet, rwSet.writeSet) - // } - //} s.rwSets[index] = rwSet return nil } +// AsyncFinalise it will put target write set into pending writes. +func (s *MVStates) AsyncFinalise(index int) { + // async resolve dependency, but non-block action + if s.asyncRunning && s.asyncGenChan != nil { + select { + case s.asyncGenChan <- index: + default: + } + } +} + // Finalise it will put target write set into pending writes. 
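// It catches up in order on every index from nextFinaliseIndex through the given one,
// appending each RWSet's writes to the pending per-address write sets and resolving its
// dependency map; it returns an error if any RWSet in that range has not been fulfilled yet.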
func (s *MVStates) Finalise(index int) error { - log.Debug("Finalise", "total", len(s.rwSets), "index", index) s.lock.Lock() + defer s.lock.Unlock() + + // just finalise all previous txs + for i := s.nextFinaliseIndex; i <= index; i++ { + if err := s.innerFinalise(i); err != nil { + return err + } + s.resolveDepsMapCacheByWrites(i, s.rwSets[i]) + } + return nil +} + +func (s *MVStates) innerFinalise(index int) error { rwSet := s.rwSets[index] if rwSet == nil { - s.lock.Unlock() return fmt.Errorf("finalise a non-exist RWSet, index: %d", index) } if index != s.nextFinaliseIndex { - s.lock.Unlock() return fmt.Errorf("finalise in wrong order, next: %d, input: %d", s.nextFinaliseIndex, index) } // append to pending write set - for k, v := range rwSet.writeSet { - if _, exist := s.pendingWriteSet[k]; !exist { - s.pendingWriteSet[k] = NewPendingWrites() + for addr, sub := range rwSet.accWriteSet { + if _, exist := s.pendingAccWriteSet[addr]; !exist { + s.pendingAccWriteSet[addr] = make(map[AccountState]*PendingWrites) + } + for state, item := range sub { + if _, exist := s.pendingAccWriteSet[addr][state]; !exist { + s.pendingAccWriteSet[addr][state] = NewPendingWrites() + } + s.pendingAccWriteSet[addr][state].Append(&item) } - s.pendingWriteSet[k].Append(v) } - s.nextFinaliseIndex++ - s.lock.Unlock() - // async resolve dependency, but non-block action - if s.asyncRunning && s.depsGenChan != nil { - s.depsGenChan <- index + for k, sub := range rwSet.slotWriteSet { + if _, exist := s.pendingSlotWriteSet[k]; !exist { + s.pendingSlotWriteSet[k] = make(map[common.Hash]*PendingWrites) + } + for slot, item := range sub { + if _, exist := s.pendingSlotWriteSet[k][slot]; !exist { + s.pendingSlotWriteSet[k][slot] = NewPendingWrites() + } + s.pendingSlotWriteSet[k][slot].Append(&item) + } } + s.nextFinaliseIndex++ return nil } -func (s *MVStates) resolveDepsCacheByWrites(index int, rwSet *RWSet) { +func (s *MVStates) queryAccWrites(addr common.Address, state AccountState) *PendingWrites { + if _, exist := s.pendingAccWriteSet[addr]; !exist { + return nil + } + return s.pendingAccWriteSet[addr][state] +} + +func (s *MVStates) querySlotWrites(addr common.Address, slot common.Hash) *PendingWrites { + if _, exist := s.pendingSlotWriteSet[addr]; !exist { + return nil + } + return s.pendingSlotWriteSet[addr][slot] +} + +// resolveDepsMapCacheByWrites must be executed in order +func (s *MVStates) resolveDepsMapCacheByWrites(index int, rwSet *RWSet) { // analysis dep, if the previous transaction is not executed/validated, re-analysis is required - s.depMapCache[index] = NewTxDeps(8) + depMap := NewTxDepMap(0) if rwSet.excludedTx { + s.txDepCache = append(s.txDepCache, depMap) return } - seen := make(map[int]struct{}, 8) // check tx dependency, only check key, skip version - if len(s.pendingWriteSet) > len(rwSet.readSet) { - for key := range rwSet.readSet { + for addr, sub := range rwSet.accReadSet { + for state := range sub { // check self destruct - if key.IsAccountSelf() { - key = AccountStateKey(key.Addr(), AccountSuicide) + if state == AccountSelf { + state = AccountSuicide } - writes := s.pendingWriteSet[key] + writes := s.queryAccWrites(addr, state) if writes == nil { continue } items := writes.FindPrevWrites(index) for _, item := range items { - seen[item.TxIndex()] = struct{}{} + depMap.add(uint64(item.TxIndex())) } } - } else { - for k, w := range s.pendingWriteSet { - // check suicide, add read address flag, it only for check suicide quickly, and cannot for other scenarios. 
- if k.IsAccountSuicide() { - k = k.ToAccountSelf() - } - if _, ok := rwSet.readSet[k]; !ok { + } + for addr, sub := range rwSet.slotReadSet { + for slot := range sub { + writes := s.querySlotWrites(addr, slot) + if writes == nil { continue } - items := w.FindPrevWrites(index) + items := writes.FindPrevWrites(index) for _, item := range items { - seen[item.TxIndex()] = struct{}{} + depMap.add(uint64(item.TxIndex())) } } } - for prev := 0; prev < index; prev++ { - if _, ok := seen[prev]; !ok { - continue - } - s.depMapCache[index].add(prev) - // clear redundancy deps compared with prev - for dep := range s.depMapCache[index] { - if s.depMapCache[prev].exist(dep) { - s.depMapCache[index].remove(dep) - } + // clear redundancy deps compared with prev + preDeps := depMap.deps() + for _, prev := range preDeps { + for _, tx := range s.txDepCache[prev].deps() { + depMap.remove(tx) } } - s.depsCache[index] = s.depMapCache[index].toArray() + s.txDepCache = append(s.txDepCache, depMap) } +// resolveDepsCache must be executed in order func (s *MVStates) resolveDepsCache(index int, rwSet *RWSet) { // analysis dep, if the previous transaction is not executed/validated, re-analysis is required - s.depMapCache[index] = NewTxDeps(0) + depMap := NewTxDepMap(0) if rwSet.excludedTx { + s.txDepCache = append(s.txDepCache, depMap) return } for prev := 0; prev < index; prev++ { @@ -534,40 +612,26 @@ func (s *MVStates) resolveDepsCache(index int, rwSet *RWSet) { continue } // check if there has written op before i - if checkDependency(prevSet.writeSet, rwSet.readSet) { - s.depMapCache[index].add(prev) + if checkAccDependency(prevSet.accWriteSet, rwSet.accReadSet) { + depMap.add(uint64(prev)) // clear redundancy deps compared with prev - for dep := range s.depMapCache[index] { - if s.depMapCache[prev].exist(dep) { - s.depMapCache[index].remove(dep) + for _, dep := range depMap.deps() { + if s.txDepCache[prev].exist(dep) { + depMap.remove(dep) + } + } + } + if checkSlotDependency(prevSet.slotWriteSet, rwSet.slotReadSet) { + depMap.add(uint64(prev)) + // clear redundancy deps compared with prev + for _, dep := range depMap.deps() { + if s.txDepCache[prev].exist(dep) { + depMap.remove(dep) } } } } - s.depsCache[index] = s.depMapCache[index].toArray() -} - -func checkRWSetInconsistent(index int, k RWKey, readSet map[RWKey]*RWItem, writeSet map[RWKey]*RWItem) bool { - var ( - readOk bool - writeOk bool - r *RWItem - ) - - if k.IsAccountSuicide() { - _, readOk = readSet[k.ToAccountSelf()] - } else { - _, readOk = readSet[k] - } - - r, writeOk = writeSet[k] - if readOk != writeOk { - // check if it's correct? 
read nil, write non-nil - log.Warn("checkRWSetInconsistent find inconsistent", "tx", index, "k", k.String(), "read", readOk, "write", writeOk, "val", r.Val) - return true - } - - return false + s.txDepCache = append(s.txDepCache, depMap) } // ResolveTxDAG generate TxDAG from RWSets @@ -577,27 +641,34 @@ func (s *MVStates) ResolveTxDAG(txCnt int, gasFeeReceivers []common.Address) (Tx if len(s.rwSets) != txCnt { return nil, fmt.Errorf("wrong rwSet count, expect: %v, actual: %v", txCnt, len(s.rwSets)) } - if txCnt != s.nextFinaliseIndex { - return nil, fmt.Errorf("resolve in wrong order, next: %d, input: %d", s.nextFinaliseIndex, txCnt) + s.stopAsyncGen() + // collect all rw sets, try to finalise them + for i := s.nextFinaliseIndex; i < txCnt; i++ { + if err := s.innerFinalise(i); err != nil { + return nil, err + } } - s.stopAsyncDepGen() - txDAG := NewPlainTxDAG(len(s.rwSets)) + + txDAG := NewPlainTxDAG(txCnt) for i := 0; i < txCnt; i++ { // check if there are RW with gas fee receiver for gas delay calculation for _, addr := range gasFeeReceivers { - if _, ok := s.rwSets[i].readSet[AccountStateKey(addr, AccountSelf)]; ok { + if _, ok := s.rwSets[i].accReadSet[addr]; !ok { + continue + } + if _, ok := s.rwSets[i].accReadSet[addr][AccountSelf]; ok { return NewEmptyTxDAG(), nil } } txDAG.TxDeps[i].TxIndexes = []uint64{} + if len(s.txDepCache) <= i { + s.resolveDepsMapCacheByWrites(i, s.rwSets[i]) + } if s.rwSets[i].excludedTx { txDAG.TxDeps[i].SetFlag(ExcludedTxFlag) continue } - if s.depMapCache[i] == nil { - s.resolveDepsCacheByWrites(i, s.rwSets[i]) - } - deps := s.depsCache[i] + deps := s.txDepCache[i].deps() if len(deps) <= (txCnt-1)/2 { txDAG.TxDeps[i].TxIndexes = deps continue @@ -614,48 +685,92 @@ func (s *MVStates) ResolveTxDAG(txCnt int, gasFeeReceivers []common.Address) (Tx return txDAG, nil } -func checkDependency(writeSet map[RWKey]*RWItem, readSet map[RWKey]*RWItem) bool { +func checkAccDependency(writeSet map[common.Address]map[AccountState]RWItem, readSet map[common.Address]map[AccountState]RWItem) bool { // check tx dependency, only check key, skip version - for k, _ := range writeSet { - // check suicide, add read address flag, it only for check suicide quickly, and cannot for other scenarios. - if k.IsAccountSuicide() { - if _, ok := readSet[k.ToAccountSelf()]; ok { + for addr, sub := range writeSet { + if _, ok := readSet[addr]; !ok { + continue + } + for state := range sub { + // check suicide, add read address flag, it only for check suicide quickly, and cannot for other scenarios. 
+ if state == AccountSuicide { + if _, ok := readSet[addr][AccountSelf]; ok { + return true + } + continue + } + if _, ok := readSet[addr][state]; ok { return true } + } + } + + return false +} + +func checkSlotDependency(writeSet map[common.Address]map[common.Hash]RWItem, readSet map[common.Address]map[common.Hash]RWItem) bool { + // check tx dependency, only check key, skip version + for addr, sub := range writeSet { + if _, ok := readSet[addr]; !ok { continue } - if _, ok := readSet[k]; ok { - return true + for slot := range sub { + if _, ok := readSet[addr][slot]; ok { + return true + } } } return false } -type TxDepMap map[int]struct{} +type TxDepMaker interface { + add(index uint64) + exist(index uint64) bool + deps() []uint64 + remove(index uint64) + len() int +} -func NewTxDeps(cap int) TxDepMap { - return make(map[int]struct{}, cap) +type TxDepMap struct { + tm map[uint64]struct{} + cache []uint64 } -func (m TxDepMap) add(index int) { - m[index] = struct{}{} +func NewTxDepMap(cap int) *TxDepMap { + return &TxDepMap{ + tm: make(map[uint64]struct{}, cap), + } } -func (m TxDepMap) exist(index int) bool { - _, ok := m[index] +func (m *TxDepMap) add(index uint64) { + m.cache = nil + m.tm[index] = struct{}{} +} + +func (m *TxDepMap) exist(index uint64) bool { + _, ok := m.tm[index] return ok } -func (m TxDepMap) toArray() []uint64 { - ret := make([]uint64, 0, len(m)) - for index := range m { - ret = append(ret, uint64(index)) +func (m *TxDepMap) deps() []uint64 { + if m.cache != nil { + return m.cache + } + res := make([]uint64, 0, len(m.tm)) + for index := range m.tm { + res = append(res, index) } - slices.Sort(ret) - return ret + slices.Sort(res) + m.cache = res + return m.cache +} + +func (m *TxDepMap) remove(index uint64) { + m.cache = nil + delete(m.tm, index) } -func (m TxDepMap) remove(index int) { - delete(m, index) +func (m *TxDepMap) len() int { + return len(m.tm) } diff --git a/core/types/mvstates_test.go b/core/types/mvstates_test.go index 7a0e16db8c..792f8cd031 100644 --- a/core/types/mvstates_test.go +++ b/core/types/mvstates_test.go @@ -7,6 +7,8 @@ import ( "testing" "time" + "github.com/ethereum/go-ethereum/common" + "github.com/cometbft/cometbft/libs/rand" "github.com/golang/snappy" @@ -14,53 +16,66 @@ import ( "github.com/stretchr/testify/require" ) -const mockRWSetSize = 5000 +const ( + mockRWSetSize = 5000 +) func TestMVStates_BasicUsage(t *testing.T) { ms := NewMVStates(0) - require.NoError(t, ms.FulfillRWSet(mockRWSetWithVal(0, []interface{}{"0x00", 0}, []interface{}{"0x00", 0}), nil)) - require.Nil(t, ms.ReadState(0, str2key("0x00"))) + require.NoError(t, ms.FulfillRWSet(mockRWSetWithVal(0, []interface{}{AccountSelf, nil, AccountBalance, 0, "0x00", 0}, []interface{}{AccountBalance, 0, "0x00", 0}), nil)) + require.Nil(t, ms.ReadAccState(0, common.Address{}, AccountBalance)) + require.Nil(t, ms.ReadSlotState(0, common.Address{}, str2Slot("0x00"))) require.NoError(t, ms.Finalise(0)) - require.Error(t, ms.Finalise(0)) + require.Error(t, ms.FulfillRWSet(mockRWSetWithVal(0, nil, nil), nil)) - require.Nil(t, ms.ReadState(0, str2key("0x00"))) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 0}, 0), ms.ReadState(1, str2key("0x00"))) + require.Nil(t, ms.ReadAccState(0, common.Address{}, AccountBalance)) + require.Nil(t, ms.ReadSlotState(0, common.Address{}, str2Slot("0x00"))) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 0}, 0), ms.ReadAccState(1, common.Address{}, AccountBalance)) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 0}, 0), ms.ReadSlotState(1, 
common.Address{}, str2Slot("0x00"))) - require.NoError(t, ms.FulfillRWSet(mockRWSetWithVal(1, []interface{}{"0x01", 1}, []interface{}{"0x01", 1}), nil)) - require.Nil(t, ms.ReadState(1, str2key("0x01"))) + require.NoError(t, ms.FulfillRWSet(mockRWSetWithVal(1, []interface{}{AccountSelf, nil, AccountBalance, 0, "0x01", 1}, []interface{}{AccountBalance, 1, "0x01", 1}), nil)) + require.Nil(t, ms.ReadSlotState(1, common.Address{}, str2Slot("0x01"))) require.NoError(t, ms.Finalise(1)) - require.Nil(t, ms.ReadState(0, str2key("0x01"))) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 1}, 1), ms.ReadState(2, str2key("0x01"))) + require.Nil(t, ms.ReadSlotState(0, common.Address{}, str2Slot("0x01"))) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 1}, 1), ms.ReadSlotState(2, common.Address{}, str2Slot("0x01"))) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 1}, 1), ms.ReadAccState(2, common.Address{}, AccountBalance)) - require.NoError(t, ms.FulfillRWSet(mockRWSetWithVal(2, []interface{}{"0x02", 2, "0x01", 1}, []interface{}{"0x01", 2, "0x02", 2}), nil)) + require.NoError(t, ms.FulfillRWSet(mockRWSetWithVal(2, []interface{}{AccountSelf, nil, AccountBalance, 1, "0x02", 2, "0x01", 1}, []interface{}{AccountBalance, 2, "0x01", 2, "0x02", 2}), nil)) require.NoError(t, ms.Finalise(2)) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 1}, 1), ms.ReadState(2, str2key("0x01"))) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 2}, 2), ms.ReadState(3, str2key("0x01"))) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 1}, 1), ms.ReadAccState(2, common.Address{}, AccountBalance)) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 1}, 1), ms.ReadSlotState(2, common.Address{}, str2Slot("0x01"))) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 2}, 2), ms.ReadAccState(3, common.Address{}, AccountBalance)) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 2}, 2), ms.ReadSlotState(3, common.Address{}, str2Slot("0x01"))) - require.NoError(t, ms.FulfillRWSet(mockRWSetWithVal(3, []interface{}{"0x03", 3, "0x00", 0, "0x01", 2}, []interface{}{"0x00", 3, "0x01", 3, "0x03", 3}), nil)) - require.Nil(t, ms.ReadState(3, str2key("0x03"))) + require.NoError(t, ms.FulfillRWSet(mockRWSetWithVal(3, []interface{}{AccountSelf, nil, AccountBalance, 2, "0x03", 3, "0x00", 0, "0x01", 2}, []interface{}{AccountBalance, 3, "0x00", 3, "0x01", 3, "0x03", 3}), nil)) + require.Nil(t, ms.ReadSlotState(3, common.Address{}, str2Slot("0x03"))) require.NoError(t, ms.Finalise(3)) - require.Nil(t, ms.ReadState(0, str2key("0x01"))) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 1}, 1), ms.ReadState(2, str2key("0x01"))) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 2}, 2), ms.ReadState(3, str2key("0x01"))) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 3}, 3), ms.ReadState(4, str2key("0x01"))) - require.Nil(t, ms.ReadState(0, str2key("0x00"))) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 3}, 3), ms.ReadState(5, str2key("0x00"))) + require.Nil(t, ms.ReadSlotState(0, common.Address{}, str2Slot("0x01"))) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 1}, 1), ms.ReadSlotState(2, common.Address{}, str2Slot("0x01"))) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 1}, 1), ms.ReadAccState(2, common.Address{}, AccountBalance)) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 2}, 2), ms.ReadSlotState(3, common.Address{}, str2Slot("0x01"))) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 2}, 2), ms.ReadAccState(3, common.Address{}, AccountBalance)) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 3}, 3), 
ms.ReadSlotState(4, common.Address{}, str2Slot("0x01"))) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 3}, 3), ms.ReadAccState(4, common.Address{}, AccountBalance)) + require.Nil(t, ms.ReadSlotState(0, common.Address{}, str2Slot("0x00"))) + require.Nil(t, ms.ReadAccState(0, common.Address{}, AccountBalance)) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 3}, 3), ms.ReadSlotState(5, common.Address{}, str2Slot("0x00"))) + require.Equal(t, NewRWItem(StateVersion{TxIndex: 3}, 3), ms.ReadAccState(5, common.Address{}, AccountBalance)) } func TestMVStates_SimpleResolveTxDAG(t *testing.T) { ms := NewMVStates(10) finaliseRWSets(t, ms, []*RWSet{ - mockRWSet(0, []string{"0x00"}, []string{"0x00"}), - mockRWSet(1, []string{"0x01"}, []string{"0x01"}), - mockRWSet(2, []string{"0x02"}, []string{"0x02"}), - mockRWSet(3, []string{"0x00", "0x03"}, []string{"0x03"}), - mockRWSet(4, []string{"0x00", "0x04"}, []string{"0x04"}), - mockRWSet(5, []string{"0x01", "0x02", "0x05"}, []string{"0x05"}), - mockRWSet(6, []string{"0x02", "0x05", "0x06"}, []string{"0x06"}), - mockRWSet(7, []string{"0x06", "0x07"}, []string{"0x07"}), - mockRWSet(8, []string{"0x08"}, []string{"0x08"}), - mockRWSet(9, []string{"0x08", "0x09"}, []string{"0x09"}), + mockRWSet(0, []interface{}{"0x00"}, []interface{}{"0x00"}), + mockRWSet(1, []interface{}{"0x01"}, []interface{}{"0x01"}), + mockRWSet(2, []interface{}{"0x02"}, []interface{}{"0x02"}), + mockRWSet(3, []interface{}{"0x00", "0x03"}, []interface{}{"0x03"}), + mockRWSet(4, []interface{}{"0x00", "0x04"}, []interface{}{"0x04"}), + mockRWSet(5, []interface{}{"0x01", "0x02", "0x05"}, []interface{}{"0x05"}), + mockRWSet(6, []interface{}{"0x02", "0x05", "0x06"}, []interface{}{"0x06"}), + mockRWSet(7, []interface{}{"0x06", "0x07"}, []interface{}{"0x07"}), + mockRWSet(8, []interface{}{"0x08"}, []interface{}{"0x08"}), + mockRWSet(9, []interface{}{"0x08", "0x09"}, []interface{}{"0x09"}), }) dag, err := ms.ResolveTxDAG(10, nil) @@ -70,18 +85,18 @@ func TestMVStates_SimpleResolveTxDAG(t *testing.T) { } func TestMVStates_AsyncDepGen_SimpleResolveTxDAG(t *testing.T) { - ms := NewMVStates(10).EnableAsyncDepGen() + ms := NewMVStates(10).EnableAsyncGen() finaliseRWSets(t, ms, []*RWSet{ - mockRWSet(0, []string{"0x00"}, []string{"0x00"}), - mockRWSet(1, []string{"0x01"}, []string{"0x01"}), - mockRWSet(2, []string{"0x02"}, []string{"0x02"}), - mockRWSet(3, []string{"0x00", "0x03"}, []string{"0x03"}), - mockRWSet(4, []string{"0x00", "0x04"}, []string{"0x04"}), - mockRWSet(5, []string{"0x01", "0x02", "0x05"}, []string{"0x05"}), - mockRWSet(6, []string{"0x02", "0x05", "0x06"}, []string{"0x06"}), - mockRWSet(7, []string{"0x06", "0x07"}, []string{"0x07"}), - mockRWSet(8, []string{"0x08"}, []string{"0x08"}), - mockRWSet(9, []string{"0x08", "0x09"}, []string{"0x09"}), + mockRWSet(0, []interface{}{"0x00"}, []interface{}{"0x00"}), + mockRWSet(1, []interface{}{"0x01"}, []interface{}{"0x01"}), + mockRWSet(2, []interface{}{"0x02"}, []interface{}{"0x02"}), + mockRWSet(3, []interface{}{"0x00", "0x03"}, []interface{}{"0x03"}), + mockRWSet(4, []interface{}{"0x00", "0x04"}, []interface{}{"0x04"}), + mockRWSet(5, []interface{}{"0x01", "0x02", "0x05"}, []interface{}{"0x05"}), + mockRWSet(6, []interface{}{"0x02", "0x05", "0x06"}, []interface{}{"0x06"}), + mockRWSet(7, []interface{}{"0x06", "0x07"}, []interface{}{"0x07"}), + mockRWSet(8, []interface{}{"0x08"}, []interface{}{"0x08"}), + mockRWSet(9, []interface{}{"0x08", "0x09"}, []interface{}{"0x09"}), }) time.Sleep(10 * time.Millisecond) @@ -96,7 +111,7 @@ func 
TestMVStates_AsyncDepGen_SimpleResolveTxDAG(t *testing.T) { func TestMVStates_ResolveTxDAG_Async(t *testing.T) { txCnt := 10000 rwSets := mockRandomRWSet(txCnt) - ms1 := NewMVStates(txCnt).EnableAsyncDepGen() + ms1 := NewMVStates(txCnt).EnableAsyncGen() for i := 0; i < txCnt; i++ { require.NoError(t, ms1.FulfillRWSet(rwSets[i], nil)) require.NoError(t, ms1.Finalise(i)) @@ -118,7 +133,7 @@ func TestMVStates_ResolveTxDAG_Compare(t *testing.T) { } d1 := resolveTxDAGInMVStates(ms1) - d2 := resolveTxDAGByWritesInMVStates(ms2) + d2 := resolveDepsMapCacheByWritesInMVStates(ms2) require.Equal(t, d1.(*PlainTxDAG).String(), d2.(*PlainTxDAG).String()) } @@ -130,7 +145,7 @@ func TestMVStates_TxDAG_Compression(t *testing.T) { ms1.rwSets[i] = rwSet ms1.Finalise(i) } - dag := resolveTxDAGByWritesInMVStates(ms1) + dag := resolveDepsMapCacheByWritesInMVStates(ms1) enc, err := EncodeTxDAG(dag) require.NoError(t, err) @@ -161,6 +176,7 @@ func BenchmarkResolveTxDAGInMVStates(b *testing.B) { for i, rwSet := range rwSets { ms1.rwSets[i] = rwSet } + b.ResetTimer() for i := 0; i < b.N; i++ { resolveTxDAGInMVStates(ms1) } @@ -171,20 +187,22 @@ func BenchmarkResolveTxDAGByWritesInMVStates(b *testing.B) { ms1 := NewMVStates(mockRWSetSize) for i, rwSet := range rwSets { ms1.rwSets[i] = rwSet - ms1.Finalise(i) + ms1.innerFinalise(i) } + b.ResetTimer() for i := 0; i < b.N; i++ { - resolveTxDAGByWritesInMVStates(ms1) + resolveDepsMapCacheByWritesInMVStates(ms1) } } func BenchmarkMVStates_Finalise(b *testing.B) { rwSets := mockRandomRWSet(mockRWSetSize) ms1 := NewMVStates(mockRWSetSize) + b.ResetTimer() for i := 0; i < b.N; i++ { for k, rwSet := range rwSets { ms1.rwSets[k] = rwSet - ms1.Finalise(k) + ms1.innerFinalise(k) } } } @@ -193,16 +211,16 @@ func resolveTxDAGInMVStates(s *MVStates) TxDAG { txDAG := NewPlainTxDAG(len(s.rwSets)) for i := 0; i < len(s.rwSets); i++ { s.resolveDepsCache(i, s.rwSets[i]) - txDAG.TxDeps[i].TxIndexes = s.depsCache[i] + txDAG.TxDeps[i].TxIndexes = s.txDepCache[i].deps() } return txDAG } -func resolveTxDAGByWritesInMVStates(s *MVStates) TxDAG { +func resolveDepsMapCacheByWritesInMVStates(s *MVStates) TxDAG { txDAG := NewPlainTxDAG(len(s.rwSets)) for i := 0; i < len(s.rwSets); i++ { - s.resolveDepsCacheByWrites(i, s.rwSets[i]) - txDAG.TxDeps[i].TxIndexes = s.depsCache[i] + s.resolveDepsMapCacheByWrites(i, s.rwSets[i]) + txDAG.TxDeps[i].TxIndexes = s.txDepCache[i].deps() } return txDAG } @@ -210,18 +228,18 @@ func resolveTxDAGByWritesInMVStates(s *MVStates) TxDAG { func TestMVStates_SystemTxResolveTxDAG(t *testing.T) { ms := NewMVStates(12) finaliseRWSets(t, ms, []*RWSet{ - mockRWSet(0, []string{"0x00"}, []string{"0x00"}), - mockRWSet(1, []string{"0x01"}, []string{"0x01"}), - mockRWSet(2, []string{"0x02"}, []string{"0x02"}), - mockRWSet(3, []string{"0x00", "0x03"}, []string{"0x03"}), - mockRWSet(4, []string{"0x00", "0x04"}, []string{"0x04"}), - mockRWSet(5, []string{"0x01", "0x02", "0x05"}, []string{"0x05"}), - mockRWSet(6, []string{"0x02", "0x05", "0x06"}, []string{"0x06"}), - mockRWSet(7, []string{"0x06", "0x07"}, []string{"0x07"}), - mockRWSet(8, []string{"0x08"}, []string{"0x08"}), - mockRWSet(9, []string{"0x08", "0x09"}, []string{"0x09"}), - mockRWSet(10, []string{"0x10"}, []string{"0x10"}).WithExcludedTxFlag(), - mockRWSet(11, []string{"0x11"}, []string{"0x11"}).WithExcludedTxFlag(), + mockRWSet(0, []interface{}{"0x00"}, []interface{}{"0x00"}), + mockRWSet(1, []interface{}{"0x01"}, []interface{}{"0x01"}), + mockRWSet(2, []interface{}{"0x02"}, []interface{}{"0x02"}), + mockRWSet(3, 
[]interface{}{"0x00", "0x03"}, []interface{}{"0x03"}), + mockRWSet(4, []interface{}{"0x00", "0x04"}, []interface{}{"0x04"}), + mockRWSet(5, []interface{}{"0x01", "0x02", "0x05"}, []interface{}{"0x05"}), + mockRWSet(6, []interface{}{"0x02", "0x05", "0x06"}, []interface{}{"0x06"}), + mockRWSet(7, []interface{}{"0x06", "0x07"}, []interface{}{"0x07"}), + mockRWSet(8, []interface{}{"0x08"}, []interface{}{"0x08"}), + mockRWSet(9, []interface{}{"0x08", "0x09"}, []interface{}{"0x09"}), + mockRWSet(10, []interface{}{"0x10"}, []interface{}{"0x10"}).WithExcludedTxFlag(), + mockRWSet(11, []interface{}{"0x11"}, []interface{}{"0x11"}).WithExcludedTxFlag(), }) dag, err := ms.ResolveTxDAG(12, nil) @@ -233,18 +251,18 @@ func TestMVStates_SystemTxResolveTxDAG(t *testing.T) { func TestMVStates_SystemTxWithLargeDepsResolveTxDAG(t *testing.T) { ms := NewMVStates(12) finaliseRWSets(t, ms, []*RWSet{ - mockRWSet(0, []string{"0x00"}, []string{"0x00"}), - mockRWSet(1, []string{"0x01"}, []string{"0x01"}), - mockRWSet(2, []string{"0x02"}, []string{"0x02"}), - mockRWSet(3, []string{"0x00", "0x03"}, []string{"0x03"}), - mockRWSet(4, []string{"0x00", "0x04"}, []string{"0x04"}), - mockRWSet(5, []string{"0x01", "0x02", "0x05"}, []string{"0x05"}), - mockRWSet(6, []string{"0x02", "0x05", "0x06"}, []string{"0x06"}), - mockRWSet(7, []string{"0x00", "0x03", "0x07"}, []string{"0x07"}), - mockRWSet(8, []string{"0x08"}, []string{"0x08"}), - mockRWSet(9, []string{"0x00", "0x01", "0x02", "0x06", "0x07", "0x08", "0x09"}, []string{"0x09"}), - mockRWSet(10, []string{"0x10"}, []string{"0x10"}).WithExcludedTxFlag(), - mockRWSet(11, []string{"0x11"}, []string{"0x11"}).WithExcludedTxFlag(), + mockRWSet(0, []interface{}{"0x00"}, []interface{}{"0x00"}), + mockRWSet(1, []interface{}{"0x01"}, []interface{}{"0x01"}), + mockRWSet(2, []interface{}{"0x02"}, []interface{}{"0x02"}), + mockRWSet(3, []interface{}{"0x00", "0x03"}, []interface{}{"0x03"}), + mockRWSet(4, []interface{}{"0x00", "0x04"}, []interface{}{"0x04"}), + mockRWSet(5, []interface{}{"0x01", "0x02", "0x05"}, []interface{}{"0x05"}), + mockRWSet(6, []interface{}{"0x02", "0x05", "0x06"}, []interface{}{"0x06"}), + mockRWSet(7, []interface{}{"0x00", "0x03", "0x07"}, []interface{}{"0x07"}), + mockRWSet(8, []interface{}{"0x08"}, []interface{}{"0x08"}), + mockRWSet(9, []interface{}{"0x00", "0x01", "0x02", "0x06", "0x07", "0x08", "0x09"}, []interface{}{"0x09"}), + mockRWSet(10, []interface{}{"0x10"}, []interface{}{"0x10"}).WithExcludedTxFlag(), + mockRWSet(11, []interface{}{"0x11"}, []interface{}{"0x11"}).WithExcludedTxFlag(), }) dag, err := ms.ResolveTxDAG(12, nil) require.NoError(t, err) @@ -254,91 +272,91 @@ func TestMVStates_SystemTxWithLargeDepsResolveTxDAG(t *testing.T) { func TestIsEqualRWVal(t *testing.T) { tests := []struct { - key RWKey + key *AccountState src interface{} compared interface{} isEqual bool }{ { - key: AccountStateKey(mockAddr, AccountNonce), + key: &AccountNonce, src: uint64(0), compared: uint64(0), isEqual: true, }, { - key: AccountStateKey(mockAddr, AccountNonce), + key: &AccountNonce, src: uint64(0), compared: uint64(1), isEqual: false, }, { - key: AccountStateKey(mockAddr, AccountBalance), + key: &AccountBalance, src: new(uint256.Int).SetUint64(1), compared: new(uint256.Int).SetUint64(1), isEqual: true, }, { - key: AccountStateKey(mockAddr, AccountBalance), + key: &AccountBalance, src: nil, compared: new(uint256.Int).SetUint64(1), isEqual: false, }, { - key: AccountStateKey(mockAddr, AccountBalance), + key: &AccountBalance, src: (*uint256.Int)(nil), compared: 
new(uint256.Int).SetUint64(1), isEqual: false, }, { - key: AccountStateKey(mockAddr, AccountBalance), + key: &AccountBalance, src: (*uint256.Int)(nil), compared: (*uint256.Int)(nil), isEqual: true, }, { - key: AccountStateKey(mockAddr, AccountCodeHash), + key: &AccountCodeHash, src: []byte{1}, compared: []byte{1}, isEqual: true, }, { - key: AccountStateKey(mockAddr, AccountCodeHash), + key: &AccountCodeHash, src: nil, compared: []byte{1}, isEqual: false, }, { - key: AccountStateKey(mockAddr, AccountCodeHash), + key: &AccountCodeHash, src: ([]byte)(nil), compared: []byte{1}, isEqual: false, }, { - key: AccountStateKey(mockAddr, AccountCodeHash), + key: &AccountCodeHash, src: ([]byte)(nil), compared: ([]byte)(nil), isEqual: true, }, { - key: AccountStateKey(mockAddr, AccountSuicide), + key: &AccountSuicide, src: struct{}{}, compared: struct{}{}, isEqual: false, }, { - key: AccountStateKey(mockAddr, AccountSuicide), + key: &AccountSuicide, src: nil, compared: struct{}{}, isEqual: false, }, { - key: StorageStateKey(mockAddr, mockHash), + key: nil, src: mockHash, compared: mockHash, isEqual: true, }, { - key: StorageStateKey(mockAddr, mockHash), + key: nil, src: nil, compared: mockHash, isEqual: false, @@ -350,27 +368,55 @@ func TestIsEqualRWVal(t *testing.T) { } } -func mockRWSet(index int, read []string, write []string) *RWSet { +func mockRWSet(index int, read []interface{}, write []interface{}) *RWSet { ver := StateVersion{ TxIndex: index, } set := NewRWSet(ver) + set.accReadSet[common.Address{}] = map[AccountState]RWItem{} + set.accWriteSet[common.Address{}] = map[AccountState]RWItem{} + set.slotReadSet[common.Address{}] = map[common.Hash]RWItem{} + set.slotWriteSet[common.Address{}] = map[common.Hash]RWItem{} for _, k := range read { - set.readSet[str2key(k)] = &RWItem{ - Ver: ver, - Val: struct{}{}, + state, ok := k.(AccountState) + if ok { + set.accReadSet[common.Address{}][state] = RWItem{ + Ver: ver, + Val: struct{}{}, + } + } else { + set.slotReadSet[common.Address{}][str2Slot(k.(string))] = RWItem{ + Ver: ver, + Val: struct{}{}, + } } } for _, k := range write { - set.writeSet[str2key(k)] = &RWItem{ - Ver: ver, - Val: struct{}{}, + state, ok := k.(AccountState) + if ok { + set.accWriteSet[common.Address{}][state] = RWItem{ + Ver: ver, + Val: struct{}{}, + } + } else { + set.slotWriteSet[common.Address{}][str2Slot(k.(string))] = RWItem{ + Ver: ver, + Val: struct{}{}, + } } } return set } +func mockUintSlice(cnt int) []uint64 { + ret := make([]uint64, cnt) + for i := 0; i < cnt; i++ { + ret[i] = rand.Uint64() % uint64(cnt) + } + return ret +} + func mockRWSetWithVal(index int, read []interface{}, write []interface{}) *RWSet { ver := StateVersion{ TxIndex: index, @@ -384,19 +430,43 @@ func mockRWSetWithVal(index int, read []interface{}, write []interface{}) *RWSet panic("wrong write size") } + set.accReadSet[common.Address{}] = map[AccountState]RWItem{} + set.slotReadSet[common.Address{}] = map[common.Hash]RWItem{} + set.accWriteSet[common.Address{}] = map[AccountState]RWItem{} + set.slotWriteSet[common.Address{}] = map[common.Hash]RWItem{} for i := 0; i < len(read); { - set.readSet[str2key(read[i].(string))] = &RWItem{ - Ver: StateVersion{ - TxIndex: index - 1, - }, - Val: read[i+1], + state, ok := read[i].(AccountState) + if ok { + set.accReadSet[common.Address{}][state] = RWItem{ + Ver: StateVersion{ + TxIndex: index - 1, + }, + Val: read[i+1], + } + } else { + slot := str2Slot(read[i].(string)) + set.slotReadSet[common.Address{}][slot] = RWItem{ + Ver: StateVersion{ + TxIndex: index 
- 1, + }, + Val: read[i+1], + } } i += 2 } for i := 0; i < len(write); { - set.writeSet[str2key(write[i].(string))] = &RWItem{ - Ver: ver, - Val: write[i+1], + state, ok := write[i].(AccountState) + if ok { + set.accWriteSet[common.Address{}][state] = RWItem{ + Ver: ver, + Val: write[i+1], + } + } else { + slot := str2Slot(write[i].(string)) + set.slotWriteSet[common.Address{}][slot] = RWItem{ + Ver: ver, + Val: write[i+1], + } } i += 2 } @@ -404,11 +474,15 @@ func mockRWSetWithVal(index int, read []interface{}, write []interface{}) *RWSet return set } +func str2Slot(str string) common.Hash { + return common.BytesToHash([]byte(str)) +} + func mockRandomRWSet(count int) []*RWSet { var ret []*RWSet for i := 0; i < count; i++ { - read := []string{fmt.Sprintf("0x%d", i)} - write := []string{fmt.Sprintf("0x%d", i)} + read := []interface{}{fmt.Sprintf("0x%d", i)} + write := []interface{}{fmt.Sprintf("0x%d", i)} if i != 0 && rand.Bool() { depCnt := rand.Int()%i + 1 last := 0 @@ -448,12 +522,3 @@ func randInRange(i, j int) (int, bool) { } return rand.Int()%(j-i) + i, true } - -func str2key(k string) RWKey { - key := RWKey{} - if len(k) > len(key) { - k = k[:len(key)] - } - copy(key[:], k) - return key -} From 38040aacc9098eff617b2ce01c29151181cf764e Mon Sep 17 00:00:00 2001 From: andyzhang2023 Date: Fri, 30 Aug 2024 17:08:51 +0800 Subject: [PATCH 10/42] add gaslimit reservation --- miner/worker.go | 51 +++++++++++++++++++++++++++++++++---------------- 1 file changed, 35 insertions(+), 16 deletions(-) diff --git a/miner/worker.go b/miner/worker.go index b9254d8051..7548ac0e29 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -124,17 +124,20 @@ type environment struct { blobs int UnRevertible mapset.Set[common.Hash] + + gasForTxDAG uint64 // gas reserved for the txdag } // copy creates a deep copy of environment. func (env *environment) copy() *environment { cpy := &environment{ - signer: env.signer, - state: env.state.CopyWithMvStates(), - tcount: env.tcount, - coinbase: env.coinbase, - header: types.CopyHeader(env.header), - receipts: copyReceipts(env.receipts), + signer: env.signer, + state: env.state.CopyWithMvStates(), + tcount: env.tcount, + coinbase: env.coinbase, + header: types.CopyHeader(env.header), + receipts: copyReceipts(env.receipts), + gasForTxDAG: env.gasForTxDAG, } if env.gasPool != nil { gasPool := *env.gasPool @@ -930,7 +933,7 @@ func (w *worker) commitTransactions(env *environment, plainTxs, blobTxs *transac } } // If we don't have enough gas for any further transactions then we're done. - if env.gasPool.Gas() < params.TxGas { + if env.gasPool.Gas()-env.gasForTxDAG < params.TxGas { log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas) break } @@ -966,7 +969,7 @@ func (w *worker) commitTransactions(env *environment, plainTxs, blobTxs *transac } txTotalMeter.Mark(1) // If we don't have enough space for the next transaction, skip the account. 
- if env.gasPool.Gas() < ltx.Gas { + if env.gasPool.Gas()-env.gasForTxDAG < ltx.Gas { log.Trace("Not enough gas left for transaction", "hash", ltx.Hash, "left", env.gasPool.Gas(), "needed", ltx.Gas) txs.Pop() txErrNotenoughgasMeter.Mark(1) @@ -1049,7 +1052,7 @@ func (w *worker) appendTxDAG(env *environment) { return } // TODO this is a placeholder for the tx DAG data that will be generated by the stateDB - txForDAG, err := w.generateDAGTx(env.state, env.signer, env.tcount, env.coinbase) + txForDAG, err := w.generateDAGTx(env.state, env.signer, env.tcount, env.coinbase, env.gasForTxDAG) if err != nil { log.Warn("failed to generate DAG tx", "err", err) return @@ -1063,7 +1066,7 @@ func (w *worker) appendTxDAG(env *environment) { } // generateDAGTx generates a DAG transaction for the block -func (w *worker) generateDAGTx(statedb *state.StateDB, signer types.Signer, txIndex int, coinbase common.Address) (*types.Transaction, error) { +func (w *worker) generateDAGTx(statedb *state.StateDB, signer types.Signer, txIndex int, coinbase common.Address, gasLimitForDag uint64) (*types.Transaction, error) { if statedb == nil { return nil, fmt.Errorf("failed to get state db, env.state=nil") } @@ -1074,7 +1077,6 @@ func (w *worker) generateDAGTx(statedb *state.StateDB, signer types.Signer, txIn //privateKey, err := crypto.HexToECDSA(privateKeyHex) sender := w.config.ParallelTxDAGSenderPriv - receiver := DefaultTxDAGAddress if sender == nil { return nil, fmt.Errorf("missing sender private key") } @@ -1103,7 +1105,14 @@ func (w *worker) generateDAGTx(statedb *state.StateDB, signer types.Signer, txIn } // Create the transaction - tx := types.NewTransaction(nonce, receiver, big.NewInt(0), 21100, big.NewInt(0), data) + tx := types.NewTx(&types.LegacyTx{ + Nonce: nonce, + To: &DefaultTxDAGAddress, + Value: big.NewInt(0), + Gas: gasLimitForDag, + GasPrice: big.NewInt(0), + Data: data, + }) // Sign the transaction with the private key signedTx, err := types.SignTx(tx, signer, sender) @@ -1373,6 +1382,16 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { // forced transactions done, fill rest of block with transactions if !genParams.noTxs { + // reserve gas for TxDAG + work.gasForTxDAG = 0 + if w.chain.TxDAGEnabledWhenMine() { + // We reserved n% of the header.GasLimit for TxDAG data, because: + // 1. a 10k-transactions block will need at most 64k bytes for its txdag data. a 10k-transactions block usually cost 500M gas; + // 2. before EIP-2028, it cost 68 gas per non-zero byte, and after EIP-2028, it cost 16 gas; + // 3. the gas for a n-bytes txdag transaction is calculated by: 64*68 = 4352, rate: 4352/500000000 = 0.0000087; + // for just in case, we finally reserved 0.00002% gas for TxDAG data. 
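Editorial note on the arithmetic above: 64 KB of non-zero calldata at the pre-EIP-2028 rate of 68 gas per byte is about 64 * 1024 * 68 ≈ 4.46M gas, roughly 0.9% of a 500M-gas block, while the reservation below works out to GasLimit/50000 (0.002% of the gas limit) plus the 21000 intrinsic transaction cost. Later commits in this series refine this rough figure into an explicit per-byte estimate.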
+ work.gasForTxDAG = work.header.GasLimit/50000 + 21000 + } // use shared interrupt if present interrupt := genParams.interrupt if interrupt == nil { @@ -1414,15 +1433,15 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { isBuildBlockInterruptCounter.Inc(1) } } + if w.chain.TxDAGEnabledWhenMine() { + // append a DAG tx at the end of the block + w.appendTxDAG(work) + } } if intr := genParams.interrupt; intr != nil && genParams.isUpdate && intr.Load() != commitInterruptNone { return &newPayloadResult{err: errInterruptedUpdate} } - if w.chain.TxDAGEnabledWhenMine() { - // append a DAG tx at the end of the block - w.appendTxDAG(work) - } start = time.Now() block, err := w.engine.FinalizeAndAssemble(w.chain, work.header, work.state, work.txs, nil, work.receipts, genParams.withdrawals) From 9eb6f8ff9f04bad1ca5eb650d1b99d240cad87ef Mon Sep 17 00:00:00 2001 From: andyzhang2023 Date: Fri, 30 Aug 2024 19:25:21 +0800 Subject: [PATCH 11/42] fix: estimate the gas for TxDAG transaction --- miner/worker.go | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/miner/worker.go b/miner/worker.go index 7548ac0e29..a0da863840 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1385,12 +1385,20 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { // reserve gas for TxDAG work.gasForTxDAG = 0 if w.chain.TxDAGEnabledWhenMine() { - // We reserved n% of the header.GasLimit for TxDAG data, because: - // 1. a 10k-transactions block will need at most 64k bytes for its txdag data. a 10k-transactions block usually cost 500M gas; - // 2. before EIP-2028, it cost 68 gas per non-zero byte, and after EIP-2028, it cost 16 gas; - // 3. the gas for a n-bytes txdag transaction is calculated by: 64*68 = 4352, rate: 4352/500000000 = 0.0000087; - // for just in case, we finally reserved 0.00002% gas for TxDAG data. - work.gasForTxDAG = work.header.GasLimit/50000 + 21000 + // a 10k-transactions block need at most 64kB to store its transaction + // TxDAG transaction is a legacy transaction, so its accessList is nil, and no need to pay for accessList. + // gasForTxDAG = params.TxGas + len(TxDAGBytes) x params.TxDataNonZeroGasFrontier + // 1. a 10k-transactions block consumes about 500M gas, which needs 64kB to store its TxData + // 3. a 4k-transactions block consumes about 200M gas, which needs 32kB to store its TxData + // 2. a 2k-transactions block consumes about 100M gas, which needs 14kB to store its TxData + // 3. and so on ... 
+ // so we can estimate that a n-gaslimit block needs a TxDAG data byte of length: n/100M x 14kB + // it's cost totally about 0.003 ~ 0.01 of the header.GasLimit + if w.chainConfig.IsIstanbul(work.header.Number) { + work.gasForTxDAG = (work.header.GasLimit/100000000*14*1024)*params.TxDataNonZeroGasEIP2028 + params.TxGas + } else { + work.gasForTxDAG = (work.header.GasLimit/100000000*14*1024)*params.TxDataNonZeroGasFrontier + params.TxGas + } } // use shared interrupt if present interrupt := genParams.interrupt From 7332f069fd9f741239225fd9745721399ea95f36 Mon Sep 17 00:00:00 2001 From: andyzhang2023 Date: Thu, 5 Sep 2024 17:09:32 +0800 Subject: [PATCH 12/42] fix: correctly estimate the max gaslimit for TxDAG data --- miner/worker.go | 39 ++++++++++++++++++++++----------------- 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/miner/worker.go b/miner/worker.go index a0da863840..21804eb0cd 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1334,6 +1334,27 @@ func (w *worker) fillTransactions(interrupt *atomic.Int32, env *environment) err return nil } +func (w *worker) estimateGasForTxDAG(env *environment) uint64 { + var gas uint64 = 0 + if w.chain.TxDAGEnabledWhenMine() { + // 1. a 10k-transactions block need at most 64kB to store its transaction, and its data size grows linearly with the number of transactions + // 2. 100M gaslimit block can include at most 4761 = (100M/21000) transactions + // + // the total gas for TxDAG is calculated as follows: + // + // MaxBytesPerTx = 64 * 1024 / 10000 = 6.5 bytes ~ 7 bytes + // MaxTxsCanInclude uint64 = GasLimit / 21000 + // total = MaxBytesPerTx * NoZeroGas * MaxTxsCanInclude + params.TxGas + // + if w.chainConfig.IsIstanbul(env.header.Number) { + gas = 7*params.TxDataNonZeroGasEIP2028*(env.header.GasLimit/21000) + params.TxGas + } else { + gas = 7*params.TxDataNonZeroGasFrontier*(env.header.GasLimit/21000) + params.TxGas + } + } + return gas +} + // generateWork generates a sealing block based on the given parameters. func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { // TODO delete after debug performance metrics @@ -1383,23 +1404,7 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { // forced transactions done, fill rest of block with transactions if !genParams.noTxs { // reserve gas for TxDAG - work.gasForTxDAG = 0 - if w.chain.TxDAGEnabledWhenMine() { - // a 10k-transactions block need at most 64kB to store its transaction - // TxDAG transaction is a legacy transaction, so its accessList is nil, and no need to pay for accessList. - // gasForTxDAG = params.TxGas + len(TxDAGBytes) x params.TxDataNonZeroGasFrontier - // 1. a 10k-transactions block consumes about 500M gas, which needs 64kB to store its TxData - // 3. a 4k-transactions block consumes about 200M gas, which needs 32kB to store its TxData - // 2. a 2k-transactions block consumes about 100M gas, which needs 14kB to store its TxData - // 3. and so on ... 
- // so we can estimate that a n-gaslimit block needs a TxDAG data byte of length: n/100M x 14kB - // it's cost totally about 0.003 ~ 0.01 of the header.GasLimit - if w.chainConfig.IsIstanbul(work.header.Number) { - work.gasForTxDAG = (work.header.GasLimit/100000000*14*1024)*params.TxDataNonZeroGasEIP2028 + params.TxGas - } else { - work.gasForTxDAG = (work.header.GasLimit/100000000*14*1024)*params.TxDataNonZeroGasFrontier + params.TxGas - } - } + work.gasForTxDAG = w.estimateGasForTxDAG(work) // use shared interrupt if present interrupt := genParams.interrupt if interrupt == nil { From 0001951cd0c7bf30a189a3632e4242cb5ae1b732 Mon Sep 17 00:00:00 2001 From: galaio Date: Tue, 3 Sep 2024 20:32:19 +0800 Subject: [PATCH 13/42] txdag: add metrics when mining; --- miner/worker.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/miner/worker.go b/miner/worker.go index 21804eb0cd..1c191f9d45 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1088,6 +1088,9 @@ func (w *worker) generateDAGTx(statedb *state.StateDB, signer types.Signer, txIn } // txIndex is the index of this txDAG transaction txDAG.SetTxDep(txIndex, types.TxDep{Flags: &types.NonDependentRelFlag}) + if metrics.EnabledExpensive { + go types.EvaluateTxDAGPerformance(txDAG, statedb.ResolveStats()) + } publicKey := sender.Public() publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey) From 4b57be33d0a8b5d75f56eda7d7986d6ccae270fb Mon Sep 17 00:00:00 2001 From: galaio Date: Wed, 4 Sep 2024 10:57:48 +0800 Subject: [PATCH 14/42] txdag: fix mvstates copy issue; --- core/state/statedb.go | 8 +++++--- core/state_processor.go | 2 +- core/types/mvstates.go | 45 +++++++++++++++++++++++++++++++++++++++++ miner/worker.go | 3 +++ 4 files changed, 54 insertions(+), 4 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index dcbc4ccaf2..18be0e2f4b 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -787,7 +787,7 @@ func (s *StateDB) CreateAccount(addr common.Address) { func (s *StateDB) CopyWithMvStates() *StateDB { state := s.Copy() if s.mvStates != nil { - state.mvStates = s.mvStates + state.mvStates = s.mvStates.Copy() } return state } @@ -1755,18 +1755,20 @@ func (s *StateDB) RecordStorageWrite(addr common.Address, slot common.Hash, val s.rwSet.RecordStorageWrite(addr, slot, val) } -func (s *StateDB) ResetMVStates(txCount int) { +func (s *StateDB) ResetMVStates(txCount int) *types.MVStates { if s.mvStates != nil { s.mvStates.Stop() } - s.mvStates = types.NewMVStates(txCount).EnableAsyncGen() + s.mvStates = types.NewMVStates(txCount) s.rwSet = nil + return s.mvStates } func (s *StateDB) FinaliseRWSet() error { if s.rwSet == nil { return nil } + log.Debug("FinaliseRWSet", "index", s.txIndex) rwSet := s.rwSet stat := s.stat if metrics.EnabledExpensive { diff --git a/core/state_processor.go b/core/state_processor.go index 3ecf4644dd..2a3aa8c0a9 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -91,7 +91,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg } statedb.MarkFullProcessed() if p.bc.enableTxDAG { - statedb.ResetMVStates(len(block.Transactions())) + statedb.ResetMVStates(len(block.Transactions())).EnableAsyncGen() } // Iterate over and process the individual transactions for i, tx := range block.Transactions() { diff --git a/core/types/mvstates.go b/core/types/mvstates.go index c6601c45b3..8cab6a4a11 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -321,6 +321,14 @@ func (w *PendingWrites) FindPrevWrites(txIndex int) []*RWItem { return 
nil } +func (w *PendingWrites) Copy() *PendingWrites { + np := &PendingWrites{} + for i, item := range w.list { + np.list[i] = item + } + return np +} + type MVStates struct { rwSets map[int]*RWSet pendingAccWriteSet map[common.Address]map[AccountState]*PendingWrites @@ -365,6 +373,40 @@ func (s *MVStates) EnableAsyncGen() *MVStates { return s } +func (s *MVStates) Copy() *MVStates { + s.lock.Lock() + defer s.lock.Unlock() + if len(s.asyncGenChan) > 0 { + log.Error("It's dangerous to copy a async MVStates") + } + ns := NewMVStates(len(s.rwSets)) + ns.nextFinaliseIndex = s.nextFinaliseIndex + ns.txDepCache = append(ns.txDepCache, s.txDepCache...) + for k, v := range s.rwSets { + ns.rwSets[k] = v + } + for k, v := range s.stats { + ns.stats[k] = v + } + for addr, sub := range s.pendingAccWriteSet { + for state, writes := range sub { + if _, ok := ns.pendingAccWriteSet[addr]; !ok { + ns.pendingAccWriteSet[addr] = make(map[AccountState]*PendingWrites) + } + ns.pendingAccWriteSet[addr][state] = writes.Copy() + } + } + for addr, sub := range s.pendingSlotWriteSet { + for slot, writes := range sub { + if _, ok := ns.pendingSlotWriteSet[addr]; !ok { + ns.pendingSlotWriteSet[addr] = make(map[common.Hash]*PendingWrites) + } + ns.pendingSlotWriteSet[addr][slot] = writes.Copy() + } + } + return ns +} + func (s *MVStates) Stop() error { s.lock.Lock() defer s.lock.Unlock() @@ -489,6 +531,9 @@ func (s *MVStates) Finalise(index int) error { return err } s.resolveDepsMapCacheByWrites(i, s.rwSets[i]) + log.Debug("Finalise the reads/writes", "index", i, + "readCnt", len(s.rwSets[i].accReadSet)+len(s.rwSets[i].slotReadSet), + "writeCnt", len(s.rwSets[i].accWriteSet)+len(s.rwSets[i].slotWriteSet)) } return nil diff --git a/miner/worker.go b/miner/worker.go index 1c191f9d45..5314c1b8c2 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1422,6 +1422,7 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { wg.Add(1) go func() { defer wg.Done() + newWork.state.MVStates().EnableAsyncGen() err := w.fillTransactions(interrupt, newWork) if errors.Is(err, errBlockInterruptedByTimeout) { log.Warn("Block building is interrupted", "allowance", common.PrettyDuration(w.newpayloadTimeout), "parentHash", genParams.parentHash) @@ -1431,6 +1432,7 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { isBuildBlockInterruptCounter.Inc(1) } }() + work.state.MVStates().EnableAsyncGen() err := w.fillTransactionsAndBundles(interrupt, work) wg.Wait() timer.Stop() // don't need timeout interruption any more @@ -1439,6 +1441,7 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { work = newWork } } else { + work.state.MVStates().EnableAsyncGen() err := w.fillTransactions(interrupt, work) timer.Stop() // don't need timeout interruption any more if errors.Is(err, errBlockInterruptedByTimeout) { From 69c6c715d2c9d814e24531c33dda791a5af7a713 Mon Sep 17 00:00:00 2001 From: galaio Date: Wed, 4 Sep 2024 11:34:06 +0800 Subject: [PATCH 15/42] txdag: add timeout for async generation; --- core/types/mvstates.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/core/types/mvstates.go b/core/types/mvstates.go index 8cab6a4a11..ffd5761833 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" "sync" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -425,6 +426,7 @@ func (s *MVStates) stopAsyncGen() { } func (s *MVStates) asyncGenLoop() { + timeout := time.After(3 * 
time.Second) for { select { case tx := <-s.asyncGenChan: @@ -433,6 +435,9 @@ func (s *MVStates) asyncGenLoop() { } case <-s.asyncStopChan: return + case <-timeout: + log.Warn("asyncDepGenLoop exit by timeout") + return } } } From e74d13d4fbe6f135f68609bff8a527698a621ad5 Mon Sep 17 00:00:00 2001 From: galaio Date: Wed, 4 Sep 2024 12:04:44 +0800 Subject: [PATCH 16/42] txdag: fix nil pointer issue; --- miner/worker.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/miner/worker.go b/miner/worker.go index 5314c1b8c2..50d6bd541d 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1422,7 +1422,9 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { wg.Add(1) go func() { defer wg.Done() - newWork.state.MVStates().EnableAsyncGen() + if newWork.state.MVStates() != nil { + newWork.state.MVStates().EnableAsyncGen() + } err := w.fillTransactions(interrupt, newWork) if errors.Is(err, errBlockInterruptedByTimeout) { log.Warn("Block building is interrupted", "allowance", common.PrettyDuration(w.newpayloadTimeout), "parentHash", genParams.parentHash) @@ -1432,7 +1434,9 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { isBuildBlockInterruptCounter.Inc(1) } }() - work.state.MVStates().EnableAsyncGen() + if work.state.MVStates() != nil { + work.state.MVStates().EnableAsyncGen() + } err := w.fillTransactionsAndBundles(interrupt, work) wg.Wait() timer.Stop() // don't need timeout interruption any more @@ -1441,7 +1445,9 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { work = newWork } } else { - work.state.MVStates().EnableAsyncGen() + if work.state.MVStates() != nil { + work.state.MVStates().EnableAsyncGen() + } err := w.fillTransactions(interrupt, work) timer.Stop() // don't need timeout interruption any more if errors.Is(err, errBlockInterruptedByTimeout) { From 60e4cbf839a4ced0538437acc3f23ae1bf3681c0 Mon Sep 17 00:00:00 2001 From: galaio Date: Thu, 5 Sep 2024 15:19:23 +0800 Subject: [PATCH 17/42] txdag: fix mining reset tx issue; --- core/types/mvstates.go | 11 ++++++----- core/types/mvstates_test.go | 1 - miner/worker.go | 2 ++ 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/core/types/mvstates.go b/core/types/mvstates.go index ffd5761833..49503a1a9e 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -501,9 +501,6 @@ func (s *MVStates) FulfillRWSet(rwSet *RWSet, stat *ExeStat) error { s.lock.Lock() defer s.lock.Unlock() index := rwSet.ver.TxIndex - if index < s.nextFinaliseIndex { - return errors.New("fulfill a finalized RWSet") - } if stat != nil { if stat.txIndex != index { return errors.New("wrong execution stat") @@ -531,7 +528,11 @@ func (s *MVStates) Finalise(index int) error { defer s.lock.Unlock() // just finalise all previous txs - for i := s.nextFinaliseIndex; i <= index; i++ { + start := s.nextFinaliseIndex + if start > index { + start = index + } + for i := start; i <= index; i++ { if err := s.innerFinalise(i); err != nil { return err } @@ -550,7 +551,7 @@ func (s *MVStates) innerFinalise(index int) error { return fmt.Errorf("finalise a non-exist RWSet, index: %d", index) } - if index != s.nextFinaliseIndex { + if index > s.nextFinaliseIndex { return fmt.Errorf("finalise in wrong order, next: %d, input: %d", s.nextFinaliseIndex, index) } diff --git a/core/types/mvstates_test.go b/core/types/mvstates_test.go index 792f8cd031..70e716a852 100644 --- a/core/types/mvstates_test.go +++ b/core/types/mvstates_test.go @@ -27,7 +27,6 @@ func 
TestMVStates_BasicUsage(t *testing.T) { require.Nil(t, ms.ReadSlotState(0, common.Address{}, str2Slot("0x00"))) require.NoError(t, ms.Finalise(0)) - require.Error(t, ms.FulfillRWSet(mockRWSetWithVal(0, nil, nil), nil)) require.Nil(t, ms.ReadAccState(0, common.Address{}, AccountBalance)) require.Nil(t, ms.ReadSlotState(0, common.Address{}, str2Slot("0x00"))) require.Equal(t, NewRWItem(StateVersion{TxIndex: 0}, 0), ms.ReadAccState(1, common.Address{}, AccountBalance)) diff --git a/miner/worker.go b/miner/worker.go index 50d6bd541d..c8d066e279 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -912,6 +912,7 @@ func (w *worker) applyTransaction(env *environment, tx *types.Transaction) (*typ ) receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &env.coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, *w.chain.GetVMConfig()) if err != nil { + log.Debug("ApplyTransaction err", "block", env.header.Number.Uint64(), "tx", env.tcount, "err", err) env.state.RevertToSnapshot(snap) env.gasPool.SetGas(gp) } @@ -1388,6 +1389,7 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { start := time.Now() if w.chain.TxDAGEnabledWhenMine() { work.state.ResetMVStates(0) + log.Debug("ResetMVStates", "block", work.header.Number.Uint64()) } for _, tx := range genParams.txs { from, _ := types.Sender(work.signer, tx) From 0593ec337ace1fcb08a21435f4cdb2b2b03a07f3 Mon Sep 17 00:00:00 2001 From: galaio Date: Thu, 5 Sep 2024 16:45:33 +0800 Subject: [PATCH 18/42] txdag: add debug log; --- miner/worker.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/miner/worker.go b/miner/worker.go index c8d066e279..fb18731ca4 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1108,6 +1108,8 @@ func (w *worker) generateDAGTx(statedb *state.StateDB, signer types.Signer, txIn return nil, fmt.Errorf("failed to encode txDAG, err: %v", err) } + enc, _ := types.EncodeTxDAG(txDAG) + log.Debug("EncodeTxDAGCalldata", "tx", txDAG.TxCount(), "enc", len(enc), "data", data, "dag", txDAG) // Create the transaction tx := types.NewTx(&types.LegacyTx{ Nonce: nonce, From 9c9e3291266e764d70a5ae816192440a4ed3e05f Mon Sep 17 00:00:00 2001 From: galaio Date: Thu, 5 Sep 2024 19:58:19 +0800 Subject: [PATCH 19/42] txdag: fix txdep cache logic to replace failed tx; --- core/types/mvstates.go | 25 +++++++++++++++---------- core/types/mvstates_test.go | 8 ++++++-- miner/worker.go | 2 +- 3 files changed, 22 insertions(+), 13 deletions(-) diff --git a/core/types/mvstates.go b/core/types/mvstates.go index 49503a1a9e..2f19c88515 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -338,7 +338,7 @@ type MVStates struct { // dependency map cache for generating TxDAG // depMapCache[i].exist(j) means j->i, and i > j - txDepCache []TxDepMaker + txDepCache map[int]TxDepMaker // async dep analysis asyncGenChan chan int @@ -355,7 +355,7 @@ func NewMVStates(txCount int) *MVStates { rwSets: make(map[int]*RWSet, txCount), pendingAccWriteSet: make(map[common.Address]map[AccountState]*PendingWrites, txCount*8), pendingSlotWriteSet: make(map[common.Address]map[common.Hash]*PendingWrites, txCount*8), - txDepCache: make([]TxDepMaker, 0, txCount), + txDepCache: make(map[int]TxDepMaker, txCount), stats: make(map[int]*ExeStat, txCount), } } @@ -382,7 +382,9 @@ func (s *MVStates) Copy() *MVStates { } ns := NewMVStates(len(s.rwSets)) ns.nextFinaliseIndex = s.nextFinaliseIndex - ns.txDepCache = append(ns.txDepCache, s.txDepCache...) 
+ for k, v := range s.txDepCache { + ns.txDepCache[k] = v + } for k, v := range s.rwSets { ns.rwSets[k] = v } @@ -578,7 +580,8 @@ func (s *MVStates) innerFinalise(index int) error { s.pendingSlotWriteSet[k][slot].Append(&item) } } - s.nextFinaliseIndex++ + // reset nextFinaliseIndex to index+1, it may revert to previous txs + s.nextFinaliseIndex = index + 1 return nil } @@ -601,7 +604,7 @@ func (s *MVStates) resolveDepsMapCacheByWrites(index int, rwSet *RWSet) { // analysis dep, if the previous transaction is not executed/validated, re-analysis is required depMap := NewTxDepMap(0) if rwSet.excludedTx { - s.txDepCache = append(s.txDepCache, depMap) + s.txDepCache[index] = depMap return } // check tx dependency, only check key, skip version @@ -633,14 +636,16 @@ func (s *MVStates) resolveDepsMapCacheByWrites(index int, rwSet *RWSet) { } } } + log.Debug("resolveDepsMapCacheByWrites", "tx", index, "deps", depMap.deps()) // clear redundancy deps compared with prev preDeps := depMap.deps() for _, prev := range preDeps { - for _, tx := range s.txDepCache[prev].deps() { + for _, tx := range s.txDepCache[int(prev)].deps() { depMap.remove(tx) } } - s.txDepCache = append(s.txDepCache, depMap) + log.Debug("resolveDepsMapCacheByWrites after clean", "tx", index, "deps", depMap.deps()) + s.txDepCache[index] = depMap } // resolveDepsCache must be executed in order @@ -648,7 +653,7 @@ func (s *MVStates) resolveDepsCache(index int, rwSet *RWSet) { // analysis dep, if the previous transaction is not executed/validated, re-analysis is required depMap := NewTxDepMap(0) if rwSet.excludedTx { - s.txDepCache = append(s.txDepCache, depMap) + s.txDepCache[index] = depMap return } for prev := 0; prev < index; prev++ { @@ -682,7 +687,7 @@ func (s *MVStates) resolveDepsCache(index int, rwSet *RWSet) { } } } - s.txDepCache = append(s.txDepCache, depMap) + s.txDepCache[index] = depMap } // ResolveTxDAG generate TxDAG from RWSets @@ -712,7 +717,7 @@ func (s *MVStates) ResolveTxDAG(txCnt int, gasFeeReceivers []common.Address) (Tx } } txDAG.TxDeps[i].TxIndexes = []uint64{} - if len(s.txDepCache) <= i { + if s.txDepCache[i] == nil { s.resolveDepsMapCacheByWrites(i, s.rwSets[i]) } if s.rwSets[i].excludedTx { diff --git a/core/types/mvstates_test.go b/core/types/mvstates_test.go index 70e716a852..9737c7f9e0 100644 --- a/core/types/mvstates_test.go +++ b/core/types/mvstates_test.go @@ -89,7 +89,11 @@ func TestMVStates_AsyncDepGen_SimpleResolveTxDAG(t *testing.T) { mockRWSet(0, []interface{}{"0x00"}, []interface{}{"0x00"}), mockRWSet(1, []interface{}{"0x01"}, []interface{}{"0x01"}), mockRWSet(2, []interface{}{"0x02"}, []interface{}{"0x02"}), + mockRWSet(3, []interface{}{"0x03"}, []interface{}{"0x03"}), + mockRWSet(3, []interface{}{"0x03"}, []interface{}{"0x03"}), mockRWSet(3, []interface{}{"0x00", "0x03"}, []interface{}{"0x03"}), + }) + finaliseRWSets(t, ms, []*RWSet{ mockRWSet(4, []interface{}{"0x00", "0x04"}, []interface{}{"0x04"}), mockRWSet(5, []interface{}{"0x01", "0x02", "0x05"}, []interface{}{"0x05"}), mockRWSet(6, []interface{}{"0x02", "0x05", "0x06"}, []interface{}{"0x06"}), @@ -509,9 +513,9 @@ func mockRandomRWSet(count int) []*RWSet { } func finaliseRWSets(t *testing.T, mv *MVStates, rwSets []*RWSet) { - for i, rwSet := range rwSets { + for _, rwSet := range rwSets { require.NoError(t, mv.FulfillRWSet(rwSet, nil)) - require.NoError(t, mv.Finalise(i)) + require.NoError(t, mv.Finalise(rwSet.ver.TxIndex)) } } diff --git a/miner/worker.go b/miner/worker.go index fb18731ca4..35d252b1de 100644 --- a/miner/worker.go +++ 
b/miner/worker.go @@ -1109,7 +1109,7 @@ func (w *worker) generateDAGTx(statedb *state.StateDB, signer types.Signer, txIn } enc, _ := types.EncodeTxDAG(txDAG) - log.Debug("EncodeTxDAGCalldata", "tx", txDAG.TxCount(), "enc", len(enc), "data", data, "dag", txDAG) + log.Debug("EncodeTxDAGCalldata", "tx", txDAG.TxCount(), "enc", len(enc), "data", len(data), "dag", txDAG) // Create the transaction tx := types.NewTx(&types.LegacyTx{ Nonce: nonce, From 1ae383962576f91fccad5a7bc2d19555127aa045 Mon Sep 17 00:00:00 2001 From: galaio Date: Thu, 29 Aug 2024 20:52:50 +0800 Subject: [PATCH 20/42] txdag: clean code, abandon useless codes, add more async logic; txdag: refactor async logic, reduce concurrent logic; txdag: using slice cache rather than sending chan directly; txdag: opt gc issue; txdag: support init cache pool; --- cmd/evm/blockrunner.go | 2 +- core/blockchain.go | 4 +- core/state/state_object.go | 12 +- core/state/statedb.go | 230 ++++-------- core/state_processor.go | 14 +- core/state_transition.go | 24 +- core/types/dag.go | 182 +--------- core/types/dag_test.go | 12 +- core/types/mvstates.go | 698 ++++++++++++++++++++---------------- core/types/mvstates_test.go | 275 ++++---------- core/vm/interface.go | 5 +- go.mod | 1 + go.sum | 2 + miner/worker.go | 15 +- tests/block_test.go | 60 +++- tests/block_test_util.go | 5 +- 16 files changed, 638 insertions(+), 903 deletions(-) diff --git a/cmd/evm/blockrunner.go b/cmd/evm/blockrunner.go index c5d836e0ea..1eef34dd04 100644 --- a/cmd/evm/blockrunner.go +++ b/cmd/evm/blockrunner.go @@ -86,7 +86,7 @@ func blockTestCmd(ctx *cli.Context) error { continue } test := tests[name] - if err := test.Run(false, rawdb.HashScheme, tracer, func(res error, chain *core.BlockChain) { + if err := test.Run(false, rawdb.HashScheme, tracer, false, func(res error, chain *core.BlockChain) { if ctx.Bool(DumpFlag.Name) { if state, _ := chain.State(); state != nil { fmt.Println(string(state.Dump(nil))) diff --git a/core/blockchain.go b/core/blockchain.go index 71abfd8673..6b5c3320e3 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1985,12 +1985,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) if bc.enableTxDAG { // compare input TxDAG when it enable in consensus - dag, err := statedb.ResolveTxDAG(len(block.Transactions()), []common.Address{block.Coinbase(), params.OptimismBaseFeeRecipient, params.OptimismL1FeeRecipient}) + dag, err := statedb.ResolveTxDAG(len(block.Transactions())) if err == nil { // TODO(galaio): check TxDAG correctness? 
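 			// Note: at this stage the resolved DAG is only logged and, when expensive
 			// metrics are enabled, fed to EvaluateTxDAGPerformance; it is not yet
 			// compared against a DAG carried by the block itself (the TODO above).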
log.Debug("Process TxDAG result", "block", block.NumberU64(), "txDAG", dag) if metrics.EnabledExpensive { - go types.EvaluateTxDAGPerformance(dag, statedb.ResolveStats()) + go types.EvaluateTxDAGPerformance(dag) } } else { log.Error("ResolveTxDAG err", "block", block.NumberU64(), "tx", len(block.Transactions()), "err", err) diff --git a/core/state/state_object.go b/core/state/state_object.go index d1af1f2a58..ae534ed215 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -300,23 +300,27 @@ func (s *stateObject) finalise(prefetch bool) { } func (s *stateObject) finaliseRWSet() { + if s.db.mvStates == nil { + return + } + ms := s.db.mvStates for key, value := range s.dirtyStorage { // three are some unclean dirtyStorage from previous reverted txs, it will skip finalise // so add a new rule, if val has no change, then skip it if value == s.GetCommittedState(key) { continue } - s.db.RecordStorageWrite(s.address, key, value) + ms.RecordStorageWrite(s.address, key) } if s.dirtyNonce != nil && *s.dirtyNonce != s.data.Nonce { - s.db.RecordAccountWrite(s.address, types.AccountNonce, *s.dirtyNonce) + ms.RecordAccountWrite(s.address, types.AccountNonce) } if s.dirtyBalance != nil && s.dirtyBalance.Cmp(s.data.Balance) != 0 { - s.db.RecordAccountWrite(s.address, types.AccountBalance, new(uint256.Int).Set(s.dirtyBalance)) + ms.RecordAccountWrite(s.address, types.AccountBalance) } if s.dirtyCodeHash != nil && !slices.Equal(s.dirtyCodeHash, s.data.CodeHash) { - s.db.RecordAccountWrite(s.address, types.AccountCodeHash, s.dirtyCodeHash) + ms.RecordAccountWrite(s.address, types.AccountCodeHash) } } diff --git a/core/state/statedb.go b/core/state/statedb.go index 18be0e2f4b..ada0e9311d 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -18,13 +18,14 @@ package state import ( - "errors" "fmt" "runtime" "sort" "sync" "time" + "golang.org/x/exp/slices" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/gopool" "github.com/ethereum/go-ethereum/core/rawdb" @@ -116,9 +117,7 @@ type StateDB struct { logSize uint // parallel EVM related - rwSet *types.RWSet mvStates *types.MVStates - stat *types.ExeStat // Preimages occurred seen by VM in the scope of block. 
preimages map[common.Hash][]byte @@ -346,10 +345,11 @@ func (s *StateDB) Empty(addr common.Address) bool { } // GetBalance retrieves the balance from the given address or 0 if object not found -func (s *StateDB) GetBalance(addr common.Address) (ret *uint256.Int) { - defer func() { - s.RecordAccountRead(addr, types.AccountBalance, ret) - }() +func (s *StateDB) GetBalance(addr common.Address) *uint256.Int { + if s.mvStates != nil { + s.mvStates.RecordAccountRead(addr, types.AccountBalance) + } + stateObject := s.getStateObject(addr) if stateObject != nil { return stateObject.Balance() @@ -358,10 +358,10 @@ func (s *StateDB) GetBalance(addr common.Address) (ret *uint256.Int) { } // GetNonce retrieves the nonce from the given address or 0 if object not found -func (s *StateDB) GetNonce(addr common.Address) (ret uint64) { - defer func() { - s.RecordAccountRead(addr, types.AccountNonce, ret) - }() +func (s *StateDB) GetNonce(addr common.Address) uint64 { + if s.mvStates != nil { + s.mvStates.RecordAccountRead(addr, types.AccountNonce) + } stateObject := s.getStateObject(addr) if stateObject != nil { return stateObject.Nonce() @@ -386,9 +386,9 @@ func (s *StateDB) TxIndex() int { } func (s *StateDB) GetCode(addr common.Address) []byte { - defer func() { - s.RecordAccountRead(addr, types.AccountCodeHash, s.GetCodeHash(addr)) - }() + if s.mvStates != nil { + s.mvStates.RecordAccountRead(addr, types.AccountCodeHash) + } stateObject := s.getStateObject(addr) if stateObject != nil { return stateObject.Code() @@ -397,9 +397,9 @@ func (s *StateDB) GetCode(addr common.Address) []byte { } func (s *StateDB) GetCodeSize(addr common.Address) int { - defer func() { - s.RecordAccountRead(addr, types.AccountCodeHash, s.GetCodeHash(addr)) - }() + if s.mvStates != nil { + s.mvStates.RecordAccountRead(addr, types.AccountCodeHash) + } stateObject := s.getStateObject(addr) if stateObject != nil { return stateObject.CodeSize() @@ -407,10 +407,10 @@ func (s *StateDB) GetCodeSize(addr common.Address) int { return 0 } -func (s *StateDB) GetCodeHash(addr common.Address) (ret common.Hash) { - defer func() { - s.RecordAccountRead(addr, types.AccountCodeHash, ret.Bytes()) - }() +func (s *StateDB) GetCodeHash(addr common.Address) common.Hash { + if s.mvStates != nil { + s.mvStates.RecordAccountRead(addr, types.AccountCodeHash) + } stateObject := s.getStateObject(addr) if stateObject != nil { return common.BytesToHash(stateObject.CodeHash()) @@ -419,10 +419,10 @@ func (s *StateDB) GetCodeHash(addr common.Address) (ret common.Hash) { } // GetState retrieves a value from the given account's storage trie. -func (s *StateDB) GetState(addr common.Address, hash common.Hash) (ret common.Hash) { - defer func() { - s.RecordStorageRead(addr, hash, ret) - }() +func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { + if s.mvStates != nil { + s.mvStates.RecordStorageRead(addr, hash) + } stateObject := s.getStateObject(addr) if stateObject != nil { return stateObject.GetState(hash) @@ -431,10 +431,10 @@ func (s *StateDB) GetState(addr common.Address, hash common.Hash) (ret common.Ha } // GetCommittedState retrieves a value from the given account's committed storage trie. 
-func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) (ret common.Hash) { - defer func() { - s.RecordStorageRead(addr, hash, ret) - }() +func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash { + if s.mvStates != nil { + s.mvStates.RecordStorageRead(addr, hash) + } stateObject := s.getStateObject(addr) if stateObject != nil { return stateObject.GetCommittedState(hash) @@ -461,24 +461,26 @@ func (s *StateDB) HasSelfDestructed(addr common.Address) bool { // AddBalance adds amount to the account associated with addr. func (s *StateDB) AddBalance(addr common.Address, amount *uint256.Int) { + if s.mvStates != nil { + s.mvStates.RecordAccountRead(addr, types.AccountBalance) + } stateObject := s.getOrNewStateObject(addr) if stateObject != nil { - s.RecordAccountRead(addr, types.AccountBalance, stateObject.Balance()) stateObject.AddBalance(amount) return } - s.RecordAccountRead(addr, types.AccountBalance, common.Big0) } // SubBalance subtracts amount from the account associated with addr. func (s *StateDB) SubBalance(addr common.Address, amount *uint256.Int) { + if s.mvStates != nil { + s.mvStates.RecordAccountRead(addr, types.AccountBalance) + } stateObject := s.getOrNewStateObject(addr) if stateObject != nil { - s.RecordAccountRead(addr, types.AccountBalance, stateObject.Balance()) stateObject.SubBalance(amount) return } - s.RecordAccountRead(addr, types.AccountBalance, common.Big0) } func (s *StateDB) SetBalance(addr common.Address, amount *uint256.Int) { @@ -658,7 +660,6 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject { // flag set. This is needed by the state journal to revert to the correct s- // destructed object instead of wiping all knowledge about the state object. func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject { - s.RecordAccountRead(addr, types.AccountSelf, struct{}{}) // Prefer live objects if any is available if obj := s.stateObjects[addr]; obj != nil { return obj @@ -933,13 +934,19 @@ func (s *StateDB) GetRefund() uint64 { // the journal as well as the refunds. Finalise, however, will not push any updates // into the tries just yet. Only IntermediateRoot or Commit will do that. func (s *StateDB) Finalise(deleteEmptyObjects bool) { + var feeReceivers []common.Address + if s.mvStates != nil { + feeReceivers = s.mvStates.FeeReceivers() + } addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties)) // finalise stateObjectsDestruct for addr, acc := range s.stateObjectsDestructDirty { s.stateObjectsDestruct[addr] = acc + if s.mvStates != nil && !slices.Contains(feeReceivers, addr) { + s.mvStates.RecordAccountWrite(addr, types.AccountSuicide) + } } - s.stateObjectsDestructDirty = make(map[common.Address]*types.StateAccount) for addr := range s.journal.dirties { obj, exist := s.stateObjects[addr] if !exist { @@ -959,6 +966,9 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { // event is tracked. 
if _, ok := s.stateObjectsDestruct[obj.address]; !ok { s.stateObjectsDestruct[obj.address] = obj.origin + if s.mvStates != nil && !slices.Contains(feeReceivers, addr) { + s.mvStates.RecordAccountWrite(addr, types.AccountSuicide) + } } // Note, we can't do this only at the end of a block because multiple // transactions within the same block might self destruct and then @@ -968,6 +978,9 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { delete(s.accountsOrigin, obj.address) // Clear out any previously updated account data (may be recreated via a resurrect) delete(s.storagesOrigin, obj.address) // Clear out any previously updated storage data (may be recreated via a resurrect) } else { + if s.mvStates != nil && !slices.Contains(feeReceivers, addr) { + obj.finaliseRWSet() + } obj.finalise(true) // Prefetch slots in the background } obj.created = false @@ -979,6 +992,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { // the commit-phase will be a lot faster addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure } + if s.prefetcher != nil && len(addressesToPrefetch) > 0 { s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch) } @@ -1689,129 +1703,51 @@ func (s *StateDB) GetSnap() snapshot.Snapshot { return s.snap } -func (s *StateDB) BeforeTxTransition() { - log.Debug("BeforeTxTransition", "mvStates", s.mvStates == nil, "rwSet", s.rwSet == nil) - if s.mvStates == nil { - return - } - s.rwSet = types.NewRWSet(types.StateVersion{ - TxIndex: s.txIndex, - }) -} - -func (s *StateDB) BeginTxStat(index int) { - if s.mvStates == nil { - return - } - if metrics.EnabledExpensive { - s.stat = types.NewExeStat(index).Begin() - } -} - -func (s *StateDB) StopTxStat(usedGas uint64) { +func (s *StateDB) BeginTxRecorder(index int, isExcludeTx bool) { if s.mvStates == nil { return } - // record stat first - if metrics.EnabledExpensive && s.stat != nil { - s.stat.Done().WithGas(usedGas) - rwSet := s.mvStates.RWSet(s.txIndex) - if rwSet != nil { - ar, sr := rwSet.ReadSet() - s.stat.WithRead(len(ar) + len(sr)) + if isExcludeTx { + rwSet := types.NewRWSet(index).WithExcludedTxFlag() + if err := s.mvStates.FinaliseWithRWSet(rwSet); err != nil { + log.Error("MVStates SystemTx Finalise err", "err", err) } - } -} - -func (s *StateDB) RecordAccountRead(addr common.Address, state types.AccountState, val interface{}) { - if s.rwSet == nil { - return - } - s.rwSet.RecordAccountRead(addr, state, types.StateVersion{ - TxIndex: -1, - }, val) -} - -func (s *StateDB) RecordStorageRead(addr common.Address, slot common.Hash, val interface{}) { - if s.rwSet == nil { - return - } - s.rwSet.RecordStorageRead(addr, slot, types.StateVersion{ - TxIndex: -1, - }, val) -} - -func (s *StateDB) RecordAccountWrite(addr common.Address, state types.AccountState, val interface{}) { - if s.rwSet == nil { return } - s.rwSet.RecordAccountWrite(addr, state, val) + s.mvStates.RecordNewTx(index) } -func (s *StateDB) RecordStorageWrite(addr common.Address, slot common.Hash, val interface{}) { - if s.rwSet == nil { - return - } - s.rwSet.RecordStorageWrite(addr, slot, val) -} - -func (s *StateDB) ResetMVStates(txCount int) *types.MVStates { - if s.mvStates != nil { - s.mvStates.Stop() - } - s.mvStates = types.NewMVStates(txCount) - s.rwSet = nil +func (s *StateDB) ResetMVStates(txCount int, feeReceivers []common.Address) *types.MVStates { + s.mvStates = types.NewMVStates(txCount, feeReceivers) return s.mvStates } -func (s *StateDB) FinaliseRWSet() error 
{ - if s.rwSet == nil { +func (s *StateDB) CheckFeeReceiversRWSet() error { + if s.mvStates == nil { return nil } - log.Debug("FinaliseRWSet", "index", s.txIndex) - rwSet := s.rwSet - stat := s.stat if metrics.EnabledExpensive { defer func(start time.Time) { s.TxDAGGenerate += time.Since(start) }(time.Now()) } - ver := types.StateVersion{ - TxIndex: s.txIndex, - } - if ver != rwSet.Version() { - return errors.New("you finalize a wrong ver of RWSet") - } - - // finalise stateObjectsDestruct - for addr := range s.stateObjectsDestructDirty { - s.RecordAccountWrite(addr, types.AccountSuicide, struct{}{}) - } - for addr := range s.journal.dirties { - obj, exist := s.stateObjects[addr] - if !exist { + s.mvStates.RecordReadDone() + feeReceivers := s.mvStates.FeeReceivers() + for _, addr := range feeReceivers { + if _, ok := s.stateObjectsDestructDirty[addr]; !ok { continue } - if obj.selfDestructed || obj.empty() { - // We need to maintain account deletions explicitly (will remain - // set indefinitely). Note only the first occurred self-destruct - // event is tracked. - if _, ok := s.stateObjectsDestruct[obj.address]; !ok { - s.RecordAccountWrite(addr, types.AccountSuicide, struct{}{}) - } - } else { - // finalise account & storages - obj.finaliseRWSet() - } + s.mvStates.RecordCannotDelayGasFee() + return nil } - // reset stateDB - s.rwSet = nil - if err := s.mvStates.FulfillRWSet(rwSet, stat); err != nil { - return err + for _, addr := range feeReceivers { + if _, ok := s.journal.dirties[addr]; !ok { + continue + } + s.mvStates.RecordCannotDelayGasFee() + return nil } - // just Finalise rwSet in serial execution - s.mvStates.AsyncFinalise(s.txIndex) return nil } @@ -1831,7 +1767,7 @@ func (s *StateDB) removeStateObjectsDestruct(addr common.Address) { delete(s.stateObjectsDestructDirty, addr) } -func (s *StateDB) ResolveTxDAG(txCnt int, gasFeeReceivers []common.Address) (types.TxDAG, error) { +func (s *StateDB) ResolveTxDAG(txCnt int) (types.TxDAG, error) { if s.mvStates == nil { return types.NewEmptyTxDAG(), nil } @@ -1841,33 +1777,13 @@ func (s *StateDB) ResolveTxDAG(txCnt int, gasFeeReceivers []common.Address) (typ }(time.Now()) } - return s.mvStates.ResolveTxDAG(txCnt, gasFeeReceivers) -} - -func (s *StateDB) ResolveStats() map[int]*types.ExeStat { - if s.mvStates == nil { - return nil - } - - return s.mvStates.Stats() + return s.mvStates.ResolveTxDAG(txCnt) } func (s *StateDB) MVStates() *types.MVStates { return s.mvStates } -func (s *StateDB) RecordSystemTxRWSet(index int) { - if s.mvStates == nil { - return - } - s.mvStates.FulfillRWSet(types.NewRWSet(types.StateVersion{ - TxIndex: index, - }).WithExcludedTxFlag(), types.NewExeStat(index).WithExcludedTxFlag()) - if err := s.mvStates.Finalise(index); err != nil { - log.Error("MVStates SystemTx Finalise err", "err", err) - } -} - // copySet returns a deep-copied set. 
func copySet[k comparable](set map[k][]byte) map[k][]byte { copied := make(map[k][]byte, len(set)) diff --git a/core/state_processor.go b/core/state_processor.go index 2a3aa8c0a9..7d3d26ba2d 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -91,11 +91,12 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg } statedb.MarkFullProcessed() if p.bc.enableTxDAG { - statedb.ResetMVStates(len(block.Transactions())).EnableAsyncGen() + feeReceivers := []common.Address{context.Coinbase, params.OptimismBaseFeeRecipient, params.OptimismL1FeeRecipient} + statedb.ResetMVStates(len(block.Transactions()), feeReceivers).EnableAsyncGen() } // Iterate over and process the individual transactions for i, tx := range block.Transactions() { - statedb.BeginTxStat(i) + statedb.BeginTxRecorder(i, tx.IsSystemTx() || tx.IsDepositTx()) start := time.Now() msg, err := TransactionToMessage(tx, signer, header.BaseFee) if err != nil { @@ -106,17 +107,14 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg if err != nil { return nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err) } - - // if systemTx or depositTx, tag it - if tx.IsSystemTx() || tx.IsDepositTx() { - statedb.RecordSystemTxRWSet(i) - } receipts = append(receipts, receipt) allLogs = append(allLogs, receipt.Logs...) if metrics.EnabledExpensive { processTxTimer.UpdateSince(start) } - statedb.StopTxStat(receipt.GasUsed) + } + if statedb.MVStates() != nil { + statedb.MVStates().BatchRecordHandle() } // Fail if Shanghai not enabled and len(withdrawals) is non-zero. withdrawals := block.Withdrawals() diff --git a/core/state_transition.go b/core/state_transition.go index 3e7aeaa084..a0843e3eba 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -409,10 +409,6 @@ func (st *StateTransition) preCheck() error { // However if any consensus issue encountered, return the error directly with // nil evm execution result. 
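// With the MVStates recorder in place, per-transaction read/write tracking is
// done inside the StateDB accessors themselves, so TransitionDb no longer
// starts or finalises an RWSet. The only remaining hook is
// CheckFeeReceiversRWSet further down in innerTransitionDb, which flags the
// block as unable to delay gas-fee payout when a fee-receiver account shows
// up dirty in the transaction.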
func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { - // start record rw set in here - if !st.msg.IsSystemTx && !st.msg.IsDepositTx { - st.state.BeforeTxTransition() - } if mint := st.msg.Mint; mint != nil { mintU256, overflow := uint256.FromBig(mint) if overflow { @@ -437,10 +433,6 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { if st.msg.IsSystemTx && !st.evm.ChainConfig().IsRegolith(st.evm.Context.Time) { gasUsed = 0 } - // just record error tx here - if ferr := st.state.FinaliseRWSet(); ferr != nil { - log.Error("finalise error deposit tx rwSet fail", "block", st.evm.Context.BlockNumber, "tx", st.evm.StateDB.TxIndex(), "err", ferr) - } result = &ExecutionResult{ UsedGas: gasUsed, Err: fmt.Errorf("failed deposit: %w", err), @@ -448,12 +440,6 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { } err = nil } - if err != nil { - // just record error tx here - if ferr := st.state.FinaliseRWSet(); ferr != nil { - log.Error("finalise error tx rwSet fail", "block", st.evm.Context.BlockNumber, "tx", st.evm.StateDB.TxIndex(), "err", ferr) - } - } return result, err } @@ -534,11 +520,6 @@ func (st *StateTransition) innerTransitionDb() (*ExecutionResult, error) { } DebugInnerExecutionDuration += time.Since(start) - // stop record rw set in here, skip gas fee distribution - if ferr := st.state.FinaliseRWSet(); ferr != nil { - log.Error("finalise tx rwSet fail", "block", st.evm.Context.BlockNumber, "tx", st.evm.StateDB.TxIndex(), "err", ferr) - } - // if deposit: skip refunds, skip tipping coinbase // Regolith changes this behaviour to report the actual gasUsed instead of always reporting all gas used. if st.msg.IsDepositTx && !rules.IsOptimismRegolith { @@ -575,6 +556,11 @@ func (st *StateTransition) innerTransitionDb() (*ExecutionResult, error) { ReturnData: ret, }, nil } + + // check fee receiver rwSet here + if ferr := st.state.CheckFeeReceiversRWSet(); ferr != nil { + log.Error("CheckFeeReceiversRWSet err", "block", st.evm.Context.BlockNumber, "tx", st.evm.StateDB.TxIndex(), "err", ferr) + } effectiveTip := msg.GasPrice if rules.IsLondon { effectiveTip = cmath.BigMin(msg.GasTipCap, new(big.Int).Sub(msg.GasFeeCap, st.evm.Context.BaseFee)) diff --git a/core/types/dag.go b/core/types/dag.go index e46f7bb897..2bdcb46e97 100644 --- a/core/types/dag.go +++ b/core/types/dag.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "strings" - "time" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/metrics" @@ -394,16 +393,6 @@ func findTxPathIndex(path []uint64, cur uint64, txMap map[uint64]uint64) (uint64 return 0, false } -// travelTxDAGExecutionPaths will print all tx execution path -func travelTxDAGExecutionPaths(d TxDAG) [][]uint64 { - exePaths := make([][]uint64, 0) - // travel tx deps with BFS - for i := uint64(0); i < uint64(d.TxCount()); i++ { - exePaths = append(exePaths, travelTxDAGTargetPath(d, i)) - } - return exePaths -} - // TxDep store the current tx dependency relation with other txs type TxDep struct { TxIndexes []uint64 @@ -473,175 +462,18 @@ func (d *TxDep) ClearFlag(flag uint8) { } var ( - longestTimeTimer = metrics.NewRegisteredTimer("dag/longesttime", nil) - longestGasTimer = metrics.NewRegisteredTimer("dag/longestgas", nil) - serialTimeTimer = metrics.NewRegisteredTimer("dag/serialtime", nil) - totalTxMeter = metrics.NewRegisteredMeter("dag/txcnt", nil) - totalNoDepMeter = metrics.NewRegisteredMeter("dag/nodepcnt", nil) - total2DepMeter = metrics.NewRegisteredMeter("dag/2depcnt", nil) - 
total4DepMeter = metrics.NewRegisteredMeter("dag/4depcnt", nil) - total8DepMeter = metrics.NewRegisteredMeter("dag/8depcnt", nil) - total16DepMeter = metrics.NewRegisteredMeter("dag/16depcnt", nil) - total32DepMeter = metrics.NewRegisteredMeter("dag/32depcnt", nil) + totalTxMeter = metrics.NewRegisteredMeter("dag/txcnt", nil) + totalNoDepMeter = metrics.NewRegisteredMeter("dag/nodepcnt", nil) ) -func EvaluateTxDAGPerformance(dag TxDAG, stats map[int]*ExeStat) { - if len(stats) != dag.TxCount() || dag.TxCount() == 0 { +func EvaluateTxDAGPerformance(dag TxDAG) { + if dag.TxCount() == 0 { return } - paths := travelTxDAGExecutionPaths(dag) - // Attention: this is based on best schedule, it will reduce a lot by executing previous txs in parallel - // It assumes that there is no parallel thread limit - txCount := dag.TxCount() - var ( - maxGasIndex int - maxGas uint64 - maxTimeIndex int - maxTime time.Duration - txTimes = make([]time.Duration, txCount) - txGases = make([]uint64, txCount) - txReads = make([]int, txCount) - noDepCnt int - ) - - totalTxMeter.Mark(int64(txCount)) - for i, path := range paths { - if stats[i].excludedTx { - continue - } - if len(path) <= 1 { - noDepCnt++ + totalTxMeter.Mark(int64(dag.TxCount())) + for i := 0; i < dag.TxCount(); i++ { + if len(TxDependency(dag, i)) == 0 { totalNoDepMeter.Mark(1) } - if len(path) <= 3 { - total2DepMeter.Mark(1) - } - if len(path) <= 5 { - total4DepMeter.Mark(1) - } - if len(path) <= 9 { - total8DepMeter.Mark(1) - } - if len(path) <= 17 { - total16DepMeter.Mark(1) - } - if len(path) <= 33 { - total32DepMeter.Mark(1) - } - - // find the biggest cost time from dependency txs - for j := 0; j < len(path)-1; j++ { - prev := path[j] - if txTimes[prev] > txTimes[i] { - txTimes[i] = txTimes[prev] - } - if txGases[prev] > txGases[i] { - txGases[i] = txGases[prev] - } - if txReads[prev] > txReads[i] { - txReads[i] = txReads[prev] - } - } - txTimes[i] += stats[i].costTime - txGases[i] += stats[i].usedGas - txReads[i] += stats[i].readCount - - // try to find max gas - if txGases[i] > maxGas { - maxGas = txGases[i] - maxGasIndex = i - } - if txTimes[i] > maxTime { - maxTime = txTimes[i] - maxTimeIndex = i - } } - - longestTimeTimer.Update(txTimes[maxTimeIndex]) - longestGasTimer.Update(txTimes[maxGasIndex]) - // serial path - var ( - sTime time.Duration - sGas uint64 - sRead int - sPath []int - ) - for i, stat := range stats { - if stat.excludedTx { - continue - } - sPath = append(sPath, i) - sTime += stat.costTime - sGas += stat.usedGas - sRead += stat.readCount - } - serialTimeTimer.Update(sTime) -} - -// travelTxDAGTargetPath will print target execution path -func travelTxDAGTargetPath(d TxDAG, from uint64) []uint64 { - var ( - queue []uint64 - path []uint64 - ) - - queue = append(queue, from) - path = append(path, from) - for len(queue) > 0 { - var next []uint64 - for _, i := range queue { - for _, dep := range TxDependency(d, int(i)) { - if !slices.Contains(path, dep) { - path = append(path, dep) - next = append(next, dep) - } - } - } - queue = next - } - slices.Sort(path) - return path -} - -// ExeStat records tx execution info -type ExeStat struct { - txIndex int - usedGas uint64 - readCount int - startTime time.Time - costTime time.Duration - - // some flags - excludedTx bool -} - -func NewExeStat(txIndex int) *ExeStat { - return &ExeStat{ - txIndex: txIndex, - } -} - -func (s *ExeStat) Begin() *ExeStat { - s.startTime = time.Now() - return s -} - -func (s *ExeStat) Done() *ExeStat { - s.costTime = time.Since(s.startTime) - return s -} - 
-func (s *ExeStat) WithExcludedTxFlag() *ExeStat { - s.excludedTx = true - return s -} - -func (s *ExeStat) WithGas(gas uint64) *ExeStat { - s.usedGas = gas - return s -} - -func (s *ExeStat) WithRead(rc int) *ExeStat { - s.readCount = rc - return s } diff --git a/core/types/dag_test.go b/core/types/dag_test.go index cc50f2e5db..7da1a183b3 100644 --- a/core/types/dag_test.go +++ b/core/types/dag_test.go @@ -3,7 +3,6 @@ package types import ( "encoding/hex" "testing" - "time" "github.com/golang/snappy" @@ -50,16 +49,7 @@ func TestTxDAG(t *testing.T) { func TestEvaluateTxDAG(t *testing.T) { dag := mockSystemTxDAG() - stats := make(map[int]*ExeStat, dag.TxCount()) - for i := 0; i < dag.TxCount(); i++ { - stats[i] = NewExeStat(i).WithGas(uint64(i)).WithRead(i) - stats[i].costTime = time.Duration(i) - txDep := dag.TxDep(i) - if txDep.CheckFlag(NonDependentRelFlag) { - stats[i].WithExcludedTxFlag() - } - } - EvaluateTxDAGPerformance(dag, stats) + EvaluateTxDAGPerformance(dag) } func TestMergeTxDAGExecutionPaths_Simple(t *testing.T) { diff --git a/core/types/mvstates.go b/core/types/mvstates.go index 2f19c88515..aa318ac95f 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -1,7 +1,6 @@ package types import ( - "errors" "fmt" "strings" "sync" @@ -24,138 +23,113 @@ var ( ) const ( - asyncDepGenChanSize = 1000 + initRWEventCacheSize = 2 ) -// StateVersion record specific TxIndex & TxIncarnation -// if TxIndex equals to -1, it means the state read from DB. -type StateVersion struct { - TxIndex int - // Tx incarnation used for multi ver state - TxIncarnation int +func init() { + for i := 0; i < initRWEventCacheSize; i++ { + cache := make([]RWEventItem, 200) + rwEventCachePool.Put(&cache) + } } // RWSet record all read & write set in txs // Attention: this is not a concurrent safety structure type RWSet struct { - ver StateVersion - accReadSet map[common.Address]map[AccountState]RWItem - accWriteSet map[common.Address]map[AccountState]RWItem - slotReadSet map[common.Address]map[common.Hash]RWItem - slotWriteSet map[common.Address]map[common.Hash]RWItem + index int + accReadSet map[common.Address]map[AccountState]struct{} + accWriteSet map[common.Address]map[AccountState]struct{} + slotReadSet map[common.Address]map[common.Hash]struct{} + slotWriteSet map[common.Address]map[common.Hash]struct{} // some flags - rwRecordDone bool - excludedTx bool + excludedTx bool } -func NewRWSet(ver StateVersion) *RWSet { +func NewRWSet(index int) *RWSet { return &RWSet{ - ver: ver, - accReadSet: make(map[common.Address]map[AccountState]RWItem), - accWriteSet: make(map[common.Address]map[AccountState]RWItem), - slotReadSet: make(map[common.Address]map[common.Hash]RWItem), - slotWriteSet: make(map[common.Address]map[common.Hash]RWItem), + index: index, + accReadSet: make(map[common.Address]map[AccountState]struct{}), + accWriteSet: make(map[common.Address]map[AccountState]struct{}), + slotReadSet: make(map[common.Address]map[common.Hash]struct{}), + slotWriteSet: make(map[common.Address]map[common.Hash]struct{}), } } -func (s *RWSet) RecordAccountRead(addr common.Address, state AccountState, ver StateVersion, val interface{}) { +func (s *RWSet) Index() int { + return s.index +} + +func (s *RWSet) RecordAccountRead(addr common.Address, state AccountState) { // only record the first read version sub, ok := s.accReadSet[addr] if !ok { - s.accReadSet[addr] = make(map[AccountState]RWItem) - s.accReadSet[addr][state] = RWItem{ - Ver: ver, - Val: val, - } + s.accReadSet[addr] = make(map[AccountState]struct{}) 
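+ // the first read of an address also marks AccountSelf as read; this marker is
+ // later checked against the fee receivers (see finalisePreviousRWSet) to tell
+ // whether the account was touched at all by the transaction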
+ s.accReadSet[addr][AccountSelf] = struct{}{} + s.accReadSet[addr][state] = struct{}{} return } if _, ok = sub[state]; ok { return } - s.accReadSet[addr][state] = RWItem{ - Ver: ver, - Val: val, - } + s.accReadSet[addr][state] = struct{}{} } -func (s *RWSet) RecordStorageRead(addr common.Address, slot common.Hash, ver StateVersion, val interface{}) { +func (s *RWSet) RecordStorageRead(addr common.Address, slot common.Hash) { // only record the first read version sub, ok := s.slotReadSet[addr] if !ok { - s.slotReadSet[addr] = make(map[common.Hash]RWItem) - s.slotReadSet[addr][slot] = RWItem{ - Ver: ver, - Val: val, - } + s.slotReadSet[addr] = make(map[common.Hash]struct{}) + s.slotReadSet[addr][slot] = struct{}{} return } if _, ok = sub[slot]; ok { return } - s.slotReadSet[addr][slot] = RWItem{ - Ver: ver, - Val: val, - } + s.slotReadSet[addr][slot] = struct{}{} } -func (s *RWSet) RecordAccountWrite(addr common.Address, state AccountState, val interface{}) { +func (s *RWSet) RecordAccountWrite(addr common.Address, state AccountState) { _, ok := s.accWriteSet[addr] if !ok { - s.accWriteSet[addr] = make(map[AccountState]RWItem) - } - s.accWriteSet[addr][state] = RWItem{ - Ver: s.ver, - Val: val, + s.accWriteSet[addr] = make(map[AccountState]struct{}) } + s.accWriteSet[addr][state] = struct{}{} } -func (s *RWSet) RecordStorageWrite(addr common.Address, slot common.Hash, val interface{}) { +func (s *RWSet) RecordStorageWrite(addr common.Address, slot common.Hash) { _, ok := s.slotWriteSet[addr] if !ok { - s.slotWriteSet[addr] = make(map[common.Hash]RWItem) - } - s.slotWriteSet[addr][slot] = RWItem{ - Ver: s.ver, - Val: val, + s.slotWriteSet[addr] = make(map[common.Hash]struct{}) } + s.slotWriteSet[addr][slot] = struct{}{} } -func (s *RWSet) queryAccReadItem(addr common.Address, state AccountState) *RWItem { +func (s *RWSet) queryAccReadItem(addr common.Address, state AccountState) bool { sub, ok := s.accReadSet[addr] if !ok { - return nil + return false } - ret, ok := sub[state] - if !ok { - return nil - } - return &ret + _, ok = sub[state] + return ok } -func (s *RWSet) querySlotReadItem(addr common.Address, slot common.Hash) *RWItem { +func (s *RWSet) querySlotReadItem(addr common.Address, slot common.Hash) bool { sub, ok := s.slotReadSet[addr] if !ok { - return nil - } - - ret, ok := sub[slot] - if !ok { - return nil + return false } - return &ret -} -func (s *RWSet) Version() StateVersion { - return s.ver + _, ok = sub[slot] + return ok } -func (s *RWSet) ReadSet() (map[common.Address]map[AccountState]RWItem, map[common.Address]map[common.Hash]RWItem) { +func (s *RWSet) ReadSet() (map[common.Address]map[AccountState]struct{}, map[common.Address]map[common.Hash]struct{}) { return s.accReadSet, s.slotReadSet } -func (s *RWSet) WriteSet() (map[common.Address]map[AccountState]RWItem, map[common.Address]map[common.Hash]RWItem) { +func (s *RWSet) WriteSet() (map[common.Address]map[AccountState]struct{}, map[common.Address]map[common.Hash]struct{}) { return s.accWriteSet, s.slotWriteSet } @@ -166,7 +140,7 @@ func (s *RWSet) WithExcludedTxFlag() *RWSet { func (s *RWSet) String() string { builder := strings.Builder{} - builder.WriteString(fmt.Sprintf("tx: %v, inc: %v\nreadSet: [", s.ver.TxIndex, s.ver.TxIncarnation)) + builder.WriteString(fmt.Sprintf("tx: %v\nreadSet: [", s.index)) i := 0 for key, _ := range s.accReadSet { if i > 0 { @@ -206,79 +180,42 @@ func (s *RWSet) String() string { return builder.String() } -// isEqualRWVal compare state -func isEqualRWVal(accState *AccountState, src 
interface{}, compared interface{}) bool { - if accState != nil { - switch *accState { - case AccountBalance: - if src != nil && compared != nil { - return equalUint256(src.(*uint256.Int), compared.(*uint256.Int)) - } - return src == compared - case AccountNonce: - return src.(uint64) == compared.(uint64) - case AccountCodeHash: - if src != nil && compared != nil { - return slices.Equal(src.([]byte), compared.([]byte)) - } - return src == compared - } - return false - } - - if src != nil && compared != nil { - return src.(common.Hash) == compared.(common.Hash) - } - return src == compared -} - -func equalUint256(s, c *uint256.Int) bool { - if s != nil && c != nil { - return s.Eq(c) - } - - return s == c -} - -type RWItem struct { - Ver StateVersion - Val interface{} -} - -func NewRWItem(ver StateVersion, val interface{}) *RWItem { - return &RWItem{ - Ver: ver, - Val: val, - } -} - -func (w *RWItem) TxIndex() int { - return w.Ver.TxIndex -} +const ( + NewTxRWEvent byte = iota + ReadAccRWEvent + WriteAccRWEvent + ReadSlotRWEvent + WriteSlotRWEvent + CannotGasFeeDelayRWEvent +) -func (w *RWItem) TxIncarnation() int { - return w.Ver.TxIncarnation +type RWEventItem struct { + Event byte + Index int + Addr common.Address + State AccountState + Slot common.Hash } type PendingWrites struct { - list []*RWItem + list []int } func NewPendingWrites() *PendingWrites { return &PendingWrites{ - list: make([]*RWItem, 0), + list: make([]int, 0), } } -func (w *PendingWrites) Append(pw *RWItem) { - if i, found := w.SearchTxIndex(pw.TxIndex()); found { +func (w *PendingWrites) Append(pw int) { + if i, found := w.SearchTxIndex(pw); found { w.list[i] = pw return } w.list = append(w.list, pw) for i := len(w.list) - 1; i > 0; i-- { - if w.list[i].TxIndex() > w.list[i-1].TxIndex() { + if w.list[i] > w.list[i-1] { break } w.list[i-1], w.list[i] = w.list[i], w.list[i-1] @@ -291,30 +228,19 @@ func (w *PendingWrites) SearchTxIndex(txIndex int) (int, bool) { for i < j { h := int(uint(i+j) >> 1) // i ≤ h < j - if w.list[h].TxIndex() < txIndex { + if w.list[h] < txIndex { i = h + 1 } else { j = h } } - return i, i < n && w.list[i].TxIndex() == txIndex -} - -func (w *PendingWrites) FindLastWrite(txIndex int) *RWItem { - var i, _ = w.SearchTxIndex(txIndex) - for j := i - 1; j >= 0; j-- { - if w.list[j].TxIndex() < txIndex { - return w.list[j] - } - } - - return nil + return i, i < n && w.list[i] == txIndex } -func (w *PendingWrites) FindPrevWrites(txIndex int) []*RWItem { +func (w *PendingWrites) FindPrevWrites(txIndex int) []int { var i, _ = w.SearchTxIndex(txIndex) for j := i - 1; j >= 0; j-- { - if w.list[j].TxIndex() < txIndex { + if w.list[j] < txIndex { return w.list[:j+1] } } @@ -330,57 +256,64 @@ func (w *PendingWrites) Copy() *PendingWrites { return np } +var ( + rwEventCachePool = sync.Pool{New: func() any { + buf := make([]RWEventItem, 0) + return &buf + }} +) + type MVStates struct { rwSets map[int]*RWSet pendingAccWriteSet map[common.Address]map[AccountState]*PendingWrites pendingSlotWriteSet map[common.Address]map[common.Hash]*PendingWrites nextFinaliseIndex int + gasFeeReceivers []common.Address // dependency map cache for generating TxDAG // depMapCache[i].exist(j) means j->i, and i > j - txDepCache map[int]TxDepMaker + txDepCache map[int]TxDep - // async dep analysis - asyncGenChan chan int - asyncStopChan chan struct{} - asyncRunning bool + // async rw event recorder + asyncRWSet *RWSet + rwEventCh chan []RWEventItem + rwEventCache []RWEventItem + rwEventCacheIndex int + recordeReadDone bool // execution 
stat infos - stats map[int]*ExeStat - lock sync.RWMutex + lock sync.RWMutex + asyncWG sync.WaitGroup + cannotGasFeeDelay bool } -func NewMVStates(txCount int) *MVStates { - return &MVStates{ +func NewMVStates(txCount int, gasFeeReceivers []common.Address) *MVStates { + m := &MVStates{ rwSets: make(map[int]*RWSet, txCount), - pendingAccWriteSet: make(map[common.Address]map[AccountState]*PendingWrites, txCount*8), - pendingSlotWriteSet: make(map[common.Address]map[common.Hash]*PendingWrites, txCount*8), - txDepCache: make(map[int]TxDepMaker, txCount), - stats: make(map[int]*ExeStat, txCount), + pendingAccWriteSet: make(map[common.Address]map[AccountState]*PendingWrites, txCount), + pendingSlotWriteSet: make(map[common.Address]map[common.Hash]*PendingWrites, txCount), + txDepCache: make(map[int]TxDep, txCount), + rwEventCh: make(chan []RWEventItem, 100), + gasFeeReceivers: gasFeeReceivers, } + m.rwEventCache = *rwEventCachePool.Get().(*[]RWEventItem) + m.rwEventCache = m.rwEventCache[:cap(m.rwEventCache)] + m.rwEventCacheIndex = 0 + return m } func (s *MVStates) EnableAsyncGen() *MVStates { s.lock.Lock() defer s.lock.Unlock() - chanSize := asyncDepGenChanSize - if len(s.rwSets) > 0 && len(s.rwSets) < asyncDepGenChanSize { - chanSize = len(s.rwSets) - } - s.asyncGenChan = make(chan int, chanSize) - s.asyncStopChan = make(chan struct{}) - s.asyncRunning = true - go s.asyncGenLoop() + s.asyncWG.Add(1) + go s.asyncRWEventLoop() return s } func (s *MVStates) Copy() *MVStates { s.lock.Lock() defer s.lock.Unlock() - if len(s.asyncGenChan) > 0 { - log.Error("It's dangerous to copy a async MVStates") - } - ns := NewMVStates(len(s.rwSets)) + ns := NewMVStates(len(s.rwSets), s.gasFeeReceivers) ns.nextFinaliseIndex = s.nextFinaliseIndex for k, v := range s.txDepCache { ns.txDepCache[k] = v @@ -388,9 +321,6 @@ func (s *MVStates) Copy() *MVStates { for k, v := range s.rwSets { ns.rwSets[k] = v } - for k, v := range s.stats { - ns.stats[k] = v - } for addr, sub := range s.pendingAccWriteSet { for state, writes := range sub { if _, ok := ns.pendingAccWriteSet[addr]; !ok { @@ -410,32 +340,21 @@ func (s *MVStates) Copy() *MVStates { return ns } -func (s *MVStates) Stop() error { - s.lock.Lock() - defer s.lock.Unlock() - s.stopAsyncGen() - return nil -} - -func (s *MVStates) stopAsyncGen() { - if !s.asyncRunning { - return - } - s.asyncRunning = false - if s.asyncStopChan != nil { - close(s.asyncStopChan) - } -} - -func (s *MVStates) asyncGenLoop() { +func (s *MVStates) asyncRWEventLoop() { + defer s.asyncWG.Done() timeout := time.After(3 * time.Second) for { select { - case tx := <-s.asyncGenChan: - if err := s.Finalise(tx); err != nil { - log.Error("async MVStates Finalise err", "err", err) + case items, ok := <-s.rwEventCh: + if !ok { + return } - case <-s.asyncStopChan: + for _, item := range items { + s.handleRWEvent(item) + } + rwEventCachePool.Put(&items) + case <-timeout: + log.Warn("asyncRWEventLoop timeout") return case <-timeout: log.Warn("asyncDepGenLoop exit by timeout") @@ -444,91 +363,198 @@ func (s *MVStates) asyncGenLoop() { } } -func (s *MVStates) RWSets() map[int]*RWSet { - s.lock.RLock() - defer s.lock.RUnlock() - return s.rwSets +func (s *MVStates) handleRWEvent(item RWEventItem) { + s.lock.Lock() + defer s.lock.Unlock() + // init next RWSet, and finalise previous RWSet + if item.Event == NewTxRWEvent { + if item.Index > 0 { + s.finalisePreviousRWSet() + } + s.asyncRWSet = NewRWSet(item.Index) + return + } + // recorde current as cannot gas fee delay + if item.Event == 
CannotGasFeeDelayRWEvent { + s.cannotGasFeeDelay = true + return + } + if s.asyncRWSet == nil { + return + } + switch item.Event { + // recorde current read/write event + case ReadAccRWEvent: + s.asyncRWSet.RecordAccountRead(item.Addr, item.State) + case ReadSlotRWEvent: + s.asyncRWSet.RecordStorageRead(item.Addr, item.Slot) + case WriteAccRWEvent: + s.finaliseAccWrite(s.asyncRWSet.index, item.Addr, item.State) + case WriteSlotRWEvent: + s.finaliseSlotWrite(s.asyncRWSet.index, item.Addr, item.Slot) + } } -func (s *MVStates) Stats() map[int]*ExeStat { - s.lock.RLock() - defer s.lock.RUnlock() - return s.stats +func (s *MVStates) finalisePreviousRWSet() { + if s.asyncRWSet == nil { + return + } + index := s.asyncRWSet.index + if err := s.quickFinaliseWithRWSet(s.asyncRWSet); err != nil { + log.Error("Finalise err when handle NewTxRWEvent", "tx", index, "err", err) + return + } + // check if there are RW with gas fee receiver for gas delay calculation + for _, addr := range s.gasFeeReceivers { + if _, exist := s.asyncRWSet.accReadSet[addr]; !exist { + continue + } + if _, exist := s.asyncRWSet.accReadSet[addr][AccountSelf]; exist { + s.cannotGasFeeDelay = true + } + } + s.resolveDepsMapCacheByWrites(index, s.asyncRWSet) } -func (s *MVStates) RWSet(index int) *RWSet { - s.lock.RLock() - defer s.lock.RUnlock() - if index >= len(s.rwSets) { - return nil +func (s *MVStates) RecordNewTx(index int) { + if s.rwEventCacheIndex < len(s.rwEventCache) { + s.rwEventCache[s.rwEventCacheIndex].Event = NewTxRWEvent + s.rwEventCache[s.rwEventCacheIndex].Index = index + } else { + s.rwEventCache = append(s.rwEventCache, RWEventItem{ + Event: NewTxRWEvent, + Index: index, + }) } - return s.rwSets[index] + s.rwEventCacheIndex++ + s.recordeReadDone = false + s.BatchRecordHandle() } -// ReadAccState read state from MVStates -func (s *MVStates) ReadAccState(txIndex int, addr common.Address, state AccountState) *RWItem { - s.lock.RLock() - defer s.lock.RUnlock() +func (s *MVStates) RecordAccountRead(addr common.Address, state AccountState) { + if s.recordeReadDone { + return + } + if s.rwEventCacheIndex < len(s.rwEventCache) { + s.rwEventCache[s.rwEventCacheIndex].Event = ReadAccRWEvent + s.rwEventCache[s.rwEventCacheIndex].Addr = addr + s.rwEventCache[s.rwEventCacheIndex].State = state + s.rwEventCacheIndex++ + return + } + s.rwEventCache = append(s.rwEventCache, RWEventItem{ + Event: ReadAccRWEvent, + Addr: addr, + State: state, + }) + s.rwEventCacheIndex++ +} - sub, ok := s.pendingAccWriteSet[addr] - if !ok { - return nil +func (s *MVStates) RecordStorageRead(addr common.Address, slot common.Hash) { + if s.recordeReadDone { + return } - wset, ok := sub[state] - if !ok { - return nil + if s.rwEventCacheIndex < len(s.rwEventCache) { + s.rwEventCache[s.rwEventCacheIndex].Event = ReadSlotRWEvent + s.rwEventCache[s.rwEventCacheIndex].Addr = addr + s.rwEventCache[s.rwEventCacheIndex].Slot = slot + s.rwEventCacheIndex++ + return } - return wset.FindLastWrite(txIndex) + s.rwEventCache = append(s.rwEventCache, RWEventItem{ + Event: ReadSlotRWEvent, + Addr: addr, + Slot: slot, + }) + s.rwEventCacheIndex++ } -// ReadSlotState read state from MVStates -func (s *MVStates) ReadSlotState(txIndex int, addr common.Address, slot common.Hash) *RWItem { - s.lock.RLock() - defer s.lock.RUnlock() +func (s *MVStates) RecordReadDone() { + s.recordeReadDone = true +} - sub, ok := s.pendingSlotWriteSet[addr] - if !ok { - return nil +func (s *MVStates) RecordAccountWrite(addr common.Address, state AccountState) { + if s.rwEventCacheIndex 
< len(s.rwEventCache) { + s.rwEventCache[s.rwEventCacheIndex].Event = WriteAccRWEvent + s.rwEventCache[s.rwEventCacheIndex].Addr = addr + s.rwEventCache[s.rwEventCacheIndex].State = state + s.rwEventCacheIndex++ + return } - wset, ok := sub[slot] - if !ok { - return nil + s.rwEventCache = append(s.rwEventCache, RWEventItem{ + Event: WriteAccRWEvent, + Addr: addr, + State: state, + }) + s.rwEventCacheIndex++ +} + +func (s *MVStates) RecordStorageWrite(addr common.Address, slot common.Hash) { + if s.rwEventCacheIndex < len(s.rwEventCache) { + s.rwEventCache[s.rwEventCacheIndex].Event = WriteSlotRWEvent + s.rwEventCache[s.rwEventCacheIndex].Addr = addr + s.rwEventCache[s.rwEventCacheIndex].Slot = slot + s.rwEventCacheIndex++ + return } - return wset.FindLastWrite(txIndex) + s.rwEventCache = append(s.rwEventCache, RWEventItem{ + Event: WriteSlotRWEvent, + Addr: addr, + Slot: slot, + }) + s.rwEventCacheIndex++ } -// FulfillRWSet it can execute as async, and rwSet & stat must guarantee read-only -// try to generate TxDAG, when fulfill RWSet -func (s *MVStates) FulfillRWSet(rwSet *RWSet, stat *ExeStat) error { - s.lock.Lock() - defer s.lock.Unlock() - index := rwSet.ver.TxIndex - if stat != nil { - if stat.txIndex != index { - return errors.New("wrong execution stat") - } - s.stats[index] = stat +func (s *MVStates) RecordCannotDelayGasFee() { + if s.rwEventCacheIndex < len(s.rwEventCache) { + s.rwEventCache[s.rwEventCacheIndex].Event = CannotGasFeeDelayRWEvent + s.rwEventCacheIndex++ + return } - s.rwSets[index] = rwSet - return nil + s.rwEventCache = append(s.rwEventCache, RWEventItem{ + Event: CannotGasFeeDelayRWEvent, + }) + s.rwEventCacheIndex++ } -// AsyncFinalise it will put target write set into pending writes. -func (s *MVStates) AsyncFinalise(index int) { - // async resolve dependency, but non-block action - if s.asyncRunning && s.asyncGenChan != nil { - select { - case s.asyncGenChan <- index: - default: - } +func (s *MVStates) BatchRecordHandle() { + if len(s.rwEventCache) == 0 { + return } + s.rwEventCh <- s.rwEventCache[:s.rwEventCacheIndex] + s.rwEventCache = *rwEventCachePool.Get().(*[]RWEventItem) + s.rwEventCache = s.rwEventCache[:cap(s.rwEventCache)] + s.rwEventCacheIndex = 0 } -// Finalise it will put target write set into pending writes. -func (s *MVStates) Finalise(index int) error { +func (s *MVStates) stopAsyncRecorder() { + close(s.rwEventCh) + s.asyncWG.Wait() +} + +// quickFinaliseWithRWSet it just store RWSet and inc pendingIndex +func (s *MVStates) quickFinaliseWithRWSet(rwSet *RWSet) error { + index := rwSet.index + if s.nextFinaliseIndex != index { + return fmt.Errorf("finalise in wrong order, next: %d, input: %d", s.nextFinaliseIndex, index) + } + s.rwSets[index] = rwSet + s.nextFinaliseIndex++ + return nil +} + +// FinaliseWithRWSet it will put target write set into pending writes. 
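+// It also finalises any earlier pending indexes up to rwSet.index, so a caller
+// only needs to hand over each RWSet once it is complete. A minimal serial
+// usage sketch (illustrative only; the block processor instead records events
+// through the StateDB recorder, see BeginTxRecorder/BatchRecordHandle):
+//
+//	ms := NewMVStates(txCount, feeReceivers)
+//	for i := 0; i < txCount; i++ {
+//		rw := NewRWSet(i)
+//		// ... RecordAccountRead / RecordStorageWrite on rw ...
+//		_ = ms.FinaliseWithRWSet(rw)
+//	}
+//	dag, _ := ms.ResolveTxDAG(txCount)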
+func (s *MVStates) FinaliseWithRWSet(rwSet *RWSet) error { s.lock.Lock() defer s.lock.Unlock() - + if s.asyncRWSet == nil { + s.asyncRWSet = nil + } + index := rwSet.index + if s.nextFinaliseIndex > index { + return fmt.Errorf("finalise in wrong order, next: %d, input: %d", s.nextFinaliseIndex, index) + } + s.rwSets[index] = rwSet // just finalise all previous txs start := s.nextFinaliseIndex if start > index { @@ -543,6 +569,7 @@ func (s *MVStates) Finalise(index int) error { "readCnt", len(s.rwSets[i].accReadSet)+len(s.rwSets[i].slotReadSet), "writeCnt", len(s.rwSets[i].accWriteSet)+len(s.rwSets[i].slotWriteSet)) } + s.rwSets[index] = rwSet return nil } @@ -562,22 +589,22 @@ func (s *MVStates) innerFinalise(index int) error { if _, exist := s.pendingAccWriteSet[addr]; !exist { s.pendingAccWriteSet[addr] = make(map[AccountState]*PendingWrites) } - for state, item := range sub { + for state := range sub { if _, exist := s.pendingAccWriteSet[addr][state]; !exist { s.pendingAccWriteSet[addr][state] = NewPendingWrites() } - s.pendingAccWriteSet[addr][state].Append(&item) + s.pendingAccWriteSet[addr][state].Append(index) } } - for k, sub := range rwSet.slotWriteSet { - if _, exist := s.pendingSlotWriteSet[k]; !exist { - s.pendingSlotWriteSet[k] = make(map[common.Hash]*PendingWrites) + for addr, sub := range rwSet.slotWriteSet { + if _, exist := s.pendingSlotWriteSet[addr]; !exist { + s.pendingSlotWriteSet[addr] = make(map[common.Hash]*PendingWrites) } - for slot, item := range sub { - if _, exist := s.pendingSlotWriteSet[k][slot]; !exist { - s.pendingSlotWriteSet[k][slot] = NewPendingWrites() + for slot := range sub { + if _, exist := s.pendingSlotWriteSet[addr][slot]; !exist { + s.pendingSlotWriteSet[addr][slot] = NewPendingWrites() } - s.pendingSlotWriteSet[k][slot].Append(&item) + s.pendingSlotWriteSet[addr][slot].Append(index) } } // reset nextFinaliseIndex to index+1, it may revert to previous txs @@ -585,6 +612,28 @@ func (s *MVStates) innerFinalise(index int) error { return nil } +func (s *MVStates) finaliseSlotWrite(index int, addr common.Address, slot common.Hash) { + // append to pending write set + if _, exist := s.pendingSlotWriteSet[addr]; !exist { + s.pendingSlotWriteSet[addr] = make(map[common.Hash]*PendingWrites) + } + if _, exist := s.pendingSlotWriteSet[addr][slot]; !exist { + s.pendingSlotWriteSet[addr][slot] = NewPendingWrites() + } + s.pendingSlotWriteSet[addr][slot].Append(index) +} + +func (s *MVStates) finaliseAccWrite(index int, addr common.Address, state AccountState) { + // append to pending write set + if _, exist := s.pendingAccWriteSet[addr]; !exist { + s.pendingAccWriteSet[addr] = make(map[AccountState]*PendingWrites) + } + if _, exist := s.pendingAccWriteSet[addr][state]; !exist { + s.pendingAccWriteSet[addr][state] = NewPendingWrites() + } + s.pendingAccWriteSet[addr][state].Append(index) +} + func (s *MVStates) queryAccWrites(addr common.Address, state AccountState) *PendingWrites { if _, exist := s.pendingAccWriteSet[addr]; !exist { return nil @@ -602,11 +651,11 @@ func (s *MVStates) querySlotWrites(addr common.Address, slot common.Hash) *Pendi // resolveDepsMapCacheByWrites must be executed in order func (s *MVStates) resolveDepsMapCacheByWrites(index int, rwSet *RWSet) { // analysis dep, if the previous transaction is not executed/validated, re-analysis is required - depMap := NewTxDepMap(0) if rwSet.excludedTx { - s.txDepCache[index] = depMap + s.txDepCache[index] = NewTxDep([]uint64{}, ExcludedTxFlag) return } + depMap := NewTxDepMap(0) // check tx 
dependency, only check key, skip version for addr, sub := range rwSet.accReadSet { for state := range sub { @@ -620,7 +669,11 @@ func (s *MVStates) resolveDepsMapCacheByWrites(index int, rwSet *RWSet) { } items := writes.FindPrevWrites(index) for _, item := range items { - depMap.add(uint64(item.TxIndex())) + tx := uint64(item) + if depMap.exist(tx) { + continue + } + depMap.add(tx) } } } @@ -632,7 +685,11 @@ func (s *MVStates) resolveDepsMapCacheByWrites(index int, rwSet *RWSet) { } items := writes.FindPrevWrites(index) for _, item := range items { - depMap.add(uint64(item.TxIndex())) + tx := uint64(item) + if depMap.exist(tx) { + continue + } + depMap.add(tx) } } } @@ -640,27 +697,27 @@ func (s *MVStates) resolveDepsMapCacheByWrites(index int, rwSet *RWSet) { // clear redundancy deps compared with prev preDeps := depMap.deps() for _, prev := range preDeps { - for _, tx := range s.txDepCache[int(prev)].deps() { + for _, tx := range s.txDepCache[int(prev)].TxIndexes { depMap.remove(tx) } } log.Debug("resolveDepsMapCacheByWrites after clean", "tx", index, "deps", depMap.deps()) - s.txDepCache[index] = depMap + s.txDepCache[index] = NewTxDep(depMap.deps()) } // resolveDepsCache must be executed in order func (s *MVStates) resolveDepsCache(index int, rwSet *RWSet) { // analysis dep, if the previous transaction is not executed/validated, re-analysis is required - depMap := NewTxDepMap(0) if rwSet.excludedTx { - s.txDepCache[index] = depMap + s.txDepCache[index] = NewTxDep([]uint64{}, ExcludedTxFlag) return } + depMap := NewTxDepMap(0) for prev := 0; prev < index; prev++ { // if there are some parallel execution or system txs, it will fulfill in advance // it's ok, and try re-generate later - prevSet, ok := s.rwSets[prev] - if !ok { + prevSet := s.rwSets[prev] + if prevSet == nil { continue } // if prev tx is tagged ExcludedTxFlag, just skip the check @@ -672,7 +729,7 @@ func (s *MVStates) resolveDepsCache(index int, rwSet *RWSet) { depMap.add(uint64(prev)) // clear redundancy deps compared with prev for _, dep := range depMap.deps() { - if s.txDepCache[prev].exist(dep) { + if slices.Contains(s.txDepCache[prev].TxIndexes, dep) { depMap.remove(dep) } } @@ -681,52 +738,35 @@ func (s *MVStates) resolveDepsCache(index int, rwSet *RWSet) { depMap.add(uint64(prev)) // clear redundancy deps compared with prev for _, dep := range depMap.deps() { - if s.txDepCache[prev].exist(dep) { + if slices.Contains(s.txDepCache[prev].TxIndexes, dep) { depMap.remove(dep) } } } } - s.txDepCache[index] = depMap + s.txDepCache[index] = NewTxDep(depMap.deps()) } // ResolveTxDAG generate TxDAG from RWSets -func (s *MVStates) ResolveTxDAG(txCnt int, gasFeeReceivers []common.Address) (TxDAG, error) { +func (s *MVStates) ResolveTxDAG(txCnt int) (TxDAG, error) { + s.BatchRecordHandle() + s.stopAsyncRecorder() + s.lock.Lock() defer s.lock.Unlock() - if len(s.rwSets) != txCnt { - return nil, fmt.Errorf("wrong rwSet count, expect: %v, actual: %v", txCnt, len(s.rwSets)) + if s.cannotGasFeeDelay { + return NewEmptyTxDAG(), nil } - s.stopAsyncGen() - // collect all rw sets, try to finalise them - for i := s.nextFinaliseIndex; i < txCnt; i++ { - if err := s.innerFinalise(i); err != nil { - return nil, err - } + s.finalisePreviousRWSet() + if s.nextFinaliseIndex != txCnt { + return nil, fmt.Errorf("cannot resolve with wrong FinaliseIndex, expect: %v, now: %v", txCnt, s.nextFinaliseIndex) } txDAG := NewPlainTxDAG(txCnt) for i := 0; i < txCnt; i++ { - // check if there are RW with gas fee receiver for gas delay calculation - for _, 
addr := range gasFeeReceivers { - if _, ok := s.rwSets[i].accReadSet[addr]; !ok { - continue - } - if _, ok := s.rwSets[i].accReadSet[addr][AccountSelf]; ok { - return NewEmptyTxDAG(), nil - } - } - txDAG.TxDeps[i].TxIndexes = []uint64{} - if s.txDepCache[i] == nil { - s.resolveDepsMapCacheByWrites(i, s.rwSets[i]) - } - if s.rwSets[i].excludedTx { - txDAG.TxDeps[i].SetFlag(ExcludedTxFlag) - continue - } - deps := s.txDepCache[i].deps() + deps := s.txDepCache[i].TxIndexes if len(deps) <= (txCnt-1)/2 { - txDAG.TxDeps[i].TxIndexes = deps + txDAG.TxDeps[i] = s.txDepCache[i] continue } // if tx deps larger than half of txs, then convert with NonDependentRelFlag @@ -741,7 +781,11 @@ func (s *MVStates) ResolveTxDAG(txCnt int, gasFeeReceivers []common.Address) (Tx return txDAG, nil } -func checkAccDependency(writeSet map[common.Address]map[AccountState]RWItem, readSet map[common.Address]map[AccountState]RWItem) bool { +func (s *MVStates) FeeReceivers() []common.Address { + return s.gasFeeReceivers +} + +func checkAccDependency(writeSet map[common.Address]map[AccountState]struct{}, readSet map[common.Address]map[AccountState]struct{}) bool { // check tx dependency, only check key, skip version for addr, sub := range writeSet { if _, ok := readSet[addr]; !ok { @@ -764,7 +808,7 @@ func checkAccDependency(writeSet map[common.Address]map[AccountState]RWItem, rea return false } -func checkSlotDependency(writeSet map[common.Address]map[common.Hash]RWItem, readSet map[common.Address]map[common.Hash]RWItem) bool { +func checkSlotDependency(writeSet map[common.Address]map[common.Hash]struct{}, readSet map[common.Address]map[common.Hash]struct{}) bool { // check tx dependency, only check key, skip version for addr, sub := range writeSet { if _, ok := readSet[addr]; !ok { @@ -786,6 +830,7 @@ type TxDepMaker interface { deps() []uint64 remove(index uint64) len() int + reset() } type TxDepMap struct { @@ -830,3 +875,42 @@ func (m *TxDepMap) remove(index uint64) { func (m *TxDepMap) len() int { return len(m.tm) } + +func (m *TxDepMap) reset() { + m.cache = nil + m.tm = make(map[uint64]struct{}) +} + +// isEqualRWVal compare state +func isEqualRWVal(accState *AccountState, src interface{}, compared interface{}) bool { + if accState != nil { + switch *accState { + case AccountBalance: + if src != nil && compared != nil { + return equalUint256(src.(*uint256.Int), compared.(*uint256.Int)) + } + return src == compared + case AccountNonce: + return src.(uint64) == compared.(uint64) + case AccountCodeHash: + if src != nil && compared != nil { + return slices.Equal(src.([]byte), compared.([]byte)) + } + return src == compared + } + return false + } + + if src != nil && compared != nil { + return src.(common.Hash) == compared.(common.Hash) + } + return src == compared +} + +func equalUint256(s, c *uint256.Int) bool { + if s != nil && c != nil { + return s.Eq(c) + } + + return s == c +} diff --git a/core/types/mvstates_test.go b/core/types/mvstates_test.go index 9737c7f9e0..f22ba40fe4 100644 --- a/core/types/mvstates_test.go +++ b/core/types/mvstates_test.go @@ -17,83 +17,16 @@ import ( ) const ( - mockRWSetSize = 5000 + mockRWSetSize = 10000 ) -func TestMVStates_BasicUsage(t *testing.T) { - ms := NewMVStates(0) - require.NoError(t, ms.FulfillRWSet(mockRWSetWithVal(0, []interface{}{AccountSelf, nil, AccountBalance, 0, "0x00", 0}, []interface{}{AccountBalance, 0, "0x00", 0}), nil)) - require.Nil(t, ms.ReadAccState(0, common.Address{}, AccountBalance)) - require.Nil(t, ms.ReadSlotState(0, common.Address{}, 
str2Slot("0x00"))) - require.NoError(t, ms.Finalise(0)) - - require.Nil(t, ms.ReadAccState(0, common.Address{}, AccountBalance)) - require.Nil(t, ms.ReadSlotState(0, common.Address{}, str2Slot("0x00"))) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 0}, 0), ms.ReadAccState(1, common.Address{}, AccountBalance)) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 0}, 0), ms.ReadSlotState(1, common.Address{}, str2Slot("0x00"))) - - require.NoError(t, ms.FulfillRWSet(mockRWSetWithVal(1, []interface{}{AccountSelf, nil, AccountBalance, 0, "0x01", 1}, []interface{}{AccountBalance, 1, "0x01", 1}), nil)) - require.Nil(t, ms.ReadSlotState(1, common.Address{}, str2Slot("0x01"))) - require.NoError(t, ms.Finalise(1)) - require.Nil(t, ms.ReadSlotState(0, common.Address{}, str2Slot("0x01"))) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 1}, 1), ms.ReadSlotState(2, common.Address{}, str2Slot("0x01"))) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 1}, 1), ms.ReadAccState(2, common.Address{}, AccountBalance)) - - require.NoError(t, ms.FulfillRWSet(mockRWSetWithVal(2, []interface{}{AccountSelf, nil, AccountBalance, 1, "0x02", 2, "0x01", 1}, []interface{}{AccountBalance, 2, "0x01", 2, "0x02", 2}), nil)) - require.NoError(t, ms.Finalise(2)) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 1}, 1), ms.ReadAccState(2, common.Address{}, AccountBalance)) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 1}, 1), ms.ReadSlotState(2, common.Address{}, str2Slot("0x01"))) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 2}, 2), ms.ReadAccState(3, common.Address{}, AccountBalance)) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 2}, 2), ms.ReadSlotState(3, common.Address{}, str2Slot("0x01"))) - - require.NoError(t, ms.FulfillRWSet(mockRWSetWithVal(3, []interface{}{AccountSelf, nil, AccountBalance, 2, "0x03", 3, "0x00", 0, "0x01", 2}, []interface{}{AccountBalance, 3, "0x00", 3, "0x01", 3, "0x03", 3}), nil)) - require.Nil(t, ms.ReadSlotState(3, common.Address{}, str2Slot("0x03"))) - require.NoError(t, ms.Finalise(3)) - require.Nil(t, ms.ReadSlotState(0, common.Address{}, str2Slot("0x01"))) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 1}, 1), ms.ReadSlotState(2, common.Address{}, str2Slot("0x01"))) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 1}, 1), ms.ReadAccState(2, common.Address{}, AccountBalance)) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 2}, 2), ms.ReadSlotState(3, common.Address{}, str2Slot("0x01"))) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 2}, 2), ms.ReadAccState(3, common.Address{}, AccountBalance)) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 3}, 3), ms.ReadSlotState(4, common.Address{}, str2Slot("0x01"))) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 3}, 3), ms.ReadAccState(4, common.Address{}, AccountBalance)) - require.Nil(t, ms.ReadSlotState(0, common.Address{}, str2Slot("0x00"))) - require.Nil(t, ms.ReadAccState(0, common.Address{}, AccountBalance)) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 3}, 3), ms.ReadSlotState(5, common.Address{}, str2Slot("0x00"))) - require.Equal(t, NewRWItem(StateVersion{TxIndex: 3}, 3), ms.ReadAccState(5, common.Address{}, AccountBalance)) -} - func TestMVStates_SimpleResolveTxDAG(t *testing.T) { - ms := NewMVStates(10) - finaliseRWSets(t, ms, []*RWSet{ - mockRWSet(0, []interface{}{"0x00"}, []interface{}{"0x00"}), - mockRWSet(1, []interface{}{"0x01"}, []interface{}{"0x01"}), - mockRWSet(2, []interface{}{"0x02"}, []interface{}{"0x02"}), - mockRWSet(3, []interface{}{"0x00", "0x03"}, []interface{}{"0x03"}), - 
mockRWSet(4, []interface{}{"0x00", "0x04"}, []interface{}{"0x04"}), - mockRWSet(5, []interface{}{"0x01", "0x02", "0x05"}, []interface{}{"0x05"}), - mockRWSet(6, []interface{}{"0x02", "0x05", "0x06"}, []interface{}{"0x06"}), - mockRWSet(7, []interface{}{"0x06", "0x07"}, []interface{}{"0x07"}), - mockRWSet(8, []interface{}{"0x08"}, []interface{}{"0x08"}), - mockRWSet(9, []interface{}{"0x08", "0x09"}, []interface{}{"0x09"}), - }) - - dag, err := ms.ResolveTxDAG(10, nil) - require.NoError(t, err) - require.Equal(t, mockSimpleDAG(), dag) - t.Log(dag) -} - -func TestMVStates_AsyncDepGen_SimpleResolveTxDAG(t *testing.T) { - ms := NewMVStates(10).EnableAsyncGen() + ms := NewMVStates(10, nil).EnableAsyncGen() finaliseRWSets(t, ms, []*RWSet{ mockRWSet(0, []interface{}{"0x00"}, []interface{}{"0x00"}), mockRWSet(1, []interface{}{"0x01"}, []interface{}{"0x01"}), mockRWSet(2, []interface{}{"0x02"}, []interface{}{"0x02"}), - mockRWSet(3, []interface{}{"0x03"}, []interface{}{"0x03"}), - mockRWSet(3, []interface{}{"0x03"}, []interface{}{"0x03"}), mockRWSet(3, []interface{}{"0x00", "0x03"}, []interface{}{"0x03"}), - }) - finaliseRWSets(t, ms, []*RWSet{ mockRWSet(4, []interface{}{"0x00", "0x04"}, []interface{}{"0x04"}), mockRWSet(5, []interface{}{"0x01", "0x02", "0x05"}, []interface{}{"0x05"}), mockRWSet(6, []interface{}{"0x02", "0x05", "0x06"}, []interface{}{"0x06"}), @@ -101,41 +34,24 @@ func TestMVStates_AsyncDepGen_SimpleResolveTxDAG(t *testing.T) { mockRWSet(8, []interface{}{"0x08"}, []interface{}{"0x08"}), mockRWSet(9, []interface{}{"0x08", "0x09"}, []interface{}{"0x09"}), }) - time.Sleep(10 * time.Millisecond) - dag, err := ms.ResolveTxDAG(10, nil) + dag, err := ms.ResolveTxDAG(10) require.NoError(t, err) - time.Sleep(100 * time.Millisecond) - require.NoError(t, ms.Stop()) require.Equal(t, mockSimpleDAG(), dag) t.Log(dag) } -func TestMVStates_ResolveTxDAG_Async(t *testing.T) { - txCnt := 10000 - rwSets := mockRandomRWSet(txCnt) - ms1 := NewMVStates(txCnt).EnableAsyncGen() - for i := 0; i < txCnt; i++ { - require.NoError(t, ms1.FulfillRWSet(rwSets[i], nil)) - require.NoError(t, ms1.Finalise(i)) - } - time.Sleep(100 * time.Millisecond) - _, err := ms1.ResolveTxDAG(txCnt, nil) - require.NoError(t, err) -} - func TestMVStates_ResolveTxDAG_Compare(t *testing.T) { txCnt := 3000 rwSets := mockRandomRWSet(txCnt) - ms1 := NewMVStates(txCnt) - ms2 := NewMVStates(txCnt) + ms1 := NewMVStates(txCnt, nil).EnableAsyncGen() + ms2 := NewMVStates(txCnt, nil).EnableAsyncGen() for i, rwSet := range rwSets { ms1.rwSets[i] = rwSet - ms2.rwSets[i] = rwSet - require.NoError(t, ms2.Finalise(i)) + require.NoError(t, ms2.FinaliseWithRWSet(rwSet)) } - d1 := resolveTxDAGInMVStates(ms1) + d1 := resolveTxDAGInMVStates(ms1, txCnt) d2 := resolveDepsMapCacheByWritesInMVStates(ms2) require.Equal(t, d1.(*PlainTxDAG).String(), d2.(*PlainTxDAG).String()) } @@ -143,10 +59,9 @@ func TestMVStates_ResolveTxDAG_Compare(t *testing.T) { func TestMVStates_TxDAG_Compression(t *testing.T) { txCnt := 10000 rwSets := mockRandomRWSet(txCnt) - ms1 := NewMVStates(txCnt) - for i, rwSet := range rwSets { - ms1.rwSets[i] = rwSet - ms1.Finalise(i) + ms1 := NewMVStates(txCnt, nil).EnableAsyncGen() + for _, rwSet := range rwSets { + ms1.FinaliseWithRWSet(rwSet) } dag := resolveDepsMapCacheByWritesInMVStates(ms1) enc, err := EncodeTxDAG(dag) @@ -175,22 +90,21 @@ func TestMVStates_TxDAG_Compression(t *testing.T) { func BenchmarkResolveTxDAGInMVStates(b *testing.B) { rwSets := mockRandomRWSet(mockRWSetSize) - ms1 := NewMVStates(mockRWSetSize) + ms1 := 
NewMVStates(mockRWSetSize, nil).EnableAsyncGen() for i, rwSet := range rwSets { ms1.rwSets[i] = rwSet } b.ResetTimer() for i := 0; i < b.N; i++ { - resolveTxDAGInMVStates(ms1) + resolveTxDAGInMVStates(ms1, mockRWSetSize) } } func BenchmarkResolveTxDAGByWritesInMVStates(b *testing.B) { rwSets := mockRandomRWSet(mockRWSetSize) - ms1 := NewMVStates(mockRWSetSize) - for i, rwSet := range rwSets { - ms1.rwSets[i] = rwSet - ms1.innerFinalise(i) + ms1 := NewMVStates(mockRWSetSize, nil).EnableAsyncGen() + for _, rwSet := range rwSets { + ms1.FinaliseWithRWSet(rwSet) } b.ResetTimer() for i := 0; i < b.N; i++ { @@ -200,36 +114,36 @@ func BenchmarkResolveTxDAGByWritesInMVStates(b *testing.B) { func BenchmarkMVStates_Finalise(b *testing.B) { rwSets := mockRandomRWSet(mockRWSetSize) - ms1 := NewMVStates(mockRWSetSize) + ms1 := NewMVStates(mockRWSetSize, nil).EnableAsyncGen() b.ResetTimer() for i := 0; i < b.N; i++ { - for k, rwSet := range rwSets { - ms1.rwSets[k] = rwSet - ms1.innerFinalise(k) + for _, rwSet := range rwSets { + ms1.FinaliseWithRWSet(rwSet) } } } -func resolveTxDAGInMVStates(s *MVStates) TxDAG { - txDAG := NewPlainTxDAG(len(s.rwSets)) - for i := 0; i < len(s.rwSets); i++ { +func resolveTxDAGInMVStates(s *MVStates, txCnt int) TxDAG { + txDAG := NewPlainTxDAG(txCnt) + for i := 0; i < txCnt; i++ { s.resolveDepsCache(i, s.rwSets[i]) - txDAG.TxDeps[i].TxIndexes = s.txDepCache[i].deps() + txDAG.TxDeps[i] = s.txDepCache[i] } return txDAG } func resolveDepsMapCacheByWritesInMVStates(s *MVStates) TxDAG { - txDAG := NewPlainTxDAG(len(s.rwSets)) - for i := 0; i < len(s.rwSets); i++ { + txCnt := s.nextFinaliseIndex + txDAG := NewPlainTxDAG(txCnt) + for i := 0; i < txCnt; i++ { s.resolveDepsMapCacheByWrites(i, s.rwSets[i]) - txDAG.TxDeps[i].TxIndexes = s.txDepCache[i].deps() + txDAG.TxDeps[i] = s.txDepCache[i] } return txDAG } func TestMVStates_SystemTxResolveTxDAG(t *testing.T) { - ms := NewMVStates(12) + ms := NewMVStates(12, nil).EnableAsyncGen() finaliseRWSets(t, ms, []*RWSet{ mockRWSet(0, []interface{}{"0x00"}, []interface{}{"0x00"}), mockRWSet(1, []interface{}{"0x01"}, []interface{}{"0x01"}), @@ -245,14 +159,14 @@ func TestMVStates_SystemTxResolveTxDAG(t *testing.T) { mockRWSet(11, []interface{}{"0x11"}, []interface{}{"0x11"}).WithExcludedTxFlag(), }) - dag, err := ms.ResolveTxDAG(12, nil) + dag, err := ms.ResolveTxDAG(12) require.NoError(t, err) require.Equal(t, mockSystemTxDAG(), dag) t.Log(dag) } func TestMVStates_SystemTxWithLargeDepsResolveTxDAG(t *testing.T) { - ms := NewMVStates(12) + ms := NewMVStates(12, nil).EnableAsyncGen() finaliseRWSets(t, ms, []*RWSet{ mockRWSet(0, []interface{}{"0x00"}, []interface{}{"0x00"}), mockRWSet(1, []interface{}{"0x01"}, []interface{}{"0x01"}), @@ -267,7 +181,7 @@ func TestMVStates_SystemTxWithLargeDepsResolveTxDAG(t *testing.T) { mockRWSet(10, []interface{}{"0x10"}, []interface{}{"0x10"}).WithExcludedTxFlag(), mockRWSet(11, []interface{}{"0x11"}, []interface{}{"0x11"}).WithExcludedTxFlag(), }) - dag, err := ms.ResolveTxDAG(12, nil) + dag, err := ms.ResolveTxDAG(12) require.NoError(t, err) require.Equal(t, mockSystemTxDAGWithLargeDeps(), dag) t.Log(dag) @@ -371,107 +285,65 @@ func TestIsEqualRWVal(t *testing.T) { } } -func mockRWSet(index int, read []interface{}, write []interface{}) *RWSet { - ver := StateVersion{ - TxIndex: index, +func TestTxRecorder_Basic(t *testing.T) { + sets := []*RWSet{ + mockRWSet(0, []interface{}{AccountSelf, AccountBalance, "0x00"}, + []interface{}{AccountBalance, AccountCodeHash, "0x00"}), + mockRWSet(1, 
[]interface{}{AccountSelf, AccountBalance, "0x01"}, + []interface{}{AccountBalance, AccountCodeHash, "0x01"}), + mockRWSet(2, []interface{}{AccountSelf, AccountBalance, "0x01", "0x01"}, + []interface{}{AccountBalance, AccountCodeHash, "0x01"}), } - set := NewRWSet(ver) - set.accReadSet[common.Address{}] = map[AccountState]RWItem{} - set.accWriteSet[common.Address{}] = map[AccountState]RWItem{} - set.slotReadSet[common.Address{}] = map[common.Hash]RWItem{} - set.slotWriteSet[common.Address{}] = map[common.Hash]RWItem{} - for _, k := range read { - state, ok := k.(AccountState) - if ok { - set.accReadSet[common.Address{}][state] = RWItem{ - Ver: ver, - Val: struct{}{}, + ms := NewMVStates(0, nil).EnableAsyncGen() + for _, item := range sets { + ms.RecordNewTx(item.index) + for addr, sub := range item.accReadSet { + for state := range sub { + ms.RecordAccountRead(addr, state) } - } else { - set.slotReadSet[common.Address{}][str2Slot(k.(string))] = RWItem{ - Ver: ver, - Val: struct{}{}, + } + for addr, sub := range item.slotReadSet { + for slot := range sub { + ms.RecordStorageRead(addr, slot) } } - } - for _, k := range write { - state, ok := k.(AccountState) - if ok { - set.accWriteSet[common.Address{}][state] = RWItem{ - Ver: ver, - Val: struct{}{}, + for addr, sub := range item.accWriteSet { + for state := range sub { + ms.RecordAccountWrite(addr, state) } - } else { - set.slotWriteSet[common.Address{}][str2Slot(k.(string))] = RWItem{ - Ver: ver, - Val: struct{}{}, + } + for addr, sub := range item.slotWriteSet { + for slot := range sub { + ms.RecordStorageWrite(addr, slot) } } } - - return set -} - -func mockUintSlice(cnt int) []uint64 { - ret := make([]uint64, cnt) - for i := 0; i < cnt; i++ { - ret[i] = rand.Uint64() % uint64(cnt) - } - return ret + dag, err := ms.ResolveTxDAG(3) + require.NoError(t, err) + t.Log(dag) } -func mockRWSetWithVal(index int, read []interface{}, write []interface{}) *RWSet { - ver := StateVersion{ - TxIndex: index, - } - set := NewRWSet(ver) - - if len(read)%2 != 0 { - panic("wrong read size") - } - if len(write)%2 != 0 { - panic("wrong write size") - } - - set.accReadSet[common.Address{}] = map[AccountState]RWItem{} - set.slotReadSet[common.Address{}] = map[common.Hash]RWItem{} - set.accWriteSet[common.Address{}] = map[AccountState]RWItem{} - set.slotWriteSet[common.Address{}] = map[common.Hash]RWItem{} - for i := 0; i < len(read); { - state, ok := read[i].(AccountState) +func mockRWSet(index int, read []interface{}, write []interface{}) *RWSet { + set := NewRWSet(index) + set.accReadSet[common.Address{}] = map[AccountState]struct{}{} + set.accWriteSet[common.Address{}] = map[AccountState]struct{}{} + set.slotReadSet[common.Address{}] = map[common.Hash]struct{}{} + set.slotWriteSet[common.Address{}] = map[common.Hash]struct{}{} + for _, k := range read { + state, ok := k.(AccountState) if ok { - set.accReadSet[common.Address{}][state] = RWItem{ - Ver: StateVersion{ - TxIndex: index - 1, - }, - Val: read[i+1], - } + set.accReadSet[common.Address{}][state] = struct{}{} } else { - slot := str2Slot(read[i].(string)) - set.slotReadSet[common.Address{}][slot] = RWItem{ - Ver: StateVersion{ - TxIndex: index - 1, - }, - Val: read[i+1], - } + set.slotReadSet[common.Address{}][str2Slot(k.(string))] = struct{}{} } - i += 2 } - for i := 0; i < len(write); { - state, ok := write[i].(AccountState) + for _, k := range write { + state, ok := k.(AccountState) if ok { - set.accWriteSet[common.Address{}][state] = RWItem{ - Ver: ver, - Val: write[i+1], - } + 
set.accWriteSet[common.Address{}][state] = struct{}{} } else { - slot := str2Slot(write[i].(string)) - set.slotWriteSet[common.Address{}][slot] = RWItem{ - Ver: ver, - Val: write[i+1], - } + set.slotWriteSet[common.Address{}][str2Slot(k.(string))] = struct{}{} } - i += 2 } return set @@ -514,8 +386,7 @@ func mockRandomRWSet(count int) []*RWSet { func finaliseRWSets(t *testing.T, mv *MVStates, rwSets []*RWSet) { for _, rwSet := range rwSets { - require.NoError(t, mv.FulfillRWSet(rwSet, nil)) - require.NoError(t, mv.Finalise(rwSet.ver.TxIndex)) + require.NoError(t, mv.FinaliseWithRWSet(rwSet)) } } diff --git a/core/vm/interface.go b/core/vm/interface.go index 49de25e803..d37e8bfe65 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -81,10 +81,7 @@ type StateDB interface { AddPreimage(common.Hash, []byte) TxIndex() int - - // parallel DAG related - BeforeTxTransition() - FinaliseRWSet() error + CheckFeeReceiversRWSet() error } // CallContext provides a basic interface for the EVM calling conventions. The EVM diff --git a/go.mod b/go.mod index 73768e2685..b46d2b8255 100644 --- a/go.mod +++ b/go.mod @@ -110,6 +110,7 @@ require ( github.com/dgraph-io/ristretto v0.0.4-0.20210318174700-74754f61e018 // indirect github.com/dlclark/regexp2 v1.7.0 // indirect github.com/dustin/go-humanize v1.0.0 // indirect + github.com/emirpasic/gods v1.18.1 // indirect github.com/ferranbt/fastssz v0.0.0-20210905181407-59cf6761a7d5 // indirect github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect diff --git a/go.sum b/go.sum index 0671fa8e41..c7854ffc84 100644 --- a/go.sum +++ b/go.sum @@ -350,6 +350,8 @@ github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZi github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/dot v0.11.0/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= diff --git a/miner/worker.go b/miner/worker.go index 35d252b1de..6c68b13d65 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1053,7 +1053,7 @@ func (w *worker) appendTxDAG(env *environment) { return } // TODO this is a placeholder for the tx DAG data that will be generated by the stateDB - txForDAG, err := w.generateDAGTx(env.state, env.signer, env.tcount, env.coinbase, env.gasForTxDAG) + txForDAG, err := w.generateDAGTx(env.state, env.signer, env.tcount, env.gasForTxDAG) if err != nil { log.Warn("failed to generate DAG tx", "err", err) return @@ -1067,7 +1067,7 @@ func (w *worker) appendTxDAG(env *environment) { } // generateDAGTx generates a DAG transaction for the block -func (w *worker) generateDAGTx(statedb *state.StateDB, signer types.Signer, txIndex int, coinbase common.Address, gasLimitForDag uint64) (*types.Transaction, error) { +func (w *worker) generateDAGTx(statedb *state.StateDB, signer types.Signer, txIndex int, 
gasLimitForDag uint64) (*types.Transaction, error) { if statedb == nil { return nil, fmt.Errorf("failed to get state db, env.state=nil") } @@ -1083,14 +1083,14 @@ func (w *worker) generateDAGTx(statedb *state.StateDB, signer types.Signer, txIn } // get txDAG data from the stateDB - txDAG, err := statedb.ResolveTxDAG(txIndex, []common.Address{coinbase, params.OptimismBaseFeeRecipient, params.OptimismL1FeeRecipient}) + txDAG, err := statedb.ResolveTxDAG(txIndex) if txDAG == nil { return nil, err } // txIndex is the index of this txDAG transaction txDAG.SetTxDep(txIndex, types.TxDep{Flags: &types.NonDependentRelFlag}) if metrics.EnabledExpensive { - go types.EvaluateTxDAGPerformance(txDAG, statedb.ResolveStats()) + go types.EvaluateTxDAGPerformance(txDAG) } publicKey := sender.Public() @@ -1390,19 +1390,18 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { start := time.Now() if w.chain.TxDAGEnabledWhenMine() { - work.state.ResetMVStates(0) + feeReceivers := []common.Address{work.coinbase, params.OptimismBaseFeeRecipient, params.OptimismL1FeeRecipient} + work.state.ResetMVStates(0, feeReceivers) log.Debug("ResetMVStates", "block", work.header.Number.Uint64()) } for _, tx := range genParams.txs { from, _ := types.Sender(work.signer, tx) work.state.SetTxContext(tx.Hash(), work.tcount) + work.state.BeginTxRecorder(work.tcount, tx.IsSystemTx() || tx.IsDepositTx()) _, err := w.commitTransaction(work, tx) if err != nil { return &newPayloadResult{err: fmt.Errorf("failed to force-include tx: %s type: %d sender: %s nonce: %d, err: %w", tx.Hash(), tx.Type(), from, tx.Nonce(), err)} } - if tx.IsSystemTx() || tx.IsDepositTx() { - work.state.RecordSystemTxRWSet(work.tcount) - } work.tcount++ } commitDepositTxsTimer.UpdateSince(start) diff --git a/tests/block_test.go b/tests/block_test.go index fb355085fd..a650ca07f3 100644 --- a/tests/block_test.go +++ b/tests/block_test.go @@ -73,20 +73,72 @@ func TestExecutionSpecBlocktests(t *testing.T) { }) } +func TestBlockchainWithTxDAG(t *testing.T) { + //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelDebug, true))) + bt := new(testMatcher) + // General state tests are 'exported' as blockchain tests, but we can run them natively. + // For speedier CI-runs, the line below can be uncommented, so those are skipped. 
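For context on the block-test changes below: the new boolean threaded through BlockTest.Run is what calls chain.SetupTxDAGGeneration() before the test blocks are inserted, so the processor resolves and logs a TxDAG for each imported block. A hedged usage sketch, assuming it lives in the tests package next to the code below; the helper name is illustrative and not part of the patch.

// runBlockTestWithTxDAG is illustrative only: it shows how a caller opts into
// TxDAG generation through the new fifth argument of BlockTest.Run.
func runBlockTestWithTxDAG(t *testing.T, test *BlockTest) {
	if err := test.Run(true, rawdb.PathScheme, nil, true, nil); err != nil {
		t.Errorf("block test in path mode with TxDAG generation failed: %v", err)
	}
}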
+ // For now, in hardfork-times (Berlin), we run the tests both as StateTests and + // as blockchain tests, since the latter also covers things like receipt root + bt.skipLoad(`^GeneralStateTests/`) + + // Skip random failures due to selfish mining test + bt.skipLoad(`.*bcForgedTest/bcForkUncle\.json`) + + // Slow tests + bt.slow(`.*bcExploitTest/DelegateCallSpam.json`) + bt.slow(`.*bcExploitTest/ShanghaiLove.json`) + bt.slow(`.*bcExploitTest/SuicideIssue.json`) + bt.slow(`.*/bcForkStressTest/`) + bt.slow(`.*/bcGasPricerTest/RPC_API_Test.json`) + bt.slow(`.*/bcWalletTest/`) + + // Very slow test + bt.skipLoad(`.*/stTimeConsuming/.*`) + // test takes a lot for time and goes easily OOM because of sha3 calculation on a huge range, + // using 4.6 TGas + bt.skipLoad(`.*randomStatetest94.json.*`) + + bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) { + if runtime.GOARCH == "386" && runtime.GOOS == "windows" && rand.Int63()%2 == 0 { + t.Skip("test (randomly) skipped on 32-bit windows") + } + if err := bt.checkFailure(t, test.Run(true, rawdb.PathScheme, nil, true, nil)); err != nil { + t.Errorf("test in path mode with snapshotter failed: %v", err) + return + } + }) + + //bt := new(testMatcher) + //path := filepath.Join(blockTestDir, "ValidBlocks", "bcStatetests", "refundReset.json") + //_, name := filepath.Split(path) + //t.Run(name, func(t *testing.T) { + // bt.runTestFile(t, path, name, func(t *testing.T, name string, test *BlockTest) { + // if runtime.GOARCH == "386" && runtime.GOOS == "windows" && rand.Int63()%2 == 0 { + // t.Skip("test (randomly) skipped on 32-bit windows") + // } + // if err := bt.checkFailure(t, test.Run(true, rawdb.PathScheme, nil, true, nil)); err != nil { + // t.Errorf("test in path mode with snapshotter failed: %v", err) + // return + // } + // }) + //}) +} + func execBlockTest(t *testing.T, bt *testMatcher, test *BlockTest) { - if err := bt.checkFailure(t, test.Run(false, rawdb.HashScheme, nil, nil)); err != nil { + if err := bt.checkFailure(t, test.Run(false, rawdb.HashScheme, nil, false, nil)); err != nil { t.Errorf("test in hash mode without snapshotter failed: %v", err) return } - if err := bt.checkFailure(t, test.Run(true, rawdb.HashScheme, nil, nil)); err != nil { + if err := bt.checkFailure(t, test.Run(true, rawdb.HashScheme, nil, false, nil)); err != nil { t.Errorf("test in hash mode with snapshotter failed: %v", err) return } - if err := bt.checkFailure(t, test.Run(false, rawdb.PathScheme, nil, nil)); err != nil { + if err := bt.checkFailure(t, test.Run(false, rawdb.PathScheme, nil, false, nil)); err != nil { t.Errorf("test in path mode without snapshotter failed: %v", err) return } - if err := bt.checkFailure(t, test.Run(true, rawdb.PathScheme, nil, nil)); err != nil { + if err := bt.checkFailure(t, test.Run(true, rawdb.PathScheme, nil, false, nil)); err != nil { t.Errorf("test in path mode with snapshotter failed: %v", err) return } diff --git a/tests/block_test_util.go b/tests/block_test_util.go index e8ca68dee0..252a2cdd09 100644 --- a/tests/block_test_util.go +++ b/tests/block_test_util.go @@ -109,7 +109,7 @@ type btHeaderMarshaling struct { ExcessBlobGas *math.HexOrDecimal64 } -func (t *BlockTest) Run(snapshotter bool, scheme string, tracer vm.EVMLogger, postCheck func(error, *core.BlockChain)) (result error) { +func (t *BlockTest) Run(snapshotter bool, scheme string, tracer vm.EVMLogger, enableTxDAG bool, postCheck func(error, *core.BlockChain)) (result error) { config, ok := Forks[t.json.Network] if !ok { return 
UnsupportedForkError{t.json.Network} @@ -159,6 +159,9 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, tracer vm.EVMLogger, po return err } defer chain.Stop() + if enableTxDAG { + chain.SetupTxDAGGeneration() + } validBlocks, err := t.insertBlocks(chain) if err != nil { From 39100e747f5e13c20477864ae0aeb4cb2e336c80 Mon Sep 17 00:00:00 2001 From: galaio Date: Wed, 4 Sep 2024 20:52:16 +0800 Subject: [PATCH 21/42] txdag: adaptor txdag for mining; --- core/blockchain.go | 2 +- core/state/statedb.go | 6 +++--- core/state_processor.go | 2 +- core/types/mvstates.go | 6 ++---- miner/worker.go | 1 - 5 files changed, 7 insertions(+), 10 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 6b5c3320e3..db1c979494 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1988,7 +1988,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) dag, err := statedb.ResolveTxDAG(len(block.Transactions())) if err == nil { // TODO(galaio): check TxDAG correctness? - log.Debug("Process TxDAG result", "block", block.NumberU64(), "txDAG", dag) + log.Debug("Process TxDAG result", "block", block.NumberU64(), "tx", len(block.Transactions()), "txDAG", dag) if metrics.EnabledExpensive { go types.EvaluateTxDAGPerformance(dag) } diff --git a/core/state/statedb.go b/core/state/statedb.go index ada0e9311d..415e712a86 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1703,18 +1703,18 @@ func (s *StateDB) GetSnap() snapshot.Snapshot { return s.snap } -func (s *StateDB) BeginTxRecorder(index int, isExcludeTx bool) { +func (s *StateDB) BeginTxRecorder(isExcludeTx bool) { if s.mvStates == nil { return } if isExcludeTx { - rwSet := types.NewRWSet(index).WithExcludedTxFlag() + rwSet := types.NewRWSet(s.txIndex).WithExcludedTxFlag() if err := s.mvStates.FinaliseWithRWSet(rwSet); err != nil { log.Error("MVStates SystemTx Finalise err", "err", err) } return } - s.mvStates.RecordNewTx(index) + s.mvStates.RecordNewTx(s.txIndex) } func (s *StateDB) ResetMVStates(txCount int, feeReceivers []common.Address) *types.MVStates { diff --git a/core/state_processor.go b/core/state_processor.go index 7d3d26ba2d..171440cfd5 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -96,7 +96,6 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg } // Iterate over and process the individual transactions for i, tx := range block.Transactions() { - statedb.BeginTxRecorder(i, tx.IsSystemTx() || tx.IsDepositTx()) start := time.Now() msg, err := TransactionToMessage(tx, signer, header.BaseFee) if err != nil { @@ -130,6 +129,7 @@ func applyTransaction(msg *Message, config *params.ChainConfig, gp *GasPool, sta // Create a new context to be used in the EVM environment. 
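To keep the recording contract in one place, the sketch below condenses how the per-transaction recorder is meant to be driven across this series. It is illustrative only and assumes it sits next to applyTransaction in package core, so the state and types imports are already in scope; the processBlock helper is not part of the patch, and BeginTxRecorder is the name at this point in the series (a later commit renames it to StartTxRecorder).

// processBlock is a hypothetical driver showing the intended call order.
func processBlock(statedb *state.StateDB, txs types.Transactions, feeReceivers []common.Address) (types.TxDAG, error) {
	// Fresh recorder for the block; fee receivers are remembered so reads of
	// their accounts can veto delayed gas-fee calculation later on.
	statedb.ResetMVStates(len(txs), feeReceivers)
	for i, tx := range txs {
		statedb.SetTxContext(tx.Hash(), i)
		// System and deposit txs are recorded as "excluded": they are finalised
		// immediately with an empty RWSet carrying ExcludedTxFlag instead of
		// being tracked read by read.
		statedb.BeginTxRecorder(tx.IsSystemTx() || tx.IsDepositTx())
		// ... EVM execution happens here; StateDB accesses push read/write
		// events into the MVStates cache as a side effect ...
	}
	// One DAG per block, resolved once every transaction has been recorded.
	return statedb.ResolveTxDAG(len(txs))
}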
txContext := NewEVMTxContext(msg) evm.Reset(txContext, statedb) + statedb.BeginTxRecorder(tx.IsSystemTx() || tx.IsDepositTx()) nonce := tx.Nonce() if msg.IsDepositTx && config.IsOptimismRegolith(evm.Context.Time) { diff --git a/core/types/mvstates.go b/core/types/mvstates.go index aa318ac95f..fff029d169 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -23,7 +23,7 @@ var ( ) const ( - initRWEventCacheSize = 2 + initRWEventCacheSize = 4 ) func init() { @@ -314,6 +314,7 @@ func (s *MVStates) Copy() *MVStates { s.lock.Lock() defer s.lock.Unlock() ns := NewMVStates(len(s.rwSets), s.gasFeeReceivers) + ns.cannotGasFeeDelay = s.cannotGasFeeDelay ns.nextFinaliseIndex = s.nextFinaliseIndex for k, v := range s.txDepCache { ns.txDepCache[k] = v @@ -356,9 +357,6 @@ func (s *MVStates) asyncRWEventLoop() { case <-timeout: log.Warn("asyncRWEventLoop timeout") return - case <-timeout: - log.Warn("asyncDepGenLoop exit by timeout") - return } } } diff --git a/miner/worker.go b/miner/worker.go index 6c68b13d65..3a94758861 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1397,7 +1397,6 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { for _, tx := range genParams.txs { from, _ := types.Sender(work.signer, tx) work.state.SetTxContext(tx.Hash(), work.tcount) - work.state.BeginTxRecorder(work.tcount, tx.IsSystemTx() || tx.IsDepositTx()) _, err := w.commitTransaction(work, tx) if err != nil { return &newPayloadResult{err: fmt.Errorf("failed to force-include tx: %s type: %d sender: %s nonce: %d, err: %w", tx.Hash(), tx.Type(), from, tx.Nonce(), err)} From e483990df416ff48b77ee5728e400ff3ec47e291 Mon Sep 17 00:00:00 2001 From: galaio Date: Wed, 4 Sep 2024 22:06:31 +0800 Subject: [PATCH 22/42] txdag: clean codes; --- core/state/statedb.go | 15 +++-- core/state_transition.go | 6 +- core/types/mvstates.go | 112 +++++++++--------------------------- core/types/mvstates_test.go | 99 ------------------------------- core/vm/interface.go | 4 +- 5 files changed, 40 insertions(+), 196 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index 415e712a86..8c9d243764 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -947,6 +947,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { s.mvStates.RecordAccountWrite(addr, types.AccountSuicide) } } + s.stateObjectsDestructDirty = make(map[common.Address]*types.StateAccount) for addr := range s.journal.dirties { obj, exist := s.stateObjects[addr] if !exist { @@ -992,7 +993,9 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { // the commit-phase will be a lot faster addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure } - + if s.mvStates != nil { + s.mvStates.RecordWriteDone() + } if s.prefetcher != nil && len(addressesToPrefetch) > 0 { s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch) } @@ -1707,6 +1710,7 @@ func (s *StateDB) BeginTxRecorder(isExcludeTx bool) { if s.mvStates == nil { return } + log.Debug("BeginTxRecorder", "tx", s.txIndex) if isExcludeTx { rwSet := types.NewRWSet(s.txIndex).WithExcludedTxFlag() if err := s.mvStates.FinaliseWithRWSet(rwSet); err != nil { @@ -1722,9 +1726,9 @@ func (s *StateDB) ResetMVStates(txCount int, feeReceivers []common.Address) *typ return s.mvStates } -func (s *StateDB) CheckFeeReceiversRWSet() error { +func (s *StateDB) CheckFeeReceiversRWSet() { if s.mvStates == nil { - return nil + return } if metrics.EnabledExpensive { defer func(start time.Time) { @@ 
-1738,7 +1742,7 @@ func (s *StateDB) CheckFeeReceiversRWSet() error { continue } s.mvStates.RecordCannotDelayGasFee() - return nil + return } for _, addr := range feeReceivers { @@ -1746,9 +1750,8 @@ func (s *StateDB) CheckFeeReceiversRWSet() error { continue } s.mvStates.RecordCannotDelayGasFee() - return nil + return } - return nil } func (s *StateDB) getStateObjectsDestruct(addr common.Address) (*types.StateAccount, bool) { diff --git a/core/state_transition.go b/core/state_transition.go index a0843e3eba..a5a8f184ae 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -27,7 +27,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto/kzg4844" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/holiman/uint256" ) @@ -558,9 +557,8 @@ func (st *StateTransition) innerTransitionDb() (*ExecutionResult, error) { } // check fee receiver rwSet here - if ferr := st.state.CheckFeeReceiversRWSet(); ferr != nil { - log.Error("CheckFeeReceiversRWSet err", "block", st.evm.Context.BlockNumber, "tx", st.evm.StateDB.TxIndex(), "err", ferr) - } + st.state.CheckFeeReceiversRWSet() + effectiveTip := msg.GasPrice if rules.IsLondon { effectiveTip = cmath.BigMin(msg.GasTipCap, new(big.Int).Sub(msg.GasFeeCap, st.evm.Context.BaseFee)) diff --git a/core/types/mvstates.go b/core/types/mvstates.go index fff029d169..e7d13cc072 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -8,7 +8,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/holiman/uint256" "golang.org/x/exp/slices" ) @@ -56,10 +55,6 @@ func NewRWSet(index int) *RWSet { } } -func (s *RWSet) Index() int { - return s.index -} - func (s *RWSet) RecordAccountRead(addr common.Address, state AccountState) { // only record the first read version sub, ok := s.accReadSet[addr] @@ -105,26 +100,6 @@ func (s *RWSet) RecordStorageWrite(addr common.Address, slot common.Hash) { s.slotWriteSet[addr][slot] = struct{}{} } -func (s *RWSet) queryAccReadItem(addr common.Address, state AccountState) bool { - sub, ok := s.accReadSet[addr] - if !ok { - return false - } - - _, ok = sub[state] - return ok -} - -func (s *RWSet) querySlotReadItem(addr common.Address, slot common.Hash) bool { - sub, ok := s.slotReadSet[addr] - if !ok { - return false - } - - _, ok = sub[slot] - return ok -} - func (s *RWSet) ReadSet() (map[common.Address]map[AccountState]struct{}, map[common.Address]map[common.Hash]struct{}) { return s.accReadSet, s.slotReadSet } @@ -275,11 +250,13 @@ type MVStates struct { txDepCache map[int]TxDep // async rw event recorder + // these fields are only used in one routine asyncRWSet *RWSet rwEventCh chan []RWEventItem rwEventCache []RWEventItem rwEventCacheIndex int - recordeReadDone bool + recordingRead bool + recordingWrite bool // execution stat infos lock sync.RWMutex @@ -409,6 +386,7 @@ func (s *MVStates) finalisePreviousRWSet() { } if _, exist := s.asyncRWSet.accReadSet[addr][AccountSelf]; exist { s.cannotGasFeeDelay = true + break } } s.resolveDepsMapCacheByWrites(index, s.asyncRWSet) @@ -425,12 +403,21 @@ func (s *MVStates) RecordNewTx(index int) { }) } s.rwEventCacheIndex++ - s.recordeReadDone = false + s.recordingRead = true + s.recordingWrite = true s.BatchRecordHandle() } +func (s *MVStates) RecordReadDone() { + s.recordingRead = false +} + +func (s *MVStates) RecordWriteDone() { + s.recordingWrite = false +} + func (s *MVStates) 
RecordAccountRead(addr common.Address, state AccountState) { - if s.recordeReadDone { + if !s.recordingRead { return } if s.rwEventCacheIndex < len(s.rwEventCache) { @@ -449,7 +436,7 @@ func (s *MVStates) RecordAccountRead(addr common.Address, state AccountState) { } func (s *MVStates) RecordStorageRead(addr common.Address, slot common.Hash) { - if s.recordeReadDone { + if !s.recordingRead { return } if s.rwEventCacheIndex < len(s.rwEventCache) { @@ -467,11 +454,10 @@ func (s *MVStates) RecordStorageRead(addr common.Address, slot common.Hash) { s.rwEventCacheIndex++ } -func (s *MVStates) RecordReadDone() { - s.recordeReadDone = true -} - func (s *MVStates) RecordAccountWrite(addr common.Address, state AccountState) { + if !s.recordingWrite { + return + } if s.rwEventCacheIndex < len(s.rwEventCache) { s.rwEventCache[s.rwEventCacheIndex].Event = WriteAccRWEvent s.rwEventCache[s.rwEventCacheIndex].Addr = addr @@ -488,6 +474,9 @@ func (s *MVStates) RecordAccountWrite(addr common.Address, state AccountState) { } func (s *MVStates) RecordStorageWrite(addr common.Address, slot common.Hash) { + if !s.recordingWrite { + return + } if s.rwEventCacheIndex < len(s.rwEventCache) { s.rwEventCache[s.rwEventCacheIndex].Event = WriteSlotRWEvent s.rwEventCache[s.rwEventCacheIndex].Addr = addr @@ -504,6 +493,9 @@ func (s *MVStates) RecordStorageWrite(addr common.Address, slot common.Hash) { } func (s *MVStates) RecordCannotDelayGasFee() { + if !s.recordingWrite { + return + } if s.rwEventCacheIndex < len(s.rwEventCache) { s.rwEventCache[s.rwEventCacheIndex].Event = CannotGasFeeDelayRWEvent s.rwEventCacheIndex++ @@ -516,7 +508,7 @@ func (s *MVStates) RecordCannotDelayGasFee() { } func (s *MVStates) BatchRecordHandle() { - if len(s.rwEventCache) == 0 { + if s.rwEventCacheIndex == 0 { return } s.rwEventCh <- s.rwEventCache[:s.rwEventCacheIndex] @@ -545,9 +537,6 @@ func (s *MVStates) quickFinaliseWithRWSet(rwSet *RWSet) error { func (s *MVStates) FinaliseWithRWSet(rwSet *RWSet) error { s.lock.Lock() defer s.lock.Unlock() - if s.asyncRWSet == nil { - s.asyncRWSet = nil - } index := rwSet.index if s.nextFinaliseIndex > index { return fmt.Errorf("finalise in wrong order, next: %d, input: %d", s.nextFinaliseIndex, index) @@ -567,7 +556,6 @@ func (s *MVStates) FinaliseWithRWSet(rwSet *RWSet) error { "readCnt", len(s.rwSets[i].accReadSet)+len(s.rwSets[i].slotReadSet), "writeCnt", len(s.rwSets[i].accWriteSet)+len(s.rwSets[i].slotWriteSet)) } - s.rwSets[index] = rwSet return nil } @@ -822,15 +810,6 @@ func checkSlotDependency(writeSet map[common.Address]map[common.Hash]struct{}, r return false } -type TxDepMaker interface { - add(index uint64) - exist(index uint64) bool - deps() []uint64 - remove(index uint64) - len() int - reset() -} - type TxDepMap struct { tm map[uint64]struct{} cache []uint64 @@ -873,42 +852,3 @@ func (m *TxDepMap) remove(index uint64) { func (m *TxDepMap) len() int { return len(m.tm) } - -func (m *TxDepMap) reset() { - m.cache = nil - m.tm = make(map[uint64]struct{}) -} - -// isEqualRWVal compare state -func isEqualRWVal(accState *AccountState, src interface{}, compared interface{}) bool { - if accState != nil { - switch *accState { - case AccountBalance: - if src != nil && compared != nil { - return equalUint256(src.(*uint256.Int), compared.(*uint256.Int)) - } - return src == compared - case AccountNonce: - return src.(uint64) == compared.(uint64) - case AccountCodeHash: - if src != nil && compared != nil { - return slices.Equal(src.([]byte), compared.([]byte)) - } - return src == compared - } - 
return false - } - - if src != nil && compared != nil { - return src.(common.Hash) == compared.(common.Hash) - } - return src == compared -} - -func equalUint256(s, c *uint256.Int) bool { - if s != nil && c != nil { - return s.Eq(c) - } - - return s == c -} diff --git a/core/types/mvstates_test.go b/core/types/mvstates_test.go index f22ba40fe4..2f3eba3828 100644 --- a/core/types/mvstates_test.go +++ b/core/types/mvstates_test.go @@ -12,7 +12,6 @@ import ( "github.com/cometbft/cometbft/libs/rand" "github.com/golang/snappy" - "github.com/holiman/uint256" "github.com/stretchr/testify/require" ) @@ -187,104 +186,6 @@ func TestMVStates_SystemTxWithLargeDepsResolveTxDAG(t *testing.T) { t.Log(dag) } -func TestIsEqualRWVal(t *testing.T) { - tests := []struct { - key *AccountState - src interface{} - compared interface{} - isEqual bool - }{ - { - key: &AccountNonce, - src: uint64(0), - compared: uint64(0), - isEqual: true, - }, - { - key: &AccountNonce, - src: uint64(0), - compared: uint64(1), - isEqual: false, - }, - { - key: &AccountBalance, - src: new(uint256.Int).SetUint64(1), - compared: new(uint256.Int).SetUint64(1), - isEqual: true, - }, - { - key: &AccountBalance, - src: nil, - compared: new(uint256.Int).SetUint64(1), - isEqual: false, - }, - { - key: &AccountBalance, - src: (*uint256.Int)(nil), - compared: new(uint256.Int).SetUint64(1), - isEqual: false, - }, - { - key: &AccountBalance, - src: (*uint256.Int)(nil), - compared: (*uint256.Int)(nil), - isEqual: true, - }, - { - key: &AccountCodeHash, - src: []byte{1}, - compared: []byte{1}, - isEqual: true, - }, - { - key: &AccountCodeHash, - src: nil, - compared: []byte{1}, - isEqual: false, - }, - { - key: &AccountCodeHash, - src: ([]byte)(nil), - compared: []byte{1}, - isEqual: false, - }, - { - key: &AccountCodeHash, - src: ([]byte)(nil), - compared: ([]byte)(nil), - isEqual: true, - }, - { - key: &AccountSuicide, - src: struct{}{}, - compared: struct{}{}, - isEqual: false, - }, - { - key: &AccountSuicide, - src: nil, - compared: struct{}{}, - isEqual: false, - }, - { - key: nil, - src: mockHash, - compared: mockHash, - isEqual: true, - }, - { - key: nil, - src: nil, - compared: mockHash, - isEqual: false, - }, - } - - for i, item := range tests { - require.Equal(t, item.isEqual, isEqualRWVal(item.key, item.src, item.compared), i) - } -} - func TestTxRecorder_Basic(t *testing.T) { sets := []*RWSet{ mockRWSet(0, []interface{}{AccountSelf, AccountBalance, "0x00"}, diff --git a/core/vm/interface.go b/core/vm/interface.go index d37e8bfe65..c8636ef643 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -81,7 +81,9 @@ type StateDB interface { AddPreimage(common.Hash, []byte) TxIndex() int - CheckFeeReceiversRWSet() error + + // parallel DAG related + CheckFeeReceiversRWSet() } // CallContext provides a basic interface for the EVM calling conventions. 
The EVM From 80b1c5a8031382dd559d44dfbeabc561b120b485 Mon Sep 17 00:00:00 2001 From: galaio Date: Thu, 5 Sep 2024 23:12:51 +0800 Subject: [PATCH 23/42] txdag: opt generation logic, support stop mvstates; --- core/blockchain.go | 14 ------ core/state/statedb.go | 15 ++++--- core/state_processor.go | 18 +++++++- core/types/mvstates.go | 86 ++++++++++++++++--------------------- core/types/mvstates_test.go | 31 ++++++++++++- miner/worker.go | 11 +++-- 6 files changed, 100 insertions(+), 75 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index db1c979494..516fe23747 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1983,20 +1983,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) vtime := time.Since(vstart) proctime := time.Since(start) // processing + validation - if bc.enableTxDAG { - // compare input TxDAG when it enable in consensus - dag, err := statedb.ResolveTxDAG(len(block.Transactions())) - if err == nil { - // TODO(galaio): check TxDAG correctness? - log.Debug("Process TxDAG result", "block", block.NumberU64(), "tx", len(block.Transactions()), "txDAG", dag) - if metrics.EnabledExpensive { - go types.EvaluateTxDAGPerformance(dag) - } - } else { - log.Error("ResolveTxDAG err", "block", block.NumberU64(), "tx", len(block.Transactions()), "err", err) - } - } - // Update the metrics touched during block processing and validation accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing) storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing) diff --git a/core/state/statedb.go b/core/state/statedb.go index 8c9d243764..9bb623c72b 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -993,9 +993,6 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { // the commit-phase will be a lot faster addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure } - if s.mvStates != nil { - s.mvStates.RecordWriteDone() - } if s.prefetcher != nil && len(addressesToPrefetch) > 0 { s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch) } @@ -1706,11 +1703,11 @@ func (s *StateDB) GetSnap() snapshot.Snapshot { return s.snap } -func (s *StateDB) BeginTxRecorder(isExcludeTx bool) { +func (s *StateDB) StartTxRecorder(isExcludeTx bool) { if s.mvStates == nil { return } - log.Debug("BeginTxRecorder", "tx", s.txIndex) + log.Debug("StartTxRecorder", "tx", s.txIndex) if isExcludeTx { rwSet := types.NewRWSet(s.txIndex).WithExcludedTxFlag() if err := s.mvStates.FinaliseWithRWSet(rwSet); err != nil { @@ -1721,6 +1718,14 @@ func (s *StateDB) BeginTxRecorder(isExcludeTx bool) { s.mvStates.RecordNewTx(s.txIndex) } +func (s *StateDB) StopTxRecorder() { + if s.mvStates == nil { + return + } + s.mvStates.RecordReadDone() + s.mvStates.RecordWriteDone() +} + func (s *StateDB) ResetMVStates(txCount int, feeReceivers []common.Address) *types.MVStates { s.mvStates = types.NewMVStates(txCount, feeReceivers) return s.mvStates diff --git a/core/state_processor.go b/core/state_processor.go index 171440cfd5..dd0aaeb008 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -22,6 +22,8 @@ import ( "math/big" "time" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/misc" @@ -122,6 +124,19 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg } // Finalize 
the block, applying any consensus engine specific extras (e.g. block rewards) p.engine.Finalize(p.bc, header, statedb, block.Transactions(), block.Uncles(), withdrawals) + if p.bc.enableTxDAG { + // compare input TxDAG when it enable in consensus + dag, err := statedb.ResolveTxDAG(len(block.Transactions())) + if err == nil { + // TODO(galaio): check TxDAG correctness? + log.Debug("Process TxDAG result", "block", block.NumberU64(), "tx", len(block.Transactions()), "txDAG", dag) + if metrics.EnabledExpensive { + go types.EvaluateTxDAGPerformance(dag) + } + } else { + log.Error("ResolveTxDAG err", "block", block.NumberU64(), "tx", len(block.Transactions()), "err", err) + } + } return receipts, allLogs, *usedGas, nil } @@ -129,7 +144,8 @@ func applyTransaction(msg *Message, config *params.ChainConfig, gp *GasPool, sta // Create a new context to be used in the EVM environment. txContext := NewEVMTxContext(msg) evm.Reset(txContext, statedb) - statedb.BeginTxRecorder(tx.IsSystemTx() || tx.IsDepositTx()) + statedb.StartTxRecorder(tx.IsSystemTx() || tx.IsDepositTx()) + defer statedb.StopTxRecorder() nonce := tx.Nonce() if msg.IsDepositTx && config.IsOptimismRegolith(evm.Context.Time) { diff --git a/core/types/mvstates.go b/core/types/mvstates.go index e7d13cc072..300e0adf48 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -4,7 +4,6 @@ import ( "fmt" "strings" "sync" - "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -42,7 +41,8 @@ type RWSet struct { slotWriteSet map[common.Address]map[common.Hash]struct{} // some flags - excludedTx bool + excludedTx bool + cannotGasFeeDelay bool } func NewRWSet(index int) *RWSet { @@ -244,10 +244,10 @@ type MVStates struct { pendingSlotWriteSet map[common.Address]map[common.Hash]*PendingWrites nextFinaliseIndex int gasFeeReceivers []common.Address - // dependency map cache for generating TxDAG // depMapCache[i].exist(j) means j->i, and i > j txDepCache map[int]TxDep + lock sync.RWMutex // async rw event recorder // these fields are only used in one routine @@ -257,11 +257,8 @@ type MVStates struct { rwEventCacheIndex int recordingRead bool recordingWrite bool - - // execution stat infos - lock sync.RWMutex + asyncRunning bool asyncWG sync.WaitGroup - cannotGasFeeDelay bool } func NewMVStates(txCount int, gasFeeReceivers []common.Address) *MVStates { @@ -280,18 +277,20 @@ func NewMVStates(txCount int, gasFeeReceivers []common.Address) *MVStates { } func (s *MVStates) EnableAsyncGen() *MVStates { - s.lock.Lock() - defer s.lock.Unlock() s.asyncWG.Add(1) + s.asyncRunning = true go s.asyncRWEventLoop() return s } +func (s *MVStates) Stop() { + s.stopAsyncRecorder() +} + func (s *MVStates) Copy() *MVStates { s.lock.Lock() defer s.lock.Unlock() ns := NewMVStates(len(s.rwSets), s.gasFeeReceivers) - ns.cannotGasFeeDelay = s.cannotGasFeeDelay ns.nextFinaliseIndex = s.nextFinaliseIndex for k, v := range s.txDepCache { ns.txDepCache[k] = v @@ -320,7 +319,6 @@ func (s *MVStates) Copy() *MVStates { func (s *MVStates) asyncRWEventLoop() { defer s.asyncWG.Done() - timeout := time.After(3 * time.Second) for { select { case items, ok := <-s.rwEventCh: @@ -331,9 +329,6 @@ func (s *MVStates) asyncRWEventLoop() { s.handleRWEvent(item) } rwEventCachePool.Put(&items) - case <-timeout: - log.Warn("asyncRWEventLoop timeout") - return } } } @@ -343,17 +338,10 @@ func (s *MVStates) handleRWEvent(item RWEventItem) { defer s.lock.Unlock() // init next RWSet, and finalise previous RWSet if item.Event == NewTxRWEvent { - if item.Index > 
0 { - s.finalisePreviousRWSet() - } + s.finalisePreviousRWSet() s.asyncRWSet = NewRWSet(item.Index) return } - // recorde current as cannot gas fee delay - if item.Event == CannotGasFeeDelayRWEvent { - s.cannotGasFeeDelay = true - return - } if s.asyncRWSet == nil { return } @@ -367,6 +355,9 @@ func (s *MVStates) handleRWEvent(item RWEventItem) { s.finaliseAccWrite(s.asyncRWSet.index, item.Addr, item.State) case WriteSlotRWEvent: s.finaliseSlotWrite(s.asyncRWSet.index, item.Addr, item.Slot) + // recorde current as cannot gas fee delay + case CannotGasFeeDelayRWEvent: + s.asyncRWSet.cannotGasFeeDelay = true } } @@ -375,20 +366,22 @@ func (s *MVStates) finalisePreviousRWSet() { return } index := s.asyncRWSet.index - if err := s.quickFinaliseWithRWSet(s.asyncRWSet); err != nil { - log.Error("Finalise err when handle NewTxRWEvent", "tx", index, "err", err) - return - } + s.rwSets[index] = s.asyncRWSet + // check if there are RW with gas fee receiver for gas delay calculation for _, addr := range s.gasFeeReceivers { if _, exist := s.asyncRWSet.accReadSet[addr]; !exist { continue } if _, exist := s.asyncRWSet.accReadSet[addr][AccountSelf]; exist { - s.cannotGasFeeDelay = true + s.rwSets[index].cannotGasFeeDelay = true break } } + if err := s.innerFinalise(index, false); err != nil { + log.Error("Finalise err when handle NewTxRWEvent", "tx", index, "err", err) + return + } s.resolveDepsMapCacheByWrites(index, s.asyncRWSet) } @@ -518,19 +511,12 @@ func (s *MVStates) BatchRecordHandle() { } func (s *MVStates) stopAsyncRecorder() { - close(s.rwEventCh) - s.asyncWG.Wait() -} - -// quickFinaliseWithRWSet it just store RWSet and inc pendingIndex -func (s *MVStates) quickFinaliseWithRWSet(rwSet *RWSet) error { - index := rwSet.index - if s.nextFinaliseIndex != index { - return fmt.Errorf("finalise in wrong order, next: %d, input: %d", s.nextFinaliseIndex, index) + if s.asyncRunning { + s.asyncRunning = false + s.BatchRecordHandle() + close(s.rwEventCh) + s.asyncWG.Wait() } - s.rwSets[index] = rwSet - s.nextFinaliseIndex++ - return nil } // FinaliseWithRWSet it will put target write set into pending writes. 
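A rough sketch of how the recorder is meant to be driven end to end, assembled from the methods above and the tests later in this series (buildTxDAG is an illustrative helper only, not part of the change; error handling and the StateDB plumbing are omitted):

    // sketch: one MVStates instance records per-tx read/write events and
    // resolves the TxDAG once after the last transaction.
    func buildTxDAG(txCount int, gasFeeReceivers []common.Address) (types.TxDAG, error) {
        ms := types.NewMVStates(txCount, gasFeeReceivers).EnableAsyncGen()
        for i := 0; i < txCount; i++ {
            ms.RecordNewTx(i) // finalises the previous tx's RWSet inside the async event loop
            // ... EVM execution feeds RecordAccountRead / RecordStorageRead /
            // RecordAccountWrite / RecordStorageWrite events here ...
            ms.RecordReadDone()
            ms.RecordWriteDone()
        }
        // flushes the cached events, stops the async loop and builds the DAG
        return ms.ResolveTxDAG(txCount)
    }
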
@@ -538,9 +524,6 @@ func (s *MVStates) FinaliseWithRWSet(rwSet *RWSet) error { s.lock.Lock() defer s.lock.Unlock() index := rwSet.index - if s.nextFinaliseIndex > index { - return fmt.Errorf("finalise in wrong order, next: %d, input: %d", s.nextFinaliseIndex, index) - } s.rwSets[index] = rwSet // just finalise all previous txs start := s.nextFinaliseIndex @@ -548,7 +531,7 @@ func (s *MVStates) FinaliseWithRWSet(rwSet *RWSet) error { start = index } for i := start; i <= index; i++ { - if err := s.innerFinalise(i); err != nil { + if err := s.innerFinalise(i, true); err != nil { return err } s.resolveDepsMapCacheByWrites(i, s.rwSets[i]) @@ -560,7 +543,7 @@ func (s *MVStates) FinaliseWithRWSet(rwSet *RWSet) error { return nil } -func (s *MVStates) innerFinalise(index int) error { +func (s *MVStates) innerFinalise(index int, applyWriteSet bool) error { rwSet := s.rwSets[index] if rwSet == nil { return fmt.Errorf("finalise a non-exist RWSet, index: %d", index) @@ -570,6 +553,12 @@ func (s *MVStates) innerFinalise(index int) error { return fmt.Errorf("finalise in wrong order, next: %d, input: %d", s.nextFinaliseIndex, index) } + // reset nextFinaliseIndex to index+1, it may revert to previous txs + s.nextFinaliseIndex = index + 1 + if !applyWriteSet { + return nil + } + // append to pending write set for addr, sub := range rwSet.accWriteSet { if _, exist := s.pendingAccWriteSet[addr]; !exist { @@ -593,8 +582,6 @@ func (s *MVStates) innerFinalise(index int) error { s.pendingSlotWriteSet[addr][slot].Append(index) } } - // reset nextFinaliseIndex to index+1, it may revert to previous txs - s.nextFinaliseIndex = index + 1 return nil } @@ -735,14 +722,10 @@ func (s *MVStates) resolveDepsCache(index int, rwSet *RWSet) { // ResolveTxDAG generate TxDAG from RWSets func (s *MVStates) ResolveTxDAG(txCnt int) (TxDAG, error) { - s.BatchRecordHandle() s.stopAsyncRecorder() s.lock.Lock() defer s.lock.Unlock() - if s.cannotGasFeeDelay { - return NewEmptyTxDAG(), nil - } s.finalisePreviousRWSet() if s.nextFinaliseIndex != txCnt { return nil, fmt.Errorf("cannot resolve with wrong FinaliseIndex, expect: %v, now: %v", txCnt, s.nextFinaliseIndex) @@ -750,6 +733,9 @@ func (s *MVStates) ResolveTxDAG(txCnt int) (TxDAG, error) { txDAG := NewPlainTxDAG(txCnt) for i := 0; i < txCnt; i++ { + if s.rwSets[i].cannotGasFeeDelay { + return NewEmptyTxDAG(), nil + } deps := s.txDepCache[i].TxIndexes if len(deps) <= (txCnt-1)/2 { txDAG.TxDeps[i] = s.txDepCache[i] diff --git a/core/types/mvstates_test.go b/core/types/mvstates_test.go index 2f3eba3828..5275c7a8dc 100644 --- a/core/types/mvstates_test.go +++ b/core/types/mvstates_test.go @@ -25,7 +25,11 @@ func TestMVStates_SimpleResolveTxDAG(t *testing.T) { mockRWSet(0, []interface{}{"0x00"}, []interface{}{"0x00"}), mockRWSet(1, []interface{}{"0x01"}, []interface{}{"0x01"}), mockRWSet(2, []interface{}{"0x02"}, []interface{}{"0x02"}), + mockRWSet(3, []interface{}{"0x03"}, []interface{}{"0x03"}), + mockRWSet(3, []interface{}{"0x03"}, []interface{}{"0x03"}), mockRWSet(3, []interface{}{"0x00", "0x03"}, []interface{}{"0x03"}), + }) + finaliseRWSets(t, ms, []*RWSet{ mockRWSet(4, []interface{}{"0x00", "0x04"}, []interface{}{"0x04"}), mockRWSet(5, []interface{}{"0x01", "0x02", "0x05"}, []interface{}{"0x05"}), mockRWSet(6, []interface{}{"0x02", "0x05", "0x06"}, []interface{}{"0x06"}), @@ -36,10 +40,24 @@ func TestMVStates_SimpleResolveTxDAG(t *testing.T) { dag, err := ms.ResolveTxDAG(10) require.NoError(t, err) + time.Sleep(10 * time.Millisecond) + ms.Stop() require.Equal(t, mockSimpleDAG(), 
dag) t.Log(dag) } +func TestMVStates_ResolveTxDAG_Async(t *testing.T) { + txCnt := 10000 + rwSets := mockRandomRWSet(txCnt) + ms1 := NewMVStates(txCnt, nil).EnableAsyncGen() + for i := 0; i < txCnt; i++ { + require.NoError(t, ms1.FinaliseWithRWSet(rwSets[i])) + } + time.Sleep(100 * time.Millisecond) + _, err := ms1.ResolveTxDAG(txCnt) + require.NoError(t, err) +} + func TestMVStates_ResolveTxDAG_Compare(t *testing.T) { txCnt := 3000 rwSets := mockRandomRWSet(txCnt) @@ -221,7 +239,18 @@ func TestTxRecorder_Basic(t *testing.T) { } dag, err := ms.ResolveTxDAG(3) require.NoError(t, err) - t.Log(dag) + require.Equal(t, "[]\n[0]\n[1]\n", dag.(*PlainTxDAG).String()) +} + +func TestTxRecorder_CannotDelayGasFee(t *testing.T) { + ms := NewMVStates(0, nil).EnableAsyncGen() + ms.RecordNewTx(0) + ms.RecordNewTx(1) + ms.RecordCannotDelayGasFee() + ms.RecordNewTx(2) + dag, err := ms.ResolveTxDAG(3) + require.NoError(t, err) + require.Equal(t, NewEmptyTxDAG(), dag) } func mockRWSet(index int, read []interface{}, write []interface{}) *RWSet { diff --git a/miner/worker.go b/miner/worker.go index 3a94758861..5ce24b29e5 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1424,7 +1424,7 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { wg.Add(1) go func() { defer wg.Done() - if newWork.state.MVStates() != nil { + if w.chain.TxDAGEnabledWhenMine() { newWork.state.MVStates().EnableAsyncGen() } err := w.fillTransactions(interrupt, newWork) @@ -1436,7 +1436,7 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { isBuildBlockInterruptCounter.Inc(1) } }() - if work.state.MVStates() != nil { + if w.chain.TxDAGEnabledWhenMine() { work.state.MVStates().EnableAsyncGen() } err := w.fillTransactionsAndBundles(interrupt, work) @@ -1444,10 +1444,13 @@ func (w *worker) generateWork(genParams *generateParams) *newPayloadResult { timer.Stop() // don't need timeout interruption any more if errors.Is(err, errFillBundleInterrupted) { log.Warn("fill bundles is interrupted, discard", "err", err) - work = newWork + work, newWork = newWork, work + } + if w.chain.TxDAGEnabledWhenMine() { + newWork.state.MVStates().Stop() } } else { - if work.state.MVStates() != nil { + if w.chain.TxDAGEnabledWhenMine() { work.state.MVStates().EnableAsyncGen() } err := w.fillTransactions(interrupt, work) From 44a78b858e06ff8f34281f62128bb4b8b62b1c16 Mon Sep 17 00:00:00 2001 From: galaio Date: Fri, 6 Sep 2024 11:42:13 +0800 Subject: [PATCH 24/42] txdag: fix record panic after stop the async gen; worker: add UT to test txdag gasless block generation; --- core/state/statedb.go | 2 + core/types/mvstates.go | 15 +++--- miner/payload_building_test.go | 2 +- miner/worker.go | 4 +- miner/worker_test.go | 99 ++++++++++++++++++++++++++++++---- 5 files changed, 104 insertions(+), 18 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index 9bb623c72b..1907a9b8eb 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1713,6 +1713,8 @@ func (s *StateDB) StartTxRecorder(isExcludeTx bool) { if err := s.mvStates.FinaliseWithRWSet(rwSet); err != nil { log.Error("MVStates SystemTx Finalise err", "err", err) } + s.mvStates.RecordReadDone() + s.mvStates.RecordWriteDone() return } s.mvStates.RecordNewTx(s.txIndex) diff --git a/core/types/mvstates.go b/core/types/mvstates.go index 300e0adf48..c7e88a0283 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -386,6 +386,9 @@ func (s *MVStates) finalisePreviousRWSet() { } func (s *MVStates) RecordNewTx(index int) { + if 
!s.asyncRunning { + return + } if s.rwEventCacheIndex < len(s.rwEventCache) { s.rwEventCache[s.rwEventCacheIndex].Event = NewTxRWEvent s.rwEventCache[s.rwEventCacheIndex].Index = index @@ -410,7 +413,7 @@ func (s *MVStates) RecordWriteDone() { } func (s *MVStates) RecordAccountRead(addr common.Address, state AccountState) { - if !s.recordingRead { + if !s.asyncRunning || !s.recordingRead { return } if s.rwEventCacheIndex < len(s.rwEventCache) { @@ -429,7 +432,7 @@ func (s *MVStates) RecordAccountRead(addr common.Address, state AccountState) { } func (s *MVStates) RecordStorageRead(addr common.Address, slot common.Hash) { - if !s.recordingRead { + if !s.asyncRunning || !s.recordingRead { return } if s.rwEventCacheIndex < len(s.rwEventCache) { @@ -448,7 +451,7 @@ func (s *MVStates) RecordStorageRead(addr common.Address, slot common.Hash) { } func (s *MVStates) RecordAccountWrite(addr common.Address, state AccountState) { - if !s.recordingWrite { + if !s.asyncRunning || !s.recordingWrite { return } if s.rwEventCacheIndex < len(s.rwEventCache) { @@ -467,7 +470,7 @@ func (s *MVStates) RecordAccountWrite(addr common.Address, state AccountState) { } func (s *MVStates) RecordStorageWrite(addr common.Address, slot common.Hash) { - if !s.recordingWrite { + if !s.asyncRunning || !s.recordingWrite { return } if s.rwEventCacheIndex < len(s.rwEventCache) { @@ -486,7 +489,7 @@ func (s *MVStates) RecordStorageWrite(addr common.Address, slot common.Hash) { } func (s *MVStates) RecordCannotDelayGasFee() { - if !s.recordingWrite { + if !s.asyncRunning || !s.recordingWrite { return } if s.rwEventCacheIndex < len(s.rwEventCache) { @@ -501,7 +504,7 @@ func (s *MVStates) RecordCannotDelayGasFee() { } func (s *MVStates) BatchRecordHandle() { - if s.rwEventCacheIndex == 0 { + if !s.asyncRunning || s.rwEventCacheIndex == 0 { return } s.rwEventCh <- s.rwEventCache[:s.rwEventCacheIndex] diff --git a/miner/payload_building_test.go b/miner/payload_building_test.go index cb1220fbfa..89ffd4d63f 100644 --- a/miner/payload_building_test.go +++ b/miner/payload_building_test.go @@ -44,7 +44,7 @@ func testBuildPayload(t *testing.T, noTxPool, interrupt bool) { db = rawdb.NewMemoryDatabase() recipient = common.HexToAddress("0xdeadbeef") ) - w, b := newTestWorker(t, params.TestChainConfig, ethash.NewFaker(), db, 0) + w, b := newTestWorker(t, params.TestChainConfig, ethash.NewFaker(), db, 0, nil, nil) defer w.close() const numInterruptTxs = 256 diff --git a/miner/worker.go b/miner/worker.go index 5ce24b29e5..f5bda5f74a 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1058,6 +1058,7 @@ func (w *worker) appendTxDAG(env *environment) { log.Warn("failed to generate DAG tx", "err", err) return } + env.state.SetTxContext(txForDAG.Hash(), env.tcount) _, err = w.commitTransaction(env, txForDAG) if err != nil { log.Warn("failed to commit DAG tx", "err", err) @@ -1108,8 +1109,7 @@ func (w *worker) generateDAGTx(statedb *state.StateDB, signer types.Signer, txIn return nil, fmt.Errorf("failed to encode txDAG, err: %v", err) } - enc, _ := types.EncodeTxDAG(txDAG) - log.Debug("EncodeTxDAGCalldata", "tx", txDAG.TxCount(), "enc", len(enc), "data", len(data), "dag", txDAG) + log.Debug("EncodeTxDAGCalldata", "tx", txDAG.TxCount(), "data", len(data), "dag", txDAG) // Create the transaction tx := types.NewTx(&types.LegacyTx{ Nonce: nonce, diff --git a/miner/worker_test.go b/miner/worker_test.go index 1c19e60de9..495d52d6e7 100644 --- a/miner/worker_test.go +++ b/miner/worker_test.go @@ -115,7 +115,8 @@ type testWorkerBackend struct { genesis 
*core.Genesis } -func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, n int) *testWorkerBackend { +func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, + db ethdb.Database, n int, overrideVMConfig *vm.Config) *testWorkerBackend { var gspec = &core.Genesis{ Config: chainConfig, Alloc: types.GenesisAlloc{testBankAddress: {Balance: testBankFunds}}, @@ -131,7 +132,11 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine default: t.Fatalf("unexpected consensus engine type: %T", engine) } - chain, err := core.NewBlockChain(db, &core.CacheConfig{TrieDirtyDisabled: true}, gspec, nil, engine, vm.Config{}, nil, nil) + vmConfig := vm.Config{} + if overrideVMConfig != nil { + vmConfig = *overrideVMConfig + } + chain, err := core.NewBlockChain(db, &core.CacheConfig{TrieDirtyDisabled: true}, gspec, nil, engine, vmConfig, nil, nil) if err != nil { t.Fatalf("core.NewBlockChain failed: %v", err) } @@ -160,11 +165,16 @@ func (b *testWorkerBackend) newRandomTx(creation bool) *types.Transaction { return tx } -func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, blocks int) (*worker, *testWorkerBackend) { - backend := newTestWorkerBackend(t, chainConfig, engine, db, blocks) +func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, + blocks int, overrideConfig *Config, overrideVMConfig *vm.Config) (*worker, *testWorkerBackend) { + backend := newTestWorkerBackend(t, chainConfig, engine, db, blocks, overrideVMConfig) backend.txPool.Add(pendingTxs, true, false) time.Sleep(500 * time.Millisecond) // Wait for txs to be promoted - w := newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false) + cfg := testConfig + if overrideConfig != nil { + cfg = overrideConfig + } + w := newWorker(cfg, chainConfig, engine, backend, new(event.TypeMux), nil, false) w.setEtherbase(testBankAddress) return w, backend } @@ -178,7 +188,7 @@ func TestGenerateAndImportBlock(t *testing.T) { config.Clique = ¶ms.CliqueConfig{Period: 1, Epoch: 30000} engine := clique.New(config.Clique, db) - w, b := newTestWorker(t, &config, engine, db, 0) + w, b := newTestWorker(t, &config, engine, db, 0, nil, nil) defer w.close() // This test chain imports the mined blocks. 
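With the widened constructors, existing tests simply pass nil for the new override parameters, while the gasless TxDAG test added below supplies a custom miner Config and a vm.Config with NoBaseFee set. Call-site sketch (mirroring the hunks in this patch; variable names illustrative):

    // unchanged tests: keep the defaults
    w, b := newTestWorker(t, &config, engine, db, 0, nil, nil)

    // gasless TxDAG test: override the miner config and the EVM config
    w, b = newTestWorker(t, &config, engine, db, 0, &cfg, &vmConfig) // vmConfig = vm.Config{NoBaseFee: true}
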
@@ -214,6 +224,77 @@ func TestGenerateAndImportBlock(t *testing.T) { } } +func TestGenerateTxDAGGaslessBlock(t *testing.T) { + generateTxDAGGaslessBlock(t, true, true) + generateTxDAGGaslessBlock(t, true, false) + generateTxDAGGaslessBlock(t, false, true) + generateTxDAGGaslessBlock(t, false, false) +} + +func generateTxDAGGaslessBlock(t *testing.T, enableMev, enableTxDAG bool) { + t.Log("generateTxDAGGaslessBlock", enableMev, enableTxDAG) + var ( + db = rawdb.NewMemoryDatabase() + config = *params.AllCliqueProtocolChanges + ) + config.Optimism = ¶ms.OptimismConfig{ + EIP1559Elasticity: 2, + EIP1559Denominator: 8, + EIP1559DenominatorCanyon: 8, + } + cfg := Config{} + cfg = *testConfig + if enableMev { + cfg.Mev.MevEnabled = true + } + cfg.NewPayloadTimeout = 3 * time.Second + cfg.ParallelTxDAGSenderPriv, _ = crypto.ToECDSA(crypto.Keccak256([]byte{1})) + vmConfig := vm.Config{NoBaseFee: true} + engine := clique.New(config.Clique, db) + + w, b := newTestWorker(t, &config, engine, db, 0, &cfg, &vmConfig) + defer w.close() + if enableTxDAG { + w.chain.SetupTxDAGGeneration() + } + + // Ignore empty commit here for less noise. + w.skipSealHook = func(task *task) bool { + return len(task.receipts) == 0 + } + + // Start mining! + w.start() + + for i := 0; i < 5; i++ { + b.txPool.Add([]*types.Transaction{b.newRandomTx(true)}, true, false) + b.txPool.Add([]*types.Transaction{b.newRandomTx(false)}, true, false) + time.Sleep(1 * time.Second) // Wait for txs to be promoted + + block := w.getSealingBlock(&generateParams{ + timestamp: uint64(time.Now().Unix()), + forceTime: false, + parentHash: common.Hash{}, + coinbase: common.Address{}, + random: common.Hash{}, + withdrawals: nil, + beaconRoot: nil, + noTxs: false, + txs: types.Transactions{ + types.NewTx(&types.DepositTx{ + To: nil, // contract creation + Value: big.NewInt(6), + Gas: 50, + })}, + gasLimit: nil, + interrupt: nil, + isUpdate: false, + }) + txDAG, _ := types.GetTxDAG(block.block) + t.Log("block", block.block.NumberU64(), "txs", len(block.block.Transactions()), "txdag", txDAG) + } +} + func TestEmptyWorkEthash(t *testing.T) { t.Parallel() testEmptyWork(t, ethashChainConfig, ethash.NewFaker()) @@ -226,7 +307,7 @@ func TestEmptyWorkClique(t *testing.T) { func testEmptyWork(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) { defer engine.Close() - w, _ := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0) + w, _ := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0, nil, nil) defer w.close() taskCh := make(chan struct{}, 2) @@ -271,7 +352,7 @@ func TestAdjustIntervalClique(t *testing.T) { func testAdjustInterval(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) { defer engine.Close() - w, _ := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0) + w, _ := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0, nil, nil) defer w.close() w.skipSealHook = func(task *task) bool { @@ -376,7 +457,7 @@ func TestGetSealingWorkPostMerge(t *testing.T) { func testGetSealingWork(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) { defer engine.Close() - w, b := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0) + w, b := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0, nil, nil) defer w.close() w.setExtra([]byte{0x01, 0x02}) From 31263e7bb6bbdfc197054677e1a4bb82bf83e555 Mon Sep 17 00:00:00 2001 From: galaio Date: Fri, 6 Sep 2024 14:26:37 +0800 Subject: [PATCH 25/42] txdag: fix the last tx dep 
wrong when mining; --- core/state/statedb.go | 2 +- core/state_processor.go | 2 +- core/types/mvstates.go | 12 ++++++------ miner/worker.go | 4 ++-- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index 1907a9b8eb..34e271d4c4 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1707,7 +1707,7 @@ func (s *StateDB) StartTxRecorder(isExcludeTx bool) { if s.mvStates == nil { return } - log.Debug("StartTxRecorder", "tx", s.txIndex) + //log.Debug("StartTxRecorder", "tx", s.txIndex) if isExcludeTx { rwSet := types.NewRWSet(s.txIndex).WithExcludedTxFlag() if err := s.mvStates.FinaliseWithRWSet(rwSet); err != nil { diff --git a/core/state_processor.go b/core/state_processor.go index dd0aaeb008..4811de6d88 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -129,7 +129,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg dag, err := statedb.ResolveTxDAG(len(block.Transactions())) if err == nil { // TODO(galaio): check TxDAG correctness? - log.Debug("Process TxDAG result", "block", block.NumberU64(), "tx", len(block.Transactions()), "txDAG", dag) + log.Debug("Process TxDAG result", "block", block.NumberU64(), "tx", len(block.Transactions()), "txDAG", dag.TxCount()) if metrics.EnabledExpensive { go types.EvaluateTxDAGPerformance(dag) } diff --git a/core/types/mvstates.go b/core/types/mvstates.go index c7e88a0283..1a47a9c3d1 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -515,8 +515,8 @@ func (s *MVStates) BatchRecordHandle() { func (s *MVStates) stopAsyncRecorder() { if s.asyncRunning { - s.asyncRunning = false s.BatchRecordHandle() + s.asyncRunning = false close(s.rwEventCh) s.asyncWG.Wait() } @@ -538,9 +538,9 @@ func (s *MVStates) FinaliseWithRWSet(rwSet *RWSet) error { return err } s.resolveDepsMapCacheByWrites(i, s.rwSets[i]) - log.Debug("Finalise the reads/writes", "index", i, - "readCnt", len(s.rwSets[i].accReadSet)+len(s.rwSets[i].slotReadSet), - "writeCnt", len(s.rwSets[i].accWriteSet)+len(s.rwSets[i].slotWriteSet)) + //log.Debug("Finalise the reads/writes", "index", i, + // "readCnt", len(s.rwSets[i].accReadSet)+len(s.rwSets[i].slotReadSet), + // "writeCnt", len(s.rwSets[i].accWriteSet)+len(s.rwSets[i].slotWriteSet)) } return nil @@ -669,7 +669,7 @@ func (s *MVStates) resolveDepsMapCacheByWrites(index int, rwSet *RWSet) { } } } - log.Debug("resolveDepsMapCacheByWrites", "tx", index, "deps", depMap.deps()) + //log.Debug("resolveDepsMapCacheByWrites", "tx", index, "deps", depMap.deps()) // clear redundancy deps compared with prev preDeps := depMap.deps() for _, prev := range preDeps { @@ -677,7 +677,7 @@ func (s *MVStates) resolveDepsMapCacheByWrites(index int, rwSet *RWSet) { depMap.remove(tx) } } - log.Debug("resolveDepsMapCacheByWrites after clean", "tx", index, "deps", depMap.deps()) + //log.Debug("resolveDepsMapCacheByWrites after clean", "tx", index, "deps", depMap.deps()) s.txDepCache[index] = NewTxDep(depMap.deps()) } diff --git a/miner/worker.go b/miner/worker.go index f5bda5f74a..9215f6d5f0 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -912,7 +912,7 @@ func (w *worker) applyTransaction(env *environment, tx *types.Transaction) (*typ ) receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &env.coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, *w.chain.GetVMConfig()) if err != nil { - log.Debug("ApplyTransaction err", "block", env.header.Number.Uint64(), "tx", env.tcount, "err", err) + 
//log.Debug("ApplyTransaction err", "block", env.header.Number.Uint64(), "tx", env.tcount, "err", err) env.state.RevertToSnapshot(snap) env.gasPool.SetGas(gp) } @@ -1109,7 +1109,7 @@ func (w *worker) generateDAGTx(statedb *state.StateDB, signer types.Signer, txIn return nil, fmt.Errorf("failed to encode txDAG, err: %v", err) } - log.Debug("EncodeTxDAGCalldata", "tx", txDAG.TxCount(), "data", len(data), "dag", txDAG) + //log.Debug("EncodeTxDAGCalldata", "tx", txDAG.TxCount(), "data", len(data), "dag", txDAG) // Create the transaction tx := types.NewTx(&types.LegacyTx{ Nonce: nonce, From 778fa7362c6f68e40131ff4b8185fba853ed7cbd Mon Sep 17 00:00:00 2001 From: galaio Date: Fri, 6 Sep 2024 20:27:00 +0800 Subject: [PATCH 26/42] txdag: opt 100% conflict scenario perf, reduce send chan frequency; --- core/types/mvstates.go | 88 +++++++++++++++++++------------------ core/types/mvstates_test.go | 48 ++++++++++++++++++++ 2 files changed, 94 insertions(+), 42 deletions(-) diff --git a/core/types/mvstates.go b/core/types/mvstates.go index 1a47a9c3d1..ce78f1ae87 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -26,7 +26,7 @@ const ( func init() { for i := 0; i < initRWEventCacheSize; i++ { - cache := make([]RWEventItem, 200) + cache := make([]RWEventItem, 10000) rwEventCachePool.Put(&cache) } } @@ -172,17 +172,17 @@ type RWEventItem struct { Slot common.Hash } -type PendingWrites struct { +type StateWrites struct { list []int } -func NewPendingWrites() *PendingWrites { - return &PendingWrites{ +func NewStateWrites() *StateWrites { + return &StateWrites{ list: make([]int, 0), } } -func (w *PendingWrites) Append(pw int) { +func (w *StateWrites) Append(pw int) { if i, found := w.SearchTxIndex(pw); found { w.list[i] = pw return @@ -197,7 +197,7 @@ func (w *PendingWrites) Append(pw int) { } } -func (w *PendingWrites) SearchTxIndex(txIndex int) (int, bool) { +func (w *StateWrites) SearchTxIndex(txIndex int) (int, bool) { n := len(w.list) i, j := 0, n for i < j { @@ -212,19 +212,19 @@ func (w *PendingWrites) SearchTxIndex(txIndex int) (int, bool) { return i, i < n && w.list[i] == txIndex } -func (w *PendingWrites) FindPrevWrites(txIndex int) []int { +func (w *StateWrites) FindLastWrite(txIndex int) int { var i, _ = w.SearchTxIndex(txIndex) for j := i - 1; j >= 0; j-- { if w.list[j] < txIndex { - return w.list[:j+1] + return w.list[j] } } - return nil + return -1 } -func (w *PendingWrites) Copy() *PendingWrites { - np := &PendingWrites{} +func (w *StateWrites) Copy() *StateWrites { + np := &StateWrites{} for i, item := range w.list { np.list[i] = item } @@ -240,8 +240,8 @@ var ( type MVStates struct { rwSets map[int]*RWSet - pendingAccWriteSet map[common.Address]map[AccountState]*PendingWrites - pendingSlotWriteSet map[common.Address]map[common.Hash]*PendingWrites + pendingAccWriteSet map[common.Address]map[AccountState]*StateWrites + pendingSlotWriteSet map[common.Address]map[common.Hash]*StateWrites nextFinaliseIndex int gasFeeReceivers []common.Address // dependency map cache for generating TxDAG @@ -264,8 +264,8 @@ type MVStates struct { func NewMVStates(txCount int, gasFeeReceivers []common.Address) *MVStates { m := &MVStates{ rwSets: make(map[int]*RWSet, txCount), - pendingAccWriteSet: make(map[common.Address]map[AccountState]*PendingWrites, txCount), - pendingSlotWriteSet: make(map[common.Address]map[common.Hash]*PendingWrites, txCount), + pendingAccWriteSet: make(map[common.Address]map[AccountState]*StateWrites, txCount), + pendingSlotWriteSet: 
make(map[common.Address]map[common.Hash]*StateWrites, txCount), txDepCache: make(map[int]TxDep, txCount), rwEventCh: make(chan []RWEventItem, 100), gasFeeReceivers: gasFeeReceivers, @@ -301,7 +301,7 @@ func (s *MVStates) Copy() *MVStates { for addr, sub := range s.pendingAccWriteSet { for state, writes := range sub { if _, ok := ns.pendingAccWriteSet[addr]; !ok { - ns.pendingAccWriteSet[addr] = make(map[AccountState]*PendingWrites) + ns.pendingAccWriteSet[addr] = make(map[AccountState]*StateWrites) } ns.pendingAccWriteSet[addr][state] = writes.Copy() } @@ -309,7 +309,7 @@ func (s *MVStates) Copy() *MVStates { for addr, sub := range s.pendingSlotWriteSet { for slot, writes := range sub { if _, ok := ns.pendingSlotWriteSet[addr]; !ok { - ns.pendingSlotWriteSet[addr] = make(map[common.Hash]*PendingWrites) + ns.pendingSlotWriteSet[addr] = make(map[common.Hash]*StateWrites) } ns.pendingSlotWriteSet[addr][slot] = writes.Copy() } @@ -401,7 +401,9 @@ func (s *MVStates) RecordNewTx(index int) { s.rwEventCacheIndex++ s.recordingRead = true s.recordingWrite = true - s.BatchRecordHandle() + if index%10 == 0 { + s.BatchRecordHandle() + } } func (s *MVStates) RecordReadDone() { @@ -565,22 +567,22 @@ func (s *MVStates) innerFinalise(index int, applyWriteSet bool) error { // append to pending write set for addr, sub := range rwSet.accWriteSet { if _, exist := s.pendingAccWriteSet[addr]; !exist { - s.pendingAccWriteSet[addr] = make(map[AccountState]*PendingWrites) + s.pendingAccWriteSet[addr] = make(map[AccountState]*StateWrites) } for state := range sub { if _, exist := s.pendingAccWriteSet[addr][state]; !exist { - s.pendingAccWriteSet[addr][state] = NewPendingWrites() + s.pendingAccWriteSet[addr][state] = NewStateWrites() } s.pendingAccWriteSet[addr][state].Append(index) } } for addr, sub := range rwSet.slotWriteSet { if _, exist := s.pendingSlotWriteSet[addr]; !exist { - s.pendingSlotWriteSet[addr] = make(map[common.Hash]*PendingWrites) + s.pendingSlotWriteSet[addr] = make(map[common.Hash]*StateWrites) } for slot := range sub { if _, exist := s.pendingSlotWriteSet[addr][slot]; !exist { - s.pendingSlotWriteSet[addr][slot] = NewPendingWrites() + s.pendingSlotWriteSet[addr][slot] = NewStateWrites() } s.pendingSlotWriteSet[addr][slot].Append(index) } @@ -591,10 +593,10 @@ func (s *MVStates) innerFinalise(index int, applyWriteSet bool) error { func (s *MVStates) finaliseSlotWrite(index int, addr common.Address, slot common.Hash) { // append to pending write set if _, exist := s.pendingSlotWriteSet[addr]; !exist { - s.pendingSlotWriteSet[addr] = make(map[common.Hash]*PendingWrites) + s.pendingSlotWriteSet[addr] = make(map[common.Hash]*StateWrites) } if _, exist := s.pendingSlotWriteSet[addr][slot]; !exist { - s.pendingSlotWriteSet[addr][slot] = NewPendingWrites() + s.pendingSlotWriteSet[addr][slot] = NewStateWrites() } s.pendingSlotWriteSet[addr][slot].Append(index) } @@ -602,22 +604,22 @@ func (s *MVStates) finaliseSlotWrite(index int, addr common.Address, slot common func (s *MVStates) finaliseAccWrite(index int, addr common.Address, state AccountState) { // append to pending write set if _, exist := s.pendingAccWriteSet[addr]; !exist { - s.pendingAccWriteSet[addr] = make(map[AccountState]*PendingWrites) + s.pendingAccWriteSet[addr] = make(map[AccountState]*StateWrites) } if _, exist := s.pendingAccWriteSet[addr][state]; !exist { - s.pendingAccWriteSet[addr][state] = NewPendingWrites() + s.pendingAccWriteSet[addr][state] = NewStateWrites() } s.pendingAccWriteSet[addr][state].Append(index) } -func (s 
*MVStates) queryAccWrites(addr common.Address, state AccountState) *PendingWrites { +func (s *MVStates) queryAccWrites(addr common.Address, state AccountState) *StateWrites { if _, exist := s.pendingAccWriteSet[addr]; !exist { return nil } return s.pendingAccWriteSet[addr][state] } -func (s *MVStates) querySlotWrites(addr common.Address, slot common.Hash) *PendingWrites { +func (s *MVStates) querySlotWrites(addr common.Address, slot common.Hash) *StateWrites { if _, exist := s.pendingSlotWriteSet[addr]; !exist { return nil } @@ -643,14 +645,15 @@ func (s *MVStates) resolveDepsMapCacheByWrites(index int, rwSet *RWSet) { if writes == nil { continue } - items := writes.FindPrevWrites(index) - for _, item := range items { - tx := uint64(item) - if depMap.exist(tx) { - continue - } - depMap.add(tx) + find := writes.FindLastWrite(index) + if find < 0 { + continue } + tx := uint64(find) + if depMap.exist(tx) { + continue + } + depMap.add(tx) } } for addr, sub := range rwSet.slotReadSet { @@ -659,14 +662,15 @@ func (s *MVStates) resolveDepsMapCacheByWrites(index int, rwSet *RWSet) { if writes == nil { continue } - items := writes.FindPrevWrites(index) - for _, item := range items { - tx := uint64(item) - if depMap.exist(tx) { - continue - } - depMap.add(tx) + find := writes.FindLastWrite(index) + if find < 0 { + continue + } + tx := uint64(find) + if depMap.exist(tx) { + continue } + depMap.add(tx) } } //log.Debug("resolveDepsMapCacheByWrites", "tx", index, "deps", depMap.deps()) diff --git a/core/types/mvstates_test.go b/core/types/mvstates_test.go index 5275c7a8dc..6c9103b215 100644 --- a/core/types/mvstates_test.go +++ b/core/types/mvstates_test.go @@ -129,6 +129,30 @@ func BenchmarkResolveTxDAGByWritesInMVStates(b *testing.B) { } } +func BenchmarkResolveTxDAGByWritesInMVStates_100PercentConflict(b *testing.B) { + rwSets := mockSameRWSet(mockRWSetSize) + ms1 := NewMVStates(mockRWSetSize, nil).EnableAsyncGen() + for _, rwSet := range rwSets { + ms1.FinaliseWithRWSet(rwSet) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + resolveDepsMapCacheByWritesInMVStates(ms1) + } +} + +func BenchmarkResolveTxDAGByWritesInMVStates_0PercentConflict(b *testing.B) { + rwSets := mockDifferentRWSet(mockRWSetSize) + ms1 := NewMVStates(mockRWSetSize, nil).EnableAsyncGen() + for _, rwSet := range rwSets { + ms1.FinaliseWithRWSet(rwSet) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + resolveDepsMapCacheByWritesInMVStates(ms1) + } +} + func BenchmarkMVStates_Finalise(b *testing.B) { rwSets := mockRandomRWSet(mockRWSetSize) ms1 := NewMVStates(mockRWSetSize, nil).EnableAsyncGen() @@ -314,6 +338,30 @@ func mockRandomRWSet(count int) []*RWSet { return ret } +func mockSameRWSet(count int) []*RWSet { + var ret []*RWSet + for i := 0; i < count; i++ { + read := []interface{}{"0xa0", "0xa1", fmt.Sprintf("0x%d", i), fmt.Sprintf("0x%d", i)} + write := []interface{}{"0xa0", fmt.Sprintf("0x%d", i)} + // random write + s := mockRWSet(i, read, write) + ret = append(ret, s) + } + return ret +} + +func mockDifferentRWSet(count int) []*RWSet { + var ret []*RWSet + for i := 0; i < count; i++ { + read := []interface{}{fmt.Sprintf("0x%d", i), fmt.Sprintf("0x%d", i)} + write := []interface{}{fmt.Sprintf("0x%d", i)} + // random write + s := mockRWSet(i, read, write) + ret = append(ret, s) + } + return ret +} + func finaliseRWSets(t *testing.T, mv *MVStates, rwSets []*RWSet) { for _, rwSet := range rwSets { require.NoError(t, mv.FinaliseWithRWSet(rwSet)) From 1b3b4984c17965f4bfcd2b9290779c962fa8dfeb Mon Sep 17 00:00:00 2001 From: 
galaio Date: Fri, 6 Sep 2024 21:44:47 +0800 Subject: [PATCH 27/42] txdag: opt rwset string format; --- core/types/mvstates.go | 79 ++++++++++++++++++++++++++----------- core/types/mvstates_test.go | 1 + 2 files changed, 57 insertions(+), 23 deletions(-) diff --git a/core/types/mvstates.go b/core/types/mvstates.go index ce78f1ae87..23814be38c 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -115,43 +115,76 @@ func (s *RWSet) WithExcludedTxFlag() *RWSet { func (s *RWSet) String() string { builder := strings.Builder{} - builder.WriteString(fmt.Sprintf("tx: %v\nreadSet: [", s.index)) + builder.WriteString(fmt.Sprintf("{tx: %v", s.index)) + builder.WriteString(", accReadSet: [") i := 0 - for key, _ := range s.accReadSet { + for addr, sub := range s.accReadSet { if i > 0 { - builder.WriteString(fmt.Sprintf(", %v", key.String())) - continue + builder.WriteString(", ") + } + builder.WriteString(fmt.Sprintf("{addr: \"%v\", states: [", addr)) + j := 0 + for key := range sub { + if j > 0 { + builder.WriteString(", ") + } + builder.WriteString(fmt.Sprintf("%v", key)) + j++ } - builder.WriteString(fmt.Sprintf("%v", key.String())) - i++ + builder.WriteString("]}") } - for key, _ := range s.slotReadSet { + builder.WriteString("], slotReadSet: [") + i = 0 + for addr, sub := range s.slotReadSet { if i > 0 { - builder.WriteString(fmt.Sprintf(", %v", key.String())) - continue + builder.WriteString(", ") + } + builder.WriteString(fmt.Sprintf("{addr: \"%v\", slots: [", addr)) + j := 0 + for key := range sub { + if j > 0 { + builder.WriteString(", ") + } + builder.WriteString(fmt.Sprintf("\"%v\"", key.String())) + j++ } - builder.WriteString(fmt.Sprintf("%v", key.String())) - i++ + builder.WriteString("]}") } - builder.WriteString("]\nwriteSet: [") + builder.WriteString("], accWriteSet: [") i = 0 - for key, _ := range s.accWriteSet { + for addr, sub := range s.accWriteSet { if i > 0 { - builder.WriteString(fmt.Sprintf(", %v", key.String())) - continue + builder.WriteString(", ") + } + builder.WriteString(fmt.Sprintf("{addr: \"%v\", states: [", addr)) + j := 0 + for key := range sub { + if j > 0 { + builder.WriteString(", ") + } + builder.WriteString(fmt.Sprintf("%v", key)) + j++ } - builder.WriteString(fmt.Sprintf("%v", key.String())) - i++ + builder.WriteString("]}") } - for key, _ := range s.slotWriteSet { + builder.WriteString("], slotWriteSet: [") + i = 0 + for addr, sub := range s.slotWriteSet { if i > 0 { - builder.WriteString(fmt.Sprintf(", %v", key.String())) - continue + builder.WriteString(", ") + } + builder.WriteString(fmt.Sprintf("{addr: \"%v\", slots: [", addr)) + j := 0 + for key := range sub { + if j > 0 { + builder.WriteString(", ") + } + builder.WriteString(fmt.Sprintf("\"%v\"", key.String())) + j++ } - builder.WriteString(fmt.Sprintf("%v", key.String())) - i++ + builder.WriteString("]}") } - builder.WriteString("]\n") + builder.WriteString("]}") return builder.String() } diff --git a/core/types/mvstates_test.go b/core/types/mvstates_test.go index 6c9103b215..58278fa4fa 100644 --- a/core/types/mvstates_test.go +++ b/core/types/mvstates_test.go @@ -239,6 +239,7 @@ func TestTxRecorder_Basic(t *testing.T) { } ms := NewMVStates(0, nil).EnableAsyncGen() for _, item := range sets { + t.Log(item) ms.RecordNewTx(item.index) for addr, sub := range item.accReadSet { for state := range sub { From d0526ccbd5202d7e4b842c38bb02270be604c75b Mon Sep 17 00:00:00 2001 From: galaio Date: Sat, 7 Sep 2024 11:15:58 +0800 Subject: [PATCH 28/42] txdag: opt rwset string format; --- 
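This follow-up adds the i++ increments that the rewritten String() above left out: without them the outer index never advances, so the "if i > 0" checks never fire and the per-address entries are printed without comma separators. With the fix, an RWSet renders roughly as the following skeleton (illustrative only, addresses and slots abbreviated):

    {tx: 1, accReadSet: [{addr: "0x...", states: [...]}, {addr: "0x...", states: [...]}], slotReadSet: [{addr: "0x...", slots: ["0x..."]}], accWriteSet: [{addr: "0x...", states: [...]}], slotWriteSet: [{addr: "0x...", slots: ["0x..."]}]}
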
core/types/mvstates.go | 4 ++++ core/types/mvstates_test.go | 37 ++++++++++++++++++++++++++++++++++++- 2 files changed, 40 insertions(+), 1 deletion(-) diff --git a/core/types/mvstates.go b/core/types/mvstates.go index 23814be38c..596fec5c3c 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -131,6 +131,7 @@ func (s *RWSet) String() string { builder.WriteString(fmt.Sprintf("%v", key)) j++ } + i++ builder.WriteString("]}") } builder.WriteString("], slotReadSet: [") @@ -148,6 +149,7 @@ func (s *RWSet) String() string { builder.WriteString(fmt.Sprintf("\"%v\"", key.String())) j++ } + i++ builder.WriteString("]}") } builder.WriteString("], accWriteSet: [") @@ -165,6 +167,7 @@ func (s *RWSet) String() string { builder.WriteString(fmt.Sprintf("%v", key)) j++ } + i++ builder.WriteString("]}") } builder.WriteString("], slotWriteSet: [") @@ -182,6 +185,7 @@ func (s *RWSet) String() string { builder.WriteString(fmt.Sprintf("\"%v\"", key.String())) j++ } + i++ builder.WriteString("]}") } builder.WriteString("]}") diff --git a/core/types/mvstates_test.go b/core/types/mvstates_test.go index 58278fa4fa..7801a1bf79 100644 --- a/core/types/mvstates_test.go +++ b/core/types/mvstates_test.go @@ -239,7 +239,6 @@ func TestTxRecorder_Basic(t *testing.T) { } ms := NewMVStates(0, nil).EnableAsyncGen() for _, item := range sets { - t.Log(item) ms.RecordNewTx(item.index) for addr, sub := range item.accReadSet { for state := range sub { @@ -267,6 +266,17 @@ func TestTxRecorder_Basic(t *testing.T) { require.Equal(t, "[]\n[0]\n[1]\n", dag.(*PlainTxDAG).String()) } +func TestRWSet(t *testing.T) { + set := NewRWSet(0) + mockRWSetWithAddr(set, common.Address{1}, []interface{}{AccountSelf, AccountBalance, "0x00"}, + []interface{}{AccountBalance, AccountCodeHash, "0x00"}) + mockRWSetWithAddr(set, common.Address{2}, []interface{}{AccountSelf, AccountBalance, "0x01"}, + []interface{}{AccountBalance, AccountCodeHash, "0x01"}) + mockRWSetWithAddr(set, common.Address{3}, []interface{}{AccountSelf, AccountBalance, "0x01", "0x01"}, + []interface{}{AccountBalance, AccountCodeHash, "0x01"}) + t.Log(set) +} + func TestTxRecorder_CannotDelayGasFee(t *testing.T) { ms := NewMVStates(0, nil).EnableAsyncGen() ms.RecordNewTx(0) @@ -304,6 +314,31 @@ func mockRWSet(index int, read []interface{}, write []interface{}) *RWSet { return set } +func mockRWSetWithAddr(set *RWSet, addr common.Address, read []interface{}, write []interface{}) *RWSet { + set.accReadSet[addr] = map[AccountState]struct{}{} + set.accWriteSet[addr] = map[AccountState]struct{}{} + set.slotReadSet[addr] = map[common.Hash]struct{}{} + set.slotWriteSet[addr] = map[common.Hash]struct{}{} + for _, k := range read { + state, ok := k.(AccountState) + if ok { + set.accReadSet[addr][state] = struct{}{} + } else { + set.slotReadSet[addr][str2Slot(k.(string))] = struct{}{} + } + } + for _, k := range write { + state, ok := k.(AccountState) + if ok { + set.accWriteSet[addr][state] = struct{}{} + } else { + set.slotWriteSet[addr][str2Slot(k.(string))] = struct{}{} + } + } + + return set +} + func str2Slot(str string) common.Hash { return common.BytesToHash([]byte(str)) } From bf696358375ab6c37247173eb4cddb124704a8c5 Mon Sep 17 00:00:00 2001 From: galaio Date: Tue, 10 Sep 2024 14:28:27 +0800 Subject: [PATCH 29/42] txdag: reduce more mem usage; --- core/state/statedb.go | 4 +- core/types/dag.go | 2 + core/types/gen_plaintxdag_rlp.go | 32 ++++ core/types/mvstates.go | 259 +++++++++++++++++++++++-------- core/types/mvstates_test.go | 138 ++++++++++++++++ miner/worker.go 
| 5 +- 6 files changed, 373 insertions(+), 67 deletions(-) create mode 100644 core/types/gen_plaintxdag_rlp.go diff --git a/core/state/statedb.go b/core/state/statedb.go index 34e271d4c4..9d03881005 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1777,7 +1777,7 @@ func (s *StateDB) removeStateObjectsDestruct(addr common.Address) { delete(s.stateObjectsDestructDirty, addr) } -func (s *StateDB) ResolveTxDAG(txCnt int) (types.TxDAG, error) { +func (s *StateDB) ResolveTxDAG(txCnt int, extraTxDeps ...types.TxDep) (types.TxDAG, error) { if s.mvStates == nil { return types.NewEmptyTxDAG(), nil } @@ -1787,7 +1787,7 @@ func (s *StateDB) ResolveTxDAG(txCnt int) (types.TxDAG, error) { }(time.Now()) } - return s.mvStates.ResolveTxDAG(txCnt) + return s.mvStates.ResolveTxDAG(txCnt, extraTxDeps...) } func (s *StateDB) MVStates() *types.MVStates { diff --git a/core/types/dag.go b/core/types/dag.go index 2bdcb46e97..f53e233b52 100644 --- a/core/types/dag.go +++ b/core/types/dag.go @@ -253,6 +253,8 @@ func (d *EmptyTxDAG) String() string { } // PlainTxDAG indicate how to use the dependency of txs, and delay the distribution of GasFee +// +//go:generate go run ../../rlp/rlpgen -type PlainTxDAG -out gen_plaintxdag_rlp.go type PlainTxDAG struct { // Tx Dependency List, the list index is equal to TxIndex TxDeps []TxDep diff --git a/core/types/gen_plaintxdag_rlp.go b/core/types/gen_plaintxdag_rlp.go new file mode 100644 index 0000000000..9e3ea46683 --- /dev/null +++ b/core/types/gen_plaintxdag_rlp.go @@ -0,0 +1,32 @@ +// Code generated by rlpgen. DO NOT EDIT. + +package types + +import "github.com/ethereum/go-ethereum/rlp" +import "io" + +func (obj *PlainTxDAG) EncodeRLP(_w io.Writer) error { + w := rlp.NewEncoderBuffer(_w) + _tmp0 := w.List() + _tmp1 := w.List() + for _, _tmp2 := range obj.TxDeps { + _tmp3 := w.List() + _tmp4 := w.List() + for _, _tmp5 := range _tmp2.TxIndexes { + w.WriteUint64(_tmp5) + } + w.ListEnd(_tmp4) + _tmp6 := _tmp2.Flags != nil + if _tmp6 { + if _tmp2.Flags == nil { + w.Write([]byte{0x80}) + } else { + w.WriteUint64(uint64((*_tmp2.Flags))) + } + } + w.ListEnd(_tmp3) + } + w.ListEnd(_tmp1) + w.ListEnd(_tmp0) + return w.Flush() +} diff --git a/core/types/mvstates.go b/core/types/mvstates.go index 596fec5c3c..ddd306bb07 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -21,12 +21,12 @@ var ( ) const ( - initRWEventCacheSize = 4 + initRWEventCacheSize = 40 ) func init() { for i := 0; i < initRWEventCacheSize; i++ { - cache := make([]RWEventItem, 10000) + cache := make([]RWEventItem, 200) rwEventCachePool.Put(&cache) } } @@ -55,6 +55,12 @@ func NewRWSet(index int) *RWSet { } } +func NewEmptyRWSet(index int) *RWSet { + return &RWSet{ + index: index, + } +} + func (s *RWSet) RecordAccountRead(addr common.Address, state AccountState) { // only record the first read version sub, ok := s.accReadSet[addr] @@ -362,70 +368,83 @@ func (s *MVStates) asyncRWEventLoop() { if !ok { return } - for _, item := range items { - s.handleRWEvent(item) - } + s.handleRWEvents(items) rwEventCachePool.Put(&items) } } } -func (s *MVStates) handleRWEvent(item RWEventItem) { - s.lock.Lock() - defer s.lock.Unlock() - // init next RWSet, and finalise previous RWSet - if item.Event == NewTxRWEvent { - s.finalisePreviousRWSet() - s.asyncRWSet = NewRWSet(item.Index) - return - } - if s.asyncRWSet == nil { - return +func (s *MVStates) handleRWEvents(items []RWEventItem) { + readFrom, readTo := -1, -1 + recordNewTx := false + for i, item := range items { + // init next RWSet, and 
finalise previous RWSet + if item.Event == NewTxRWEvent { + // handle previous rw set + if recordNewTx { + var prevItems []RWEventItem + if readFrom >= 0 && readTo > readFrom { + prevItems = items[readFrom:readTo] + } + s.finalisePreviousRWSet(prevItems) + readFrom, readTo = -1, -1 + } + recordNewTx = true + s.asyncRWSet = NewEmptyRWSet(item.Index) + continue + } + if s.asyncRWSet == nil { + continue + } + switch item.Event { + // recorde current read/write event + case ReadAccRWEvent, ReadSlotRWEvent: + if readFrom < 0 { + readFrom = i + } + readTo = i + 1 + case WriteAccRWEvent: + s.finaliseAccWrite(s.asyncRWSet.index, item.Addr, item.State) + case WriteSlotRWEvent: + s.finaliseSlotWrite(s.asyncRWSet.index, item.Addr, item.Slot) + // recorde current as cannot gas fee delay + case CannotGasFeeDelayRWEvent: + s.asyncRWSet.cannotGasFeeDelay = true + } } - switch item.Event { - // recorde current read/write event - case ReadAccRWEvent: - s.asyncRWSet.RecordAccountRead(item.Addr, item.State) - case ReadSlotRWEvent: - s.asyncRWSet.RecordStorageRead(item.Addr, item.Slot) - case WriteAccRWEvent: - s.finaliseAccWrite(s.asyncRWSet.index, item.Addr, item.State) - case WriteSlotRWEvent: - s.finaliseSlotWrite(s.asyncRWSet.index, item.Addr, item.Slot) - // recorde current as cannot gas fee delay - case CannotGasFeeDelayRWEvent: - s.asyncRWSet.cannotGasFeeDelay = true + // handle last tx rw set + if recordNewTx { + var prevItems []RWEventItem + if readFrom >= 0 && readTo > readFrom { + prevItems = items[readFrom:readTo] + } + s.finalisePreviousRWSet(prevItems) } } -func (s *MVStates) finalisePreviousRWSet() { +func (s *MVStates) finalisePreviousRWSet(reads []RWEventItem) { if s.asyncRWSet == nil { return } index := s.asyncRWSet.index s.rwSets[index] = s.asyncRWSet - // check if there are RW with gas fee receiver for gas delay calculation - for _, addr := range s.gasFeeReceivers { - if _, exist := s.asyncRWSet.accReadSet[addr]; !exist { - continue - } - if _, exist := s.asyncRWSet.accReadSet[addr][AccountSelf]; exist { - s.rwSets[index].cannotGasFeeDelay = true - break - } - } - if err := s.innerFinalise(index, false); err != nil { - log.Error("Finalise err when handle NewTxRWEvent", "tx", index, "err", err) + if index > s.nextFinaliseIndex { + log.Error("finalise in wrong order", "next", s.nextFinaliseIndex, "input", index) return } - s.resolveDepsMapCacheByWrites(index, s.asyncRWSet) + // reset nextFinaliseIndex to index+1, it may revert to previous txs + s.nextFinaliseIndex = index + 1 + s.resolveDepsMapCacheByWrites2(index, reads) } func (s *MVStates) RecordNewTx(index int) { if !s.asyncRunning { return } + if index%10 == 0 { + s.BatchRecordHandle() + } if s.rwEventCacheIndex < len(s.rwEventCache) { s.rwEventCache[s.rwEventCacheIndex].Event = NewTxRWEvent s.rwEventCache[s.rwEventCacheIndex].Index = index @@ -438,9 +457,6 @@ func (s *MVStates) RecordNewTx(index int) { s.rwEventCacheIndex++ s.recordingRead = true s.recordingWrite = true - if index%10 == 0 { - s.BatchRecordHandle() - } } func (s *MVStates) RecordReadDone() { @@ -557,6 +573,7 @@ func (s *MVStates) stopAsyncRecorder() { s.BatchRecordHandle() s.asyncRunning = false close(s.rwEventCh) + rwEventCachePool.Put(&s.rwEventCache) s.asyncWG.Wait() } } @@ -670,7 +687,7 @@ func (s *MVStates) resolveDepsMapCacheByWrites(index int, rwSet *RWSet) { s.txDepCache[index] = NewTxDep([]uint64{}, ExcludedTxFlag) return } - depMap := NewTxDepMap(0) + depSlice := NewTxDepSlice(0) // check tx dependency, only check key, skip version for addr, sub := range 
rwSet.accReadSet { for state := range sub { @@ -687,10 +704,10 @@ func (s *MVStates) resolveDepsMapCacheByWrites(index int, rwSet *RWSet) { continue } tx := uint64(find) - if depMap.exist(tx) { + if depSlice.exist(tx) { continue } - depMap.add(tx) + depSlice.add(tx) } } for addr, sub := range rwSet.slotReadSet { @@ -704,22 +721,91 @@ func (s *MVStates) resolveDepsMapCacheByWrites(index int, rwSet *RWSet) { continue } tx := uint64(find) - if depMap.exist(tx) { + if depSlice.exist(tx) { continue } - depMap.add(tx) + depSlice.add(tx) } } + // clear redundancy deps compared with prev + preDeps := depSlice.deps() + var removed []uint64 + for _, prev := range preDeps { + for _, tx := range s.txDepCache[int(prev)].TxIndexes { + if depSlice.exist(tx) { + removed = append(removed, tx) + } + } + } + for _, tx := range removed { + depSlice.remove(tx) + } //log.Debug("resolveDepsMapCacheByWrites", "tx", index, "deps", depMap.deps()) + s.txDepCache[index] = NewTxDep(depSlice.deps()) +} + +// resolveDepsMapCacheByWrites2 must be executed in order +func (s *MVStates) resolveDepsMapCacheByWrites2(index int, reads []RWEventItem) { + rwSet := s.rwSets[index] + // analysis dep, if the previous transaction is not executed/validated, re-analysis is required + if rwSet.excludedTx { + s.txDepCache[index] = NewTxDep([]uint64{}, ExcludedTxFlag) + return + } + depSlice := NewTxDepSlice(0) + addrMap := make(map[common.Address]struct{}) + // check tx dependency, only check key + for _, item := range reads { + // check account states & slots + var writes *StateWrites + if item.Event == ReadAccRWEvent { + writes = s.queryAccWrites(item.Addr, item.State) + } else { + writes = s.querySlotWrites(item.Addr, item.Slot) + } + if writes != nil { + if find := writes.FindLastWrite(index); find >= 0 { + if tx := uint64(find); !depSlice.exist(tx) { + depSlice.add(tx) + } + } + } + + // check again account self with Suicide + if _, ok := addrMap[item.Addr]; ok { + continue + } + addrMap[item.Addr] = struct{}{} + writes = s.queryAccWrites(item.Addr, AccountSuicide) + if writes != nil { + if find := writes.FindLastWrite(index); find >= 0 { + if tx := uint64(find); !depSlice.exist(tx) { + depSlice.add(tx) + } + } + } + } + for _, addr := range s.gasFeeReceivers { + if _, ok := addrMap[addr]; ok { + rwSet.cannotGasFeeDelay = true + break + } + } // clear redundancy deps compared with prev - preDeps := depMap.deps() + preDeps := depSlice.deps() + var removed []uint64 for _, prev := range preDeps { for _, tx := range s.txDepCache[int(prev)].TxIndexes { - depMap.remove(tx) + if depSlice.exist(tx) { + removed = append(removed, tx) + } } } - //log.Debug("resolveDepsMapCacheByWrites after clean", "tx", index, "deps", depMap.deps()) - s.txDepCache[index] = NewTxDep(depMap.deps()) + for _, tx := range removed { + depSlice.remove(tx) + } + //log.Debug("resolveDepsMapCacheByWrites", "tx", index, "deps", depSlice.deps()) + s.txDepCache[index] = NewTxDep(depSlice.deps()) } // resolveDepsCache must be executed in order @@ -765,34 +851,37 @@ func (s *MVStates) resolveDepsCache(index int, rwSet *RWSet) { } // ResolveTxDAG generate TxDAG from RWSets -func (s *MVStates) ResolveTxDAG(txCnt int) (TxDAG, error) { +func (s *MVStates) ResolveTxDAG(txCnt int, extraTxDeps ...TxDep) (TxDAG, error) { s.stopAsyncRecorder() s.lock.Lock() defer s.lock.Unlock() - s.finalisePreviousRWSet() if s.nextFinaliseIndex != txCnt { return nil, fmt.Errorf("cannot resolve with wrong FinaliseIndex, expect: %v, now: %v", txCnt, s.nextFinaliseIndex) } - txDAG := 
NewPlainTxDAG(txCnt) + totalCnt := txCnt + len(extraTxDeps) + txDAG := NewPlainTxDAG(totalCnt) for i := 0; i < txCnt; i++ { if s.rwSets[i].cannotGasFeeDelay { return NewEmptyTxDAG(), nil } - deps := s.txDepCache[i].TxIndexes - if len(deps) <= (txCnt-1)/2 { - txDAG.TxDeps[i] = s.txDepCache[i] + cache := s.txDepCache[i] + if len(cache.TxIndexes) <= (txCnt-1)/2 { + txDAG.TxDeps[i] = cache continue } // if tx deps larger than half of txs, then convert with NonDependentRelFlag txDAG.TxDeps[i].SetFlag(NonDependentRelFlag) for j := uint64(0); j < uint64(txCnt); j++ { - if !slices.Contains(deps, j) && j != uint64(i) { + if !slices.Contains(cache.TxIndexes, j) && j != uint64(i) { txDAG.TxDeps[i].TxIndexes = append(txDAG.TxDeps[i].TxIndexes, j) } } } + for i, j := txCnt, 0; i < totalCnt && j < len(extraTxDeps); i, j = i+1, j+1 { + txDAG.TxDeps[i] = extraTxDeps[j] + } return txDAG, nil } @@ -882,3 +971,49 @@ func (m *TxDepMap) remove(index uint64) { func (m *TxDepMap) len() int { return len(m.tm) } + +type TxDepSlice struct { + indexes []uint64 +} + +func NewTxDepSlice(cap int) *TxDepSlice { + return &TxDepSlice{ + indexes: make([]uint64, 0, cap), + } +} + +func (m *TxDepSlice) add(index uint64) { + if m.exist(index) { + return + } + m.indexes = append(m.indexes, index) + for i := len(m.indexes) - 1; i > 0; i-- { + if m.indexes[i] < m.indexes[i-1] { + m.indexes[i-1], m.indexes[i] = m.indexes[i], m.indexes[i-1] + } + } +} + +func (m *TxDepSlice) exist(index uint64) bool { + _, ok := slices.BinarySearch(m.indexes, index) + return ok +} + +func (m *TxDepSlice) deps() []uint64 { + return m.indexes +} + +func (m *TxDepSlice) remove(index uint64) { + pos, ok := slices.BinarySearch(m.indexes, index) + if !ok { + return + } + for i := pos; i < len(m.indexes)-1; i++ { + m.indexes[i] = m.indexes[i+1] + } + m.indexes = m.indexes[:len(m.indexes)-1] +} + +func (m *TxDepSlice) len() int { + return len(m.indexes) +} diff --git a/core/types/mvstates_test.go b/core/types/mvstates_test.go index 7801a1bf79..76678216b3 100644 --- a/core/types/mvstates_test.go +++ b/core/types/mvstates_test.go @@ -4,6 +4,7 @@ import ( "bytes" "compress/gzip" "fmt" + "sync" "testing" "time" @@ -63,14 +64,18 @@ func TestMVStates_ResolveTxDAG_Compare(t *testing.T) { rwSets := mockRandomRWSet(txCnt) ms1 := NewMVStates(txCnt, nil).EnableAsyncGen() ms2 := NewMVStates(txCnt, nil).EnableAsyncGen() + ms3 := NewMVStates(txCnt, nil).EnableAsyncGen() for i, rwSet := range rwSets { ms1.rwSets[i] = rwSet require.NoError(t, ms2.FinaliseWithRWSet(rwSet)) + ms3.handleRWEvents(mockRWEventItemsFromRWSet(i, rwSet)) } d1 := resolveTxDAGInMVStates(ms1, txCnt) d2 := resolveDepsMapCacheByWritesInMVStates(ms2) + d3 := resolveDepsMapCacheByWrites2InMVStates(ms3) require.Equal(t, d1.(*PlainTxDAG).String(), d2.(*PlainTxDAG).String()) + require.Equal(t, d1.(*PlainTxDAG).String(), d3.(*PlainTxDAG).String()) } func TestMVStates_TxDAG_Compression(t *testing.T) { @@ -129,6 +134,22 @@ func BenchmarkResolveTxDAGByWritesInMVStates(b *testing.B) { } } +func BenchmarkResolveTxDAGByWrites2InMVStates(b *testing.B) { + rwSets := mockRandomRWSet(mockRWSetSize) + items := make([][]RWEventItem, mockRWSetSize) + ms1 := NewMVStates(mockRWSetSize, nil).EnableAsyncGen() + for i, rwSet := range rwSets { + items[i] = mockRWEventItemsFromRWSet(i, rwSet) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, item := range items { + ms1.handleRWEvents(item) + } + resolveDepsMapCacheByWrites2InMVStates(ms1) + } +} + func BenchmarkResolveTxDAGByWritesInMVStates_100PercentConflict(b 
*testing.B) { rwSets := mockSameRWSet(mockRWSetSize) ms1 := NewMVStates(mockRWSetSize, nil).EnableAsyncGen() @@ -164,6 +185,69 @@ func BenchmarkMVStates_Finalise(b *testing.B) { } } +func checkMap(m map[int][10]byte) { + for i, j := range m { + m[i] = j + } +} + +func BenchmarkEmptyMap(b *testing.B) { + for i := 0; i < b.N; i++ { + m := make(map[int][10]byte) + for j := 0; j < 10000; j++ { + m[i] = [10]byte{byte(j)} + } + checkMap(m) + } +} + +func BenchmarkInitMapWithSize(b *testing.B) { + for i := 0; i < b.N; i++ { + m := make(map[int][10]byte, 10) + for j := 0; j < 1000; j++ { + m[i] = [10]byte{byte(j)} + } + } +} + +func BenchmarkReuseMap(b *testing.B) { + sp := sync.Pool{New: func() interface{} { + return make(map[int]struct{}, 10) + }} + for i := 0; i < b.N; i++ { + m := sp.Get().(map[int]struct{}) + for j := 0; j < 1000; j++ { + m[i] = struct{}{} + } + for k := range m { + delete(m, k) + } + sp.Put(m) + } +} + +func BenchmarkExistArray(b *testing.B) { + for i := 0; i < b.N; i++ { + m := make(map[[20]byte]struct{}) + m[common.Address{1}] = struct{}{} + addr := common.Address{1} + if _, ok := m[addr]; ok { + continue + } + } +} + +func BenchmarkDonotExistArray(b *testing.B) { + for i := 0; i < b.N; i++ { + m := make(map[[20]byte]struct{}) + addr := common.Address{1} + if _, ok := m[addr]; !ok { + m[addr] = struct{}{} + delete(m, addr) + } + } +} + func resolveTxDAGInMVStates(s *MVStates, txCnt int) TxDAG { txDAG := NewPlainTxDAG(txCnt) for i := 0; i < txCnt; i++ { @@ -183,6 +267,15 @@ func resolveDepsMapCacheByWritesInMVStates(s *MVStates) TxDAG { return txDAG } +func resolveDepsMapCacheByWrites2InMVStates(s *MVStates) TxDAG { + txCnt := s.nextFinaliseIndex + txDAG := NewPlainTxDAG(txCnt) + for i := 0; i < txCnt; i++ { + txDAG.TxDeps[i] = s.txDepCache[i] + } + return txDAG +} + func TestMVStates_SystemTxResolveTxDAG(t *testing.T) { ms := NewMVStates(12, nil).EnableAsyncGen() finaliseRWSets(t, ms, []*RWSet{ @@ -410,3 +503,48 @@ func randInRange(i, j int) (int, bool) { } return rand.Int()%(j-i) + i, true } + +func mockRWEventItemsFromRWSet(index int, rwSet *RWSet) []RWEventItem { + items := make([]RWEventItem, 0) + items = append(items, RWEventItem{ + Event: NewTxRWEvent, + Index: index, + }) + for addr, sub := range rwSet.accReadSet { + for state := range sub { + items = append(items, RWEventItem{ + Event: ReadAccRWEvent, + Addr: addr, + State: state, + }) + } + } + for addr, sub := range rwSet.slotReadSet { + for slot := range sub { + items = append(items, RWEventItem{ + Event: ReadSlotRWEvent, + Addr: addr, + Slot: slot, + }) + } + } + for addr, sub := range rwSet.accWriteSet { + for state := range sub { + items = append(items, RWEventItem{ + Event: WriteAccRWEvent, + Addr: addr, + State: state, + }) + } + } + for addr, sub := range rwSet.slotWriteSet { + for slot := range sub { + items = append(items, RWEventItem{ + Event: WriteSlotRWEvent, + Addr: addr, + Slot: slot, + }) + } + } + return items +} diff --git a/miner/worker.go b/miner/worker.go index 9215f6d5f0..a26dc0f948 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1084,12 +1084,11 @@ func (w *worker) generateDAGTx(statedb *state.StateDB, signer types.Signer, txIn } // get txDAG data from the stateDB - txDAG, err := statedb.ResolveTxDAG(txIndex) + // txIndex is the index of this txDAG transaction + txDAG, err := statedb.ResolveTxDAG(txIndex, types.TxDep{Flags: &types.NonDependentRelFlag}) if txDAG == nil { return nil, err } - // txIndex is the index of this txDAG transaction - txDAG.SetTxDep(txIndex, types.TxDep{Flags: 
&types.NonDependentRelFlag}) if metrics.EnabledExpensive { go types.EvaluateTxDAGPerformance(txDAG) } From 051107edc69e16b348c952918187557e0fe49abf Mon Sep 17 00:00:00 2001 From: galaio Date: Thu, 12 Sep 2024 10:39:44 +0800 Subject: [PATCH 30/42] txdag: using a new mem pool; --- core/state/statedb.go | 2 +- core/state_processor.go | 3 + core/types/mvstates.go | 260 ++++++++++++++++++++++++------------ core/types/mvstates_test.go | 154 ++++++++++----------- miner/worker.go | 6 +- 5 files changed, 253 insertions(+), 172 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index 9d03881005..77c719aadc 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1709,7 +1709,7 @@ func (s *StateDB) StartTxRecorder(isExcludeTx bool) { } //log.Debug("StartTxRecorder", "tx", s.txIndex) if isExcludeTx { - rwSet := types.NewRWSet(s.txIndex).WithExcludedTxFlag() + rwSet := types.NewEmptyRWSet(s.txIndex).WithExcludedTxFlag() if err := s.mvStates.FinaliseWithRWSet(rwSet); err != nil { log.Error("MVStates SystemTx Finalise err", "err", err) } diff --git a/core/state_processor.go b/core/state_processor.go index 4811de6d88..b4be7bb882 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -125,6 +125,9 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg // Finalize the block, applying any consensus engine specific extras (e.g. block rewards) p.engine.Finalize(p.bc, header, statedb, block.Transactions(), block.Uncles(), withdrawals) if p.bc.enableTxDAG { + defer func() { + statedb.MVStates().Stop() + }() // compare input TxDAG when it enable in consensus dag, err := statedb.ResolveTxDAG(len(block.Transactions())) if err == nil { diff --git a/core/types/mvstates.go b/core/types/mvstates.go index ddd306bb07..27b204dc22 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -7,6 +7,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "golang.org/x/exp/slices" ) @@ -21,14 +22,49 @@ var ( ) const ( - initRWEventCacheSize = 40 + initSyncPoolSize = 4 + asyncSendInterval = 20 ) func init() { - for i := 0; i < initRWEventCacheSize; i++ { - cache := make([]RWEventItem, 200) + for i := 0; i < initSyncPoolSize*4; i++ { + cache := make([]RWEventItem, 400) rwEventCachePool.Put(&cache) } + for i := 0; i < initSyncPoolSize; i++ { + rwSets := make([]RWSet, 4000) + rwSetsPool.Put(&rwSets) + txDeps := make([]TxDep, 4000) + txDepsPool.Put(&txDeps) + } +} + +type ChanPool struct { + ch chan any + new func() any +} + +func NewChanPool(size int, f func() any) *ChanPool { + return &ChanPool{ + ch: make(chan any, size), + new: f, + } +} + +func (p ChanPool) Get() any { + select { + case item := <-p.ch: + return item + default: + } + return p.new() +} + +func (p ChanPool) Put(item any) { + select { + case p.ch <- item: + default: + } } // RWSet record all read & write set in txs @@ -275,26 +311,46 @@ func (w *StateWrites) Copy() *StateWrites { } var ( - rwEventCachePool = sync.Pool{New: func() any { + rwEventsAllocMeter = metrics.GetOrRegisterMeter("mvstate/alloc/rwevents/cnt", nil) + rwEventsAllocGauge = metrics.GetOrRegisterGauge("mvstate/alloc/rwevents/gauge", nil) + rwSetsAllocMeter = metrics.GetOrRegisterMeter("mvstate/alloc/rwsets/cnt", nil) + rwSetsAllocGauge = metrics.GetOrRegisterGauge("mvstate/alloc/rwsets/gauge", nil) + txDepsAllocMeter = metrics.GetOrRegisterMeter("mvstate/alloc/txdeps/cnt", nil) + txDepsAllocGauge = 
metrics.GetOrRegisterGauge("mvstate/alloc/txdeps/gauge", nil) +) + +var ( + rwEventCachePool = NewChanPool(initSyncPoolSize*4, func() any { + rwEventsAllocMeter.Mark(1) buf := make([]RWEventItem, 0) return &buf - }} + }) + rwSetsPool = NewChanPool(initSyncPoolSize, func() any { + rwSetsAllocMeter.Mark(1) + buf := make([]RWSet, 0) + return &buf + }) + txDepsPool = NewChanPool(initSyncPoolSize, func() any { + txDepsAllocMeter.Mark(1) + buf := make([]TxDep, 0) + return &buf + }) ) type MVStates struct { - rwSets map[int]*RWSet - pendingAccWriteSet map[common.Address]map[AccountState]*StateWrites - pendingSlotWriteSet map[common.Address]map[common.Hash]*StateWrites - nextFinaliseIndex int - gasFeeReceivers []common.Address + rwSets []RWSet + accWriteSet map[common.Address]map[AccountState]*StateWrites + slotWriteSet map[common.Address]map[common.Hash]*StateWrites + nextFinaliseIndex int + gasFeeReceivers []common.Address // dependency map cache for generating TxDAG // depMapCache[i].exist(j) means j->i, and i > j - txDepCache map[int]TxDep + txDepCache []TxDep lock sync.RWMutex // async rw event recorder // these fields are only used in one routine - asyncRWSet *RWSet + asyncRWSet RWSet rwEventCh chan []RWEventItem rwEventCache []RWEventItem rwEventCacheIndex int @@ -305,29 +361,34 @@ type MVStates struct { } func NewMVStates(txCount int, gasFeeReceivers []common.Address) *MVStates { - m := &MVStates{ - rwSets: make(map[int]*RWSet, txCount), - pendingAccWriteSet: make(map[common.Address]map[AccountState]*StateWrites, txCount), - pendingSlotWriteSet: make(map[common.Address]map[common.Hash]*StateWrites, txCount), - txDepCache: make(map[int]TxDep, txCount), - rwEventCh: make(chan []RWEventItem, 100), - gasFeeReceivers: gasFeeReceivers, + s := &MVStates{ + accWriteSet: make(map[common.Address]map[AccountState]*StateWrites, txCount), + slotWriteSet: make(map[common.Address]map[common.Hash]*StateWrites, txCount), + rwEventCh: make(chan []RWEventItem, 100), + gasFeeReceivers: gasFeeReceivers, } - m.rwEventCache = *rwEventCachePool.Get().(*[]RWEventItem) - m.rwEventCache = m.rwEventCache[:cap(m.rwEventCache)] - m.rwEventCacheIndex = 0 - return m + + s.rwSets = *rwSetsPool.Get().(*[]RWSet) + s.rwSets = s.rwSets[:0] + s.txDepCache = *txDepsPool.Get().(*[]TxDep) + s.txDepCache = s.txDepCache[:0] + return s } func (s *MVStates) EnableAsyncGen() *MVStates { s.asyncWG.Add(1) s.asyncRunning = true + s.rwEventCache = *rwEventCachePool.Get().(*[]RWEventItem) + s.rwEventCache = s.rwEventCache[:cap(s.rwEventCache)] + s.rwEventCacheIndex = 0 + s.asyncRWSet.index = -1 go s.asyncRWEventLoop() return s } func (s *MVStates) Stop() { s.stopAsyncRecorder() + s.ReuseMem() } func (s *MVStates) Copy() *MVStates { @@ -335,26 +396,22 @@ func (s *MVStates) Copy() *MVStates { defer s.lock.Unlock() ns := NewMVStates(len(s.rwSets), s.gasFeeReceivers) ns.nextFinaliseIndex = s.nextFinaliseIndex - for k, v := range s.txDepCache { - ns.txDepCache[k] = v - } - for k, v := range s.rwSets { - ns.rwSets[k] = v - } - for addr, sub := range s.pendingAccWriteSet { + ns.txDepCache = append(ns.txDepCache, s.txDepCache...) + ns.rwSets = append(ns.rwSets, s.rwSets...) 
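Note on the new memory pool introduced in this commit: ChanPool is a bounded, channel-backed free list rather than a sync.Pool, so the number of retained buffers is capped by the channel size and anything extra is simply dropped for the GC. A minimal self-contained sketch of the same pattern follows; the generic BufPool type, its names, and main() are illustrative only (they assume Go 1.18+ generics and are not part of this patch):

    package main

    import "fmt"

    // BufPool is a bounded pool: at most cap(ch) values are ever retained.
    type BufPool[T any] struct {
        ch   chan T
        newT func() T
    }

    func NewBufPool[T any](size int, newT func() T) *BufPool[T] {
        return &BufPool[T]{ch: make(chan T, size), newT: newT}
    }

    // Get returns a pooled value if one is available, otherwise allocates.
    func (p *BufPool[T]) Get() T {
        select {
        case v := <-p.ch:
            return v
        default:
            return p.newT()
        }
    }

    // Put keeps the value only while the channel has room; extras are dropped,
    // which bounds how much memory the pool can pin between blocks.
    func (p *BufPool[T]) Put(v T) {
        select {
        case p.ch <- v:
        default:
        }
    }

    func main() {
        pool := NewBufPool(2, func() []int { return make([]int, 0, 16) })
        buf := pool.Get()
        buf = append(buf, 1, 2, 3)
        pool.Put(buf[:0])
        fmt.Println(len(pool.ch)) // 1
    }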
+ for addr, sub := range s.accWriteSet { for state, writes := range sub { - if _, ok := ns.pendingAccWriteSet[addr]; !ok { - ns.pendingAccWriteSet[addr] = make(map[AccountState]*StateWrites) + if _, ok := ns.accWriteSet[addr]; !ok { + ns.accWriteSet[addr] = make(map[AccountState]*StateWrites) } - ns.pendingAccWriteSet[addr][state] = writes.Copy() + ns.accWriteSet[addr][state] = writes.Copy() } } - for addr, sub := range s.pendingSlotWriteSet { + for addr, sub := range s.slotWriteSet { for slot, writes := range sub { - if _, ok := ns.pendingSlotWriteSet[addr]; !ok { - ns.pendingSlotWriteSet[addr] = make(map[common.Hash]*StateWrites) + if _, ok := ns.slotWriteSet[addr]; !ok { + ns.slotWriteSet[addr] = make(map[common.Hash]*StateWrites) } - ns.pendingSlotWriteSet[addr][slot] = writes.Copy() + ns.slotWriteSet[addr][slot] = writes.Copy() } } return ns @@ -364,12 +421,12 @@ func (s *MVStates) asyncRWEventLoop() { defer s.asyncWG.Done() for { select { - case items, ok := <-s.rwEventCh: + case item, ok := <-s.rwEventCh: if !ok { return } - s.handleRWEvents(items) - rwEventCachePool.Put(&items) + s.handleRWEvents(item) + rwEventCachePool.Put(&item) } } } @@ -390,10 +447,12 @@ func (s *MVStates) handleRWEvents(items []RWEventItem) { readFrom, readTo = -1, -1 } recordNewTx = true - s.asyncRWSet = NewEmptyRWSet(item.Index) + s.asyncRWSet = RWSet{ + index: item.Index, + } continue } - if s.asyncRWSet == nil { + if s.asyncRWSet.index < 0 { continue } switch item.Event { @@ -423,10 +482,13 @@ func (s *MVStates) handleRWEvents(items []RWEventItem) { } func (s *MVStates) finalisePreviousRWSet(reads []RWEventItem) { - if s.asyncRWSet == nil { + if s.asyncRWSet.index < 0 { return } index := s.asyncRWSet.index + for index >= len(s.rwSets) { + s.rwSets = append(s.rwSets, RWSet{index: -1}) + } s.rwSets[index] = s.asyncRWSet if index > s.nextFinaliseIndex { @@ -442,7 +504,12 @@ func (s *MVStates) RecordNewTx(index int) { if !s.asyncRunning { return } - if index%10 == 0 { + if index%2000 == 0 { + rwEventsAllocGauge.Update(int64(len(rwEventCachePool.ch))) + rwSetsAllocGauge.Update(int64(len(rwSetsPool.ch))) + txDepsAllocGauge.Update(int64(len(txDepsPool.ch))) + } + if index%asyncSendInterval == 0 { s.BatchRecordHandle() } if s.rwEventCacheIndex < len(s.rwEventCache) { @@ -583,7 +650,10 @@ func (s *MVStates) FinaliseWithRWSet(rwSet *RWSet) error { s.lock.Lock() defer s.lock.Unlock() index := rwSet.index - s.rwSets[index] = rwSet + for index >= len(s.rwSets) { + s.rwSets = append(s.rwSets, RWSet{index: -1}) + } + s.rwSets[index] = *rwSet // just finalise all previous txs start := s.nextFinaliseIndex if start > index { @@ -593,7 +663,7 @@ func (s *MVStates) FinaliseWithRWSet(rwSet *RWSet) error { if err := s.innerFinalise(i, true); err != nil { return err } - s.resolveDepsMapCacheByWrites(i, s.rwSets[i]) + s.resolveDepsMapCacheByWrites(i, &(s.rwSets[i])) //log.Debug("Finalise the reads/writes", "index", i, // "readCnt", len(s.rwSets[i].accReadSet)+len(s.rwSets[i].slotReadSet), // "writeCnt", len(s.rwSets[i].accWriteSet)+len(s.rwSets[i].slotWriteSet)) @@ -603,11 +673,11 @@ func (s *MVStates) FinaliseWithRWSet(rwSet *RWSet) error { } func (s *MVStates) innerFinalise(index int, applyWriteSet bool) error { - rwSet := s.rwSets[index] - if rwSet == nil { + if index >= len(s.rwSets) { return fmt.Errorf("finalise a non-exist RWSet, index: %d", index) } + rwSet := s.rwSets[index] if index > s.nextFinaliseIndex { return fmt.Errorf("finalise in wrong order, next: %d, input: %d", s.nextFinaliseIndex, index) } @@ -620,25 +690,25 
@@ func (s *MVStates) innerFinalise(index int, applyWriteSet bool) error { // append to pending write set for addr, sub := range rwSet.accWriteSet { - if _, exist := s.pendingAccWriteSet[addr]; !exist { - s.pendingAccWriteSet[addr] = make(map[AccountState]*StateWrites) + if _, exist := s.accWriteSet[addr]; !exist { + s.accWriteSet[addr] = make(map[AccountState]*StateWrites) } for state := range sub { - if _, exist := s.pendingAccWriteSet[addr][state]; !exist { - s.pendingAccWriteSet[addr][state] = NewStateWrites() + if _, exist := s.accWriteSet[addr][state]; !exist { + s.accWriteSet[addr][state] = NewStateWrites() } - s.pendingAccWriteSet[addr][state].Append(index) + s.accWriteSet[addr][state].Append(index) } } for addr, sub := range rwSet.slotWriteSet { - if _, exist := s.pendingSlotWriteSet[addr]; !exist { - s.pendingSlotWriteSet[addr] = make(map[common.Hash]*StateWrites) + if _, exist := s.slotWriteSet[addr]; !exist { + s.slotWriteSet[addr] = make(map[common.Hash]*StateWrites) } for slot := range sub { - if _, exist := s.pendingSlotWriteSet[addr][slot]; !exist { - s.pendingSlotWriteSet[addr][slot] = NewStateWrites() + if _, exist := s.slotWriteSet[addr][slot]; !exist { + s.slotWriteSet[addr][slot] = NewStateWrites() } - s.pendingSlotWriteSet[addr][slot].Append(index) + s.slotWriteSet[addr][slot].Append(index) } } return nil @@ -646,42 +716,45 @@ func (s *MVStates) innerFinalise(index int, applyWriteSet bool) error { func (s *MVStates) finaliseSlotWrite(index int, addr common.Address, slot common.Hash) { // append to pending write set - if _, exist := s.pendingSlotWriteSet[addr]; !exist { - s.pendingSlotWriteSet[addr] = make(map[common.Hash]*StateWrites) + if _, exist := s.slotWriteSet[addr]; !exist { + s.slotWriteSet[addr] = make(map[common.Hash]*StateWrites) } - if _, exist := s.pendingSlotWriteSet[addr][slot]; !exist { - s.pendingSlotWriteSet[addr][slot] = NewStateWrites() + if _, exist := s.slotWriteSet[addr][slot]; !exist { + s.slotWriteSet[addr][slot] = NewStateWrites() } - s.pendingSlotWriteSet[addr][slot].Append(index) + s.slotWriteSet[addr][slot].Append(index) } func (s *MVStates) finaliseAccWrite(index int, addr common.Address, state AccountState) { // append to pending write set - if _, exist := s.pendingAccWriteSet[addr]; !exist { - s.pendingAccWriteSet[addr] = make(map[AccountState]*StateWrites) + if _, exist := s.accWriteSet[addr]; !exist { + s.accWriteSet[addr] = make(map[AccountState]*StateWrites) } - if _, exist := s.pendingAccWriteSet[addr][state]; !exist { - s.pendingAccWriteSet[addr][state] = NewStateWrites() + if _, exist := s.accWriteSet[addr][state]; !exist { + s.accWriteSet[addr][state] = NewStateWrites() } - s.pendingAccWriteSet[addr][state].Append(index) + s.accWriteSet[addr][state].Append(index) } func (s *MVStates) queryAccWrites(addr common.Address, state AccountState) *StateWrites { - if _, exist := s.pendingAccWriteSet[addr]; !exist { + if _, exist := s.accWriteSet[addr]; !exist { return nil } - return s.pendingAccWriteSet[addr][state] + return s.accWriteSet[addr][state] } func (s *MVStates) querySlotWrites(addr common.Address, slot common.Hash) *StateWrites { - if _, exist := s.pendingSlotWriteSet[addr]; !exist { + if _, exist := s.slotWriteSet[addr]; !exist { return nil } - return s.pendingSlotWriteSet[addr][slot] + return s.slotWriteSet[addr][slot] } // resolveDepsMapCacheByWrites must be executed in order func (s *MVStates) resolveDepsMapCacheByWrites(index int, rwSet *RWSet) { + for index >= len(s.txDepCache) { + s.txDepCache = append(s.txDepCache, 
TxDep{}) + } // analysis dep, if the previous transaction is not executed/validated, re-analysis is required if rwSet.excludedTx { s.txDepCache[index] = NewTxDep([]uint64{}, ExcludedTxFlag) @@ -746,13 +819,16 @@ func (s *MVStates) resolveDepsMapCacheByWrites(index int, rwSet *RWSet) { // resolveDepsMapCacheByWrites2 must be executed in order func (s *MVStates) resolveDepsMapCacheByWrites2(index int, reads []RWEventItem) { + for index >= len(s.txDepCache) { + s.txDepCache = append(s.txDepCache, TxDep{}) + } rwSet := s.rwSets[index] // analysis dep, if the previous transaction is not executed/validated, re-analysis is required if rwSet.excludedTx { s.txDepCache[index] = NewTxDep([]uint64{}, ExcludedTxFlag) return } - depSlice := NewTxDepSlice(0) + depSlice := NewTxDepSlice(1) addrMap := make(map[common.Address]struct{}) // check tx dependency, only check key for _, item := range reads { @@ -810,6 +886,9 @@ func (s *MVStates) resolveDepsMapCacheByWrites2(index int, reads []RWEventItem) // resolveDepsCache must be executed in order func (s *MVStates) resolveDepsCache(index int, rwSet *RWSet) { + for index >= len(s.txDepCache) { + s.txDepCache = append(s.txDepCache, TxDep{}) + } // analysis dep, if the previous transaction is not executed/validated, re-analysis is required if rwSet.excludedTx { s.txDepCache[index] = NewTxDep([]uint64{}, ExcludedTxFlag) @@ -819,10 +898,10 @@ func (s *MVStates) resolveDepsCache(index int, rwSet *RWSet) { for prev := 0; prev < index; prev++ { // if there are some parallel execution or system txs, it will fulfill in advance // it's ok, and try re-generate later - prevSet := s.rwSets[prev] - if prevSet == nil { + if prev >= len(s.rwSets) { continue } + prevSet := s.rwSets[prev] // if prev tx is tagged ExcludedTxFlag, just skip the check if prevSet.excludedTx { continue @@ -861,28 +940,32 @@ func (s *MVStates) ResolveTxDAG(txCnt int, extraTxDeps ...TxDep) (TxDAG, error) } totalCnt := txCnt + len(extraTxDeps) - txDAG := NewPlainTxDAG(totalCnt) for i := 0; i < txCnt; i++ { if s.rwSets[i].cannotGasFeeDelay { return NewEmptyTxDAG(), nil } - cache := s.txDepCache[i] - if len(cache.TxIndexes) <= (txCnt-1)/2 { - txDAG.TxDeps[i] = cache + } + txDAG := &PlainTxDAG{ + TxDeps: s.txDepCache, + } + if len(extraTxDeps) > 0 { + txDAG.TxDeps = append(txDAG.TxDeps, extraTxDeps...) 
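The loop just below stores the complement of a dense dependency list together with NonDependentRelFlag, so a tx that depends on more than half of the other txs stays cheap to encode. A standalone sketch of that inversion, assuming Go 1.21+ for the stdlib slices package (invertDeps and the sample values are illustrative, not patch code):

    package main

    import (
        "fmt"
        "slices"
    )

    // invertDeps returns every index in [0, total) that is neither self nor a
    // dependency; callers would store this together with NonDependentRelFlag.
    func invertDeps(deps []uint64, self uint64, total int) []uint64 {
        nd := make([]uint64, 0, total)
        for j := uint64(0); j < uint64(total); j++ {
            if j != self && !slices.Contains(deps, j) {
                nd = append(nd, j)
            }
        }
        return nd
    }

    func main() {
        // tx 4 depends on 7 of the 9 other txs, i.e. more than (10-1)/2,
        // so the complement [8 9] is the cheaper representation.
        deps := []uint64{0, 1, 2, 3, 5, 6, 7}
        fmt.Println(invertDeps(deps, 4, 10)) // [8 9]
    }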
+ } + for i := 0; i < len(txDAG.TxDeps); i++ { + if len(txDAG.TxDeps[i].TxIndexes) <= (totalCnt-1)/2 { continue } // if tx deps larger than half of txs, then convert with NonDependentRelFlag txDAG.TxDeps[i].SetFlag(NonDependentRelFlag) - for j := uint64(0); j < uint64(txCnt); j++ { - if !slices.Contains(cache.TxIndexes, j) && j != uint64(i) { - txDAG.TxDeps[i].TxIndexes = append(txDAG.TxDeps[i].TxIndexes, j) + nd := make([]uint64, 0, totalCnt-1-len(txDAG.TxDeps[i].TxIndexes)) + for j := uint64(0); j < uint64(totalCnt); j++ { + if !slices.Contains(txDAG.TxDeps[i].TxIndexes, j) && j != uint64(i) { + nd = append(nd, j) } } + txDAG.TxDeps[i].TxIndexes = nd } - for i, j := txCnt, 0; i < totalCnt && j < len(extraTxDeps); i, j = i+1, j+1 { - txDAG.TxDeps[i] = extraTxDeps[j] - } - + s.txDepCache = txDAG.TxDeps return txDAG, nil } @@ -890,6 +973,11 @@ func (s *MVStates) FeeReceivers() []common.Address { return s.gasFeeReceivers } +func (s *MVStates) ReuseMem() { + rwSetsPool.Put(&s.rwSets) + txDepsPool.Put(&s.txDepCache) +} + func checkAccDependency(writeSet map[common.Address]map[AccountState]struct{}, readSet map[common.Address]map[AccountState]struct{}) bool { // check tx dependency, only check key, skip version for addr, sub := range writeSet { diff --git a/core/types/mvstates_test.go b/core/types/mvstates_test.go index 76678216b3..c7a90b1139 100644 --- a/core/types/mvstates_test.go +++ b/core/types/mvstates_test.go @@ -4,7 +4,6 @@ import ( "bytes" "compress/gzip" "fmt" - "sync" "testing" "time" @@ -66,7 +65,7 @@ func TestMVStates_ResolveTxDAG_Compare(t *testing.T) { ms2 := NewMVStates(txCnt, nil).EnableAsyncGen() ms3 := NewMVStates(txCnt, nil).EnableAsyncGen() for i, rwSet := range rwSets { - ms1.rwSets[i] = rwSet + ms1.rwSets = append(ms1.rwSets, *rwSet) require.NoError(t, ms2.FinaliseWithRWSet(rwSet)) ms3.handleRWEvents(mockRWEventItemsFromRWSet(i, rwSet)) } @@ -110,11 +109,37 @@ func TestMVStates_TxDAG_Compression(t *testing.T) { "time", float64(time.Since(start).Microseconds())/1000) } +var ( + mockRandRWSets []*RWSet + mockSameRWSets []*RWSet + mockDiffRWSets []*RWSet + mockRWEventItems [][]RWEventItem + mockSameRWEventItems [][]RWEventItem + mockDiffRWEventItems [][]RWEventItem +) + +func init() { + mockRandRWSets = mockRandomRWSet(mockRWSetSize) + mockSameRWSets = mockSameRWSet(mockRWSetSize) + mockDiffRWSets = mockDifferentRWSet(mockRWSetSize) + mockRWEventItems = make([][]RWEventItem, mockRWSetSize) + for i, rwSet := range mockRandRWSets { + mockRWEventItems[i] = mockRWEventItemsFromRWSet(i, rwSet) + } + mockSameRWEventItems = make([][]RWEventItem, mockRWSetSize) + for i, rwSet := range mockSameRWSets { + mockSameRWEventItems[i] = mockRWEventItemsFromRWSet(i, rwSet) + } + mockDiffRWEventItems = make([][]RWEventItem, mockRWSetSize) + for i, rwSet := range mockDiffRWSets { + mockDiffRWEventItems[i] = mockRWEventItemsFromRWSet(i, rwSet) + } +} + func BenchmarkResolveTxDAGInMVStates(b *testing.B) { - rwSets := mockRandomRWSet(mockRWSetSize) ms1 := NewMVStates(mockRWSetSize, nil).EnableAsyncGen() - for i, rwSet := range rwSets { - ms1.rwSets[i] = rwSet + for _, rwSet := range mockRandRWSets { + ms1.rwSets = append(ms1.rwSets, *rwSet) } b.ResetTimer() for i := 0; i < b.N; i++ { @@ -123,9 +148,8 @@ func BenchmarkResolveTxDAGInMVStates(b *testing.B) { } func BenchmarkResolveTxDAGByWritesInMVStates(b *testing.B) { - rwSets := mockRandomRWSet(mockRWSetSize) ms1 := NewMVStates(mockRWSetSize, nil).EnableAsyncGen() - for _, rwSet := range rwSets { + for _, rwSet := range mockRandRWSets { 
ms1.FinaliseWithRWSet(rwSet) } b.ResetTimer() @@ -135,25 +159,55 @@ func BenchmarkResolveTxDAGByWritesInMVStates(b *testing.B) { } func BenchmarkResolveTxDAGByWrites2InMVStates(b *testing.B) { - rwSets := mockRandomRWSet(mockRWSetSize) - items := make([][]RWEventItem, mockRWSetSize) ms1 := NewMVStates(mockRWSetSize, nil).EnableAsyncGen() - for i, rwSet := range rwSets { - items[i] = mockRWEventItemsFromRWSet(i, rwSet) - } b.ResetTimer() for i := 0; i < b.N; i++ { - for _, item := range items { + for _, item := range mockRWEventItems { ms1.handleRWEvents(item) } resolveDepsMapCacheByWrites2InMVStates(ms1) } } +func BenchmarkResolveTxDAG_RWEvent_RandRWSet(b *testing.B) { + benchmarkResolveTxDAGRWEvent(b, mockRWEventItems) +} + +func BenchmarkResolveTxDAG_RWEvent_SameRWSet(b *testing.B) { + benchmarkResolveTxDAGRWEvent(b, mockSameRWEventItems) +} + +func BenchmarkResolveTxDAG_RWEvent_DiffRWSet(b *testing.B) { + benchmarkResolveTxDAGRWEvent(b, mockDiffRWEventItems) +} + +func benchmarkResolveTxDAGRWEvent(b *testing.B, eventItems [][]RWEventItem) { + for i := 0; i < b.N; i++ { + s := NewMVStates(0, nil).EnableAsyncGen() + for _, items := range eventItems { + for _, item := range items { + switch item.Event { + case NewTxRWEvent: + s.RecordNewTx(item.Index) + case ReadAccRWEvent: + s.RecordAccountRead(item.Addr, item.State) + case ReadSlotRWEvent: + s.RecordStorageRead(item.Addr, item.Slot) + case WriteAccRWEvent: + s.RecordAccountWrite(item.Addr, item.State) + case WriteSlotRWEvent: + s.RecordStorageWrite(item.Addr, item.Slot) + } + } + } + s.ResolveTxDAG(mockRWSetSize) + s.Stop() + } +} + func BenchmarkResolveTxDAGByWritesInMVStates_100PercentConflict(b *testing.B) { - rwSets := mockSameRWSet(mockRWSetSize) ms1 := NewMVStates(mockRWSetSize, nil).EnableAsyncGen() - for _, rwSet := range rwSets { + for _, rwSet := range mockSameRWSets { ms1.FinaliseWithRWSet(rwSet) } b.ResetTimer() @@ -163,9 +217,8 @@ func BenchmarkResolveTxDAGByWritesInMVStates_100PercentConflict(b *testing.B) { } func BenchmarkResolveTxDAGByWritesInMVStates_0PercentConflict(b *testing.B) { - rwSets := mockDifferentRWSet(mockRWSetSize) ms1 := NewMVStates(mockRWSetSize, nil).EnableAsyncGen() - for _, rwSet := range rwSets { + for _, rwSet := range mockDiffRWSets { ms1.FinaliseWithRWSet(rwSet) } b.ResetTimer() @@ -185,73 +238,10 @@ func BenchmarkMVStates_Finalise(b *testing.B) { } } -func checkMap(m map[int][10]byte) { - for i, j := range m { - m[i] = j - } -} - -func BenchmarkEmptyMap(b *testing.B) { - for i := 0; i < b.N; i++ { - m := make(map[int][10]byte) - for j := 0; j < 10000; j++ { - m[i] = [10]byte{byte(j)} - } - checkMap(m) - } -} - -func BenchmarkInitMapWithSize(b *testing.B) { - for i := 0; i < b.N; i++ { - m := make(map[int][10]byte, 10) - for j := 0; j < 1000; j++ { - m[i] = [10]byte{byte(j)} - } - } -} - -func BenchmarkReuseMap(b *testing.B) { - sp := sync.Pool{New: func() interface{} { - return make(map[int]struct{}, 10) - }} - for i := 0; i < b.N; i++ { - m := sp.Get().(map[int]struct{}) - for j := 0; j < 1000; j++ { - m[i] = struct{}{} - } - for k := range m { - delete(m, k) - } - sp.Put(m) - } -} - -func BenchmarkExistArray(b *testing.B) { - for i := 0; i < b.N; i++ { - m := make(map[[20]byte]struct{}) - m[common.Address{1}] = struct{}{} - addr := common.Address{1} - if _, ok := m[addr]; ok { - continue - } - } -} - -func BenchmarkDonotExistArray(b *testing.B) { - for i := 0; i < b.N; i++ { - m := make(map[[20]byte]struct{}) - addr := common.Address{1} - if _, ok := m[addr]; !ok { - m[addr] = struct{}{} - 
delete(m, addr) - } - } -} - func resolveTxDAGInMVStates(s *MVStates, txCnt int) TxDAG { txDAG := NewPlainTxDAG(txCnt) for i := 0; i < txCnt; i++ { - s.resolveDepsCache(i, s.rwSets[i]) + s.resolveDepsCache(i, &s.rwSets[i]) txDAG.TxDeps[i] = s.txDepCache[i] } return txDAG @@ -261,7 +251,7 @@ func resolveDepsMapCacheByWritesInMVStates(s *MVStates) TxDAG { txCnt := s.nextFinaliseIndex txDAG := NewPlainTxDAG(txCnt) for i := 0; i < txCnt; i++ { - s.resolveDepsMapCacheByWrites(i, s.rwSets[i]) + s.resolveDepsMapCacheByWrites(i, &s.rwSets[i]) txDAG.TxDeps[i] = s.txDepCache[i] } return txDAG diff --git a/miner/worker.go b/miner/worker.go index a26dc0f948..4fb2537bd4 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1085,13 +1085,13 @@ func (w *worker) generateDAGTx(statedb *state.StateDB, signer types.Signer, txIn // get txDAG data from the stateDB // txIndex is the index of this txDAG transaction + defer func() { + statedb.MVStates().Stop() + }() txDAG, err := statedb.ResolveTxDAG(txIndex, types.TxDep{Flags: &types.NonDependentRelFlag}) if txDAG == nil { return nil, err } - if metrics.EnabledExpensive { - go types.EvaluateTxDAGPerformance(txDAG) - } publicKey := sender.Public() publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey) From e0d56ca444399cbadc1a7a8eab1322457492745a Mon Sep 17 00:00:00 2001 From: galaio Date: Thu, 12 Sep 2024 10:53:54 +0800 Subject: [PATCH 31/42] txdag: using a new mem pool; --- core/types/mvstates.go | 33 --------------------------------- 1 file changed, 33 deletions(-) diff --git a/core/types/mvstates.go b/core/types/mvstates.go index 27b204dc22..4b99261740 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -31,12 +31,6 @@ func init() { cache := make([]RWEventItem, 400) rwEventCachePool.Put(&cache) } - for i := 0; i < initSyncPoolSize; i++ { - rwSets := make([]RWSet, 4000) - rwSetsPool.Put(&rwSets) - txDeps := make([]TxDep, 4000) - txDepsPool.Put(&txDeps) - } } type ChanPool struct { @@ -313,10 +307,6 @@ func (w *StateWrites) Copy() *StateWrites { var ( rwEventsAllocMeter = metrics.GetOrRegisterMeter("mvstate/alloc/rwevents/cnt", nil) rwEventsAllocGauge = metrics.GetOrRegisterGauge("mvstate/alloc/rwevents/gauge", nil) - rwSetsAllocMeter = metrics.GetOrRegisterMeter("mvstate/alloc/rwsets/cnt", nil) - rwSetsAllocGauge = metrics.GetOrRegisterGauge("mvstate/alloc/rwsets/gauge", nil) - txDepsAllocMeter = metrics.GetOrRegisterMeter("mvstate/alloc/txdeps/cnt", nil) - txDepsAllocGauge = metrics.GetOrRegisterGauge("mvstate/alloc/txdeps/gauge", nil) ) var ( @@ -325,16 +315,6 @@ var ( buf := make([]RWEventItem, 0) return &buf }) - rwSetsPool = NewChanPool(initSyncPoolSize, func() any { - rwSetsAllocMeter.Mark(1) - buf := make([]RWSet, 0) - return &buf - }) - txDepsPool = NewChanPool(initSyncPoolSize, func() any { - txDepsAllocMeter.Mark(1) - buf := make([]TxDep, 0) - return &buf - }) ) type MVStates struct { @@ -367,11 +347,6 @@ func NewMVStates(txCount int, gasFeeReceivers []common.Address) *MVStates { rwEventCh: make(chan []RWEventItem, 100), gasFeeReceivers: gasFeeReceivers, } - - s.rwSets = *rwSetsPool.Get().(*[]RWSet) - s.rwSets = s.rwSets[:0] - s.txDepCache = *txDepsPool.Get().(*[]TxDep) - s.txDepCache = s.txDepCache[:0] return s } @@ -388,7 +363,6 @@ func (s *MVStates) EnableAsyncGen() *MVStates { func (s *MVStates) Stop() { s.stopAsyncRecorder() - s.ReuseMem() } func (s *MVStates) Copy() *MVStates { @@ -506,8 +480,6 @@ func (s *MVStates) RecordNewTx(index int) { } if index%2000 == 0 { rwEventsAllocGauge.Update(int64(len(rwEventCachePool.ch))) - 
rwSetsAllocGauge.Update(int64(len(rwSetsPool.ch))) - txDepsAllocGauge.Update(int64(len(txDepsPool.ch))) } if index%asyncSendInterval == 0 { s.BatchRecordHandle() @@ -973,11 +945,6 @@ func (s *MVStates) FeeReceivers() []common.Address { return s.gasFeeReceivers } -func (s *MVStates) ReuseMem() { - rwSetsPool.Put(&s.rwSets) - txDepsPool.Put(&s.txDepCache) -} - func checkAccDependency(writeSet map[common.Address]map[AccountState]struct{}, readSet map[common.Address]map[AccountState]struct{}) bool { // check tx dependency, only check key, skip version for addr, sub := range writeSet { From f65ca0add30c52b1a36bfea9161cfe24c06aaeec Mon Sep 17 00:00:00 2001 From: galaio Date: Mon, 23 Sep 2024 16:31:06 +0800 Subject: [PATCH 32/42] txdag: clean codes; --- core/state/statedb.go | 1 - core/types/dag.go | 78 ------------- core/types/dag_test.go | 171 --------------------------- core/types/mvstates.go | 223 ++++-------------------------------- core/types/mvstates_test.go | 84 +------------- miner/worker.go | 2 - 6 files changed, 23 insertions(+), 536 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index 77c719aadc..0d5e7a7eef 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1707,7 +1707,6 @@ func (s *StateDB) StartTxRecorder(isExcludeTx bool) { if s.mvStates == nil { return } - //log.Debug("StartTxRecorder", "tx", s.txIndex) if isExcludeTx { rwSet := types.NewEmptyRWSet(s.txIndex).WithExcludedTxFlag() if err := s.mvStates.FinaliseWithRWSet(rwSet); err != nil { diff --git a/core/types/dag.go b/core/types/dag.go index f53e233b52..1841332467 100644 --- a/core/types/dag.go +++ b/core/types/dag.go @@ -9,7 +9,6 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rlp" - "golang.org/x/exp/slices" ) const TxDAGAbiJson = ` @@ -318,83 +317,6 @@ func (d *PlainTxDAG) Size() int { return len(enc) } -// MergeTxDAGExecutionPaths will merge duplicate tx path for scheduling parallel. -// Any tx cannot exist in >= 2 paths. -func MergeTxDAGExecutionPaths(d TxDAG, from, to uint64) ([][]uint64, error) { - if from > to || to >= uint64(d.TxCount()) { - return nil, fmt.Errorf("input wrong from: %v, to: %v, txCnt:%v", from, to, d.TxCount()) - } - mergeMap := make(map[uint64][]uint64, d.TxCount()) - txMap := make(map[uint64]uint64, d.TxCount()) - for i := int(to); i >= int(from); i-- { - index, merge := uint64(i), uint64(i) - deps := TxDependency(d, i) - // drop the out range txs - deps = depExcludeTxRange(deps, from, to) - if oldIdx, exist := findTxPathIndex(deps, index, txMap); exist { - merge = oldIdx - } - for _, tx := range deps { - txMap[tx] = merge - } - txMap[index] = merge - } - - // result by index order - for f, t := range txMap { - if mergeMap[t] == nil { - mergeMap[t] = make([]uint64, 0) - } - if f < from || f > to { - continue - } - mergeMap[t] = append(mergeMap[t], f) - } - mergePaths := make([][]uint64, 0, len(mergeMap)) - for i := from; i <= to; i++ { - path, ok := mergeMap[i] - if !ok { - continue - } - slices.Sort(path) - mergePaths = append(mergePaths, path) - } - - return mergePaths, nil -} - -// depExcludeTxRange drop all from~to items, and deps is ordered. 
-func depExcludeTxRange(deps []uint64, from uint64, to uint64) []uint64 { - if len(deps) == 0 { - return deps - } - start, end := 0, len(deps)-1 - for start < len(deps) && deps[start] < from { - start++ - } - for end >= 0 && deps[end] > to { - end-- - } - if start > end { - return nil - } - return deps[start : end+1] -} - -func findTxPathIndex(path []uint64, cur uint64, txMap map[uint64]uint64) (uint64, bool) { - if old, ok := txMap[cur]; ok { - return old, true - } - - for _, index := range path { - if old, ok := txMap[index]; ok { - return old, true - } - } - - return 0, false -} - // TxDep store the current tx dependency relation with other txs type TxDep struct { TxIndexes []uint64 diff --git a/core/types/dag_test.go b/core/types/dag_test.go index 7da1a183b3..de4f3e1c44 100644 --- a/core/types/dag_test.go +++ b/core/types/dag_test.go @@ -8,16 +8,10 @@ import ( "github.com/cometbft/cometbft/libs/rand" - "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -var ( - mockAddr = common.HexToAddress("0x482bA86399ab6Dcbe54071f8d22258688B4509b1") - mockHash = common.HexToHash("0xdc13f8d7bdb8ec4de02cd4a50a1aa2ab73ec8814e0cdb550341623be3dd8ab7a") -) - func TestEncodeTxDAGCalldata(t *testing.T) { tg := mockSimpleDAG() data, err := EncodeTxDAGCalldata(tg) @@ -52,92 +46,6 @@ func TestEvaluateTxDAG(t *testing.T) { EvaluateTxDAGPerformance(dag) } -func TestMergeTxDAGExecutionPaths_Simple(t *testing.T) { - tests := []struct { - d TxDAG - from uint64 - to uint64 - expect [][]uint64 - }{ - { - d: mockSimpleDAG(), - from: 0, - to: 9, - expect: [][]uint64{ - {0, 3, 4}, - {1, 2, 5, 6, 7}, - {8, 9}, - }, - }, - { - d: mockSimpleDAG(), - from: 1, - to: 1, - expect: [][]uint64{ - {1}, - }, - }, - { - d: mockSimpleDAGWithLargeDeps(), - from: 0, - to: 9, - expect: [][]uint64{ - {5, 6}, - {0, 1, 2, 3, 4, 7, 8, 9}, - }, - }, - { - d: mockSystemTxDAGWithLargeDeps(), - from: 0, - to: 11, - expect: [][]uint64{ - {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, - {10}, - {11}, - }, - }, - { - d: mockSimpleDAGWithLargeDeps(), - from: 5, - to: 8, - expect: [][]uint64{ - {5, 6}, - {7}, - {8}, - }, - }, - { - d: mockSimpleDAGWithLargeDeps(), - from: 5, - to: 9, - expect: [][]uint64{ - {5, 6}, - {7}, - {8, 9}, - }, - }, - } - for i, item := range tests { - paths, err := MergeTxDAGExecutionPaths(item.d, item.from, item.to) - require.NoError(t, err) - require.Equal(t, item.expect, paths, i) - } -} - -func TestMergeTxDAGExecutionPaths_Random(t *testing.T) { - dag := mockRandomDAG(10000) - paths, _ := MergeTxDAGExecutionPaths(dag, 0, uint64(dag.TxCount()-1)) - txMap := make(map[uint64]uint64, dag.TxCount()) - for _, path := range paths { - for _, index := range path { - old, ok := txMap[index] - require.False(t, ok, index, path, old) - txMap[index] = path[0] - } - } - require.Equal(t, dag.TxCount(), len(txMap)) -} - func TestTxDAG_Compression(t *testing.T) { dag := mockRandomDAG(10000) enc, err := EncodeTxDAG(dag) @@ -146,13 +54,6 @@ func TestTxDAG_Compression(t *testing.T) { t.Log("enc", len(enc), "compressed", len(encoded), "ratio", 1-(float64(len(encoded))/float64(len(enc)))) } -func BenchmarkMergeTxDAGExecutionPaths(b *testing.B) { - dag := mockRandomDAG(100000) - for i := 0; i < b.N; i++ { - MergeTxDAGExecutionPaths(dag, 0, uint64(dag.TxCount()-1)) - } -} - func BenchmarkTxDAG_Encode(b *testing.B) { dag := mockRandomDAG(10000) for i := 0; i < b.N; i++ { @@ -183,22 +84,6 @@ func mockSimpleDAG() TxDAG { return dag } -func mockSimpleDAGWithLargeDeps() TxDAG { - dag := 
NewPlainTxDAG(10) - dag.TxDeps[0].TxIndexes = []uint64{} - dag.TxDeps[1].TxIndexes = []uint64{} - dag.TxDeps[2].TxIndexes = []uint64{} - dag.TxDeps[3].TxIndexes = []uint64{0} - dag.TxDeps[4].TxIndexes = []uint64{0} - dag.TxDeps[5].TxIndexes = []uint64{} - dag.TxDeps[6].TxIndexes = []uint64{5} - dag.TxDeps[7].TxIndexes = []uint64{2, 4} - dag.TxDeps[8].TxIndexes = []uint64{} - //dag.TxDeps[9].TxIndexes = []uint64{0, 1, 3, 4, 8} - dag.TxDeps[9] = NewTxDep([]uint64{2, 5, 6, 7}, NonDependentRelFlag) - return dag -} - func mockRandomDAG(txLen int) TxDAG { dag := NewPlainTxDAG(txLen) for i := 0; i < txLen; i++ { @@ -353,59 +238,3 @@ func TestTxDep_Flags(t *testing.T) { require.True(t, dep.CheckFlag(NonDependentRelFlag)) require.False(t, dep.CheckFlag(ExcludedTxFlag)) } - -func TestDepExcludeTxRange(t *testing.T) { - tests := []struct { - src []uint64 - from uint64 - to uint64 - expect []uint64 - }{ - { - src: nil, - from: 0, - to: 4, - expect: nil, - }, - { - src: []uint64{}, - from: 0, - to: 4, - expect: []uint64{}, - }, - { - src: []uint64{0, 1, 2, 3, 4}, - from: 4, - to: 4, - expect: []uint64{4}, - }, - { - src: []uint64{0, 1, 2, 3, 4}, - from: 1, - to: 3, - expect: []uint64{1, 2, 3}, - }, - { - src: []uint64{0, 1, 2, 3, 4}, - from: 5, - to: 6, - expect: nil, - }, - { - src: []uint64{2, 3, 4}, - from: 0, - to: 1, - expect: nil, - }, - { - src: []uint64{0, 1, 2, 3, 4}, - from: 0, - to: 4, - expect: []uint64{0, 1, 2, 3, 4}, - }, - } - - for i, item := range tests { - require.Equal(t, item.expect, depExcludeTxRange(item.src, item.from, item.to), i) - } -} diff --git a/core/types/mvstates.go b/core/types/mvstates.go index 4b99261740..fe1d395880 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -471,7 +471,7 @@ func (s *MVStates) finalisePreviousRWSet(reads []RWEventItem) { } // reset nextFinaliseIndex to index+1, it may revert to previous txs s.nextFinaliseIndex = index + 1 - s.resolveDepsMapCacheByWrites2(index, reads) + s.resolveDepsMapCacheByWrites(index, reads) } func (s *MVStates) RecordNewTx(index int) { @@ -635,10 +635,26 @@ func (s *MVStates) FinaliseWithRWSet(rwSet *RWSet) error { if err := s.innerFinalise(i, true); err != nil { return err } - s.resolveDepsMapCacheByWrites(i, &(s.rwSets[i])) - //log.Debug("Finalise the reads/writes", "index", i, - // "readCnt", len(s.rwSets[i].accReadSet)+len(s.rwSets[i].slotReadSet), - // "writeCnt", len(s.rwSets[i].accWriteSet)+len(s.rwSets[i].slotWriteSet)) + reads := make([]RWEventItem, 0, len(s.rwSets[i].accReadSet)+len(s.rwSets[i].slotReadSet)) + for addr, sub := range s.rwSets[i].accReadSet { + for state := range sub { + reads = append(reads, RWEventItem{ + Event: ReadAccRWEvent, + Addr: addr, + State: state, + }) + } + } + for addr, sub := range s.rwSets[i].slotReadSet { + for slot := range sub { + reads = append(reads, RWEventItem{ + Event: ReadSlotRWEvent, + Addr: addr, + Slot: slot, + }) + } + } + s.resolveDepsMapCacheByWrites(i, reads) } return nil @@ -723,74 +739,7 @@ func (s *MVStates) querySlotWrites(addr common.Address, slot common.Hash) *State } // resolveDepsMapCacheByWrites must be executed in order -func (s *MVStates) resolveDepsMapCacheByWrites(index int, rwSet *RWSet) { - for index >= len(s.txDepCache) { - s.txDepCache = append(s.txDepCache, TxDep{}) - } - // analysis dep, if the previous transaction is not executed/validated, re-analysis is required - if rwSet.excludedTx { - s.txDepCache[index] = NewTxDep([]uint64{}, ExcludedTxFlag) - return - } - depSlice := NewTxDepSlice(0) - // check tx dependency, only check 
key, skip version - for addr, sub := range rwSet.accReadSet { - for state := range sub { - // check self destruct - if state == AccountSelf { - state = AccountSuicide - } - writes := s.queryAccWrites(addr, state) - if writes == nil { - continue - } - find := writes.FindLastWrite(index) - if find < 0 { - continue - } - tx := uint64(find) - if depSlice.exist(tx) { - continue - } - depSlice.add(tx) - } - } - for addr, sub := range rwSet.slotReadSet { - for slot := range sub { - writes := s.querySlotWrites(addr, slot) - if writes == nil { - continue - } - find := writes.FindLastWrite(index) - if find < 0 { - continue - } - tx := uint64(find) - if depSlice.exist(tx) { - continue - } - depSlice.add(tx) - } - } - // clear redundancy deps compared with prev - preDeps := depSlice.deps() - var removed []uint64 - for _, prev := range preDeps { - for _, tx := range s.txDepCache[int(prev)].TxIndexes { - if depSlice.exist(tx) { - removed = append(removed, tx) - } - } - } - for _, tx := range removed { - depSlice.remove(tx) - } - //log.Debug("resolveDepsMapCacheByWrites", "tx", index, "deps", depMap.deps()) - s.txDepCache[index] = NewTxDep(depSlice.deps()) -} - -// resolveDepsMapCacheByWrites2 must be executed in order -func (s *MVStates) resolveDepsMapCacheByWrites2(index int, reads []RWEventItem) { +func (s *MVStates) resolveDepsMapCacheByWrites(index int, reads []RWEventItem) { for index >= len(s.txDepCache) { s.txDepCache = append(s.txDepCache, TxDep{}) } @@ -852,55 +801,9 @@ func (s *MVStates) resolveDepsMapCacheByWrites2(index int, reads []RWEventItem) for _, tx := range removed { depSlice.remove(tx) } - //log.Debug("resolveDepsMapCacheByWrites", "tx", index, "deps", depSlice.deps()) s.txDepCache[index] = NewTxDep(depSlice.deps()) } -// resolveDepsCache must be executed in order -func (s *MVStates) resolveDepsCache(index int, rwSet *RWSet) { - for index >= len(s.txDepCache) { - s.txDepCache = append(s.txDepCache, TxDep{}) - } - // analysis dep, if the previous transaction is not executed/validated, re-analysis is required - if rwSet.excludedTx { - s.txDepCache[index] = NewTxDep([]uint64{}, ExcludedTxFlag) - return - } - depMap := NewTxDepMap(0) - for prev := 0; prev < index; prev++ { - // if there are some parallel execution or system txs, it will fulfill in advance - // it's ok, and try re-generate later - if prev >= len(s.rwSets) { - continue - } - prevSet := s.rwSets[prev] - // if prev tx is tagged ExcludedTxFlag, just skip the check - if prevSet.excludedTx { - continue - } - // check if there has written op before i - if checkAccDependency(prevSet.accWriteSet, rwSet.accReadSet) { - depMap.add(uint64(prev)) - // clear redundancy deps compared with prev - for _, dep := range depMap.deps() { - if slices.Contains(s.txDepCache[prev].TxIndexes, dep) { - depMap.remove(dep) - } - } - } - if checkSlotDependency(prevSet.slotWriteSet, rwSet.slotReadSet) { - depMap.add(uint64(prev)) - // clear redundancy deps compared with prev - for _, dep := range depMap.deps() { - if slices.Contains(s.txDepCache[prev].TxIndexes, dep) { - depMap.remove(dep) - } - } - } - } - s.txDepCache[index] = NewTxDep(depMap.deps()) -} - // ResolveTxDAG generate TxDAG from RWSets func (s *MVStates) ResolveTxDAG(txCnt int, extraTxDeps ...TxDep) (TxDAG, error) { s.stopAsyncRecorder() @@ -945,88 +848,6 @@ func (s *MVStates) FeeReceivers() []common.Address { return s.gasFeeReceivers } -func checkAccDependency(writeSet map[common.Address]map[AccountState]struct{}, readSet map[common.Address]map[AccountState]struct{}) bool { - // check tx 
dependency, only check key, skip version - for addr, sub := range writeSet { - if _, ok := readSet[addr]; !ok { - continue - } - for state := range sub { - // check suicide, add read address flag, it only for check suicide quickly, and cannot for other scenarios. - if state == AccountSuicide { - if _, ok := readSet[addr][AccountSelf]; ok { - return true - } - continue - } - if _, ok := readSet[addr][state]; ok { - return true - } - } - } - - return false -} - -func checkSlotDependency(writeSet map[common.Address]map[common.Hash]struct{}, readSet map[common.Address]map[common.Hash]struct{}) bool { - // check tx dependency, only check key, skip version - for addr, sub := range writeSet { - if _, ok := readSet[addr]; !ok { - continue - } - for slot := range sub { - if _, ok := readSet[addr][slot]; ok { - return true - } - } - } - - return false -} - -type TxDepMap struct { - tm map[uint64]struct{} - cache []uint64 -} - -func NewTxDepMap(cap int) *TxDepMap { - return &TxDepMap{ - tm: make(map[uint64]struct{}, cap), - } -} - -func (m *TxDepMap) add(index uint64) { - m.cache = nil - m.tm[index] = struct{}{} -} - -func (m *TxDepMap) exist(index uint64) bool { - _, ok := m.tm[index] - return ok -} - -func (m *TxDepMap) deps() []uint64 { - if m.cache != nil { - return m.cache - } - res := make([]uint64, 0, len(m.tm)) - for index := range m.tm { - res = append(res, index) - } - slices.Sort(res) - m.cache = res - return m.cache -} - -func (m *TxDepMap) remove(index uint64) { - m.cache = nil - delete(m.tm, index) -} - -func (m *TxDepMap) len() int { - return len(m.tm) -} - type TxDepSlice struct { indexes []uint64 } diff --git a/core/types/mvstates_test.go b/core/types/mvstates_test.go index c7a90b1139..9ede0e5ad4 100644 --- a/core/types/mvstates_test.go +++ b/core/types/mvstates_test.go @@ -58,25 +58,6 @@ func TestMVStates_ResolveTxDAG_Async(t *testing.T) { require.NoError(t, err) } -func TestMVStates_ResolveTxDAG_Compare(t *testing.T) { - txCnt := 3000 - rwSets := mockRandomRWSet(txCnt) - ms1 := NewMVStates(txCnt, nil).EnableAsyncGen() - ms2 := NewMVStates(txCnt, nil).EnableAsyncGen() - ms3 := NewMVStates(txCnt, nil).EnableAsyncGen() - for i, rwSet := range rwSets { - ms1.rwSets = append(ms1.rwSets, *rwSet) - require.NoError(t, ms2.FinaliseWithRWSet(rwSet)) - ms3.handleRWEvents(mockRWEventItemsFromRWSet(i, rwSet)) - } - - d1 := resolveTxDAGInMVStates(ms1, txCnt) - d2 := resolveDepsMapCacheByWritesInMVStates(ms2) - d3 := resolveDepsMapCacheByWrites2InMVStates(ms3) - require.Equal(t, d1.(*PlainTxDAG).String(), d2.(*PlainTxDAG).String()) - require.Equal(t, d1.(*PlainTxDAG).String(), d3.(*PlainTxDAG).String()) -} - func TestMVStates_TxDAG_Compression(t *testing.T) { txCnt := 10000 rwSets := mockRandomRWSet(txCnt) @@ -136,36 +117,14 @@ func init() { } } -func BenchmarkResolveTxDAGInMVStates(b *testing.B) { - ms1 := NewMVStates(mockRWSetSize, nil).EnableAsyncGen() - for _, rwSet := range mockRandRWSets { - ms1.rwSets = append(ms1.rwSets, *rwSet) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - resolveTxDAGInMVStates(ms1, mockRWSetSize) - } -} - func BenchmarkResolveTxDAGByWritesInMVStates(b *testing.B) { - ms1 := NewMVStates(mockRWSetSize, nil).EnableAsyncGen() - for _, rwSet := range mockRandRWSets { - ms1.FinaliseWithRWSet(rwSet) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - resolveDepsMapCacheByWritesInMVStates(ms1) - } -} - -func BenchmarkResolveTxDAGByWrites2InMVStates(b *testing.B) { ms1 := NewMVStates(mockRWSetSize, nil).EnableAsyncGen() b.ResetTimer() for i := 0; i < b.N; i++ { for _, 
item := range mockRWEventItems { ms1.handleRWEvents(item) } - resolveDepsMapCacheByWrites2InMVStates(ms1) + resolveDepsMapCacheByWritesInMVStates(ms1) } } @@ -205,28 +164,6 @@ func benchmarkResolveTxDAGRWEvent(b *testing.B, eventItems [][]RWEventItem) { } } -func BenchmarkResolveTxDAGByWritesInMVStates_100PercentConflict(b *testing.B) { - ms1 := NewMVStates(mockRWSetSize, nil).EnableAsyncGen() - for _, rwSet := range mockSameRWSets { - ms1.FinaliseWithRWSet(rwSet) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - resolveDepsMapCacheByWritesInMVStates(ms1) - } -} - -func BenchmarkResolveTxDAGByWritesInMVStates_0PercentConflict(b *testing.B) { - ms1 := NewMVStates(mockRWSetSize, nil).EnableAsyncGen() - for _, rwSet := range mockDiffRWSets { - ms1.FinaliseWithRWSet(rwSet) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - resolveDepsMapCacheByWritesInMVStates(ms1) - } -} - func BenchmarkMVStates_Finalise(b *testing.B) { rwSets := mockRandomRWSet(mockRWSetSize) ms1 := NewMVStates(mockRWSetSize, nil).EnableAsyncGen() @@ -238,26 +175,7 @@ func BenchmarkMVStates_Finalise(b *testing.B) { } } -func resolveTxDAGInMVStates(s *MVStates, txCnt int) TxDAG { - txDAG := NewPlainTxDAG(txCnt) - for i := 0; i < txCnt; i++ { - s.resolveDepsCache(i, &s.rwSets[i]) - txDAG.TxDeps[i] = s.txDepCache[i] - } - return txDAG -} - func resolveDepsMapCacheByWritesInMVStates(s *MVStates) TxDAG { - txCnt := s.nextFinaliseIndex - txDAG := NewPlainTxDAG(txCnt) - for i := 0; i < txCnt; i++ { - s.resolveDepsMapCacheByWrites(i, &s.rwSets[i]) - txDAG.TxDeps[i] = s.txDepCache[i] - } - return txDAG -} - -func resolveDepsMapCacheByWrites2InMVStates(s *MVStates) TxDAG { txCnt := s.nextFinaliseIndex txDAG := NewPlainTxDAG(txCnt) for i := 0; i < txCnt; i++ { diff --git a/miner/worker.go b/miner/worker.go index 4fb2537bd4..bfd60f2536 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -912,7 +912,6 @@ func (w *worker) applyTransaction(env *environment, tx *types.Transaction) (*typ ) receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &env.coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, *w.chain.GetVMConfig()) if err != nil { - //log.Debug("ApplyTransaction err", "block", env.header.Number.Uint64(), "tx", env.tcount, "err", err) env.state.RevertToSnapshot(snap) env.gasPool.SetGas(gp) } @@ -1108,7 +1107,6 @@ func (w *worker) generateDAGTx(statedb *state.StateDB, signer types.Signer, txIn return nil, fmt.Errorf("failed to encode txDAG, err: %v", err) } - //log.Debug("EncodeTxDAGCalldata", "tx", txDAG.TxCount(), "data", len(data), "dag", txDAG) // Create the transaction tx := types.NewTx(&types.LegacyTx{ Nonce: nonce, From e9b1e2e150e4ae3a784008bdb2fde731beb1d8e6 Mon Sep 17 00:00:00 2001 From: galaio Date: Tue, 24 Sep 2024 14:50:54 +0800 Subject: [PATCH 33/42] txdag: support find all prev tx from rw list; --- core/types/mvstates.go | 58 ++++++++++++++++++++++++++---------------- 1 file changed, 36 insertions(+), 22 deletions(-) diff --git a/core/types/mvstates.go b/core/types/mvstates.go index fe1d395880..bdff2fcd83 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -245,17 +245,17 @@ type RWEventItem struct { Slot common.Hash } -type StateWrites struct { +type RWTxList struct { list []int } -func NewStateWrites() *StateWrites { - return &StateWrites{ +func NewStateWrites() *RWTxList { + return &RWTxList{ list: make([]int, 0), } } -func (w *StateWrites) Append(pw int) { +func (w *RWTxList) Append(pw int) { if i, found := w.SearchTxIndex(pw); found { w.list[i] = pw return @@ -270,7 
+270,7 @@ func (w *StateWrites) Append(pw int) { } } -func (w *StateWrites) SearchTxIndex(txIndex int) (int, bool) { +func (w *RWTxList) SearchTxIndex(txIndex int) (int, bool) { n := len(w.list) i, j := 0, n for i < j { @@ -285,7 +285,7 @@ func (w *StateWrites) SearchTxIndex(txIndex int) (int, bool) { return i, i < n && w.list[i] == txIndex } -func (w *StateWrites) FindLastWrite(txIndex int) int { +func (w *RWTxList) FindLastWrite(txIndex int) int { var i, _ = w.SearchTxIndex(txIndex) for j := i - 1; j >= 0; j-- { if w.list[j] < txIndex { @@ -296,8 +296,19 @@ func (w *StateWrites) FindLastWrite(txIndex int) int { return -1 } -func (w *StateWrites) Copy() *StateWrites { - np := &StateWrites{} +func (w *RWTxList) FindPrevWrites(txIndex int) []int { + var i, _ = w.SearchTxIndex(txIndex) + for j := i - 1; j >= 0; j-- { + if w.list[j] < txIndex { + return w.list[:j+1] + } + } + + return nil +} + +func (w *RWTxList) Copy() *RWTxList { + np := &RWTxList{} for i, item := range w.list { np.list[i] = item } @@ -318,9 +329,12 @@ var ( ) type MVStates struct { - rwSets []RWSet - accWriteSet map[common.Address]map[AccountState]*StateWrites - slotWriteSet map[common.Address]map[common.Hash]*StateWrites + rwSets []RWSet + accWriteSet map[common.Address]map[AccountState]*RWTxList + slotWriteSet map[common.Address]map[common.Hash]*RWTxList + // TODO: maintain read tx list for states here + accReadSet map[common.Address]map[AccountState]*RWTxList + slotReadSet map[common.Address]map[common.Hash]*RWTxList nextFinaliseIndex int gasFeeReceivers []common.Address // dependency map cache for generating TxDAG @@ -342,8 +356,8 @@ type MVStates struct { func NewMVStates(txCount int, gasFeeReceivers []common.Address) *MVStates { s := &MVStates{ - accWriteSet: make(map[common.Address]map[AccountState]*StateWrites, txCount), - slotWriteSet: make(map[common.Address]map[common.Hash]*StateWrites, txCount), + accWriteSet: make(map[common.Address]map[AccountState]*RWTxList, txCount), + slotWriteSet: make(map[common.Address]map[common.Hash]*RWTxList, txCount), rwEventCh: make(chan []RWEventItem, 100), gasFeeReceivers: gasFeeReceivers, } @@ -375,7 +389,7 @@ func (s *MVStates) Copy() *MVStates { for addr, sub := range s.accWriteSet { for state, writes := range sub { if _, ok := ns.accWriteSet[addr]; !ok { - ns.accWriteSet[addr] = make(map[AccountState]*StateWrites) + ns.accWriteSet[addr] = make(map[AccountState]*RWTxList) } ns.accWriteSet[addr][state] = writes.Copy() } @@ -383,7 +397,7 @@ func (s *MVStates) Copy() *MVStates { for addr, sub := range s.slotWriteSet { for slot, writes := range sub { if _, ok := ns.slotWriteSet[addr]; !ok { - ns.slotWriteSet[addr] = make(map[common.Hash]*StateWrites) + ns.slotWriteSet[addr] = make(map[common.Hash]*RWTxList) } ns.slotWriteSet[addr][slot] = writes.Copy() } @@ -679,7 +693,7 @@ func (s *MVStates) innerFinalise(index int, applyWriteSet bool) error { // append to pending write set for addr, sub := range rwSet.accWriteSet { if _, exist := s.accWriteSet[addr]; !exist { - s.accWriteSet[addr] = make(map[AccountState]*StateWrites) + s.accWriteSet[addr] = make(map[AccountState]*RWTxList) } for state := range sub { if _, exist := s.accWriteSet[addr][state]; !exist { @@ -690,7 +704,7 @@ func (s *MVStates) innerFinalise(index int, applyWriteSet bool) error { } for addr, sub := range rwSet.slotWriteSet { if _, exist := s.slotWriteSet[addr]; !exist { - s.slotWriteSet[addr] = make(map[common.Hash]*StateWrites) + s.slotWriteSet[addr] = make(map[common.Hash]*RWTxList) } for slot := range sub { if _, 
exist := s.slotWriteSet[addr][slot]; !exist { @@ -705,7 +719,7 @@ func (s *MVStates) innerFinalise(index int, applyWriteSet bool) error { func (s *MVStates) finaliseSlotWrite(index int, addr common.Address, slot common.Hash) { // append to pending write set if _, exist := s.slotWriteSet[addr]; !exist { - s.slotWriteSet[addr] = make(map[common.Hash]*StateWrites) + s.slotWriteSet[addr] = make(map[common.Hash]*RWTxList) } if _, exist := s.slotWriteSet[addr][slot]; !exist { s.slotWriteSet[addr][slot] = NewStateWrites() @@ -716,7 +730,7 @@ func (s *MVStates) finaliseSlotWrite(index int, addr common.Address, slot common func (s *MVStates) finaliseAccWrite(index int, addr common.Address, state AccountState) { // append to pending write set if _, exist := s.accWriteSet[addr]; !exist { - s.accWriteSet[addr] = make(map[AccountState]*StateWrites) + s.accWriteSet[addr] = make(map[AccountState]*RWTxList) } if _, exist := s.accWriteSet[addr][state]; !exist { s.accWriteSet[addr][state] = NewStateWrites() @@ -724,14 +738,14 @@ func (s *MVStates) finaliseAccWrite(index int, addr common.Address, state Accoun s.accWriteSet[addr][state].Append(index) } -func (s *MVStates) queryAccWrites(addr common.Address, state AccountState) *StateWrites { +func (s *MVStates) queryAccWrites(addr common.Address, state AccountState) *RWTxList { if _, exist := s.accWriteSet[addr]; !exist { return nil } return s.accWriteSet[addr][state] } -func (s *MVStates) querySlotWrites(addr common.Address, slot common.Hash) *StateWrites { +func (s *MVStates) querySlotWrites(addr common.Address, slot common.Hash) *RWTxList { if _, exist := s.slotWriteSet[addr]; !exist { return nil } @@ -754,7 +768,7 @@ func (s *MVStates) resolveDepsMapCacheByWrites(index int, reads []RWEventItem) { // check tx dependency, only check key for _, item := range reads { // check account states & slots - var writes *StateWrites + var writes *RWTxList if item.Event == ReadAccRWEvent { writes = s.queryAccWrites(item.Addr, item.State) } else { From 917aaa0f249dc34b6f8c48d6a573c5e1b0494cc7 Mon Sep 17 00:00:00 2001 From: galaio Date: Tue, 24 Sep 2024 14:53:44 +0800 Subject: [PATCH 34/42] txdag: support find all prev tx from rw list; --- core/types/mvstates.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/core/types/mvstates.go b/core/types/mvstates.go index bdff2fcd83..b93bb415a6 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -446,6 +446,7 @@ func (s *MVStates) handleRWEvents(items []RWEventItem) { switch item.Event { // recorde current read/write event case ReadAccRWEvent, ReadSlotRWEvent: + // TODO: maintain read list here if readFrom < 0 { readFrom = i } @@ -796,6 +797,7 @@ func (s *MVStates) resolveDepsMapCacheByWrites(index int, reads []RWEventItem) { } } } + // TODO: check read before write dependency here for _, addr := range s.gasFeeReceivers { if _, ok := addrMap[addr]; ok { rwSet.cannotGasFeeDelay = true From c77b5451971f504486774a5bddaf5491a6f46587 Mon Sep 17 00:00:00 2001 From: welkin22 Date: Tue, 24 Sep 2024 16:31:48 +0800 Subject: [PATCH 35/42] Add the dependency of read-before-write to the DAG --- core/types/mvstates.go | 202 ++++++++++++++++++++++++++++++++++------- 1 file changed, 170 insertions(+), 32 deletions(-) diff --git a/core/types/mvstates.go b/core/types/mvstates.go index b93bb415a6..f6d0e0a49a 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -249,7 +249,7 @@ type RWTxList struct { list []int } -func NewStateWrites() *RWTxList { +func NewRWTxList() *RWTxList { return &RWTxList{ list: make([]int, 
0), } @@ -285,7 +285,7 @@ func (w *RWTxList) SearchTxIndex(txIndex int) (int, bool) { return i, i < n && w.list[i] == txIndex } -func (w *RWTxList) FindLastWrite(txIndex int) int { +func (w *RWTxList) FindLastTx(txIndex int) int { var i, _ = w.SearchTxIndex(txIndex) for j := i - 1; j >= 0; j-- { if w.list[j] < txIndex { @@ -296,7 +296,7 @@ func (w *RWTxList) FindLastWrite(txIndex int) int { return -1 } -func (w *RWTxList) FindPrevWrites(txIndex int) []int { +func (w *RWTxList) FindPrevTxs(txIndex int) []int { var i, _ = w.SearchTxIndex(txIndex) for j := i - 1; j >= 0; j-- { if w.list[j] < txIndex { @@ -329,10 +329,9 @@ var ( ) type MVStates struct { - rwSets []RWSet - accWriteSet map[common.Address]map[AccountState]*RWTxList - slotWriteSet map[common.Address]map[common.Hash]*RWTxList - // TODO: maintain read tx list for states here + rwSets []RWSet + accWriteSet map[common.Address]map[AccountState]*RWTxList + slotWriteSet map[common.Address]map[common.Hash]*RWTxList accReadSet map[common.Address]map[AccountState]*RWTxList slotReadSet map[common.Address]map[common.Hash]*RWTxList nextFinaliseIndex int @@ -358,6 +357,8 @@ func NewMVStates(txCount int, gasFeeReceivers []common.Address) *MVStates { s := &MVStates{ accWriteSet: make(map[common.Address]map[AccountState]*RWTxList, txCount), slotWriteSet: make(map[common.Address]map[common.Hash]*RWTxList, txCount), + accReadSet: make(map[common.Address]map[AccountState]*RWTxList, txCount), + slotReadSet: make(map[common.Address]map[common.Hash]*RWTxList, txCount), rwEventCh: make(chan []RWEventItem, 100), gasFeeReceivers: gasFeeReceivers, } @@ -394,6 +395,14 @@ func (s *MVStates) Copy() *MVStates { ns.accWriteSet[addr][state] = writes.Copy() } } + for addr, sub := range s.accReadSet { + for state, reads := range sub { + if _, ok := ns.accReadSet[addr]; !ok { + ns.accReadSet[addr] = make(map[AccountState]*RWTxList) + } + ns.accReadSet[addr][state] = reads.Copy() + } + } for addr, sub := range s.slotWriteSet { for slot, writes := range sub { if _, ok := ns.slotWriteSet[addr]; !ok { @@ -402,6 +411,14 @@ func (s *MVStates) Copy() *MVStates { ns.slotWriteSet[addr][slot] = writes.Copy() } } + for addr, sub := range s.slotReadSet { + for slot, reads := range sub { + if _, ok := ns.slotReadSet[addr]; !ok { + ns.slotReadSet[addr] = make(map[common.Hash]*RWTxList) + } + ns.slotReadSet[addr][slot] = reads.Copy() + } + } return ns } @@ -421,18 +438,24 @@ func (s *MVStates) asyncRWEventLoop() { func (s *MVStates) handleRWEvents(items []RWEventItem) { readFrom, readTo := -1, -1 + writeFrom, writeTo := -1, -1 recordNewTx := false for i, item := range items { // init next RWSet, and finalise previous RWSet if item.Event == NewTxRWEvent { // handle previous rw set if recordNewTx { - var prevItems []RWEventItem + var prevReadItems []RWEventItem if readFrom >= 0 && readTo > readFrom { - prevItems = items[readFrom:readTo] + prevReadItems = items[readFrom:readTo] } - s.finalisePreviousRWSet(prevItems) + var prevWriteItems []RWEventItem + if writeFrom >= 0 && writeTo > writeFrom { + prevWriteItems = items[writeFrom:writeTo] + } + s.finalisePreviousRWSet(prevReadItems, prevWriteItems) readFrom, readTo = -1, -1 + writeFrom, writeTo = -1, -1 } recordNewTx = true s.asyncRWSet = RWSet{ @@ -445,15 +468,29 @@ func (s *MVStates) handleRWEvents(items []RWEventItem) { } switch item.Event { // recorde current read/write event - case ReadAccRWEvent, ReadSlotRWEvent: - // TODO: maintain read list here + case ReadAccRWEvent: + if readFrom < 0 { + readFrom = i + } + readTo = i + 1 + 
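For context on the read/write tracking used in this event loop: finaliseAccRead and finaliseSlotRead append the tx index to a sorted per-key list (RWTxList), so "the last writer or reader before tx i" becomes a binary search. A minimal sketch of that sorted-list idea; txList, append, and lastBefore are illustrative names, not the patch API:

    package main

    import (
        "fmt"
        "sort"
    )

    type txList struct{ list []int }

    // append inserts i keeping the list sorted and de-duplicated.
    func (w *txList) append(i int) {
        pos := sort.SearchInts(w.list, i)
        if pos < len(w.list) && w.list[pos] == i {
            return // already recorded
        }
        w.list = append(w.list, 0)
        copy(w.list[pos+1:], w.list[pos:])
        w.list[pos] = i
    }

    // lastBefore returns the largest recorded index strictly below txIndex, or -1.
    func (w *txList) lastBefore(txIndex int) int {
        pos := sort.SearchInts(w.list, txIndex)
        if pos == 0 {
            return -1
        }
        return w.list[pos-1]
    }

    func main() {
        var w txList
        for _, i := range []int{7, 2, 9, 2} {
            w.append(i)
        }
        fmt.Println(w.list, w.lastBefore(8)) // [2 7 9] 7
    }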
s.finaliseAccRead(s.asyncRWSet.index, item.Addr, item.State) + case ReadSlotRWEvent: if readFrom < 0 { readFrom = i } readTo = i + 1 + s.finaliseSlotRead(s.asyncRWSet.index, item.Addr, item.Slot) case WriteAccRWEvent: + if writeFrom < 0 { + writeFrom = i + } + writeTo = i + 1 s.finaliseAccWrite(s.asyncRWSet.index, item.Addr, item.State) case WriteSlotRWEvent: + if writeFrom < 0 { + writeFrom = i + } + writeTo = i + 1 s.finaliseSlotWrite(s.asyncRWSet.index, item.Addr, item.Slot) // recorde current as cannot gas fee delay case CannotGasFeeDelayRWEvent: @@ -462,15 +499,19 @@ func (s *MVStates) handleRWEvents(items []RWEventItem) { } // handle last tx rw set if recordNewTx { - var prevItems []RWEventItem + var prevReadItems []RWEventItem if readFrom >= 0 && readTo > readFrom { - prevItems = items[readFrom:readTo] + prevReadItems = items[readFrom:readTo] + } + var prevWriteItems []RWEventItem + if writeFrom >= 0 && writeTo > writeFrom { + prevWriteItems = items[writeFrom:writeTo] } - s.finalisePreviousRWSet(prevItems) + s.finalisePreviousRWSet(prevReadItems, prevWriteItems) } } -func (s *MVStates) finalisePreviousRWSet(reads []RWEventItem) { +func (s *MVStates) finalisePreviousRWSet(reads []RWEventItem, writes []RWEventItem) { if s.asyncRWSet.index < 0 { return } @@ -486,7 +527,7 @@ func (s *MVStates) finalisePreviousRWSet(reads []RWEventItem) { } // reset nextFinaliseIndex to index+1, it may revert to previous txs s.nextFinaliseIndex = index + 1 - s.resolveDepsMapCacheByWrites(index, reads) + s.resolveDepsMapCacheByWrites(index, reads, writes) } func (s *MVStates) RecordNewTx(index int) { @@ -669,7 +710,26 @@ func (s *MVStates) FinaliseWithRWSet(rwSet *RWSet) error { }) } } - s.resolveDepsMapCacheByWrites(i, reads) + writes := make([]RWEventItem, 0, len(s.rwSets[i].accWriteSet)+len(s.rwSets[i].slotWriteSet)) + for addr, sub := range s.rwSets[i].accWriteSet { + for state := range sub { + writes = append(writes, RWEventItem{ + Event: WriteAccRWEvent, + Addr: addr, + State: state, + }) + } + } + for addr, sub := range s.rwSets[i].slotWriteSet { + for slot := range sub { + writes = append(writes, RWEventItem{ + Event: WriteSlotRWEvent, + Addr: addr, + Slot: slot, + }) + } + } + s.resolveDepsMapCacheByWrites(i, reads, writes) } return nil @@ -698,22 +758,44 @@ func (s *MVStates) innerFinalise(index int, applyWriteSet bool) error { } for state := range sub { if _, exist := s.accWriteSet[addr][state]; !exist { - s.accWriteSet[addr][state] = NewStateWrites() + s.accWriteSet[addr][state] = NewRWTxList() } s.accWriteSet[addr][state].Append(index) } } + for addr, sub := range rwSet.accReadSet { + if _, exist := s.accReadSet[addr]; !exist { + s.accReadSet[addr] = make(map[AccountState]*RWTxList) + } + for state := range sub { + if _, exist := s.accReadSet[addr][state]; !exist { + s.accReadSet[addr][state] = NewRWTxList() + } + s.accReadSet[addr][state].Append(index) + } + } for addr, sub := range rwSet.slotWriteSet { if _, exist := s.slotWriteSet[addr]; !exist { s.slotWriteSet[addr] = make(map[common.Hash]*RWTxList) } for slot := range sub { if _, exist := s.slotWriteSet[addr][slot]; !exist { - s.slotWriteSet[addr][slot] = NewStateWrites() + s.slotWriteSet[addr][slot] = NewRWTxList() } s.slotWriteSet[addr][slot].Append(index) } } + for addr, sub := range rwSet.slotReadSet { + if _, exist := s.slotReadSet[addr]; !exist { + s.slotReadSet[addr] = make(map[common.Hash]*RWTxList) + } + for slot := range sub { + if _, exist := s.slotReadSet[addr][slot]; !exist { + s.slotReadSet[addr][slot] = NewRWTxList() + } + 
s.slotReadSet[addr][slot].Append(index) + } + } return nil } @@ -723,22 +805,44 @@ func (s *MVStates) finaliseSlotWrite(index int, addr common.Address, slot common s.slotWriteSet[addr] = make(map[common.Hash]*RWTxList) } if _, exist := s.slotWriteSet[addr][slot]; !exist { - s.slotWriteSet[addr][slot] = NewStateWrites() + s.slotWriteSet[addr][slot] = NewRWTxList() } s.slotWriteSet[addr][slot].Append(index) } +func (s *MVStates) finaliseSlotRead(index int, addr common.Address, slot common.Hash) { + // append to pending read set + if _, exist := s.slotReadSet[addr]; !exist { + s.slotReadSet[addr] = make(map[common.Hash]*RWTxList) + } + if _, exist := s.slotReadSet[addr][slot]; !exist { + s.slotReadSet[addr][slot] = NewRWTxList() + } + s.slotReadSet[addr][slot].Append(index) +} + func (s *MVStates) finaliseAccWrite(index int, addr common.Address, state AccountState) { // append to pending write set if _, exist := s.accWriteSet[addr]; !exist { s.accWriteSet[addr] = make(map[AccountState]*RWTxList) } if _, exist := s.accWriteSet[addr][state]; !exist { - s.accWriteSet[addr][state] = NewStateWrites() + s.accWriteSet[addr][state] = NewRWTxList() } s.accWriteSet[addr][state].Append(index) } +func (s *MVStates) finaliseAccRead(index int, addr common.Address, state AccountState) { + // append to pending read set + if _, exist := s.accReadSet[addr]; !exist { + s.accReadSet[addr] = make(map[AccountState]*RWTxList) + } + if _, exist := s.accReadSet[addr][state]; !exist { + s.accReadSet[addr][state] = NewRWTxList() + } + s.accReadSet[addr][state].Append(index) +} + func (s *MVStates) queryAccWrites(addr common.Address, state AccountState) *RWTxList { if _, exist := s.accWriteSet[addr]; !exist { return nil @@ -746,6 +850,13 @@ func (s *MVStates) queryAccWrites(addr common.Address, state AccountState) *RWTx return s.accWriteSet[addr][state] } +func (s *MVStates) queryAccReads(addr common.Address, state AccountState) *RWTxList { + if _, exist := s.accReadSet[addr]; !exist { + return nil + } + return s.accReadSet[addr][state] +} + func (s *MVStates) querySlotWrites(addr common.Address, slot common.Hash) *RWTxList { if _, exist := s.slotWriteSet[addr]; !exist { return nil @@ -753,8 +864,15 @@ func (s *MVStates) querySlotWrites(addr common.Address, slot common.Hash) *RWTxL return s.slotWriteSet[addr][slot] } +func (s *MVStates) querySlotReads(addr common.Address, slot common.Hash) *RWTxList { + if _, exist := s.slotReadSet[addr]; !exist { + return nil + } + return s.slotReadSet[addr][slot] +} + // resolveDepsMapCacheByWrites must be executed in order -func (s *MVStates) resolveDepsMapCacheByWrites(index int, reads []RWEventItem) { +func (s *MVStates) resolveDepsMapCacheByWrites(index int, reads []RWEventItem, writes []RWEventItem) { for index >= len(s.txDepCache) { s.txDepCache = append(s.txDepCache, TxDep{}) } @@ -769,14 +887,14 @@ func (s *MVStates) resolveDepsMapCacheByWrites(index int, reads []RWEventItem) { // check tx dependency, only check key for _, item := range reads { // check account states & slots - var writes *RWTxList + var depWrites *RWTxList if item.Event == ReadAccRWEvent { - writes = s.queryAccWrites(item.Addr, item.State) + depWrites = s.queryAccWrites(item.Addr, item.State) } else { - writes = s.querySlotWrites(item.Addr, item.Slot) + depWrites = s.querySlotWrites(item.Addr, item.Slot) } - if writes != nil { - if find := writes.FindLastWrite(index); find >= 0 { + if depWrites != nil { + if find := depWrites.FindLastTx(index); find >= 0 { if tx := uint64(find); !depSlice.exist(tx) { 
depSlice.add(tx) } @@ -788,16 +906,36 @@ func (s *MVStates) resolveDepsMapCacheByWrites(index int, reads []RWEventItem) { continue } addrMap[item.Addr] = struct{}{} - writes = s.queryAccWrites(item.Addr, AccountSuicide) - if writes != nil { - if find := writes.FindLastWrite(index); find >= 0 { + depWrites = s.queryAccWrites(item.Addr, AccountSuicide) + if depWrites != nil { + if find := depWrites.FindLastTx(index); find >= 0 { if tx := uint64(find); !depSlice.exist(tx) { depSlice.add(tx) } } } } - // TODO: check read before write dependency here + // Looking for read operations before write operations, similar to a read->read->read/write execution sequence, + // we need the write transaction to occur after the read transactions. + for _, item := range writes { + var depReads *RWTxList + if item.Event == WriteAccRWEvent { + depReads = s.queryAccReads(item.Addr, item.State) + } else { + depReads = s.querySlotReads(item.Addr, item.Slot) + } + if depReads != nil { + if finds := depReads.FindPrevTxs(index); len(finds) >= 0 { + for _, tx := range finds { + tx := uint64(tx) + if !depSlice.exist(tx) { + depSlice.add(tx) + } + } + } + } + } + for _, addr := range s.gasFeeReceivers { if _, ok := addrMap[addr]; ok { rwSet.cannotGasFeeDelay = true From 14ae50e8f58b3e78262e7ca292cb12864be4dbc6 Mon Sep 17 00:00:00 2001 From: welkin22 Date: Tue, 24 Sep 2024 16:38:33 +0800 Subject: [PATCH 36/42] comments --- core/types/mvstates.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/types/mvstates.go b/core/types/mvstates.go index f6d0e0a49a..be3d0ad20b 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -915,7 +915,7 @@ func (s *MVStates) resolveDepsMapCacheByWrites(index int, reads []RWEventItem, w } } } - // Looking for read operations before write operations, similar to a read->read->read/write execution sequence, + // Looking for read operations before write operations, e.g: read->read->read/write execution sequence, // we need the write transaction to occur after the read transactions. 
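// --- Hedged example (assumed numbers, not from the patch) of the
// read-before-write rule stated in the comment above: if tx1 and tx2 only read
// slot S and tx5 later writes S, tx5 must list tx1 and tx2 as dependencies so a
// parallel scheduler never schedules the write ahead of the reads
// (cf. slotReadSet / FindPrevTxs / TxDep in this file).
package main

import "fmt"

func main() {
	readsOfS := []int{1, 2}  // txs that read slot S
	writerOfS := 5           // tx that writes slot S
	deps := map[int][]int{}  // txIndex -> dependency list
	for _, r := range readsOfS {
		if r < writerOfS { // only earlier txs can be dependencies
			deps[writerOfS] = append(deps[writerOfS], r)
		}
	}
	fmt.Println(deps) // map[5:[1 2]]
}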
for _, item := range writes { var depReads *RWTxList From b6f55f77b653c9a571a1e29ba1fc11517de30e76 Mon Sep 17 00:00:00 2001 From: galaio Date: Wed, 25 Sep 2024 14:21:26 +0800 Subject: [PATCH 37/42] txdag: add suicide checking logic; --- core/types/mvstates.go | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/core/types/mvstates.go b/core/types/mvstates.go index be3d0ad20b..759a12308d 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -245,6 +245,24 @@ type RWEventItem struct { Slot common.Hash } +func (e RWEventItem) String() string { + switch e.Event { + case NewTxRWEvent: + return fmt.Sprintf("(%v)%v", e.Event, e.Index) + case ReadAccRWEvent: + return fmt.Sprintf("(%v)%v|%v", e.Event, e.Addr, e.State) + case WriteAccRWEvent: + return fmt.Sprintf("(%v)%v|%v", e.Event, e.Addr, e.State) + case ReadSlotRWEvent: + return fmt.Sprintf("(%v)%v|%v", e.Event, e.Addr, e.Slot) + case WriteSlotRWEvent: + return fmt.Sprintf("(%v)%v|%v", e.Event, e.Addr, e.Slot) + case CannotGasFeeDelayRWEvent: + return fmt.Sprintf("(%v)", e.Event) + } + return "Unknown" +} + type RWTxList struct { list []int } @@ -914,13 +932,20 @@ func (s *MVStates) resolveDepsMapCacheByWrites(index int, reads []RWEventItem, w } } } + // append AccountSelf event + s.finaliseAccRead(index, item.Addr, AccountSelf) } // Looking for read operations before write operations, e.g: read->read->read/write execution sequence, // we need the write transaction to occur after the read transactions. for _, item := range writes { var depReads *RWTxList if item.Event == WriteAccRWEvent { - depReads = s.queryAccReads(item.Addr, item.State) + // if here is AccountSuicide write, check AccountSelf read + state := item.State + if state == AccountSuicide { + state = AccountSelf + } + depReads = s.queryAccReads(item.Addr, state) } else { depReads = s.querySlotReads(item.Addr, item.Slot) } From 2bf1bb6a1b89613832d9653dbfe30b33fb94ccdb Mon Sep 17 00:00:00 2001 From: welkin22 Date: Thu, 26 Sep 2024 16:46:38 +0800 Subject: [PATCH 38/42] txdag: optimize the size of readSet --- core/types/mvstates.go | 36 ++++++++++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/core/types/mvstates.go b/core/types/mvstates.go index 759a12308d..68e132104d 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -333,6 +333,10 @@ func (w *RWTxList) Copy() *RWTxList { return np } +func (w *RWTxList) Remove(idx int) { + w.list = append(w.list[:idx], w.list[idx+1:]...) 
+} + var ( rwEventsAllocMeter = metrics.GetOrRegisterMeter("mvstate/alloc/rwevents/cnt", nil) rwEventsAllocGauge = metrics.GetOrRegisterGauge("mvstate/alloc/rwevents/gauge", nil) @@ -491,25 +495,21 @@ func (s *MVStates) handleRWEvents(items []RWEventItem) { readFrom = i } readTo = i + 1 - s.finaliseAccRead(s.asyncRWSet.index, item.Addr, item.State) case ReadSlotRWEvent: if readFrom < 0 { readFrom = i } readTo = i + 1 - s.finaliseSlotRead(s.asyncRWSet.index, item.Addr, item.Slot) case WriteAccRWEvent: if writeFrom < 0 { writeFrom = i } writeTo = i + 1 - s.finaliseAccWrite(s.asyncRWSet.index, item.Addr, item.State) case WriteSlotRWEvent: if writeFrom < 0 { writeFrom = i } writeTo = i + 1 - s.finaliseSlotWrite(s.asyncRWSet.index, item.Addr, item.Slot) // recorde current as cannot gas fee delay case CannotGasFeeDelayRWEvent: s.asyncRWSet.cannotGasFeeDelay = true @@ -539,6 +539,34 @@ func (s *MVStates) finalisePreviousRWSet(reads []RWEventItem, writes []RWEventIt } s.rwSets[index] = s.asyncRWSet + for _, item := range writes { + if item.Event == WriteAccRWEvent { + s.finaliseAccWrite(item.Index, item.Addr, item.State) + } else if item.Event == WriteSlotRWEvent { + s.finaliseSlotWrite(item.Index, item.Addr, item.Slot) + } + } + + for _, item := range reads { + if item.Event == ReadAccRWEvent { + accWrites := s.queryAccWrites(item.Addr, item.State) + if accWrites != nil { + if _, ok := accWrites.SearchTxIndex(item.Index); ok { + continue + } + } + s.finaliseAccRead(item.Index, item.Addr, item.State) + } else if item.Event == ReadSlotRWEvent { + slotWrites := s.querySlotWrites(item.Addr, item.Slot) + if slotWrites != nil { + if _, ok := slotWrites.SearchTxIndex(item.Index); ok { + continue + } + } + s.finaliseSlotRead(item.Index, item.Addr, item.Slot) + } + } + if index > s.nextFinaliseIndex { log.Error("finalise in wrong order", "next", s.nextFinaliseIndex, "input", index) return From 6e251dcd211c0a915737fcd7cb205d067a8a5a1e Mon Sep 17 00:00:00 2001 From: welkin22 Date: Thu, 26 Sep 2024 16:50:41 +0800 Subject: [PATCH 39/42] remove remove method --- core/types/mvstates.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/core/types/mvstates.go b/core/types/mvstates.go index 68e132104d..e7cc162afa 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -333,10 +333,6 @@ func (w *RWTxList) Copy() *RWTxList { return np } -func (w *RWTxList) Remove(idx int) { - w.list = append(w.list[:idx], w.list[idx+1:]...) 
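// --- Hedged sketch (editor's illustration, not the patch code) of the
// readSet-size optimization above: a read is only recorded when the same tx
// index is absent from that key's write list, so a tx that both reads and
// writes a key keeps a single write entry instead of a redundant read entry.
package main

import "fmt"

func main() {
	writesOfK := []int{4}   // tx4 wrote key K (cf. slotWriteSet)
	readsOfK := []int{}     // pending read records for K (cf. slotReadSet)
	tx := 4                 // tx4 also reads K

	alreadyWrote := false
	for _, w := range writesOfK {
		if w == tx { // same membership check as SearchTxIndex in the patch
			alreadyWrote = true
			break
		}
	}
	if !alreadyWrote {
		readsOfK = append(readsOfK, tx)
	}
	fmt.Println(readsOfK) // [] -- no redundant read entry for tx4
}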
-} - var ( rwEventsAllocMeter = metrics.GetOrRegisterMeter("mvstate/alloc/rwevents/cnt", nil) rwEventsAllocGauge = metrics.GetOrRegisterGauge("mvstate/alloc/rwevents/gauge", nil) From 9c397a9257c98e8e2d0fb92bec9cf88f7cc50fc6 Mon Sep 17 00:00:00 2001 From: welkin22 Date: Thu, 26 Sep 2024 18:36:45 +0800 Subject: [PATCH 40/42] fix: incorrect index --- core/types/mvstates.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/core/types/mvstates.go b/core/types/mvstates.go index e7cc162afa..d64afd1281 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -537,9 +537,9 @@ func (s *MVStates) finalisePreviousRWSet(reads []RWEventItem, writes []RWEventIt for _, item := range writes { if item.Event == WriteAccRWEvent { - s.finaliseAccWrite(item.Index, item.Addr, item.State) + s.finaliseAccWrite(index, item.Addr, item.State) } else if item.Event == WriteSlotRWEvent { - s.finaliseSlotWrite(item.Index, item.Addr, item.Slot) + s.finaliseSlotWrite(index, item.Addr, item.Slot) } } @@ -547,19 +547,19 @@ func (s *MVStates) finalisePreviousRWSet(reads []RWEventItem, writes []RWEventIt if item.Event == ReadAccRWEvent { accWrites := s.queryAccWrites(item.Addr, item.State) if accWrites != nil { - if _, ok := accWrites.SearchTxIndex(item.Index); ok { + if _, ok := accWrites.SearchTxIndex(index); ok { continue } } - s.finaliseAccRead(item.Index, item.Addr, item.State) + s.finaliseAccRead(index, item.Addr, item.State) } else if item.Event == ReadSlotRWEvent { slotWrites := s.querySlotWrites(item.Addr, item.Slot) if slotWrites != nil { - if _, ok := slotWrites.SearchTxIndex(item.Index); ok { + if _, ok := slotWrites.SearchTxIndex(index); ok { continue } } - s.finaliseSlotRead(item.Index, item.Addr, item.Slot) + s.finaliseSlotRead(index, item.Addr, item.Slot) } } From f206e6d2619ac8bfbd841168e94e8cdf1f775c61 Mon Sep 17 00:00:00 2001 From: welkin22 Date: Fri, 11 Oct 2024 11:41:59 +0800 Subject: [PATCH 41/42] Remove redundant data when generating NonDependentRelFlag --- core/types/dag_test.go | 2 +- core/types/mvstates.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/core/types/dag_test.go b/core/types/dag_test.go index de4f3e1c44..b4acf1a1e3 100644 --- a/core/types/dag_test.go +++ b/core/types/dag_test.go @@ -156,7 +156,7 @@ func mockSystemTxDAGWithLargeDeps() TxDAG { dag.TxDeps[7].TxIndexes = []uint64{3} dag.TxDeps[8].TxIndexes = []uint64{} //dag.TxDeps[9].TxIndexes = []uint64{0, 1, 2, 6, 7, 8} - dag.TxDeps[9] = NewTxDep([]uint64{3, 4, 5, 10, 11}, NonDependentRelFlag) + dag.TxDeps[9] = NewTxDep([]uint64{3, 4, 5}, NonDependentRelFlag) dag.TxDeps[10] = NewTxDep([]uint64{}, ExcludedTxFlag) dag.TxDeps[11] = NewTxDep([]uint64{}, ExcludedTxFlag) return dag diff --git a/core/types/mvstates.go b/core/types/mvstates.go index d64afd1281..71ea2e4a14 100644 --- a/core/types/mvstates.go +++ b/core/types/mvstates.go @@ -1036,8 +1036,8 @@ func (s *MVStates) ResolveTxDAG(txCnt int, extraTxDeps ...TxDep) (TxDAG, error) // if tx deps larger than half of txs, then convert with NonDependentRelFlag txDAG.TxDeps[i].SetFlag(NonDependentRelFlag) nd := make([]uint64, 0, totalCnt-1-len(txDAG.TxDeps[i].TxIndexes)) - for j := uint64(0); j < uint64(totalCnt); j++ { - if !slices.Contains(txDAG.TxDeps[i].TxIndexes, j) && j != uint64(i) { + for j := uint64(0); j < uint64(i); j++ { + if !slices.Contains(txDAG.TxDeps[i].TxIndexes, j) { nd = append(nd, j) } } From 6a61b737d6f8bcac8138b2bf038ef7a73167b850 Mon Sep 17 00:00:00 2001 From: welkin22 Date: Mon, 25 Nov 2024 15:00:33 +0800 
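// --- Minimal sketch (assumed values, not part of the patch) of the
// NonDependentRelFlag inversion changed above: when tx i depends on more than
// half of the block's txs, the DAG stores the complement instead, and only
// indexes j < i are kept, since a tx can never depend on itself or on later txs.
package main

import (
	"fmt"
	"slices"
)

func main() {
	i := 5
	deps := []uint64{0, 2, 4} // direct dependencies of tx 5 (more than half of 0..4)
	nonDeps := make([]uint64, 0, i)
	for j := uint64(0); j < uint64(i); j++ { // j < i, later txs are never listed
		if !slices.Contains(deps, j) {
			nonDeps = append(nonDeps, j)
		}
	}
	fmt.Println(nonDeps) // [1 3] -- stored with NonDependentRelFlag
}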
Subject: [PATCH 42/42] modify according to the comments --- miner/worker.go | 1 - tests/block_test.go | 16 ---------------- 2 files changed, 17 deletions(-) diff --git a/miner/worker.go b/miner/worker.go index bfd60f2536..5cf742bcd5 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1076,7 +1076,6 @@ func (w *worker) generateDAGTx(statedb *state.StateDB, signer types.Signer, txIn return nil, fmt.Errorf("current signer is nil") } - //privateKey, err := crypto.HexToECDSA(privateKeyHex) sender := w.config.ParallelTxDAGSenderPriv if sender == nil { return nil, fmt.Errorf("missing sender private key") diff --git a/tests/block_test.go b/tests/block_test.go index a650ca07f3..0457510821 100644 --- a/tests/block_test.go +++ b/tests/block_test.go @@ -74,7 +74,6 @@ func TestExecutionSpecBlocktests(t *testing.T) { } func TestBlockchainWithTxDAG(t *testing.T) { - //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelDebug, true))) bt := new(testMatcher) // General state tests are 'exported' as blockchain tests, but we can run them natively. // For speedier CI-runs, the line below can be uncommented, so those are skipped. @@ -108,21 +107,6 @@ func TestBlockchainWithTxDAG(t *testing.T) { return } }) - - //bt := new(testMatcher) - //path := filepath.Join(blockTestDir, "ValidBlocks", "bcStatetests", "refundReset.json") - //_, name := filepath.Split(path) - //t.Run(name, func(t *testing.T) { - // bt.runTestFile(t, path, name, func(t *testing.T, name string, test *BlockTest) { - // if runtime.GOARCH == "386" && runtime.GOOS == "windows" && rand.Int63()%2 == 0 { - // t.Skip("test (randomly) skipped on 32-bit windows") - // } - // if err := bt.checkFailure(t, test.Run(true, rawdb.PathScheme, nil, true, nil)); err != nil { - // t.Errorf("test in path mode with snapshotter failed: %v", err) - // return - // } - // }) - //}) } func execBlockTest(t *testing.T, bt *testMatcher, test *BlockTest) {