debug log for fork
sunny2022da committed Aug 14, 2024
1 parent 7eef44c commit 55fb1e9
Showing 2 changed files with 76 additions and 3 deletions.
30 changes: 30 additions & 0 deletions core/state/state_object.go
@@ -19,6 +19,7 @@ package state
import (
"bytes"
"fmt"
"github.com/ethereum/go-ethereum/log"
"io"
"math/big"
"sync"
@@ -535,6 +536,12 @@ func (s *stateObject) updateTrie() (Trie, error) {
s.storageRecordsLock.Lock()
defer s.storageRecordsLock.Unlock()
}

if true {
log.Debug(fmt.Sprintf("Dav - updating trie before finalize - addr: %s, Stroages:\nPending: %s\nDirty: %s\nOrigin: %s\n",
s.address, s.pendingStorage.String(), s.dirtyStorage.String(), s.originStorage.String()))

}
// Make sure all dirty slots are finalized into the pending storage area
s.finalise(false)

@@ -578,6 +585,11 @@ func (s *stateObject) updateTrie() (Trie, error) {
dirtyStorage[key] = v
return true
})
if true {
log.Debug(fmt.Sprintf("Dav - updating trie before cal trie - addr: %s, s.data.root: %s, s.trie.hash: %s Stroages:\nPending: %s\nDirty: %s\nOrigin: %s\n",
s.address, s.data.Root, s.trie.Hash(), s.pendingStorage.String(), s.dirtyStorage.String(), s.originStorage.String()))

}
var wg sync.WaitGroup
wg.Add(1)
go func() {
@@ -647,6 +659,12 @@ func (s *stateObject) updateTrie() (Trie, error) {
}

s.pendingStorage = newStorage(s.isParallel) // reset pending map

if true {
log.Debug(fmt.Sprintf("Dav - updating trie before exit - addr: %s, s.data.root: %s, s.trie.hash: %s\n",
s.address, s.data.Root, s.trie.Hash()))

}
return tr, nil
}

@@ -1008,6 +1026,13 @@ func (s *stateObject) fixUpOriginAndResetPendingStorage() {
if origObj != nil && origObj.originStorage.Length() != 0 {
// There can be racing issue with CopyForSlot/LightCopy
origObj.storageRecordsLock.RLock()
if true {
log.Debug(fmt.Sprintf("\n----vvvvvvvvvv----\nDav - fixUpOriginAndResetPendingStorage before fixup - addr: %s, Stroages:\nPending: %s\nDirty: %s\nOrigin: %s\nmainDBObj(Index: %d): Stroages:\nPending: %s\nDirty: %s\nOrigin: %s\n",
s.address, s.pendingStorage.String(), s.dirtyStorage.String(), s.originStorage.String(),
mainDB.TxIndex(),
origObj.pendingStorage.String(), origObj.dirtyStorage.String(), origObj.originStorage.String()))

}
originStorage := origObj.originStorage.Copy()
origObj.storageRecordsLock.RUnlock()
// During the tx execution, the originStorage can be updated with GetCommittedState()
@@ -1024,10 +1049,15 @@ func (s *stateObject) fixUpOriginAndResetPendingStorage() {
})
s.originStorage = originStorage
}

// isParallel is unnecessary since the pendingStorage for slotObject will be used serially from now on.
if s.pendingStorage.Length() > 0 {
s.pendingStorage = newStorage(false)
}
if true {
log.Debug(fmt.Sprintf("Dav - fixUpOriginAndResetPendingStorage after fixup - addr: %s, Stroages:\nPending: %s\nDirty: %s\nOrigin: %s\n----^^^^^^^^^^----\n",
s.address, s.pendingStorage.String(), s.dirtyStorage.String(), s.originStorage.String()))
}
s.storageRecordsLock.Unlock()
}
}
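
Editorial note, not part of the commit: the logs added to state_object.go above dump the slot object's Pending/Dirty/Origin storage maps, and in fixUpOriginAndResetPendingStorage also the main-DB object's maps at the same point, so the two sides can be compared slot by slot. Below is a minimal, self-contained sketch of such a comparison; diffStorage and the string-keyed maps are hypothetical stand-ins (the fork keys its storage by common.Hash), not code from this repository.

package main

import "fmt"

// diffStorage compares two storage snapshots, e.g. the slot object's
// originStorage against the main-DB object's, and reports every slot that
// differs or exists on only one side. Plain hex strings keep the sketch
// self-contained.
func diffStorage(slot, mainDB map[string]string) []string {
	var out []string
	for k, v := range slot {
		mv, ok := mainDB[k]
		switch {
		case !ok:
			out = append(out, fmt.Sprintf("slot-only   %s=%s", k, v))
		case mv != v:
			out = append(out, fmt.Sprintf("mismatch    %s: slot=%s mainDB=%s", k, v, mv))
		}
	}
	for k, v := range mainDB {
		if _, ok := slot[k]; !ok {
			out = append(out, fmt.Sprintf("mainDB-only %s=%s", k, v))
		}
	}
	return out
}

func main() {
	slot := map[string]string{"0x01": "0x0a", "0x02": "0x0b"}
	mainDB := map[string]string{"0x01": "0x0a", "0x02": "0x0c", "0x03": "0x0d"}
	for _, line := range diffStorage(slot, mainDB) {
		fmt.Println(line)
	}
}

Feeding the Origin dumps from a "before fixup" log line on both sides into a helper like this narrows a divergence down to the first mismatching slot.
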
49 changes: 46 additions & 3 deletions core/state/statedb.go
@@ -1506,7 +1506,9 @@ func (s *StateDB) GetRefund() uint64 {
// into the tries just yet. Only IntermediateRoot or Commit will do that.
func (s *StateDB) Finalise(deleteEmptyObjects bool) {
addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties))

if true {
log.Debug(fmt.Sprintf("Dav - Finalise - enter - tx: %d\n", s.txIndex))
}
// finalise stateObjectsDestruct
// The finalise of stateDB is called at verify & commit phase, which is global, no need to acquire the lock.
for addr, acc := range s.stateObjectsDestructDirty {
@@ -1585,6 +1587,11 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
}
// Invalidate journal because reverting across transactions is not allowed.
s.clearJournalAndRefund()

if true {
log.Debug(fmt.Sprintf("Dav - Finalise - exit - tx: %d\ns.stateObjectsPending(%d) %v\n", s.txIndex, len(s.stateObjectsPending), s.stateObjectsPending))
//debug.PrintStack()
}
}

// IntermediateRoot computes the current root hash of the state trie.
@@ -1593,9 +1600,22 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
// TODO: For parallel SlotDB, IntermediateRootForSlot is used, need to clean up this method.
func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// Finalise all the dirty storage states and write them into the tries
if true {
log.Debug(fmt.Sprintf("Dav - IntermediateRoot - tx: %d, before Finalise, s.trie.hash: %s\n", s.txIndex, s.trie.Hash()))
}
s.Finalise(deleteEmptyObjects)
if true {
log.Debug(fmt.Sprintf("Dav - IntermediateRoot - tx: %d, before AccountsIntermediateRoot, s.trie.hash: %s\n", s.txIndex, s.trie.Hash()))
}
s.AccountsIntermediateRoot()
return s.StateIntermediateRoot()
if true {
log.Debug(fmt.Sprintf("Dav - IntermediateRoot - tx: %d, after AccountsIntermediateRoot, s.trie.hash: %s\n", s.txIndex, s.trie.Hash()))
}
res := s.StateIntermediateRoot()
if true {
log.Debug(fmt.Sprintf("Dav - IntermediateRoot - tx: %d, after StateIntermediateRoot, s.trie.hash: %s\n", s.txIndex, s.trie.Hash()))
}
return res
}

func (s *StateDB) AccountsIntermediateRoot() {
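
Editorial note, not part of the commit: the IntermediateRoot hunk above swaps the old direct return s.StateIntermediateRoot() for a logged res := s.StateIntermediateRoot(); return res sequence, so the trie hash gets recorded before Finalise, around AccountsIntermediateRoot, and after StateIntermediateRoot. The sketch below shows the same bracketing factored into a single hypothetical runPhase helper; the helper name and the toy hash are illustrative only, not code from this repository.

package main

import (
	"crypto/sha256"
	"fmt"
)

// runPhase prints a caller-supplied root hash before and after running one
// phase, mirroring the paired log.Debug calls added around each phase here.
func runPhase(name string, root func() string, phase func()) {
	fmt.Printf("before %-24s root=%s\n", name, root())
	phase()
	fmt.Printf("after  %-24s root=%s\n", name, root())
}

func main() {
	// Toy state: the "root" is just a short hash of the current contents.
	state := []byte("genesis")
	root := func() string {
		sum := sha256.Sum256(state)
		return fmt.Sprintf("%x", sum[:8])
	}

	runPhase("Finalise", root, func() { state = append(state, "finalised"...) })
	runPhase("AccountsIntermediateRoot", root, func() { state = append(state, "accounts"...) })
	runPhase("StateIntermediateRoot", root, func() {})
}

Logging the root on both sides of every phase makes it immediate which phase first changes it, instead of only seeing the final mismatch.
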
@@ -1627,8 +1647,13 @@ func (s *StateDB) AccountsIntermediateRoot() {
wg.Add(1)
tasks <- func() {
defer wg.Done()
if false {
log.Debug(fmt.Sprintf("Dav - AccountsIntermediateRoot(P) - tx: %d, before obj.updateRoot(), obj: %s, s.data: %v, s.trie.hash: %s\n", s.txIndex, obj.address, obj.data, s.trie.Hash()))
}
obj.updateRoot()

if true {
log.Debug(fmt.Sprintf("Dav - AccountsIntermediateRoot(P) - tx: %d, after obj.updateRoot(), obj: %s, s.data: %v, s.trie.hash: %s\n", s.txIndex, obj.address, obj.data, s.trie.Hash()))
}
// Cache the data until commit. Note, this update mechanism is not symmetric
// to the deletion, because whereas it is enough to track account updates
// at commit time, deletions need tracking at transaction boundary level to
@@ -1643,7 +1668,13 @@ func (s *StateDB) AccountsIntermediateRoot() {
wg.Add(1)
tasks <- func() {
defer wg.Done()
if true {
log.Debug(fmt.Sprintf("Dav - AccountsIntermediateRoot(s) - tx: %d, before obj.updateRoot(), obj: %s, s.data: %v, s.trie.hash: %s\n", s.txIndex, obj.address, obj.data, s.trie.Hash()))
}
obj.updateRoot()
if true {
log.Debug(fmt.Sprintf("Dav - AccountsIntermediateRoot(s) - tx: %d, after obj.updateRoot(), obj: %s, s.data: %v, s.trie.hash: %s\n", s.txIndex, obj.address, obj.data, s.trie.Hash()))
}

// Cache the data until commit. Note, this update mechanism is not symmetric
// to the deletion, because whereas it is enough to track account updates
@@ -1701,16 +1732,28 @@ func (s *StateDB) StateIntermediateRoot() common.Hash {
if s.parallel.isSlotDB {
if obj := s.parallel.dirtiedStateObjectsInSlot[addr]; obj.deleted {
s.deleteStateObject(obj)
if true {
log.Debug(fmt.Sprintf("Dav - StateIntermediateRoot(p) - State Processing,deleteStateObj: %s\n", obj.address))
}
s.AccountDeleted += 1
} else {
s.updateStateObject(obj)
if true {
log.Debug(fmt.Sprintf("Dav - StateIntermediateRoot(p) - State Processing,updateStateObj: %s, obj.data: %v\n", obj.address, obj.data))
}
s.AccountUpdated += 1
}
} else if obj, _ := s.getStateObjectFromStateObjects(addr); obj.deleted {
s.deleteStateObject(obj)
if true {
log.Debug(fmt.Sprintf("Dav - StateIntermediateRoot(s) - State Processing,deleteStateObj: %s\n", obj.address))
}
s.AccountDeleted += 1
} else {
s.updateStateObject(obj)
if true {
log.Debug(fmt.Sprintf("Dav - StateIntermediateRoot(s, %v)- State Processing,updateStateObj: %s, obj.data: %v\n", s.isParallel, obj.address, obj.data))
}
s.AccountUpdated += 1
}
usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure
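
Editorial note, not part of the commit: every trace added here follows the same shape, an if true { log.Debug(fmt.Sprintf(...)) } guard that can be switched off by flipping the literal (one site already uses if false to keep a trace disabled). A standalone sketch of pulling that guard behind one constant is below; the davDebug constant and davTrace helper are hypothetical names, and it uses the standard library's log/slog rather than go-ethereum's log package purely so the sketch compiles on its own.

package main

import (
	"fmt"
	"log/slog"
	"os"
)

// davDebug stands in for the literal `if true` guards scattered through the
// commit; flipping it to false silences every trace at once.
const davDebug = true

// davTrace keeps each call site to one line while preserving the
// Sprintf-style formatting used by the commit's messages.
func davTrace(format string, args ...any) {
	if davDebug {
		slog.Debug(fmt.Sprintf(format, args...))
	}
}

func main() {
	// Route debug-level records to stderr so the traces are visible.
	slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr,
		&slog.HandlerOptions{Level: slog.LevelDebug})))

	txIndex := 7
	davTrace("Dav - Finalise - enter - tx: %d", txIndex)
	davTrace("Dav - IntermediateRoot - tx: %d, before Finalise", txIndex)
}

One trade-off: with a helper, expensive arguments such as a trie hash are still evaluated at the call site even when the constant is false, which the commit's literal if guards avoid once flipped.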