
Commit

fix more lints
wlawt committed Apr 22, 2024
1 parent bd656aa commit 536b9db
Showing 3 changed files with 33 additions and 29 deletions.
36 changes: 19 additions & 17 deletions chain/builder.go
@@ -339,7 +339,7 @@ func BuildBlock(
 				log.Warn("invalid tx: invalid state keys")
 				return nil
 			}
-			result, err := tx.Execute(
+			txResults, err := tx.Execute(
 				ctx,
 				feeManager,
 				reads,
@@ -362,29 +362,31 @@ func BuildBlock(
 			defer blockLock.Unlock()
 
 			// Ensure block isn't too big
-			if ok, dimension := feeManager.Consume(result.Consumed, maxUnits); !ok {
-				log.Debug(
-					"skipping tx: too many units",
-					zap.Int("dimension", int(dimension)),
-					zap.Uint64("tx", result.Consumed[dimension]),
-					zap.Uint64("block units", feeManager.LastConsumed(dimension)),
-					zap.Uint64("max block units", maxUnits[dimension]),
-				)
-				restore = true
+			for _, result := range txResults {
+				if ok, dimension := feeManager.Consume(result.Consumed, maxUnits); !ok {
+					log.Debug(
+						"skipping tx: too many units",
+						zap.Int("dimension", int(dimension)),
+						zap.Uint64("tx", result.Consumed[dimension]),
+						zap.Uint64("block units", feeManager.LastConsumed(dimension)),
+						zap.Uint64("max block units", maxUnits[dimension]),
+					)
+					restore = true
 
-				// If we are above the target for the dimension we can't consume, we will
-				// stop building. This prevents a full mempool iteration looking for the
-				// "perfect fit".
-				if feeManager.LastConsumed(dimension) >= targetUnits[dimension] {
-					stop = true
-					return errBlockFull
+					// If we are above the target for the dimension we can't consume, we will
+					// stop building. This prevents a full mempool iteration looking for the
+					// "perfect fit".
+					if feeManager.LastConsumed(dimension) >= targetUnits[dimension] {
+						stop = true
+						return errBlockFull
+					}
 				}
 			}
 
 			// Update block with new transaction
 			tsv.Commit()
 			b.Txs = append(b.Txs, tx)
-			results = append(results, result)
+			results = append(results, txResults...)
 			if tx.WarpMessage != nil {
 				if warpErr == nil {
 					// Add a bit if the warp message was verified
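With multi-action transactions, tx.Execute returns one Result per action, so the builder now charges every result's units against the block's fee dimensions before committing the transaction. Below is a minimal, self-contained sketch of that accounting pattern; the five-dimension count and the simplified FeeManager, Dimensions, and Result types are assumptions standing in for the hypersdk definitions, not the real API.

package main

import (
	"errors"
	"fmt"
)

const numDimensions = 5 // hypersdk meters several fee dimensions; five is assumed here

type Dimensions [numDimensions]uint64

// Result is a trimmed stand-in for chain.Result; only the consumed units matter here.
type Result struct {
	Consumed Dimensions
}

// FeeManager is a simplified stand-in that tracks per-dimension consumption.
type FeeManager struct {
	consumed Dimensions
}

// Consume adds [units] to the running totals, failing on the first dimension that
// would exceed [max] (nothing is committed on failure).
func (f *FeeManager) Consume(units, max Dimensions) (bool, int) {
	for d := 0; d < numDimensions; d++ {
		if f.consumed[d]+units[d] > max[d] {
			return false, d
		}
	}
	for d := 0; d < numDimensions; d++ {
		f.consumed[d] += units[d]
	}
	return true, 0
}

func (f *FeeManager) LastConsumed(d int) uint64 { return f.consumed[d] }

var errBlockFull = errors.New("block full")

// tryAddResults mirrors the new builder loop: charge every result of a transaction,
// ask for the tx to be restored to the mempool if it does not fit, and stop building
// outright once the failing dimension is already at or past its target.
func tryAddResults(fm *FeeManager, txResults []*Result, maxUnits, targetUnits Dimensions) (restore bool, err error) {
	for _, result := range txResults {
		if ok, dimension := fm.Consume(result.Consumed, maxUnits); !ok {
			restore = true
			if fm.LastConsumed(dimension) >= targetUnits[dimension] {
				return restore, errBlockFull
			}
		}
	}
	return restore, nil
}

func main() {
	fm := &FeeManager{}
	maxUnits := Dimensions{100, 100, 100, 100, 100}
	targetUnits := Dimensions{80, 80, 80, 80, 80}

	restore, err := tryAddResults(fm, []*Result{{Consumed: Dimensions{90, 1, 1, 1, 1}}}, maxUnits, targetUnits)
	fmt.Println(restore, err) // false <nil>: the first tx fits
	restore, err = tryAddResults(fm, []*Result{{Consumed: Dimensions{20, 1, 1, 1, 1}}}, maxUnits, targetUnits)
	fmt.Println(restore, err) // true block full: dimension 0 is already past its target
}

Stopping at the target rather than the hard maximum matches the comment in the diff: it avoids a full mempool iteration hunting for a transaction that happens to fit exactly.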
16 changes: 9 additions & 7 deletions chain/processor.go
@@ -41,7 +41,7 @@ func (b *StatelessBlock) Execute(
 		f       = fetcher.New(im, numTxs, b.vm.GetStateFetchConcurrency())
 		e       = executor.New(numTxs, b.vm.GetTransactionExecutionCores(), MaxKeyDependencies, b.vm.GetExecutorVerifyRecorder())
 		ts      = tstate.New(numTxs * 2) // TODO: tune this heuristic
-		results = make([]*Result, numTxs)
+		results = make([]*Result, 0)
 	)
 
 	// Fetch required keys and execute transactions
@@ -89,16 +89,18 @@ func (b *StatelessBlock) Execute(
 					return ctx.Err()
 				}
 			}
-			result, err := tx.Execute(ctx, feeManager, reads, sm, r, tsv, t, ok && warpVerified)
+			txResults, err := tx.Execute(ctx, feeManager, reads, sm, r, tsv, t, ok && warpVerified)
 			if err != nil {
 				return err
 			}
-			results[i] = result
+			results = append(results, txResults...)
 
-			// Update block metadata with units actually consumed (if more is consumed than block allows, we will non-deterministically
-			// exit with an error based on which tx over the limit is processed first)
-			if ok, d := feeManager.Consume(result.Consumed, r.GetMaxBlockUnits()); !ok {
-				return fmt.Errorf("%w: %d too large", ErrInvalidUnitsConsumed, d)
+			for _, result := range txResults {
+				// Update block metadata with units actually consumed (if more is consumed than block allows, we will non-deterministically
+				// exit with an error based on which tx over the limit is processed first)
+				if ok, d := feeManager.Consume(result.Consumed, r.GetMaxBlockUnits()); !ok {
+					return fmt.Errorf("%w: %d too large", ErrInvalidUnitsConsumed, d)
+				}
 			}
 
 			// Commit results to parent [TState]
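Because each transaction can now produce several results, a block no longer holds exactly numTxs results, which is why the pre-sized results[i] = result assignment gives way to appending the whole txResults slice. A small sketch of that flattening, with a stub Result type assumed in place of chain.Result:

package main

import "fmt"

// Result is a stub; the real chain.Result also carries outputs and consumed units.
type Result struct{ Success bool }

// flatten collects a variable number of per-transaction results into one
// block-level slice, the way processor.go now appends each txResults slice.
func flatten(perTx [][]*Result) []*Result {
	results := make([]*Result, 0) // can no longer be pre-sized to numTxs
	for _, txResults := range perTx {
		results = append(results, txResults...)
	}
	return results
}

func main() {
	perTx := [][]*Result{
		{{Success: true}},                   // single-action transaction
		{{Success: true}, {Success: false}}, // two-action transaction
	}
	fmt.Println(len(flatten(perTx))) // 3
}

Starting from make([]*Result, 0) keeps the append semantics correct even though the final length is only known after every transaction has executed.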
10 changes: 5 additions & 5 deletions chain/transaction.go
@@ -536,7 +536,7 @@ func (t *Transaction) Execute(
 			}
 		}
 
-		results = append(results, *Result{
+		results = append(results, &Result{
 			Success: success,
 			Output:  output,
 
@@ -556,8 +556,8 @@ func (t *Transaction) Marshal(p *codec.Packer) error {
 	}
 
 	// TODO: do I need all this within the loop?
-	for idx, action := range t.Actions {
-		actionID := action.GetActionID(idx, t.id)
+	for i, action := range t.Actions {
+		actionID := action.GetActionID(uint8(i), t.id)
 		authID := t.Auth.GetTypeID()
 		t.Base.Marshal(p)
 		var warpBytes []byte
@@ -568,7 +568,7 @@ func (t *Transaction) Marshal(p *codec.Packer) error {
 			}
 		}
 		p.PackBytes(warpBytes)
-		p.PackByte(actionID)
+		p.PackAddress(actionID)
 		action.Marshal(p)
 		p.PackByte(authID)
 		t.Auth.Marshal(p)
@@ -683,7 +683,7 @@ func UnmarshalTx(
 
 	var tx Transaction
 	tx.Base = base
-	tx.Action = action
+	tx.Actions = []Action{action}
 	tx.WarpMessage = warpMessage
 	tx.Auth = auth
 	if err := p.Err(); err != nil {
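Marshal now runs its packing once per action, deriving each action's identifier from the action index and the transaction ID (hence the uint8(i) cast) and writing it with PackAddress rather than PackByte. The sketch below mirrors only that per-action loop against toy types; the Packer, Action, Address, and actionID definitions are simplified assumptions, not the codec package, and the real loop also packs the base, warp bytes, and auth for each action.

package main

import (
	"encoding/binary"
	"fmt"
)

// Address stands in for codec.Address (33 bytes in hypersdk).
type Address [33]byte

// ID stands in for the 32-byte transaction ID.
type ID [32]byte

// Packer is a toy byte packer assumed for this sketch; the real codec.Packer
// exposes PackByte, PackBytes, and PackAddress with different internals.
type Packer struct{ b []byte }

func (p *Packer) PackAddress(a Address) { p.b = append(p.b, a[:]...) }
func (p *Packer) PackBytes(v []byte) {
	p.b = append(p.b, byte(len(v)))
	p.b = append(p.b, v...)
}

// Action is the minimal behaviour the loop needs from each action.
type Action interface {
	Marshal(p *Packer)
}

// actionID derives a per-action identifier from the action index and the tx ID,
// in the spirit of action.GetActionID(uint8(i), t.id); the derivation used here
// is an assumption made up for the sketch.
func actionID(i uint8, txID ID) Address {
	var a Address
	a[0] = i
	copy(a[1:], txID[:])
	return a
}

// Transfer is a dummy action used only to exercise the loop.
type Transfer struct{ Amount uint64 }

func (t *Transfer) Marshal(p *Packer) {
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], t.Amount)
	p.PackBytes(buf[:])
}

// marshalActions mirrors the per-action half of Transaction.Marshal:
// pack the derived action ID as an address, then the action body.
func marshalActions(p *Packer, txID ID, actions []Action) {
	for i, action := range actions {
		p.PackAddress(actionID(uint8(i), txID))
		action.Marshal(p)
	}
}

func main() {
	p := &Packer{}
	marshalActions(p, ID{0xaa}, []Action{&Transfer{Amount: 7}, &Transfer{Amount: 9}})
	fmt.Println(len(p.b)) // 84: (33-byte ID + 9-byte body) per action
}

On the decode side, UnmarshalTx in this commit still reads a single action and simply wraps it as tx.Actions = []Action{action}.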
