[R4R] feat: fast sync #1397

Merged · 26 commits · Jan 19, 2024

Commits
e084e0f
finish rollup gas optimize
Ethanncnm Aug 30, 2023
f877f61
code optimization and refactoring
Sep 14, 2023
5d987cd
add some batch metric
Sep 14, 2023
78bf8ae
[R4R]: Hotfix batch submitter optimization for release0.4.3 (#1370)
guoshijiang Sep 26, 2023
ee48a8d
add wstETH
shellteo Nov 2, 2023
da4444c
[R4R]-[l2geth]feat: add data in eth_calls response when revert
Tri-stone Oct 31, 2023
d489295
[R4R]-[l2geth]feat: add data in eth_calls response when revert
Tri-stone Nov 6, 2023
b1d015a
[R4R]-[l2geth]feat: add data in eth_call's response when revert (#1374)
Tri-stone Nov 6, 2023
e3e2240
Update api.go
idyllsss Nov 16, 2023
b38d43e
Bugfix/Remix estimate gas err (#1375)
Tri-stone Nov 16, 2023
e2426b1
bug fix about estimateGas rpc parameters (#1379)
idyllsss Nov 29, 2023
13f56e5
[l2geth]: RPC support query block with safe and finalized (#1385)
idyllsss Dec 19, 2023
433d674
request block concurrently
byteflyfunny Dec 21, 2023
7893bd0
request block concurrently
byteflyfunny Dec 21, 2023
216c06a
request block concurrently
byteflyfunny Dec 22, 2023
237c955
request block concurrently
byteflyfunny Dec 22, 2023
a4cea8f
add mainnet wstETH
shellteo Dec 22, 2023
59fe2c6
upgrade sdk to 0.2.3
shellteo Dec 22, 2023
f84e0a6
Merge branch 'release/v0.4.3' into albert/dev
shellteo Dec 22, 2023
40ee325
[R4R] SDK support wstETH bridge (#1388)
shellteo Dec 22, 2023
95e003b
[R4R] DTL response null result with 404 http status (#1391)
shidaxi Dec 22, 2023
3352a04
request block concurrently
byteflyfunny Jan 4, 2024
0ce42e9
Merge branch 'release/v0.4.3' into hotfix/rpc
Tri-stone Jan 5, 2024
195f864
add sleep time during retry to get block
byteflyfunny Jan 5, 2024
c0334b5
debug 100 concurrency
byteflyfunny Jan 8, 2024
6c8e920
break out of the for loop and stop subsequent request calls
byteflyfunny Jan 9, 2024
154 changes: 6 additions & 148 deletions batch-submitter/drivers/sequencer/batch.go
@@ -1,16 +1,9 @@
package sequencer

import (
"errors"
"fmt"
l2types "github.com/mantlenetworkio/mantle/l2geth/core/types"
)

var (
// ErrBlockWithInvalidContext signals an attempt to generate a
// BatchContext that specifies a total of zero txs.
ErrBlockWithInvalidContext = errors.New("attempted to generate batch " +
"context with 0 queued and 0 sequenced txs")
l2types "github.com/mantlenetworkio/mantle/l2geth/core/types"
)

// BatchElement reflects the contents of an atomic update to the L2 state.
@@ -23,16 +16,8 @@ type BatchElement struct {
// BlockNumber is the L1 BlockNumber of the batch.
BlockNumber uint64

// Tx is the optional transaction that was applied in this batch.
//
// NOTE: This field will only be populated for sequencer txs.
Tx *CachedTx
}

// IsSequencerTx returns true if this batch contains a tx that needs to be
// posted to the L1 CTC contract.
func (b *BatchElement) IsSequencerTx() bool {
return b.Tx != nil
// whether it is a Sequencer transaction
IsSequencerTx bool
}

// BatchElementFromBlock constructs a BatchElement from a single L2 block. This
@@ -53,136 +38,9 @@ func BatchElementFromBlock(block *l2types.Block) BatchElement {
isSequencerTx := tx.QueueOrigin() == l2types.QueueOriginSequencer

// Only include sequencer txs in the returned BatchElement.
var cachedTx *CachedTx
if isSequencerTx {
cachedTx = NewCachedTx(tx)
}

return BatchElement{
Timestamp: block.Time(),
BlockNumber: l1BlockNumber,
Tx: cachedTx,
}
}

type groupedBlock struct {
sequenced []BatchElement
queued []BatchElement
}

// GenSequencerBatchParams generates a valid AppendSequencerBatchParams from a
// list of BatchElements. The BatchElements are assumed to be ordered in
// ascending order by L2 block height.
func GenSequencerBatchParams(
shouldStartAtElement uint64,
blockOffset uint64,
batch []BatchElement,
) (*AppendSequencerBatchParams, error) {
var (
contexts []BatchContext
groupedBlocks []groupedBlock
txs []*CachedTx
lastBlockIsSequencerTx bool
lastTimestamp uint64
lastBlockNumber uint64
)
// Iterate over the batch elements, grouping the elements according to
// the following criteria:
// - All txs in the same group must have the same timestamp.
// - All sequencer txs in the same group must have the same block number.
// - If sequencer txs exist in a group, they must come before all
// queued txs.
//
// Assuming the block and timestamp criteria for sequencer txs are
// respected within each group, the following are examples of groupings:
// - [s] // sequencer can exist by itself
// - [q] // queued tx can exist by itself
// - [s] [s] // differing sequencer tx timestamp/blocknumber
// - [s q] [s] // sequencer tx must precede queued tx in group
// - [q] [q s] // INVALID: consecutive queued txs are split
// - [q q] [s] // correct split for preceding case
// - [s q] [s q] // alternating sequencer tx interleaved with queued
for _, el := range batch {
// To enforce the above groupings, the following condition is
// used to determine when to create a new batch:
// - On the first pass, or
// - The preceding tx has a different timestamp, or
// - Whenever a sequencer tx is observed, and:
// - The preceding tx was a queued tx, or
// - The preceding sequencer tx has a different block number.
// Note that a sequencer tx is usually required to create a new group,
// so a queued tx may ONLY exist as the first element in a group if it
// is the very first element or it has a different timestamp from the
// preceding tx.
needsNewGroupOnSequencerTx := !lastBlockIsSequencerTx ||
el.BlockNumber != lastBlockNumber
if len(groupedBlocks) == 0 ||
el.Timestamp != lastTimestamp ||
(el.IsSequencerTx() && needsNewGroupOnSequencerTx) {

groupedBlocks = append(groupedBlocks, groupedBlock{})
}

// Append the tx to either the sequenced or queued txs,
// depending on its type.
cur := len(groupedBlocks) - 1
if el.IsSequencerTx() {
groupedBlocks[cur].sequenced =
append(groupedBlocks[cur].sequenced, el)

// Gather all sequencer txs, as these will be encoded in
// the calldata of the batch tx submitted to the L1 CTC
// contract.
txs = append(txs, el.Tx)
} else {
groupedBlocks[cur].queued =
append(groupedBlocks[cur].queued, el)
}

lastBlockIsSequencerTx = el.IsSequencerTx()
lastTimestamp = el.Timestamp
lastBlockNumber = el.BlockNumber
}

// For each group, construct the resulting BatchContext.
for _, block := range groupedBlocks {
numSequencedTxs := uint64(len(block.sequenced))
numSubsequentQueueTxs := uint64(len(block.queued))

// Ensure at least one tx was included in this group.
if numSequencedTxs == 0 && numSubsequentQueueTxs == 0 {
return nil, ErrBlockWithInvalidContext
}

// Compute the timestamp and block number for the batch
// using either the earliest sequenced tx or the earliest queued
// tx. If a batch has a sequencer tx it is given preference,
// since it is guaranteed to be the earliest item in the group.
// Otherwise, we fallback to the earliest queued tx since it was
// the very first item.
var (
timestamp uint64
blockNumber uint64
)
if numSequencedTxs > 0 {
timestamp = block.sequenced[0].Timestamp
blockNumber = block.sequenced[0].BlockNumber
} else {
timestamp = block.queued[0].Timestamp
blockNumber = block.queued[0].BlockNumber
}

contexts = append(contexts, BatchContext{
NumSequencedTxs: numSequencedTxs,
NumSubsequentQueueTxs: numSubsequentQueueTxs,
Timestamp: timestamp,
BlockNumber: blockNumber,
})
Timestamp: block.Time(),
BlockNumber: l1BlockNumber,
IsSequencerTx: isSequencerTx,
}
return &AppendSequencerBatchParams{
ShouldStartAtElement: shouldStartAtElement - blockOffset,
TotalElementsToAppend: uint64(len(batch)),
Contexts: contexts,
Txs: txs,
}, nil
}
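
The net effect of this file's diff: `BatchElement` now carries a plain `IsSequencerTx` flag instead of a `*CachedTx`, and the grouping logic in `GenSequencerBatchParams` is removed entirely. A minimal sketch of batch.go as it reads after these hunks; the parts of `BatchElementFromBlock` hidden behind the collapsed diff are assumed rather than copied:

```go
package sequencer

import (
	l2types "github.com/mantlenetworkio/mantle/l2geth/core/types"
)

// BatchElement reflects the contents of an atomic update to the L2 state.
type BatchElement struct {
	// Timestamp is the timestamp of the batch element.
	Timestamp uint64

	// BlockNumber is the L1 BlockNumber of the batch.
	BlockNumber uint64

	// IsSequencerTx reports whether the block's transaction is a sequencer tx.
	IsSequencerTx bool
}

// BatchElementFromBlock constructs a BatchElement from a single L2 block.
// It assumes, as the surrounding code does, that every L2 block carries
// exactly one transaction.
func BatchElementFromBlock(block *l2types.Block) BatchElement {
	tx := block.Transactions()[0]

	// The element inherits the tx's L1 block number and queue origin.
	l1BlockNumber := tx.L1BlockNumber().Uint64()
	isSequencerTx := tx.QueueOrigin() == l2types.QueueOriginSequencer

	return BatchElement{
		Timestamp:     block.Time(),
		BlockNumber:   l1BlockNumber,
		IsSequencerTx: isSequencerTx,
	}
}
```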
9 changes: 4 additions & 5 deletions batch-submitter/drivers/sequencer/batch_test.go
@@ -4,10 +4,11 @@ import (
"math/big"
"testing"

"github.com/stretchr/testify/require"

"github.com/mantlenetworkio/mantle/batch-submitter/drivers/sequencer"
l2common "github.com/mantlenetworkio/mantle/l2geth/common"
l2types "github.com/mantlenetworkio/mantle/l2geth/core/types"
"github.com/stretchr/testify/require"
)

func TestBatchElementFromBlock(t *testing.T) {
@@ -30,8 +31,7 @@ func TestBatchElementFromBlock(t *testing.T) {

require.Equal(t, element.Timestamp, expTime)
require.Equal(t, element.BlockNumber, expBlockNumber)
require.True(t, element.IsSequencerTx())
require.Equal(t, element.Tx.Tx(), expTx)
require.True(t, element.IsSequencerTx)

queueMeta := l2types.NewTransactionMeta(
new(big.Int).SetUint64(expBlockNumber), 0, nil,
@@ -44,6 +44,5 @@ func TestBatchElementFromBlock(t *testing.T) {

require.Equal(t, element.Timestamp, expTime)
require.Equal(t, element.BlockNumber, expBlockNumber)
require.False(t, element.IsSequencerTx())
require.Nil(t, element.Tx)
require.False(t, element.IsSequencerTx)
}
37 changes: 0 additions & 37 deletions batch-submitter/drivers/sequencer/cached_tx.go

This file was deleted.
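
The deleted cached_tx.go is not reproduced in the diff. Judging only from the call sites removed above (`NewCachedTx(tx)`, `element.Tx.Tx()`, and the `txs []*CachedTx` slice fed into the batch calldata), a plausible, purely hypothetical shape for the type it provided is:

```go
// Hypothetical reconstruction, not the deleted file's actual contents.
package sequencer

import (
	"bytes"

	l2types "github.com/mantlenetworkio/mantle/l2geth/core/types"
)

// CachedTx presumably paired a sequencer transaction with its serialized
// bytes so the batch submitter did not have to re-encode it repeatedly.
type CachedTx struct {
	tx    *l2types.Transaction
	rawTx []byte // assumed cached encoding
}

func NewCachedTx(tx *l2types.Transaction) *CachedTx {
	var buf bytes.Buffer
	if err := tx.EncodeRLP(&buf); err != nil {
		// Error handling in the real file is unknown; panicking keeps the sketch short.
		panic(err)
	}
	return &CachedTx{tx: tx, rawTx: buf.Bytes()}
}

// Tx returns the wrapped transaction, matching the element.Tx.Tx() assertion
// removed from batch_test.go.
func (t *CachedTx) Tx() *l2types.Transaction { return t.tx }
```

With the new boolean `IsSequencerTx` flag on `BatchElement`, none of this caching is needed, which is why the file is dropped.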

55 changes: 41 additions & 14 deletions batch-submitter/drivers/sequencer/driver.go
@@ -7,6 +7,9 @@ import (
"math/big"
"strings"

kms "cloud.google.com/go/kms/apiv1"
"google.golang.org/api/option"

"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
@@ -23,9 +26,6 @@ import (
"github.com/mantlenetworkio/mantle/bss-core/metrics"
"github.com/mantlenetworkio/mantle/bss-core/txmgr"
l2ethclient "github.com/mantlenetworkio/mantle/l2geth/ethclient"

kms "cloud.google.com/go/kms/apiv1"
"google.golang.org/api/option"
)

const (
@@ -231,31 +231,58 @@ func (d *Driver) CraftBatchTx(
log.Info(name+" crafting batch tx", "start", start, "end", end,
"nonce", nonce, "type", d.cfg.BatchType.String())

var batchElements []BatchElement
var lastTimestamp uint64
var lastBlockNumber uint64
numSequencedTxs := 0
numSubsequentQueueTxs := 0

for i := new(big.Int).Set(start); i.Cmp(end) < 0; i.Add(i, bigOne) {
block, err := d.cfg.L2Client.BlockByNumber(ctx, i)
if err != nil {
return nil, err
}

// For each sequencer transaction, update our running total with the
// size of the transaction.
batchElement := BatchElementFromBlock(block)
batchElements = append(batchElements, batchElement)
if batchElement.IsSequencerTx {
numSequencedTxs += 1
} else {
numSubsequentQueueTxs += 1
}
if i.Cmp(big.NewInt(0).Sub(end, bigOne)) == 0 {
lastTimestamp = batchElement.Timestamp
lastBlockNumber = batchElement.BlockNumber
}
}

blocksLen := numSequencedTxs + numSubsequentQueueTxs
shouldStartAt := start.Uint64()

for {
batchParams, err := GenSequencerBatchParams(
shouldStartAt, d.cfg.BlockOffset, batchElements,
var (
contexts []BatchContext
)
if err != nil {
return nil, err

batchContext := BatchContext{
NumSequencedTxs: uint64(numSequencedTxs),
NumSubsequentQueueTxs: uint64(numSubsequentQueueTxs),
Timestamp: lastTimestamp,
BlockNumber: lastBlockNumber,
}

d.metrics.BatchNumSequencedTxs().Set(float64(batchContext.NumSequencedTxs))
d.metrics.BatchNumSubsequentQueueTxs().Set(float64(batchContext.NumSubsequentQueueTxs))
d.metrics.BatchTimestamp().Set(float64(batchContext.Timestamp))
d.metrics.BatchBlockNumber().Set(float64(batchContext.BlockNumber))

contexts = append(contexts, batchContext)
batchParams := &AppendSequencerBatchParams{
ShouldStartAtElement: shouldStartAt - d.cfg.BlockOffset,
TotalElementsToAppend: uint64(blocksLen),
Contexts: contexts,
}

// Encode the batch arguments using the configured encoding type.
batchArguments, err := batchParams.Serialize(d.cfg.BatchType, start, big.NewInt(int64(d.cfg.DaUpgradeBlock)))
batchArguments, err := batchParams.Serialize(d.cfg.BatchType)
if err != nil {
return nil, err
}
@@ -266,10 +293,10 @@ func (d *Driver) CraftBatchTx(
log.Info(name+" testing batch size",
"calldata_size", len(calldata))

d.metrics.NumElementsPerBatch().Observe(float64(len(batchElements)))
d.metrics.NumElementsPerBatch().Observe(float64(blocksLen))

log.Info(name+" batch constructed",
"num_txs", len(batchElements),
"num_txs", blocksLen,
"final_size", len(calldata),
"batch_type", d.cfg.BatchType)

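
Taken together, the driver hunks above replace the old per-group context generation with a single aggregate context. An illustrative condensation of the new logic follows; the helper name is ours, not the PR's, and it assumes the package types shown in this diff:

```go
// buildSingleContextParams mirrors what the reworked CraftBatchTx now does
// inline: count sequencer vs. queued elements, remember the last element's
// timestamp and block number, and emit exactly one BatchContext.
func buildSingleContextParams(
	elements []BatchElement,
	shouldStartAt uint64,
	blockOffset uint64,
) *AppendSequencerBatchParams {
	var (
		numSequencedTxs       uint64
		numSubsequentQueueTxs uint64
		lastTimestamp         uint64
		lastBlockNumber       uint64
	)

	for _, el := range elements {
		if el.IsSequencerTx {
			numSequencedTxs++
		} else {
			numSubsequentQueueTxs++
		}
		// After the loop these hold the values of the final element,
		// matching the end-of-range check in the driver.
		lastTimestamp = el.Timestamp
		lastBlockNumber = el.BlockNumber
	}

	context := BatchContext{
		NumSequencedTxs:       numSequencedTxs,
		NumSubsequentQueueTxs: numSubsequentQueueTxs,
		Timestamp:             lastTimestamp,
		BlockNumber:           lastBlockNumber,
	}

	return &AppendSequencerBatchParams{
		ShouldStartAtElement:  shouldStartAt - blockOffset,
		TotalElementsToAppend: numSequencedTxs + numSubsequentQueueTxs,
		Contexts:              []BatchContext{context},
	}
}
```

Note that the new params no longer set a `Txs` slice, and `batchParams.Serialize` is now called with only the batch type, as the last hunk shows.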