Skip to content

Commit

Permalink
feat: calculate acc input hash locally (0xPolygon#154)
Browse files Browse the repository at this point in the history
* feat: calculate acc input hash locally

* fix: test

* feat: Add `metadata` field on the certificate (0xPolygon#151)

* feat: use metadata field on certificate

* fix: lint and UT

* fix: comments

* fix: test

* fix: use calculated acc input hash in input prover

* fix: use calculated acc input hash in input prover

* fix: tests

* fix: tests

* fix: tests

* fix: tests

* feat: change timestamp

* feat: change timestamp

* minor improvements on the config (0xPolygon#149)

* fix: revert changes on calculation of merkle proof (0xPolygon#156)

* feat: change timestamp

* feat: update zkevm-ethtx-manager to v0.2.1 (0xPolygon#153)

* fix: tests

* fix: tests

* fix: tests

* fix: tests

* feat: refactor

* fix: typo

* fix: lock

* feat: use sqlite on lastgersync (0xPolygon#150)

* feat use sqlite on lastgersync

* apply requests

* rm tree migrations

* Update lastgersync/processor.go

Co-authored-by: Goran Rojovic <100121253+goran-ethernal@users.noreply.github.com>

---------

Co-authored-by: Goran Rojovic <100121253+goran-ethernal@users.noreply.github.com>

* feat: use sqlite on claimsponsor (0xPolygon#157)

* feat use sqlite on claimsponsor

* wip

* pass UTs

* fix identation

* fix identation

* rm cover.out

* rm tree migrations

* make err a var

* chore: bump cdk-erigon to v2.1.2

* Revert "chore: bump cdk-erigon to v2.1.2"

This reverts commit a5422d2.

---------

Co-authored-by: Goran Rojovic <100121253+goran-ethernal@users.noreply.github.com>
Co-authored-by: Arnau Bennassar <arnaubennassar5@gmail.com>
Co-authored-by: Joan Esteban <129153821+joanestebanr@users.noreply.github.com>
Co-authored-by: Victor Castell <0x@vcastellm.xyz>
  • Loading branch information
5 people authored Nov 6, 2024
1 parent 2a76deb commit 8611dad
Show file tree
Hide file tree
Showing 30 changed files with 615 additions and 698 deletions.
21 changes: 20 additions & 1 deletion agglayer/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,7 @@ type Certificate struct {
NewLocalExitRoot [32]byte `json:"new_local_exit_root"`
BridgeExits []*BridgeExit `json:"bridge_exits"`
ImportedBridgeExits []*ImportedBridgeExit `json:"imported_bridge_exits"`
Metadata common.Hash `json:"metadata"`
}

// Hash returns a hash that uniquely identifies the certificate
Expand Down Expand Up @@ -110,6 +111,20 @@ func (c *Certificate) Hash() common.Hash {
)
}

// HashToSign is the actual hash that needs to be signed by the aggsender
// as expected by the agglayer: keccak over the new local exit root and the
// keccak of all imported bridge exits' global index hashes.
func (c *Certificate) HashToSign() common.Hash {
	globalIndexHashes := make([][]byte, 0, len(c.ImportedBridgeExits))
	for _, importedBridgeExit := range c.ImportedBridgeExits {
		globalIndexHashes = append(globalIndexHashes, importedBridgeExit.GlobalIndex.Hash().Bytes())
	}

	return crypto.Keccak256Hash(
		c.NewLocalExitRoot[:],
		crypto.Keccak256Hash(globalIndexHashes...).Bytes(),
	)
}

// SignedCertificate is the struct that contains the certificate and the signature of the signer
type SignedCertificate struct {
*Certificate
Expand Down Expand Up @@ -138,7 +153,10 @@ type GlobalIndex struct {

func (g *GlobalIndex) Hash() common.Hash {
return crypto.Keccak256Hash(
bridgesync.GenerateGlobalIndex(g.MainnetFlag, g.RollupIndex, g.LeafIndex).Bytes())
cdkcommon.BigIntToLittleEndianBytes(
bridgesync.GenerateGlobalIndex(g.MainnetFlag, g.RollupIndex, g.LeafIndex),
),
)
}

// BridgeExit represents a token bridge exit
Expand Down Expand Up @@ -379,6 +397,7 @@ type CertificateHeader struct {
CertificateID common.Hash `json:"certificate_id"`
NewLocalExitRoot common.Hash `json:"new_local_exit_root"`
Status CertificateStatus `json:"status"`
Metadata common.Hash `json:"metadata"`
}

func (c CertificateHeader) String() string {
Expand Down
4 changes: 2 additions & 2 deletions agglayer/types_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,8 @@ import (
)

const (
expectedSignedCertificateEmptyMetadataJSON = `{"network_id":1,"height":1,"prev_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"new_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bridge_exits":[{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]},"claim_data":null,"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":1}}],"signature":{"r":"0x0000000000000000000000000000000000000000000000000000000000000000","s":"0x0000000000000000000000000000000000000000000000000000000000000000","odd_y_parity":false}}`
expectedSignedCertificateyMetadataJSON = `{"network_id":1,"height":1,"prev_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"new_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bridge_exits":[{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[1,2,3]}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]},"claim_data":null,"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":1}}],"signature":{"r":"0x0000000000000000000000000000000000000000000000000000000000000000","s":"0x0000000000000000000000000000000000000000000000000000000000000000","odd_y_parity":false}}`
expectedSignedCertificateEmptyMetadataJSON = `{"network_id":1,"height":1,"prev_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"new_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bridge_exits":[{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]},"claim_data":null,"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":1}}],"metadata":"0x0000000000000000000000000000000000000000000000000000000000000000","signature":{"r":"0x0000000000000000000000000000000000000000000000000000000000000000","s":"0x0000000000000000000000000000000000000000000000000000000000000000","odd_y_parity":false}}`
expectedSignedCertificateyMetadataJSON = `{"network_id":1,"height":1,"prev_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"new_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bridge_exits":[{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[1,2,3]}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]},"claim_data":null,"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":1}}],"metadata":"0x0000000000000000000000000000000000000000000000000000000000000000","signature":{"r":"0x0000000000000000000000000000000000000000000000000000000000000000","s":"0x0000000000000000000000000000000000000000000000000000000000000000","odd_y_parity":false}}`
)

func TestMarshalJSON(t *testing.T) {
Expand Down
118 changes: 98 additions & 20 deletions aggregator/aggregator.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,11 +58,13 @@ type Aggregator struct {
cfg Config
logger *log.Logger

state StateInterface
etherman Etherman
ethTxManager EthTxManagerClient
l1Syncr synchronizer.Synchronizer
halted atomic.Bool
state StateInterface
etherman Etherman
ethTxManager EthTxManagerClient
l1Syncr synchronizer.Synchronizer
halted atomic.Bool
accInputHashes map[uint64]common.Hash
accInputHashesMutex *sync.Mutex

profitabilityChecker aggregatorTxProfitabilityChecker
timeSendFinalProof time.Time
Expand Down Expand Up @@ -155,6 +157,8 @@ func New(
etherman: etherman,
ethTxManager: ethTxManager,
l1Syncr: l1Syncr,
accInputHashes: make(map[uint64]common.Hash),
accInputHashesMutex: &sync.Mutex{},
profitabilityChecker: profitabilityChecker,
stateDBMutex: &sync.Mutex{},
timeSendFinalProofMutex: &sync.RWMutex{},
Expand Down Expand Up @@ -219,13 +223,16 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat
a.logger.Warnf("Rollback batches event, rollbackBatchesData: %+v", rollbackData)

var err error
var accInputHash *common.Hash

// Get new last verified batch number from L1
lastVerifiedBatchNumber, err := a.etherman.GetLatestVerifiedBatchNum()
if err != nil {
a.logger.Errorf("Error getting latest verified batch number: %v", err)
}

a.logger.Infof("Last Verified Batch Number:%v", lastVerifiedBatchNumber)

// Check lastVerifiedBatchNumber makes sense
if err == nil && lastVerifiedBatchNumber > rollbackData.LastBatchNumber {
err = fmt.Errorf(
Expand All @@ -234,6 +241,17 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat
)
}

if err == nil {
accInputHash, err = a.getVerifiedBatchAccInputHash(a.ctx, lastVerifiedBatchNumber)
if err == nil {
a.accInputHashesMutex.Lock()
a.accInputHashes = make(map[uint64]common.Hash)
a.logger.Infof("Starting AccInputHash:%v", accInputHash.String())
a.accInputHashes[lastVerifiedBatchNumber] = *accInputHash
a.accInputHashesMutex.Unlock()
}
}

// Delete wip proofs
if err == nil {
err = a.state.DeleteUngeneratedProofs(a.ctx, nil)
Expand Down Expand Up @@ -272,7 +290,6 @@ func (a *Aggregator) Start() error {
err := a.l1Syncr.Sync(true)
if err != nil {
a.logger.Fatalf("Failed to synchronize from L1: %v", err)

return err
}

Expand All @@ -297,19 +314,29 @@ func (a *Aggregator) Start() error {
healthService := newHealthChecker()
grpchealth.RegisterHealthServer(a.srv, healthService)

// Delete ungenerated recursive proofs
err = a.state.DeleteUngeneratedProofs(a.ctx, nil)
if err != nil {
return fmt.Errorf("failed to initialize proofs cache %w", err)
}

// Get last verified batch number to set the starting point for verifications
lastVerifiedBatchNumber, err := a.etherman.GetLatestVerifiedBatchNum()
if err != nil {
return err
}

// Delete ungenerated recursive proofs
err = a.state.DeleteUngeneratedProofs(a.ctx, nil)
a.logger.Infof("Last Verified Batch Number:%v", lastVerifiedBatchNumber)

accInputHash, err := a.getVerifiedBatchAccInputHash(a.ctx, lastVerifiedBatchNumber)
if err != nil {
return fmt.Errorf("failed to initialize proofs cache %w", err)
return err
}

a.logger.Infof("Last Verified Batch Number:%v", lastVerifiedBatchNumber)
a.logger.Infof("Starting AccInputHash:%v", accInputHash.String())
a.accInputHashesMutex.Lock()
a.accInputHashes[lastVerifiedBatchNumber] = *accInputHash
a.accInputHashesMutex.Unlock()

a.resetVerifyProofTime()

Expand Down Expand Up @@ -1007,6 +1034,15 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover ProverInterf
return true, nil
}

// getVerifiedBatchAccInputHash fetches the acc input hash of the given
// verified batch from L1 through the etherman client.
func (a *Aggregator) getVerifiedBatchAccInputHash(ctx context.Context, batchNumber uint64) (*common.Hash, error) {
	hash, err := a.etherman.GetBatchAccInputHash(ctx, batchNumber)
	if err != nil {
		return nil, err
	}

	return &hash, nil
}

func (a *Aggregator) getAndLockBatchToProve(
ctx context.Context, prover ProverInterface,
) (*state.Batch, []byte, *state.Proof, error) {
Expand Down Expand Up @@ -1093,15 +1129,39 @@ func (a *Aggregator) getAndLockBatchToProve(
virtualBatch.L1InfoRoot = &l1InfoRoot
}

// Calculate acc input hash as the RPC is not returning the correct one at the moment
a.accInputHashesMutex.Lock()
accInputHash := cdkcommon.CalculateAccInputHash(
a.logger,
a.accInputHashes[batchNumberToVerify-1],
virtualBatch.BatchL2Data,
*virtualBatch.L1InfoRoot,
uint64(sequence.Timestamp.Unix()),
rpcBatch.LastCoinbase(),
rpcBatch.ForcedBlockHashL1(),
)
// Store the acc input hash
a.accInputHashes[batchNumberToVerify] = accInputHash
a.accInputHashesMutex.Unlock()

// Log params to calculate acc input hash
a.logger.Debugf("Calculated acc input hash for batch %d: %v", batchNumberToVerify, accInputHash)
a.logger.Debugf("L1InfoRoot: %v", virtualBatch.L1InfoRoot)
// a.logger.Debugf("LastL2BLockTimestamp: %v", rpcBatch.LastL2BLockTimestamp())
a.logger.Debugf("TimestampLimit: %v", uint64(sequence.Timestamp.Unix()))
a.logger.Debugf("LastCoinbase: %v", rpcBatch.LastCoinbase())
a.logger.Debugf("ForcedBlockHashL1: %v", rpcBatch.ForcedBlockHashL1())

// Create state batch
stateBatch := &state.Batch{
BatchNumber: rpcBatch.BatchNumber(),
Coinbase: rpcBatch.LastCoinbase(),
// Use L1 batch data
BatchL2Data: virtualBatch.BatchL2Data,
StateRoot: rpcBatch.StateRoot(),
LocalExitRoot: rpcBatch.LocalExitRoot(),
AccInputHash: rpcBatch.AccInputHash(),
BatchL2Data: virtualBatch.BatchL2Data,
StateRoot: rpcBatch.StateRoot(),
LocalExitRoot: rpcBatch.LocalExitRoot(),
// Use calculated acc input
AccInputHash: accInputHash,
L1InfoTreeIndex: rpcBatch.L1InfoTreeIndex(),
L1InfoRoot: *virtualBatch.L1InfoRoot,
Timestamp: time.Unix(int64(rpcBatch.LastL2BLockTimestamp()), 0),
Expand Down Expand Up @@ -1414,15 +1474,20 @@ func (a *Aggregator) buildInputProver(
}

// Get Old Acc Input Hash
rpcOldBatch, err := a.rpcClient.GetBatch(batchToVerify.BatchNumber - 1)
if err != nil {
return nil, err
}
/*
rpcOldBatch, err := a.rpcClient.GetBatch(batchToVerify.BatchNumber - 1)
if err != nil {
return nil, err
}
*/

a.accInputHashesMutex.Lock()
inputProver := &prover.StatelessInputProver{
PublicInputs: &prover.StatelessPublicInputs{
Witness: witness,
OldAccInputHash: rpcOldBatch.AccInputHash().Bytes(),
Witness: witness,
// Use calculated acc inputh hash as the RPC is not returning the correct one at the moment
// OldAccInputHash: rpcOldBatch.AccInputHash().Bytes(),
OldAccInputHash: a.accInputHashes[batchToVerify.BatchNumber-1].Bytes(),
OldBatchNum: batchToVerify.BatchNumber - 1,
ChainId: batchToVerify.ChainID,
ForkId: batchToVerify.ForkID,
Expand All @@ -1435,6 +1500,7 @@ func (a *Aggregator) buildInputProver(
ForcedBlockhashL1: forcedBlockhashL1.Bytes(),
},
}
a.accInputHashesMutex.Unlock()

printInputProver(a.logger, inputProver)
return inputProver, nil
Expand Down Expand Up @@ -1522,6 +1588,18 @@ func (a *Aggregator) handleMonitoredTxResult(result ethtxtypes.MonitoredTxResult
}

mTxResultLogger.Debugf("deleted generated proofs from %d to %d", firstBatch, lastBatch)

// Remove the acc input hashes from the map
// leaving the last batch acc input hash as it will be used as old acc input hash
a.removeAccInputHashes(firstBatch, lastBatch-1)
}

// removeAccInputHashes deletes the cached acc input hashes for the batch
// range [firstBatch, lastBatch], holding the map mutex for the duration.
func (a *Aggregator) removeAccInputHashes(firstBatch, lastBatch uint64) {
	a.accInputHashesMutex.Lock()
	defer a.accInputHashesMutex.Unlock()

	for batch := firstBatch; batch <= lastBatch; batch++ {
		delete(a.accInputHashes, batch)
	}
}

func (a *Aggregator) cleanupLockedProofs() {
Expand Down
Loading

0 comments on commit 8611dad

Please sign in to comment.