From 8611dadca894239dcbc7cfad0a4bcb1ca3e5a85e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toni=20Ram=C3=ADrez?= <58293609+ToniRamirezM@users.noreply.github.com> Date: Wed, 6 Nov 2024 13:15:58 +0100 Subject: [PATCH] feat: calculate acc input hash locally (#154) * feat: calculate acc input hash locally * fix: test * feat: Add `metadata` field on the certificate (#151) * feat: use metadata field on certificate * fix: lint and UT * fix: comments * fix: test * fix: use calculated acc input hash in input prover * fix: use calculated acc input hash in input prover * fix: tests * fix: tests * fix: tests * fix: tests * feat: change timestamp * feat: change timestamp * minor improvements on the config (#149) * fix: revert changes on calculation of merkel proof (#156) * feat: change timestamp * feat: update zkevm-ethtx-manager to v0.2.1 (#153) * fix: tests * fix: tests * fix: tests * fix: tests * feat: refactor * fix: typo * fix: lock * feat: use sqlite on lastgersync (#150) * feat use sqlite on lastgersync * apply requests * rm tree migrations * Update lastgersync/processor.go Co-authored-by: Goran Rojovic <100121253+goran-ethernal@users.noreply.github.com> --------- Co-authored-by: Goran Rojovic <100121253+goran-ethernal@users.noreply.github.com> * feat: use sqlite on claimsponsor (#157) * feat use sqlite on claimsponsor * wip * pass UTs * fix identation * fix identation * rm cover.out * rm tree migrations * make err a var * chore: bump cdk-erigon to v2.1.2 * Revert "chore: bump cdk-erigon to v2.1.2" This reverts commit a5422d2de3afc45de6f933208fa72162f98387ac. --------- Co-authored-by: Goran Rojovic <100121253+goran-ethernal@users.noreply.github.com> Co-authored-by: Arnau Bennassar Co-authored-by: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> Co-authored-by: Victor Castell <0x@vcastellm.xyz> --- agglayer/types.go | 21 +- agglayer/types_test.go | 4 +- aggregator/aggregator.go | 118 +++++- aggregator/aggregator_test.go | 80 ++-- aggsender/aggsender.go | 20 +- aggsender/aggsender_test.go | 5 +- claimsponsor/claimsponsor.go | 378 ++++++------------ claimsponsor/e2e_test.go | 6 +- claimsponsor/evmclaimsponsor.go | 2 +- claimsponsor/migrations/claimsponsor0001.sql | 20 + claimsponsor/migrations/migrations.go | 21 + common/common.go | 21 +- common/common_test.go | 61 +++ config/default.go | 12 +- go.mod | 2 +- go.sum | 4 +- l1infotree/tree.go | 12 +- l1infotree/tree_test.go | 54 --- lastgersync/e2e_test.go | 9 +- lastgersync/evmdownloader.go | 18 +- lastgersync/lastgersync.go | 4 +- lastgersync/migrations/lastgersync0001.sql | 14 + lastgersync/migrations/migrations.go | 21 + lastgersync/processor.go | 300 ++++---------- logerror | 1 + rpc/bridge.go | 8 +- rpc/bridge_interfaces.go | 7 +- rpc/mocks/claim_sponsorer.go | 52 ++- rpc/mocks/last_ge_rer.go | 36 +- .../kurtosis-cdk-node-config.toml.template | 2 - 30 files changed, 615 insertions(+), 698 deletions(-) create mode 100644 claimsponsor/migrations/claimsponsor0001.sql create mode 100644 claimsponsor/migrations/migrations.go create mode 100644 common/common_test.go create mode 100644 lastgersync/migrations/lastgersync0001.sql create mode 100644 lastgersync/migrations/migrations.go create mode 100644 logerror diff --git a/agglayer/types.go b/agglayer/types.go index e8bdb254..825c9db2 100644 --- a/agglayer/types.go +++ b/agglayer/types.go @@ -83,6 +83,7 @@ type Certificate struct { NewLocalExitRoot [32]byte `json:"new_local_exit_root"` BridgeExits []*BridgeExit `json:"bridge_exits"` ImportedBridgeExits []*ImportedBridgeExit 
`json:"imported_bridge_exits"` + Metadata common.Hash `json:"metadata"` } // Hash returns a hash that uniquely identifies the certificate @@ -110,6 +111,20 @@ func (c *Certificate) Hash() common.Hash { ) } +// HashToSign is the actual hash that needs to be signed by the aggsender +// as expected by the agglayer +func (c *Certificate) HashToSign() common.Hash { + globalIndexHashes := make([][]byte, len(c.ImportedBridgeExits)) + for i, importedBridgeExit := range c.ImportedBridgeExits { + globalIndexHashes[i] = importedBridgeExit.GlobalIndex.Hash().Bytes() + } + + return crypto.Keccak256Hash( + c.NewLocalExitRoot[:], + crypto.Keccak256Hash(globalIndexHashes...).Bytes(), + ) +} + // SignedCertificate is the struct that contains the certificate and the signature of the signer type SignedCertificate struct { *Certificate @@ -138,7 +153,10 @@ type GlobalIndex struct { func (g *GlobalIndex) Hash() common.Hash { return crypto.Keccak256Hash( - bridgesync.GenerateGlobalIndex(g.MainnetFlag, g.RollupIndex, g.LeafIndex).Bytes()) + cdkcommon.BigIntToLittleEndianBytes( + bridgesync.GenerateGlobalIndex(g.MainnetFlag, g.RollupIndex, g.LeafIndex), + ), + ) } // BridgeExit represents a token bridge exit @@ -379,6 +397,7 @@ type CertificateHeader struct { CertificateID common.Hash `json:"certificate_id"` NewLocalExitRoot common.Hash `json:"new_local_exit_root"` Status CertificateStatus `json:"status"` + Metadata common.Hash `json:"metadata"` } func (c CertificateHeader) String() string { diff --git a/agglayer/types_test.go b/agglayer/types_test.go index 1df1f20f..325c0b88 100644 --- a/agglayer/types_test.go +++ b/agglayer/types_test.go @@ -11,8 +11,8 @@ import ( ) const ( - expectedSignedCertificateEmptyMetadataJSON = `{"network_id":1,"height":1,"prev_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"new_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bridge_exits":[{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]},"claim_data":null,"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":1}}],"signature":{"r":"0x0000000000000000000000000000000000000000000000000000000000000000","s":"0x0000000000000000000000000000000000000000000000000000000000000000","odd_y_parity":false}}` - expectedSignedCertificateyMetadataJSON = `{"network_id":1,"height":1,"prev_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"new_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bridge_exits":[{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[1,2,3]}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]},"claim_data":null,"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":1}}],"signature":{"r":"0x0000000000000000000000000000000000000000000000000000000000000000","s":"0x0000000000000000000000000000000000000000000000000000000000000000","odd_y_parity":false}}` + expectedSignedCertificateEmptyMetadataJSON = 
`{"network_id":1,"height":1,"prev_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"new_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bridge_exits":[{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]},"claim_data":null,"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":1}}],"metadata":"0x0000000000000000000000000000000000000000000000000000000000000000","signature":{"r":"0x0000000000000000000000000000000000000000000000000000000000000000","s":"0x0000000000000000000000000000000000000000000000000000000000000000","odd_y_parity":false}}` + expectedSignedCertificateyMetadataJSON = `{"network_id":1,"height":1,"prev_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"new_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bridge_exits":[{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[1,2,3]}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]},"claim_data":null,"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":1}}],"metadata":"0x0000000000000000000000000000000000000000000000000000000000000000","signature":{"r":"0x0000000000000000000000000000000000000000000000000000000000000000","s":"0x0000000000000000000000000000000000000000000000000000000000000000","odd_y_parity":false}}` ) func TestMarshalJSON(t *testing.T) { diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 8aa78011..d3479fcf 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -58,11 +58,13 @@ type Aggregator struct { cfg Config logger *log.Logger - state StateInterface - etherman Etherman - ethTxManager EthTxManagerClient - l1Syncr synchronizer.Synchronizer - halted atomic.Bool + state StateInterface + etherman Etherman + ethTxManager EthTxManagerClient + l1Syncr synchronizer.Synchronizer + halted atomic.Bool + accInputHashes map[uint64]common.Hash + accInputHashesMutex *sync.Mutex profitabilityChecker aggregatorTxProfitabilityChecker timeSendFinalProof time.Time @@ -155,6 +157,8 @@ func New( etherman: etherman, ethTxManager: ethTxManager, l1Syncr: l1Syncr, + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, profitabilityChecker: profitabilityChecker, stateDBMutex: &sync.Mutex{}, timeSendFinalProofMutex: &sync.RWMutex{}, @@ -219,6 +223,7 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat a.logger.Warnf("Rollback batches event, rollbackBatchesData: %+v", rollbackData) var err error + var accInputHash *common.Hash // Get new last verified batch number from L1 lastVerifiedBatchNumber, err := a.etherman.GetLatestVerifiedBatchNum() @@ -226,6 +231,8 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat a.logger.Errorf("Error getting latest verified batch number: %v", err) } + a.logger.Infof("Last Verified Batch Number:%v", lastVerifiedBatchNumber) + // Check lastVerifiedBatchNumber makes sense if err == nil && lastVerifiedBatchNumber > 
rollbackData.LastBatchNumber { err = fmt.Errorf( @@ -234,6 +241,17 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat ) } + if err == nil { + accInputHash, err = a.getVerifiedBatchAccInputHash(a.ctx, lastVerifiedBatchNumber) + if err == nil { + a.accInputHashesMutex.Lock() + a.accInputHashes = make(map[uint64]common.Hash) + a.logger.Infof("Starting AccInputHash:%v", accInputHash.String()) + a.accInputHashes[lastVerifiedBatchNumber] = *accInputHash + a.accInputHashesMutex.Unlock() + } + } + // Delete wip proofs if err == nil { err = a.state.DeleteUngeneratedProofs(a.ctx, nil) @@ -272,7 +290,6 @@ func (a *Aggregator) Start() error { err := a.l1Syncr.Sync(true) if err != nil { a.logger.Fatalf("Failed to synchronize from L1: %v", err) - return err } @@ -297,19 +314,29 @@ func (a *Aggregator) Start() error { healthService := newHealthChecker() grpchealth.RegisterHealthServer(a.srv, healthService) + // Delete ungenerated recursive proofs + err = a.state.DeleteUngeneratedProofs(a.ctx, nil) + if err != nil { + return fmt.Errorf("failed to initialize proofs cache %w", err) + } + // Get last verified batch number to set the starting point for verifications lastVerifiedBatchNumber, err := a.etherman.GetLatestVerifiedBatchNum() if err != nil { return err } - // Delete ungenerated recursive proofs - err = a.state.DeleteUngeneratedProofs(a.ctx, nil) + a.logger.Infof("Last Verified Batch Number:%v", lastVerifiedBatchNumber) + + accInputHash, err := a.getVerifiedBatchAccInputHash(a.ctx, lastVerifiedBatchNumber) if err != nil { - return fmt.Errorf("failed to initialize proofs cache %w", err) + return err } - a.logger.Infof("Last Verified Batch Number:%v", lastVerifiedBatchNumber) + a.logger.Infof("Starting AccInputHash:%v", accInputHash.String()) + a.accInputHashesMutex.Lock() + a.accInputHashes[lastVerifiedBatchNumber] = *accInputHash + a.accInputHashesMutex.Unlock() a.resetVerifyProofTime() @@ -1007,6 +1034,15 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover ProverInterf return true, nil } +func (a *Aggregator) getVerifiedBatchAccInputHash(ctx context.Context, batchNumber uint64) (*common.Hash, error) { + accInputHash, err := a.etherman.GetBatchAccInputHash(ctx, batchNumber) + if err != nil { + return nil, err + } + + return &accInputHash, nil +} + func (a *Aggregator) getAndLockBatchToProve( ctx context.Context, prover ProverInterface, ) (*state.Batch, []byte, *state.Proof, error) { @@ -1093,15 +1129,39 @@ func (a *Aggregator) getAndLockBatchToProve( virtualBatch.L1InfoRoot = &l1InfoRoot } + // Calculate acc input hash as the RPC is not returning the correct one at the moment + a.accInputHashesMutex.Lock() + accInputHash := cdkcommon.CalculateAccInputHash( + a.logger, + a.accInputHashes[batchNumberToVerify-1], + virtualBatch.BatchL2Data, + *virtualBatch.L1InfoRoot, + uint64(sequence.Timestamp.Unix()), + rpcBatch.LastCoinbase(), + rpcBatch.ForcedBlockHashL1(), + ) + // Store the acc input hash + a.accInputHashes[batchNumberToVerify] = accInputHash + a.accInputHashesMutex.Unlock() + + // Log params to calculate acc input hash + a.logger.Debugf("Calculated acc input hash for batch %d: %v", batchNumberToVerify, accInputHash) + a.logger.Debugf("L1InfoRoot: %v", virtualBatch.L1InfoRoot) + // a.logger.Debugf("LastL2BLockTimestamp: %v", rpcBatch.LastL2BLockTimestamp()) + a.logger.Debugf("TimestampLimit: %v", uint64(sequence.Timestamp.Unix())) + a.logger.Debugf("LastCoinbase: %v", rpcBatch.LastCoinbase()) + a.logger.Debugf("ForcedBlockHashL1: %v", 
rpcBatch.ForcedBlockHashL1())
+
 	// Create state batch
 	stateBatch := &state.Batch{
 		BatchNumber: rpcBatch.BatchNumber(),
 		Coinbase:    rpcBatch.LastCoinbase(),
 		// Use L1 batch data
-		BatchL2Data:   virtualBatch.BatchL2Data,
-		StateRoot:     rpcBatch.StateRoot(),
-		LocalExitRoot: rpcBatch.LocalExitRoot(),
-		AccInputHash:  rpcBatch.AccInputHash(),
+		BatchL2Data:   virtualBatch.BatchL2Data,
+		StateRoot:     rpcBatch.StateRoot(),
+		LocalExitRoot: rpcBatch.LocalExitRoot(),
+		// Use calculated acc input hash
+		AccInputHash: accInputHash,
 		L1InfoTreeIndex: rpcBatch.L1InfoTreeIndex(),
 		L1InfoRoot:      *virtualBatch.L1InfoRoot,
 		Timestamp:       time.Unix(int64(rpcBatch.LastL2BLockTimestamp()), 0),
@@ -1414,15 +1474,20 @@ func (a *Aggregator) buildInputProver(
 	}
 
 	// Get Old Acc Input Hash
-	rpcOldBatch, err := a.rpcClient.GetBatch(batchToVerify.BatchNumber - 1)
-	if err != nil {
-		return nil, err
-	}
+	/*
+		rpcOldBatch, err := a.rpcClient.GetBatch(batchToVerify.BatchNumber - 1)
+		if err != nil {
+			return nil, err
+		}
+	*/
 
+	a.accInputHashesMutex.Lock()
 	inputProver := &prover.StatelessInputProver{
 		PublicInputs: &prover.StatelessPublicInputs{
-			Witness:         witness,
-			OldAccInputHash: rpcOldBatch.AccInputHash().Bytes(),
+			Witness: witness,
+			// Use calculated acc input hash as the RPC is not returning the correct one at the moment
+			// OldAccInputHash: rpcOldBatch.AccInputHash().Bytes(),
+			OldAccInputHash: a.accInputHashes[batchToVerify.BatchNumber-1].Bytes(),
 			OldBatchNum:     batchToVerify.BatchNumber - 1,
 			ChainId:         batchToVerify.ChainID,
 			ForkId:          batchToVerify.ForkID,
@@ -1435,6 +1500,7 @@ func (a *Aggregator) buildInputProver(
 			ForcedBlockhashL1: forcedBlockhashL1.Bytes(),
 		},
 	}
+	a.accInputHashesMutex.Unlock()
 
 	printInputProver(a.logger, inputProver)
 	return inputProver, nil
@@ -1522,6 +1588,18 @@ func (a *Aggregator) handleMonitoredTxResult(result ethtxtypes.MonitoredTxResult
 	}
 	mTxResultLogger.Debugf("deleted generated proofs from %d to %d", firstBatch, lastBatch)
+
+	// Remove the acc input hashes from the map
+	// leaving the last batch acc input hash as it will be used as old acc input hash
+	a.removeAccInputHashes(firstBatch, lastBatch-1)
+}
+
+func (a *Aggregator) removeAccInputHashes(firstBatch, lastBatch uint64) {
+	a.accInputHashesMutex.Lock()
+	for i := firstBatch; i <= lastBatch; i++ {
+		delete(a.accInputHashes, i)
+	}
+	a.accInputHashesMutex.Unlock()
 }
 
 func (a *Aggregator) cleanupLockedProofs() {
diff --git a/aggregator/aggregator_test.go b/aggregator/aggregator_test.go
index fd03315f..e7284972 100644
--- a/aggregator/aggregator_test.go
+++ b/aggregator/aggregator_test.go
@@ -83,6 +83,7 @@ func Test_Start(t *testing.T) {
 	mockL1Syncr.On("Sync", mock.Anything).Return(nil)
 	mockEtherman.On("GetLatestVerifiedBatchNum").Return(uint64(90), nil).Once()
+	mockEtherman.On("GetBatchAccInputHash", mock.Anything, uint64(90)).Return(common.Hash{}, nil).Once()
 	mockState.On("DeleteUngeneratedProofs", mock.Anything, nil).Return(nil).Once()
 	mockState.On("CleanupLockedProofs", mock.Anything, "", nil).Return(int64(0), nil)
@@ -100,6 +101,8 @@
 		stateDBMutex:            &sync.Mutex{},
 		timeSendFinalProofMutex: &sync.RWMutex{},
 		timeCleanupLockedProofs: types.Duration{Duration: 5 * time.Second},
+		accInputHashes:          make(map[uint64]common.Hash),
+		accInputHashesMutex:     &sync.Mutex{},
 	}
 	go func() {
 		err := a.Start()
@@ -149,15 +152,18 @@ func Test_handleRollbackBatches(t *testing.T) {
 	}
 
 	mockEtherman.On("GetLatestVerifiedBatchNum").Return(uint64(90), nil).Once()
+	mockEtherman.On("GetBatchAccInputHash", mock.Anything, 
uint64(90)).Return(common.Hash{}, nil).Once() mockState.On("DeleteUngeneratedProofs", mock.Anything, mock.Anything).Return(nil).Once() mockState.On("DeleteGeneratedProofs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() a := Aggregator{ - ctx: context.Background(), - etherman: mockEtherman, - state: mockState, - logger: log.GetDefaultLogger(), - halted: atomic.Bool{}, + ctx: context.Background(), + etherman: mockEtherman, + state: mockState, + logger: log.GetDefaultLogger(), + halted: atomic.Bool{}, + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, } a.halted.Store(false) @@ -184,11 +190,13 @@ func Test_handleRollbackBatchesHalt(t *testing.T) { } a := Aggregator{ - ctx: context.Background(), - etherman: mockEtherman, - state: mockState, - logger: log.GetDefaultLogger(), - halted: atomic.Bool{}, + ctx: context.Background(), + etherman: mockEtherman, + state: mockState, + logger: log.GetDefaultLogger(), + halted: atomic.Bool{}, + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, } a.halted.Store(false) @@ -213,11 +221,13 @@ func Test_handleRollbackBatchesError(t *testing.T) { } a := Aggregator{ - ctx: context.Background(), - etherman: mockEtherman, - state: mockState, - logger: log.GetDefaultLogger(), - halted: atomic.Bool{}, + ctx: context.Background(), + etherman: mockEtherman, + state: mockState, + logger: log.GetDefaultLogger(), + halted: atomic.Bool{}, + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, } a.halted.Store(false) @@ -320,6 +330,8 @@ func Test_sendFinalProofSuccess(t *testing.T) { timeSendFinalProofMutex: &sync.RWMutex{}, sequencerPrivateKey: privateKey, rpcClient: rpcMock, + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, } a.ctx, a.exit = context.WithCancel(context.Background()) @@ -509,6 +521,8 @@ func Test_sendFinalProofError(t *testing.T) { timeSendFinalProofMutex: &sync.RWMutex{}, sequencerPrivateKey: privateKey, rpcClient: rpcMock, + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, } a.ctx, a.exit = context.WithCancel(context.Background()) @@ -625,7 +639,9 @@ func Test_buildFinalProof(t *testing.T) { cfg: Config{ SenderAddress: common.BytesToAddress([]byte("from")).Hex(), }, - rpcClient: rpcMock, + rpcClient: rpcMock, + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, } tc.setup(m, &a) @@ -884,6 +900,8 @@ func Test_tryBuildFinalProof(t *testing.T) { timeSendFinalProofMutex: &sync.RWMutex{}, timeCleanupLockedProofs: cfg.CleanupLockedProofsInterval, finalProof: make(chan finalProofMsg), + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, } aggregatorCtx := context.WithValue(context.Background(), "owner", ownerAggregator) //nolint:staticcheck @@ -1389,6 +1407,8 @@ func Test_tryAggregateProofs(t *testing.T) { timeSendFinalProofMutex: &sync.RWMutex{}, timeCleanupLockedProofs: cfg.CleanupLockedProofsInterval, finalProof: make(chan finalProofMsg), + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, } aggregatorCtx := context.WithValue(context.Background(), "owner", ownerAggregator) //nolint:staticcheck a.ctx, a.exit = context.WithCancel(aggregatorCtx) @@ -1533,9 +1553,8 @@ func Test_tryGenerateBatchProof(t *testing.T) { rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock 
LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) - m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil) m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) - + m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil) m.stateMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( func(args mock.Arguments) { @@ -1557,7 +1576,6 @@ func Test_tryGenerateBatchProof(t *testing.T) { }, }, nil).Twice() - m.rpcMock.On("GetBatch", lastVerifiedBatchNum).Return(rpcBatch, nil) expectedInputProver, err := a.buildInputProver(context.Background(), &batch, []byte("witness")) require.NoError(err) @@ -1606,7 +1624,6 @@ func Test_tryGenerateBatchProof(t *testing.T) { m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1).Return(&sequence, nil).Once() rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) - m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil) m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) m.stateMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( @@ -1630,7 +1647,7 @@ func Test_tryGenerateBatchProof(t *testing.T) { }, }, nil).Twice() - m.rpcMock.On("GetBatch", lastVerifiedBatchNum).Return(rpcBatch, nil).Twice() + m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil) expectedInputProver, err := a.buildInputProver(context.Background(), &batch, []byte("witness")) require.NoError(err) @@ -1672,7 +1689,7 @@ func Test_tryGenerateBatchProof(t *testing.T) { m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1).Return(&sequence, nil).Once() rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) - m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil).Once() + m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil) m.stateMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( func(args mock.Arguments) { @@ -1695,7 +1712,6 @@ func Test_tryGenerateBatchProof(t *testing.T) { }, }, nil).Twice() - m.rpcMock.On("GetBatch", lastVerifiedBatchNum).Return(rpcBatch, nil).Twice() expectedInputProver, err := a.buildInputProver(context.Background(), &batch, []byte("witness")) require.NoError(err) @@ -1769,12 +1785,9 @@ func Test_tryGenerateBatchProof(t *testing.T) { }, }, nil).Twice() - rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), 
common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) + rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) - rpcBatch2 := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) - rpcBatch2.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) - m.rpcMock.On("GetBatch", lastVerifiedBatchNum).Return(rpcBatch, nil) - m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch2, nil) + m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil) m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) virtualBatch := synchronizer.VirtualBatch{ @@ -1841,12 +1854,6 @@ func Test_tryGenerateBatchProof(t *testing.T) { } m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1).Return(&sequence, nil).Once() - rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) - rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) - rpcBatch2 := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) - rpcBatch2.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) - m.rpcMock.On("GetBatch", lastVerifiedBatchNum).Return(rpcBatch, nil) - m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch2, nil) m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) virtualBatch := synchronizer.VirtualBatch{ @@ -1858,6 +1865,9 @@ func Test_tryGenerateBatchProof(t *testing.T) { m.synchronizerMock.On("GetVirtualBatchByBatchNumber", mock.Anything, lastVerifiedBatchNum+1).Return(&virtualBatch, nil).Once() m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) + rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) + rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) + m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil) m.stateMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( @@ -1932,6 +1942,8 @@ func Test_tryGenerateBatchProof(t *testing.T) { profitabilityChecker: NewTxProfitabilityCheckerAcceptAll(stateMock, cfg.IntervalAfterWhichBatchConsolidateAnyway.Duration), l1Syncr: synchronizerMock, rpcClient: mockRPC, + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, } aggregatorCtx := context.WithValue(context.Background(), "owner", ownerAggregator) //nolint:staticcheck a.ctx, a.exit = context.WithCancel(aggregatorCtx) diff --git a/aggsender/aggsender.go b/aggsender/aggsender.go index a228e1a9..f1df20ff 100644 --- 
a/aggsender/aggsender.go +++ b/aggsender/aggsender.go @@ -6,6 +6,7 @@ import ( "encoding/json" "errors" "fmt" + "math/big" "os" "time" @@ -153,7 +154,7 @@ func (a *AggSender) sendCertificate(ctx context.Context) error { a.log.Infof("building certificate for block: %d to block: %d", fromBlock, toBlock) - certificate, err := a.buildCertificate(ctx, bridges, claims, lastSentCertificateInfo) + certificate, err := a.buildCertificate(ctx, bridges, claims, lastSentCertificateInfo, toBlock) if err != nil { return fmt.Errorf("error building certificate: %w", err) } @@ -209,7 +210,8 @@ func (a *AggSender) saveCertificateToFile(signedCertificate *agglayer.SignedCert func (a *AggSender) buildCertificate(ctx context.Context, bridges []bridgesync.Bridge, claims []bridgesync.Claim, - lastSentCertificateInfo aggsendertypes.CertificateInfo) (*agglayer.Certificate, error) { + lastSentCertificateInfo aggsendertypes.CertificateInfo, + toBlock uint64) (*agglayer.Certificate, error) { if len(bridges) == 0 && len(claims) == 0 { return nil, errNoBridgesAndClaims } @@ -245,6 +247,7 @@ func (a *AggSender) buildCertificate(ctx context.Context, BridgeExits: bridgeExits, ImportedBridgeExits: importedBridgeExits, Height: height, + Metadata: createCertificateMetadata(toBlock), }, nil } @@ -412,13 +415,19 @@ func (a *AggSender) getImportedBridgeExits( // signCertificate signs a certificate with the sequencer key func (a *AggSender) signCertificate(certificate *agglayer.Certificate) (*agglayer.SignedCertificate, error) { - hashToSign := certificate.Hash() + hashToSign := certificate.HashToSign() sig, err := crypto.Sign(hashToSign.Bytes(), a.sequencerKey) if err != nil { return nil, err } + a.log.Infof("Signed certificate. sequencer address: %s. New local exit root: %s Hash signed: %s", + crypto.PubkeyToAddress(a.sequencerKey.PublicKey).String(), + common.BytesToHash(certificate.NewLocalExitRoot[:]).String(), + hashToSign.String(), + ) + r, s, isOddParity, err := extractSignatureData(sig) if err != nil { return nil, err @@ -500,3 +509,8 @@ func extractSignatureData(signature []byte) (r, s common.Hash, isOddParity bool, return } + +// createCertificateMetadata creates a certificate metadata from given input +func createCertificateMetadata(toBlock uint64) common.Hash { + return common.BigToHash(new(big.Int).SetUint64(toBlock)) +} diff --git a/aggsender/aggsender_test.go b/aggsender/aggsender_test.go index 69dc6ed1..71878679 100644 --- a/aggsender/aggsender_test.go +++ b/aggsender/aggsender_test.go @@ -493,6 +493,7 @@ func TestBuildCertificate(t *testing.T) { bridges []bridgesync.Bridge claims []bridgesync.Claim lastSentCertificateInfo aggsendertypes.CertificateInfo + toBlock uint64 mockFn func() expectedCert *agglayer.Certificate expectedError bool @@ -532,10 +533,12 @@ func TestBuildCertificate(t *testing.T) { NewLocalExitRoot: common.HexToHash("0x123"), Height: 1, }, + toBlock: 10, expectedCert: &agglayer.Certificate{ NetworkID: 1, PrevLocalExitRoot: common.HexToHash("0x123"), NewLocalExitRoot: common.HexToHash("0x789"), + Metadata: createCertificateMetadata(10), BridgeExits: []*agglayer.BridgeExit{ { LeafType: agglayer.LeafTypeAsset, @@ -686,7 +689,7 @@ func TestBuildCertificate(t *testing.T) { l1infoTreeSyncer: mockL1InfoTreeSyncer, log: log.WithFields("test", "unittest"), } - cert, err := aggSender.buildCertificate(context.Background(), tt.bridges, tt.claims, tt.lastSentCertificateInfo) + cert, err := aggSender.buildCertificate(context.Background(), tt.bridges, tt.claims, tt.lastSentCertificateInfo, tt.toBlock) if 
tt.expectedError { require.Error(t, err) diff --git a/claimsponsor/claimsponsor.go b/claimsponsor/claimsponsor.go index c9df6561..32483789 100644 --- a/claimsponsor/claimsponsor.go +++ b/claimsponsor/claimsponsor.go @@ -2,56 +2,51 @@ package claimsponsor import ( "context" - "encoding/json" + "database/sql" "errors" - "math" + "fmt" "math/big" "time" - dbCommon "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/claimsponsor/migrations" "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sync" tree "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/common" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/iter" - "github.com/ledgerwatch/erigon-lib/kv/mdbx" + "github.com/russross/meddler" ) type ClaimStatus string const ( - PendingClaimStatus = "pending" - WIPStatus = "work in progress" - SuccessClaimStatus = "success" - FailedClaimStatus = "failed" - - claimTable = "claimsponsor-tx" - queueTable = "claimsponsor-queue" + PendingClaimStatus ClaimStatus = "pending" + WIPClaimStatus ClaimStatus = "work in progress" + SuccessClaimStatus ClaimStatus = "success" + FailedClaimStatus ClaimStatus = "failed" ) var ( - ErrInvalidClaim = errors.New("invalid claim") + ErrInvalidClaim = errors.New("invalid claim") + ErrClaimDoesntExist = errors.New("the claim requested to be updated does not exist") ) // Claim representation of a claim event type Claim struct { - LeafType uint8 - ProofLocalExitRoot tree.Proof - ProofRollupExitRoot tree.Proof - GlobalIndex *big.Int - MainnetExitRoot common.Hash - RollupExitRoot common.Hash - OriginNetwork uint32 - OriginTokenAddress common.Address - DestinationNetwork uint32 - DestinationAddress common.Address - Amount *big.Int - Metadata []byte - - Status ClaimStatus - TxID string + LeafType uint8 `meddler:"leaf_type"` + ProofLocalExitRoot tree.Proof `meddler:"proof_local_exit_root,merkleproof"` + ProofRollupExitRoot tree.Proof `meddler:"proof_rollup_exit_root,merkleproof"` + GlobalIndex *big.Int `meddler:"global_index,bigint"` + MainnetExitRoot common.Hash `meddler:"mainnet_exit_root,hash"` + RollupExitRoot common.Hash `meddler:"rollup_exit_root,hash"` + OriginNetwork uint32 `meddler:"origin_network"` + OriginTokenAddress common.Address `meddler:"origin_token_address,address"` + DestinationNetwork uint32 `meddler:"destination_network"` + DestinationAddress common.Address `meddler:"destination_address,address"` + Amount *big.Int `meddler:"amount,bigint"` + Metadata []byte `meddler:"metadata"` + Status ClaimStatus `meddler:"status"` + TxID string `meddler:"tx_id"` } func (c *Claim) Key() []byte { @@ -66,7 +61,7 @@ type ClaimSender interface { type ClaimSponsor struct { logger *log.Logger - db kv.RwDB + db *sql.DB sender ClaimSender rh *sync.RetryHandler waitTxToBeMinedPeriod time.Duration @@ -82,18 +77,11 @@ func newClaimSponsor( waitTxToBeMinedPeriod time.Duration, waitOnEmptyQueue time.Duration, ) (*ClaimSponsor, error) { - tableCfgFunc := func(defaultBuckets kv.TableCfg) kv.TableCfg { - cfg := kv.TableCfg{ - claimTable: {}, - queueTable: {}, - } - - return cfg + err := migrations.RunMigrations(dbPath) + if err != nil { + return nil, err } - db, err := mdbx.NewMDBX(nil). - Path(dbPath). - WithTableCfg(tableCfgFunc). 
- Open() + db, err := db.NewSQLiteDB(dbPath) if err != nil { return nil, err } @@ -115,264 +103,136 @@ func newClaimSponsor( func (c *ClaimSponsor) Start(ctx context.Context) { var ( attempts int - err error ) for { + err := c.claim(ctx) if err != nil { attempts++ + c.logger.Error(err) c.rh.Handle("claimsponsor main loop", attempts) + } else { + attempts = 0 } - tx, err2 := c.db.BeginRw(ctx) - if err2 != nil { - err = err2 - c.logger.Errorf("error calling BeginRw: %v", err) - continue - } - queueIndex, globalIndex, err2 := getFirstQueueIndex(tx) - if err2 != nil { - err = err2 - tx.Rollback() - if errors.Is(err, db.ErrNotFound) { - c.logger.Debugf("queue is empty") - err = nil - time.Sleep(c.waitOnEmptyQueue) - - continue - } - c.logger.Errorf("error calling getFirstQueueIndex: %v", err) - continue - } - claim, err2 := getClaim(tx, globalIndex) - if err2 != nil { - err = err2 - tx.Rollback() - c.logger.Errorf("error calling getClaim with globalIndex %s: %v", globalIndex.String(), err) - continue - } - if claim.TxID == "" { - txID, err2 := c.sender.sendClaim(ctx, claim) - if err2 != nil { - err = err2 - tx.Rollback() - c.logger.Errorf("error calling sendClaim with globalIndex %s: %v", globalIndex.String(), err) - continue - } - claim.TxID = txID - claim.Status = WIPStatus - err2 = putClaim(tx, claim) - if err2 != nil { - err = err2 - tx.Rollback() - c.logger.Errorf("error calling putClaim with globalIndex %s: %v", globalIndex.String(), err) - continue - } - } - err2 = tx.Commit() - if err2 != nil { - err = err2 - c.logger.Errorf("error calling tx.Commit after putting claim: %v", err) - continue - } - - c.logger.Infof("waiting for tx %s with global index %s to succeed or fail", claim.TxID, globalIndex.String()) - status, err2 := c.waitTxToBeSuccessOrFail(ctx, claim.TxID) - if err2 != nil { - err = err2 - c.logger.Errorf("error calling waitTxToBeSuccessOrFail for tx %s: %v", claim.TxID, err) - continue - } - c.logger.Infof("tx %s with global index %s concluded with status: %s", claim.TxID, globalIndex.String(), status) - tx, err2 = c.db.BeginRw(ctx) - if err2 != nil { - err = err2 - c.logger.Errorf("error calling BeginRw: %v", err) - continue - } - claim.Status = status - err2 = putClaim(tx, claim) - if err2 != nil { - err = err2 - tx.Rollback() - c.logger.Errorf("error calling putClaim with globalIndex %s: %v", globalIndex.String(), err) - continue - } - err2 = tx.Delete(queueTable, dbCommon.Uint64ToBytes(queueIndex)) - if err2 != nil { - err = err2 - tx.Rollback() - c.logger.Errorf("error calling delete on the queue table with index %d: %v", queueIndex, err) - continue - } - err2 = tx.Commit() - if err2 != nil { - err = err2 - c.logger.Errorf("error calling tx.Commit after putting claim: %v", err) - continue - } - - attempts = 0 } } -func (c *ClaimSponsor) waitTxToBeSuccessOrFail(ctx context.Context, txID string) (ClaimStatus, error) { - t := time.NewTicker(c.waitTxToBeMinedPeriod) - for { - select { - case <-ctx.Done(): - return "", errors.New("context cancelled") - case <-t.C: - status, err := c.sender.claimStatus(ctx, txID) - if err != nil { - return "", err - } - if status == FailedClaimStatus || status == SuccessClaimStatus { - return status, nil +func (c *ClaimSponsor) claim(ctx context.Context) error { + claim, err := c.getWIPClaim() + if err != nil && !errors.Is(err, db.ErrNotFound) { + return fmt.Errorf("error getting WIP claim: %w", err) + } + if errors.Is(err, db.ErrNotFound) || claim == nil { + // there is no WIP claim, go for the next pending claim + claim, err = 
c.getFirstPendingClaim()
+		if err != nil {
+			if errors.Is(err, db.ErrNotFound) {
+				c.logger.Debugf("queue is empty")
+				time.Sleep(c.waitOnEmptyQueue)
+				return nil
+			}
+			return fmt.Errorf("error getting first pending claim: %w", err)
+		}
-	}
-}
-
-func (c *ClaimSponsor) AddClaimToQueue(ctx context.Context, claim *Claim) error {
-	if claim.GlobalIndex == nil {
-		return ErrInvalidClaim
-	}
-	claim.Status = PendingClaimStatus
-	tx, err := c.db.BeginRw(ctx)
-	if err != nil {
-		return err
-	}
-
-	_, err = getClaim(tx, claim.GlobalIndex)
-	if !errors.Is(err, db.ErrNotFound) {
+		txID, err := c.sender.sendClaim(ctx, claim)
 		if err != nil {
-			tx.Rollback()
-
-			return err
-		} else {
-			tx.Rollback()
-
-			return errors.New("claim already added")
+			return fmt.Errorf("error sending claim: %w", err)
+		}
+		if err := c.updateClaimTxID(claim.GlobalIndex, txID); err != nil {
+			return fmt.Errorf("error updating claim txID: %w", err)
 		}
 	}
 
-	err = putClaim(tx, claim)
-	if err != nil {
-		tx.Rollback()
-
-		return err
-	}
-
-	var queuePosition uint64
-	lastQueuePosition, _, err := getLastQueueIndex(tx)
-	switch {
-	case errors.Is(err, db.ErrNotFound):
-		queuePosition = 0
-
-	case err != nil:
-		tx.Rollback()
-
-		return err
-
-	default:
-		queuePosition = lastQueuePosition + 1
-	}
-
-	err = tx.Put(queueTable, dbCommon.Uint64ToBytes(queuePosition), claim.Key())
+	c.logger.Infof("waiting for tx %s with global index %s to succeed or fail", claim.TxID, claim.GlobalIndex.String())
+	status, err := c.waitTxToBeSuccessOrFail(ctx, claim.TxID)
 	if err != nil {
-		tx.Rollback()
-
-		return err
+		return fmt.Errorf("error calling waitTxToBeSuccessOrFail for tx %s: %w", claim.TxID, err)
 	}
-
-	return tx.Commit()
+	c.logger.Infof("tx %s with global index %s concluded with status: %s", claim.TxID, claim.GlobalIndex.String(), status)
+	return c.updateClaimStatus(claim.GlobalIndex, status)
 }
 
-func putClaim(tx kv.RwTx, claim *Claim) error {
-	value, err := json.Marshal(claim)
-	if err != nil {
-		return err
-	}
+func (c *ClaimSponsor) getWIPClaim() (*Claim, error) {
+	claim := &Claim{}
+	err := meddler.QueryRow(
+		c.db, claim,
+		`SELECT * FROM claim WHERE status = $1 ORDER BY rowid ASC LIMIT 1;`,
+		WIPClaimStatus,
+	)
+	return claim, db.ReturnErrNotFound(err)
+}
 
-	return tx.Put(claimTable, claim.Key(), value)
+func (c *ClaimSponsor) getFirstPendingClaim() (*Claim, error) {
+	claim := &Claim{}
+	err := meddler.QueryRow(
+		c.db, claim,
+		`SELECT * FROM claim WHERE status = $1 ORDER BY rowid ASC LIMIT 1;`,
+		PendingClaimStatus,
+	)
+	return claim, db.ReturnErrNotFound(err)
 }
 
-func (c *ClaimSponsor) getClaimByQueueIndex(ctx context.Context, queueIndex uint64) (*Claim, error) {
-	tx, err := c.db.BeginRo(ctx)
+func (c *ClaimSponsor) updateClaimTxID(globalIndex *big.Int, txID string) error {
+	res, err := c.db.Exec(
+		`UPDATE claim SET tx_id = $1 WHERE global_index = $2`,
+		txID, globalIndex.String(),
+	)
 	if err != nil {
-		return nil, err
+		return fmt.Errorf("error updating claim txID: %w", err)
 	}
-	defer tx.Rollback()
-
-	globalIndexBytes, err := tx.GetOne(queueTable, dbCommon.Uint64ToBytes(queueIndex))
+	rowsAff, err := res.RowsAffected()
 	if err != nil {
-		return nil, err
+		return fmt.Errorf("error getting rows affected: %w", err)
 	}
-	if globalIndexBytes == nil {
-		return nil, db.ErrNotFound
+	if rowsAff == 0 {
+		return ErrClaimDoesntExist
 	}
-
-	return getClaim(tx, new(big.Int).SetBytes(globalIndexBytes))
+	return nil
 }
 
-func getLastQueueIndex(tx kv.Tx) (uint64, *big.Int, error) {
-	iter, err := 
tx.RangeDescend( - queueTable, - dbCommon.Uint64ToBytes(math.MaxUint64), - dbCommon.Uint64ToBytes(0), 1, +func (c *ClaimSponsor) updateClaimStatus(globalIndex *big.Int, status ClaimStatus) error { + res, err := c.db.Exec( + `UPDATE claim SET status = $1 WHERE global_index = $2`, + status, globalIndex.String(), ) if err != nil { - return 0, nil, err + return fmt.Errorf("error updating claim status: %w", err) } - - return getIndex(iter) -} - -func getFirstQueueIndex(tx kv.Tx) (uint64, *big.Int, error) { - iter, err := tx.RangeAscend( - queueTable, - dbCommon.Uint64ToBytes(0), - nil, 1, - ) + rowsAff, err := res.RowsAffected() if err != nil { - return 0, nil, err + return fmt.Errorf("error getting rows affected: %w", err) } - - return getIndex(iter) -} - -func getIndex(iter iter.KV) (uint64, *big.Int, error) { - k, v, err := iter.Next() - if err != nil { - return 0, nil, err - } - if k == nil { - return 0, nil, db.ErrNotFound + if rowsAff == 0 { + return ErrClaimDoesntExist } - globalIndex := new(big.Int).SetBytes(v) - - return dbCommon.BytesToUint64(k), globalIndex, nil + return nil } -func (c *ClaimSponsor) GetClaim(ctx context.Context, globalIndex *big.Int) (*Claim, error) { - tx, err := c.db.BeginRo(ctx) - if err != nil { - return nil, err +func (c *ClaimSponsor) waitTxToBeSuccessOrFail(ctx context.Context, txID string) (ClaimStatus, error) { + t := time.NewTicker(c.waitTxToBeMinedPeriod) + for { + select { + case <-ctx.Done(): + return "", errors.New("context cancelled") + case <-t.C: + status, err := c.sender.claimStatus(ctx, txID) + if err != nil { + return "", err + } + if status == FailedClaimStatus || status == SuccessClaimStatus { + return status, nil + } + } } - defer tx.Rollback() +} - return getClaim(tx, globalIndex) +func (c *ClaimSponsor) AddClaimToQueue(claim *Claim) error { + claim.Status = PendingClaimStatus + return meddler.Insert(c.db, "claim", claim) } -func getClaim(tx kv.Tx, globalIndex *big.Int) (*Claim, error) { - claimBytes, err := tx.GetOne(claimTable, globalIndex.Bytes()) - if err != nil { - return nil, err - } - if claimBytes == nil { - return nil, db.ErrNotFound - } +func (c *ClaimSponsor) GetClaim(globalIndex *big.Int) (*Claim, error) { claim := &Claim{} - err = json.Unmarshal(claimBytes, claim) - - return claim, err + err := meddler.QueryRow( + c.db, claim, `SELECT * FROM claim WHERE global_index = $1`, globalIndex.String(), + ) + return claim, db.ReturnErrNotFound(err) } diff --git a/claimsponsor/e2e_test.go b/claimsponsor/e2e_test.go index 426d7b3e..dc61416e 100644 --- a/claimsponsor/e2e_test.go +++ b/claimsponsor/e2e_test.go @@ -31,7 +31,7 @@ func TestE2EL1toEVML2(t *testing.T) { go bridgeSyncL1.Start(ctx) // start claim sponsor - dbPathClaimSponsor := t.TempDir() + dbPathClaimSponsor := path.Join(t.TempDir(), "file::memory:?cache=shared") claimer, err := claimsponsor.NewEVMClaimSponsor( log.GetDefaultLogger(), dbPathClaimSponsor, @@ -71,7 +71,7 @@ func TestE2EL1toEVML2(t *testing.T) { // Request to sponsor claim globalIndex := bridgesync.GenerateGlobalIndex(true, 0, uint32(i)) - err = claimer.AddClaimToQueue(ctx, &claimsponsor.Claim{ + err = claimer.AddClaimToQueue(&claimsponsor.Claim{ LeafType: 0, ProofLocalExitRoot: localProof, ProofRollupExitRoot: rollupProof, @@ -90,7 +90,7 @@ func TestE2EL1toEVML2(t *testing.T) { // Wait until success succeed := false for i := 0; i < 10; i++ { - claim, err := claimer.GetClaim(ctx, globalIndex) + claim, err := claimer.GetClaim(globalIndex) require.NoError(t, err) if claim.Status == claimsponsor.FailedClaimStatus { 
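 			// FailedClaimStatus is terminal for the sponsor (waitTxToBeSuccessOrFail
 			// only ever resolves to success or failed), so stop polling right away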
require.NoError(t, errors.New("claim failed")) diff --git a/claimsponsor/evmclaimsponsor.go b/claimsponsor/evmclaimsponsor.go index 12d0c4ca..6f315d94 100644 --- a/claimsponsor/evmclaimsponsor.go +++ b/claimsponsor/evmclaimsponsor.go @@ -168,7 +168,7 @@ func (c *EVMClaimSponsor) claimStatus(ctx context.Context, id string) (ClaimStat switch res.Status { case ethtxtypes.MonitoredTxStatusCreated, ethtxtypes.MonitoredTxStatusSent: - return WIPStatus, nil + return WIPClaimStatus, nil case ethtxtypes.MonitoredTxStatusFailed: return FailedClaimStatus, nil case ethtxtypes.MonitoredTxStatusMined, diff --git a/claimsponsor/migrations/claimsponsor0001.sql b/claimsponsor/migrations/claimsponsor0001.sql new file mode 100644 index 00000000..9e4586ea --- /dev/null +++ b/claimsponsor/migrations/claimsponsor0001.sql @@ -0,0 +1,20 @@ +-- +migrate Down +DROP TABLE IF EXISTS claim; + +-- +migrate Up +CREATE TABLE claim ( + leaf_type INT NOT NULL, + proof_local_exit_root VARCHAR NOT NULL, + proof_rollup_exit_root VARCHAR NOT NULL, + global_index VARCHAR NOT NULL, + mainnet_exit_root VARCHAR NOT NULL, + rollup_exit_root VARCHAR NOT NULL, + origin_network INT NOT NULL, + origin_token_address VARCHAR NOT NULL, + destination_network INT NOT NULL, + destination_address VARCHAR NOT NULL, + amount VARCHAR NOT NULL, + metadata VARCHAR, + status VARCHAR NOT NULL, + tx_id VARCHAR NOT NULL +); \ No newline at end of file diff --git a/claimsponsor/migrations/migrations.go b/claimsponsor/migrations/migrations.go new file mode 100644 index 00000000..9166b5b3 --- /dev/null +++ b/claimsponsor/migrations/migrations.go @@ -0,0 +1,21 @@ +package migrations + +import ( + _ "embed" + + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/db/types" +) + +//go:embed claimsponsor0001.sql +var mig001 string + +func RunMigrations(dbPath string) error { + migrations := []types.Migration{ + { + ID: "claimsponsor0001", + SQL: mig001, + }, + } + return db.RunMigrations(dbPath, migrations) +} diff --git a/common/common.go b/common/common.go index c74f56e4..15206902 100644 --- a/common/common.go +++ b/common/common.go @@ -83,6 +83,7 @@ func CalculateAccInputHash( } v2 = keccak256.Hash(v2) + calculatedAccInputHash := common.BytesToHash(keccak256.Hash(v1, v2, v3, v4, v5, v6)) logger.Debugf("OldAccInputHash: %v", oldAccInputHash) logger.Debugf("BatchHashData: %v", common.Bytes2Hex(v2)) @@ -90,8 +91,9 @@ func CalculateAccInputHash( logger.Debugf("TimeStampLimit: %v", timestampLimit) logger.Debugf("Sequencer Address: %v", sequencerAddr) logger.Debugf("Forced BlockHashL1: %v", forcedBlockhashL1) + logger.Debugf("CalculatedAccInputHash: %v", calculatedAccInputHash) - return common.BytesToHash(keccak256.Hash(v1, v2, v3, v4, v5, v6)) + return calculatedAccInputHash } // NewKeyFromKeystore creates a private key from a keystore file @@ -109,3 +111,20 @@ func NewKeyFromKeystore(cfg types.KeystoreFileConfig) (*ecdsa.PrivateKey, error) } return key.PrivateKey, nil } + +// BigIntToLittleEndianBytes converts a big.Int to a 32-byte little-endian representation. 
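+// The sign is discarded (big.Int.Bytes returns the absolute value), so e.g.
+// both 0x0102 and -0x0102 map to [0x02, 0x01, 0x00, ..., 0x00] (32 bytes).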
+// big.Int is capped to 32 bytes +func BigIntToLittleEndianBytes(n *big.Int) []byte { + // Get the absolute value in big-endian byte slice + beBytes := n.Bytes() + + // Initialize a 32-byte array for the result + leBytes := make([]byte, common.HashLength) + + // Fill the array in reverse order to convert to little-endian + for i := 0; i < len(beBytes) && i < common.HashLength; i++ { + leBytes[i] = beBytes[len(beBytes)-1-i] + } + + return leBytes +} diff --git a/common/common_test.go b/common/common_test.go new file mode 100644 index 00000000..b6b99c5f --- /dev/null +++ b/common/common_test.go @@ -0,0 +1,61 @@ +package common + +import ( + "fmt" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestAsLittleEndianSlice(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input *big.Int + expected []byte + }{ + { + name: "Zero value", + input: big.NewInt(0), + expected: make([]byte, 32), + }, + { + name: "Positive value", + input: big.NewInt(123456789), + expected: append([]byte{21, 205, 91, 7}, make([]byte, 28)...), + }, + { + name: "Negative value", + input: big.NewInt(-123456789), + expected: append([]byte{21, 205, 91, 7}, make([]byte, 28)...), + }, + { + name: "Large positive value", + input: new(big.Int).SetBytes([]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}), + expected: []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + result := BigIntToLittleEndianBytes(tt.input) + require.Len(t, result, common.HashLength) + + for i := range result { + require.Equal(t, tt.expected[i], result[i], + fmt.Sprintf("expected byte at index %d to be %x, got %x", i, tt.expected[i], result[i])) + } + }) + } +} diff --git a/config/default.go b/config/default.go index 7f2ae8b6..096d98de 100644 --- a/config/default.go +++ b/config/default.go @@ -5,8 +5,6 @@ package config const DefaultMandatoryVars = ` L1URL = "http://localhost:8545" L2URL = "http://localhost:8123" -L1AggOracleURL = "http://test-aggoracle-l1:8545" -L2AggOracleURL = "http://test-aggoracle-l2:8545" AggLayerURL = "https://agglayer-dev.polygon.technology" ForkId = 9 @@ -219,18 +217,18 @@ GlobalExitRootAddr="{{NetworkConfig.L1.GlobalExitRootManagerAddr}}" RollupManagerAddr = "{{NetworkConfig.L1.RollupManagerAddr}}" SyncBlockChunkSize=10 BlockFinality="LatestBlock" -URLRPCL1="{{L1AggOracleURL}}" +URLRPCL1="{{L1URL}}" WaitForNewBlocksPeriod="100ms" InitialBlock=0 [AggOracle] TargetChainType="EVM" -URLRPCL1="{{L1AggOracleURL}}" +URLRPCL1="{{L1URL}}" BlockFinality="FinalizedBlock" WaitPeriodNextGER="100ms" [AggOracle.EVMSender] GlobalExitRootL2="{{L2Config.GlobalExitRootAddr}}" - URLRPCL2="{{L2AggOracleURL}}" + URLRPCL2="{{L2URL}}" ChainIDL2=1337 GasOffset=0 WaitPeriodMonitorTx="100ms" @@ -251,7 +249,7 @@ WaitPeriodNextGER="100ms" SafeStatusL1NumberOfBlocks = 5 FinalizedStatusL1NumberOfBlocks = 10 [AggOracle.EVMSender.EthTxManager.Etherman] - URL = "{{L2AggOracleURL}}" + URL = "{{L2URL}}" MultiGasProvider = false L1ChainID = {{NetworkConfig.L1.L1ChainID}} HTTPHeaders = [] @@ -290,7 +288,7 @@ GasOffset = 0 SafeStatusL1NumberOfBlocks = 5 
FinalizedStatusL1NumberOfBlocks = 10 [ClaimSponsor.EthTxManager.Etherman] - URL = "{{L2AggOracleURL}}" + URL = "{{L2URL}}" MultiGasProvider = false L1ChainID = {{NetworkConfig.L1.L1ChainID}} HTTPHeaders = [] diff --git a/go.mod b/go.mod index ae03382e..4a3a983e 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240826154954-f6182d2b17a2 github.com/0xPolygon/cdk-data-availability v0.0.10 github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6 - github.com/0xPolygon/zkevm-ethtx-manager v0.2.0 + github.com/0xPolygon/zkevm-ethtx-manager v0.2.1 github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.5 github.com/ethereum/go-ethereum v1.14.8 github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 diff --git a/go.sum b/go.sum index 96f2dc93..28771a51 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,8 @@ github.com/0xPolygon/cdk-data-availability v0.0.10 h1:pVcke2I7GuPH7JeRLKokEOHffP github.com/0xPolygon/cdk-data-availability v0.0.10/go.mod h1:nn5RmnkzOiugAxizSbaYnA+em79YLLLoR25i0UlKc5Q= github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6 h1:FXL/rcO7/GtZ3kRFw+C7J6vmGnl8gcazg+Gh/NVmnas= github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6/go.mod h1:2scWqMMufrQXu7TikDgQ3BsyaKoX8qP26D6E262vSOg= -github.com/0xPolygon/zkevm-ethtx-manager v0.2.0 h1:QWE6nKBBHkMEiza723hJk0+oZbLSdQZTX4I48jWw15I= -github.com/0xPolygon/zkevm-ethtx-manager v0.2.0/go.mod h1:lqQmzSo2OXEZItD0R4Cd+lqKFxphXEWgqHefVcGDZZc= +github.com/0xPolygon/zkevm-ethtx-manager v0.2.1 h1:2Yb+KdJFMpVrS9LIkd658XiWuN+MCTs7SgeWaopXScg= +github.com/0xPolygon/zkevm-ethtx-manager v0.2.1/go.mod h1:lqQmzSo2OXEZItD0R4Cd+lqKFxphXEWgqHefVcGDZZc= github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.5 h1:YmnhuCl349MoNASN0fMeGKU1o9HqJhiZkfMsA/1cTRA= github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.5/go.mod h1:X4Su/M/+hSISqdl9yomKlRsbTyuZHsRohporyHsP8gg= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= diff --git a/l1infotree/tree.go b/l1infotree/tree.go index 17258ba0..f3ad6d36 100644 --- a/l1infotree/tree.go +++ b/l1infotree/tree.go @@ -109,17 +109,15 @@ func (mt *L1InfoTree) ComputeMerkleProof(gerIndex uint32, leaves [][32]byte) ([] if len(leaves)%2 == 1 { leaves = append(leaves, mt.zeroHashes[h]) } - if index%2 == 1 { // If it is odd - siblings = append(siblings, leaves[index-1]) - } else if len(leaves) > 1 { // It is even - if index >= uint32(len(leaves)) { - // siblings = append(siblings, mt.zeroHashes[h]) + if index >= uint32(len(leaves)) { + siblings = append(siblings, mt.zeroHashes[h]) + } else { + if index%2 == 1 { // If it is odd siblings = append(siblings, leaves[index-1]) - } else { + } else { // It is even siblings = append(siblings, leaves[index+1]) } } - var ( nsi [][][]byte hashes [][32]byte diff --git a/l1infotree/tree_test.go b/l1infotree/tree_test.go index a0fe9b97..6af4b8b3 100644 --- a/l1infotree/tree_test.go +++ b/l1infotree/tree_test.go @@ -3,7 +3,6 @@ package l1infotree_test import ( "encoding/hex" "encoding/json" - "fmt" "os" "testing" @@ -130,56 +129,3 @@ func TestAddLeaf2(t *testing.T) { require.Equal(t, testVector.NewRoot, newRoot) } } - -func TestAddLeaf2TestLastLeaf(t *testing.T) { - mt, err := l1infotree.NewL1InfoTree(log.GetDefaultLogger(), uint8(32), [][32]byte{}) - require.NoError(t, err) - leaves := [][32]byte{ - common.HexToHash("0x6a617315ffc0a6831d2de6331f8d3e053889e9385696c13f11853fdcba50e123"), - 
common.HexToHash("0x1cff355b898cf285bcc3f84a8d6ed51c19fe87ab654f4146f2dc7723a59fc741"), - } - siblings, root, err := mt.ComputeMerkleProof(2, leaves) - require.NoError(t, err) - fmt.Printf("Root: %s\n", root.String()) - for i := 0; i < len(siblings); i++ { - hash := common.BytesToHash(siblings[i][:]) - fmt.Printf("Sibling %d: %s\n", i, hash.String()) - } - expectedProof := []string{ - "0x1cff355b898cf285bcc3f84a8d6ed51c19fe87ab654f4146f2dc7723a59fc741", - "0x7ae3eca221dee534b82adffb8003ad3826ddf116132e4ff55c681ff723bc7e42", - "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", - "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", - "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", - "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", - "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", - "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", - "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", - "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", - "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", - "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", - "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", - "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", - "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", - "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", - "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", - "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", - "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", - "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", - "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", - "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", - "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", - "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", - "0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", - "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", - "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", - "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", - "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", - "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", - "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", - "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9"} - for i := 0; i < len(siblings); i++ { - require.Equal(t, expectedProof[i], "0x"+hex.EncodeToString(siblings[i][:])) - } - require.Equal(t, "0xb85687d05a6bdccadcc1170a0e2bbba6855c35c984a0bc91697bc066bd38a338", root.String()) -} diff --git a/lastgersync/e2e_test.go b/lastgersync/e2e_test.go index e4d5e407..9b9a6f36 100644 --- a/lastgersync/e2e_test.go +++ b/lastgersync/e2e_test.go @@ -3,6 +3,7 @@ package lastgersync_test import ( "context" "fmt" + "path" "strconv" "testing" "time" @@ -18,7 +19,7 @@ import ( func TestE2E(t *testing.T) { ctx := context.Background() env := aggoraclehelpers.SetupAggoracleWithEVMChain(t) - dbPathSyncer := t.TempDir() + dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") syncer, err := lastgersync.New( ctx, dbPathSyncer, @@ -65,8 +66,8 @@ func TestE2E(t *testing.T) { } require.True(t, syncerUpToDate, errMsg) 
- _, actualGER, err := syncer.GetFirstGERAfterL1InfoTreeIndex(ctx, uint32(i)) - require.NoError(t, err) - require.Equal(t, common.Hash(expectedGER), actualGER) + e, err := syncer.GetFirstGERAfterL1InfoTreeIndex(ctx, uint32(i)) + require.NoError(t, err, fmt.Sprint("iteration: ", i)) + require.Equal(t, common.Hash(expectedGER), e.GlobalExitRoot, fmt.Sprint("iteration: ", i)) } } diff --git a/lastgersync/evmdownloader.go b/lastgersync/evmdownloader.go index e76bb578..bf9a236f 100644 --- a/lastgersync/evmdownloader.go +++ b/lastgersync/evmdownloader.go @@ -62,13 +62,13 @@ func newDownloader( func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedCh chan sync.EVMBlock) { var ( attempts int - lastIndex uint32 + nextIndex uint32 err error ) for { - lastIndex, err = d.processor.getLastIndex(ctx) + lastIndex, err := d.processor.getLastIndex() if errors.Is(err, db.ErrNotFound) { - lastIndex = 0 + nextIndex = 0 } else if err != nil { log.Errorf("error getting last index: %v", err) attempts++ @@ -76,7 +76,9 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC continue } - + if lastIndex > 0 { + nextIndex = lastIndex + 1 + } break } for { @@ -88,12 +90,12 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC return default: } - lastBlock := d.WaitForNewBlocks(ctx, fromBlock) + fromBlock = d.WaitForNewBlocks(ctx, fromBlock) attempts = 0 var gers []Event for { - gers, err = d.getGERsFromIndex(ctx, lastIndex) + gers, err = d.getGERsFromIndex(ctx, nextIndex) if err != nil { log.Errorf("error getting GERs: %v", err) attempts++ @@ -105,7 +107,7 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC break } - blockHeader, isCanceled := d.GetBlockHeader(ctx, lastBlock) + blockHeader, isCanceled := d.GetBlockHeader(ctx, fromBlock) if isCanceled { return } @@ -126,7 +128,7 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC if !ok { log.Errorf("unexpected type %T in events", block.Events[0]) } - lastIndex = event.L1InfoTreeIndex + nextIndex = event.L1InfoTreeIndex + 1 } } } diff --git a/lastgersync/lastgersync.go b/lastgersync/lastgersync.go index 1b40bfcf..c6689293 100644 --- a/lastgersync/lastgersync.go +++ b/lastgersync/lastgersync.go @@ -32,7 +32,7 @@ func New( waitForNewBlocksPeriod time.Duration, downloadBufferSize int, ) (*LastGERSync, error) { - processor, err := newProcessor(dbPath) + processor, err := newProcessor(dbPath, "lastGERSync") if err != nil { return nil, err } @@ -75,7 +75,7 @@ func (s *LastGERSync) Start(ctx context.Context) { func (s *LastGERSync) GetFirstGERAfterL1InfoTreeIndex( ctx context.Context, atOrAfterL1InfoTreeIndex uint32, -) (injectedL1InfoTreeIndex uint32, ger common.Hash, err error) { +) (Event, error) { return s.processor.GetFirstGERAfterL1InfoTreeIndex(ctx, atOrAfterL1InfoTreeIndex) } diff --git a/lastgersync/migrations/lastgersync0001.sql b/lastgersync/migrations/lastgersync0001.sql new file mode 100644 index 00000000..88021fa1 --- /dev/null +++ b/lastgersync/migrations/lastgersync0001.sql @@ -0,0 +1,14 @@ +-- +migrate Down +DROP TABLE IF EXISTS block; +DROP TABLE IF EXISTS imported_global_exit_root; + +-- +migrate Up +CREATE TABLE block ( + num BIGINT PRIMARY KEY +); + +CREATE TABLE imported_global_exit_root ( + block_num INTEGER PRIMARY KEY REFERENCES block(num) ON DELETE CASCADE, + global_exit_root VARCHAR NOT NULL, + l1_info_tree_index INTEGER NOT NULL +); \ No newline at end of file diff --git a/lastgersync/migrations/migrations.go
b/lastgersync/migrations/migrations.go new file mode 100644 index 00000000..d55dd449 --- /dev/null +++ b/lastgersync/migrations/migrations.go @@ -0,0 +1,21 @@ +package migrations + +import ( + _ "embed" + + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/db/types" +) + +//go:embed lastgersync0001.sql +var mig001 string + +func RunMigrations(dbPath string) error { + migrations := []types.Migration{ + { + ID: "lastgersync0001", + SQL: mig001, + }, + } + return db.RunMigrations(dbPath, migrations) +} diff --git a/lastgersync/processor.go b/lastgersync/processor.go index 45104f09..dd86482f 100644 --- a/lastgersync/processor.go +++ b/lastgersync/processor.go @@ -2,292 +2,136 @@ package lastgersync import ( "context" + "database/sql" "errors" - "fmt" - "math" - "github.com/0xPolygon/cdk/common" "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/lastgersync/migrations" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sync" ethCommon "github.com/ethereum/go-ethereum/common" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/mdbx" -) - -const ( - lastProcessedTable = "lastgersync-lastProcessed" - gerTable = "lastgersync-ger" - blockTable = "lastgersync-block" -) - -var ( - lastProcessedKey = []byte("lp") + "github.com/russross/meddler" ) type Event struct { - GlobalExitRoot ethCommon.Hash - L1InfoTreeIndex uint32 + GlobalExitRoot ethCommon.Hash `meddler:"global_exit_root,hash"` + L1InfoTreeIndex uint32 `meddler:"l1_info_tree_index"` } -type blockWithGERs struct { - // inclusive - FirstIndex uint32 - // not inclusive - LastIndex uint32 -} - -func (b *blockWithGERs) MarshalBinary() ([]byte, error) { - return append(common.Uint32ToBytes(b.FirstIndex), common.Uint32ToBytes(b.LastIndex)...), nil -} - -func (b *blockWithGERs) UnmarshalBinary(data []byte) error { - const expectedDataLength = 8 - if len(data) != expectedDataLength { - return fmt.Errorf("expected len %d, actual len %d", expectedDataLength, len(data)) - } - b.FirstIndex = common.BytesToUint32(data[:4]) - b.LastIndex = common.BytesToUint32(data[4:]) - - return nil +type eventWithBlockNum struct { + GlobalExitRoot ethCommon.Hash `meddler:"global_exit_root,hash"` + L1InfoTreeIndex uint32 `meddler:"l1_info_tree_index"` + BlockNum uint64 `meddler:"block_num"` } type processor struct { - db kv.RwDB + db *sql.DB + log *log.Logger } -func newProcessor(dbPath string) (*processor, error) { - tableCfgFunc := func(defaultBuckets kv.TableCfg) kv.TableCfg { - cfg := kv.TableCfg{ - lastProcessedTable: {}, - gerTable: {}, - blockTable: {}, - } - - return cfg +func newProcessor(dbPath string, loggerPrefix string) (*processor, error) { + err := migrations.RunMigrations(dbPath) + if err != nil { + return nil, err } - db, err := mdbx.NewMDBX(nil). - Path(dbPath). - WithTableCfg(tableCfgFunc). 
- Open() + db, err := db.NewSQLiteDB(dbPath) if err != nil { return nil, err } - + logger := log.WithFields("lastger-syncer", loggerPrefix) return &processor{ - db: db, + db: db, + log: logger, }, nil } -// GetLastProcessedBlockAndL1InfoTreeIndex returns the last processed block oby the processor, including blocks +// GetLastProcessedBlock returns the last processed block by the processor, including blocks // that don't have events func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, error) { - tx, err := p.db.BeginRo(ctx) - if err != nil { - return 0, err - } - defer tx.Rollback() - - return p.getLastProcessedBlockWithTx(tx) -} - -func (p *processor) getLastIndex(ctx context.Context) (uint32, error) { - tx, err := p.db.BeginRo(ctx) - if err != nil { - return 0, err - } - defer tx.Rollback() - - return p.getLastIndexWithTx(tx) -} - -func (p *processor) getLastIndexWithTx(tx kv.Tx) (uint32, error) { - iter, err := tx.RangeDescend(gerTable, common.Uint32ToBytes(math.MaxUint32), common.Uint32ToBytes(0), 1) - if err != nil { - return 0, err - } - k, _, err := iter.Next() - if err != nil { - return 0, err - } - if k == nil { - return 0, db.ErrNotFound + var lastProcessedBlock uint64 + row := p.db.QueryRow("SELECT num FROM BLOCK ORDER BY num DESC LIMIT 1;") + err := row.Scan(&lastProcessedBlock) + if errors.Is(err, sql.ErrNoRows) { + return 0, nil } - - return common.BytesToUint32(k), nil + return lastProcessedBlock, err } -func (p *processor) getLastProcessedBlockWithTx(tx kv.Tx) (uint64, error) { - if lastProcessedBytes, err := tx.GetOne(lastProcessedTable, lastProcessedKey); err != nil { - return 0, err - } else if lastProcessedBytes == nil { +func (p *processor) getLastIndex() (uint32, error) { + var lastIndex uint32 + row := p.db.QueryRow(` + SELECT l1_info_tree_index + FROM imported_global_exit_root + ORDER BY l1_info_tree_index DESC LIMIT 1; + `) + err := row.Scan(&lastIndex) + if errors.Is(err, sql.ErrNoRows) { return 0, nil - } else { - return common.BytesToUint64(lastProcessedBytes), nil } -} - -func (p *processor) updateLastProcessedBlockWithTx(tx kv.RwTx, blockNum uint64) error { - return tx.Put(lastProcessedTable, lastProcessedKey, common.Uint64ToBytes(blockNum)) + return lastIndex, err } func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { - tx, err := p.db.BeginRw(ctx) + tx, err := db.NewTx(ctx, p.db) if err != nil { return err } - - lenEvents := len(block.Events) - var lastIndex int64 - if lenEvents > 0 { - li, err := p.getLastIndexWithTx(tx) - switch { - case errors.Is(err, db.ErrNotFound): - lastIndex = -1 - - case err != nil: - tx.Rollback() - return err - - default: - lastIndex = int64(li) + shouldRollback := true + defer func() { + if shouldRollback { + if errRollback := tx.Rollback(); errRollback != nil { + log.Errorf("error while rolling back tx %v", errRollback) + } } - } + }() + if _, err := tx.Exec(`INSERT INTO block (num) VALUES ($1)`, block.Num); err != nil { + return err + } for _, e := range block.Events { event, ok := e.(Event) if !ok { - log.Errorf("unexpected type %T in events", e) - } - if int64(event.L1InfoTreeIndex) < lastIndex { - continue - } - lastIndex = int64(event.L1InfoTreeIndex) - if err := tx.Put( - gerTable, - common.Uint32ToBytes(event.L1InfoTreeIndex), - event.GlobalExitRoot[:], - ); err != nil { - tx.Rollback() - - return err - } - } - - if lenEvents > 0 { - firstEvent, ok := block.Events[0].(Event) - if !ok { - log.Errorf("unexpected type %T in events", block.Events[0]) - tx.Rollback() - - return 
fmt.Errorf("unexpected type %T in events", block.Events[0]) - } - - lastEvent, ok := block.Events[lenEvents-1].(Event) - if !ok { - log.Errorf("unexpected type %T in events", block.Events[lenEvents-1]) - tx.Rollback() - - return fmt.Errorf("unexpected type %T in events", block.Events[lenEvents-1]) - } - - bwg := blockWithGERs{ - FirstIndex: firstEvent.L1InfoTreeIndex, - LastIndex: lastEvent.L1InfoTreeIndex + 1, + return errors.New("failed to convert sync.Block.Event to Event") } - - data, err := bwg.MarshalBinary() - if err != nil { - tx.Rollback() - - return err - } - if err = tx.Put(blockTable, common.Uint64ToBytes(block.Num), data); err != nil { - tx.Rollback() - + if err = meddler.Insert(tx, "imported_global_exit_root", &eventWithBlockNum{ + GlobalExitRoot: event.GlobalExitRoot, + L1InfoTreeIndex: event.L1InfoTreeIndex, + BlockNum: block.Num, + }); err != nil { return err } } - if err := p.updateLastProcessedBlockWithTx(tx, block.Num); err != nil { - tx.Rollback() - + if err := tx.Commit(); err != nil { return err } - - return tx.Commit() + shouldRollback = false + p.log.Debugf("processed %d events until block %d", len(block.Events), block.Num) + return nil } func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { - tx, err := p.db.BeginRw(ctx) - if err != nil { - return err - } - - iter, err := tx.Range(blockTable, common.Uint64ToBytes(firstReorgedBlock), nil) - if err != nil { - tx.Rollback() - - return err - } - for bNumBytes, bWithGERBytes, err := iter.Next(); bNumBytes != nil; bNumBytes, bWithGERBytes, err = iter.Next() { - if err != nil { - tx.Rollback() - - return err - } - if err := tx.Delete(blockTable, bNumBytes); err != nil { - tx.Rollback() - - return err - } - - bWithGER := &blockWithGERs{} - if err := bWithGER.UnmarshalBinary(bWithGERBytes); err != nil { - tx.Rollback() - - return err - } - for i := bWithGER.FirstIndex; i < bWithGER.LastIndex; i++ { - if err := tx.Delete(gerTable, common.Uint32ToBytes(i)); err != nil { - tx.Rollback() - - return err - } - } - } - - if err := p.updateLastProcessedBlockWithTx(tx, firstReorgedBlock-1); err != nil { - tx.Rollback() - - return err - } - - return tx.Commit() + _, err := p.db.Exec(`DELETE FROM block WHERE num >= $1;`, firstReorgedBlock) + return err } // GetFirstGERAfterL1InfoTreeIndex returns the first GER injected on the chain that is related to l1InfoTreeIndex // or greater func (p *processor) GetFirstGERAfterL1InfoTreeIndex( ctx context.Context, l1InfoTreeIndex uint32, -) (uint32, ethCommon.Hash, error) { - tx, err := p.db.BeginRo(ctx) - if err != nil { - return 0, ethCommon.Hash{}, err - } - defer tx.Rollback() - - iter, err := tx.Range(gerTable, common.Uint32ToBytes(l1InfoTreeIndex), nil) - if err != nil { - return 0, ethCommon.Hash{}, err - } - l1InfoIndexBytes, ger, err := iter.Next() +) (Event, error) { + e := Event{} + err := meddler.QueryRow(p.db, &e, ` + SELECT l1_info_tree_index, global_exit_root + FROM imported_global_exit_root + WHERE l1_info_tree_index >= $1 + ORDER BY l1_info_tree_index ASC LIMIT 1; + `, l1InfoTreeIndex) if err != nil { - return 0, ethCommon.Hash{}, err - } - if l1InfoIndexBytes == nil { - return 0, ethCommon.Hash{}, db.ErrNotFound + if errors.Is(err, sql.ErrNoRows) { + return e, db.ErrNotFound + } + return e, err } - - return common.BytesToUint32(l1InfoIndexBytes), ethCommon.BytesToHash(ger), nil + return e, nil } diff --git a/logerror b/logerror new file mode 100644 index 00000000..cf3e44c1 --- /dev/null +++ b/logerror @@ -0,0 +1 @@ +ok 
github.com/0xPolygon/cdk/l1infotreesync 2.438s diff --git a/rpc/bridge.go b/rpc/bridge.go index 96394a4f..7b52ed73 100644 --- a/rpc/bridge.go +++ b/rpc/bridge.go @@ -132,11 +132,11 @@ func (b *BridgeEndpoints) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeInd return info, nil } if networkID == b.networkID { - injectedL1InfoTreeIndex, _, err := b.injectedGERs.GetFirstGERAfterL1InfoTreeIndex(ctx, l1InfoTreeIndex) + e, err := b.injectedGERs.GetFirstGERAfterL1InfoTreeIndex(ctx, l1InfoTreeIndex) if err != nil { return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get global exit root, error: %s", err)) } - info, err := b.l1InfoTree.GetInfoByIndex(ctx, injectedL1InfoTreeIndex) + info, err := b.l1InfoTree.GetInfoByIndex(ctx, e.L1InfoTreeIndex) if err != nil { return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get global exit root, error: %s", err)) } @@ -229,7 +229,7 @@ func (b *BridgeEndpoints) SponsorClaim(claim claimsponsor.Claim) (interface{}, r fmt.Sprintf("this client only sponsors claims for network %d", b.networkID), ) } - if err := b.sponsor.AddClaimToQueue(ctx, &claim); err != nil { + if err := b.sponsor.AddClaimToQueue(&claim); err != nil { return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("error adding claim to the queue %s", err)) } return nil, nil @@ -250,7 +250,7 @@ func (b *BridgeEndpoints) GetSponsoredClaimStatus(globalIndex *big.Int) (interfa if b.sponsor == nil { return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, "this client does not support claim sponsoring") } - claim, err := b.sponsor.GetClaim(ctx, globalIndex) + claim, err := b.sponsor.GetClaim(globalIndex) if err != nil { return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get claim status, error: %s", err)) } diff --git a/rpc/bridge_interfaces.go b/rpc/bridge_interfaces.go index 84292e22..bf6721ea 100644 --- a/rpc/bridge_interfaces.go +++ b/rpc/bridge_interfaces.go @@ -6,6 +6,7 @@ import ( "github.com/0xPolygon/cdk/claimsponsor" "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/lastgersync" tree "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/common" ) @@ -18,7 +19,7 @@ type Bridger interface { type LastGERer interface { GetFirstGERAfterL1InfoTreeIndex( ctx context.Context, atOrAfterL1InfoTreeIndex uint32, - ) (injectedL1InfoTreeIndex uint32, ger common.Hash, err error) + ) (lastgersync.Event, error) } type L1InfoTreer interface { @@ -35,6 +36,6 @@ type L1InfoTreer interface { } type ClaimSponsorer interface { - AddClaimToQueue(ctx context.Context, claim *claimsponsor.Claim) error - GetClaim(ctx context.Context, globalIndex *big.Int) (*claimsponsor.Claim, error) + AddClaimToQueue(claim *claimsponsor.Claim) error + GetClaim(globalIndex *big.Int) (*claimsponsor.Claim, error) } diff --git a/rpc/mocks/claim_sponsorer.go b/rpc/mocks/claim_sponsorer.go index 59530955..9a9ef9b5 100644 --- a/rpc/mocks/claim_sponsorer.go +++ b/rpc/mocks/claim_sponsorer.go @@ -3,11 +3,9 @@ package mocks import ( - context "context" big "math/big" claimsponsor "github.com/0xPolygon/cdk/claimsponsor" - mock "github.com/stretchr/testify/mock" ) @@ -24,17 +22,17 @@ func (_m *ClaimSponsorer) EXPECT() *ClaimSponsorer_Expecter { return &ClaimSponsorer_Expecter{mock: &_m.Mock} } -// AddClaimToQueue provides a mock function with given fields: ctx, claim -func (_m *ClaimSponsorer) AddClaimToQueue(ctx context.Context, claim *claimsponsor.Claim) error { - ret := _m.Called(ctx, claim) +// AddClaimToQueue provides a mock 
function with given fields: claim +func (_m *ClaimSponsorer) AddClaimToQueue(claim *claimsponsor.Claim) error { + ret := _m.Called(claim) if len(ret) == 0 { panic("no return value specified for AddClaimToQueue") } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *claimsponsor.Claim) error); ok { - r0 = rf(ctx, claim) + if rf, ok := ret.Get(0).(func(*claimsponsor.Claim) error); ok { + r0 = rf(claim) } else { r0 = ret.Error(0) } @@ -48,15 +46,14 @@ type ClaimSponsorer_AddClaimToQueue_Call struct { } // AddClaimToQueue is a helper method to define mock.On call -// - ctx context.Context // - claim *claimsponsor.Claim -func (_e *ClaimSponsorer_Expecter) AddClaimToQueue(ctx interface{}, claim interface{}) *ClaimSponsorer_AddClaimToQueue_Call { - return &ClaimSponsorer_AddClaimToQueue_Call{Call: _e.mock.On("AddClaimToQueue", ctx, claim)} +func (_e *ClaimSponsorer_Expecter) AddClaimToQueue(claim interface{}) *ClaimSponsorer_AddClaimToQueue_Call { + return &ClaimSponsorer_AddClaimToQueue_Call{Call: _e.mock.On("AddClaimToQueue", claim)} } -func (_c *ClaimSponsorer_AddClaimToQueue_Call) Run(run func(ctx context.Context, claim *claimsponsor.Claim)) *ClaimSponsorer_AddClaimToQueue_Call { +func (_c *ClaimSponsorer_AddClaimToQueue_Call) Run(run func(claim *claimsponsor.Claim)) *ClaimSponsorer_AddClaimToQueue_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*claimsponsor.Claim)) + run(args[0].(*claimsponsor.Claim)) }) return _c } @@ -66,14 +63,14 @@ func (_c *ClaimSponsorer_AddClaimToQueue_Call) Return(_a0 error) *ClaimSponsorer return _c } -func (_c *ClaimSponsorer_AddClaimToQueue_Call) RunAndReturn(run func(context.Context, *claimsponsor.Claim) error) *ClaimSponsorer_AddClaimToQueue_Call { +func (_c *ClaimSponsorer_AddClaimToQueue_Call) RunAndReturn(run func(*claimsponsor.Claim) error) *ClaimSponsorer_AddClaimToQueue_Call { _c.Call.Return(run) return _c } -// GetClaim provides a mock function with given fields: ctx, globalIndex -func (_m *ClaimSponsorer) GetClaim(ctx context.Context, globalIndex *big.Int) (*claimsponsor.Claim, error) { - ret := _m.Called(ctx, globalIndex) +// GetClaim provides a mock function with given fields: globalIndex +func (_m *ClaimSponsorer) GetClaim(globalIndex *big.Int) (*claimsponsor.Claim, error) { + ret := _m.Called(globalIndex) if len(ret) == 0 { panic("no return value specified for GetClaim") @@ -81,19 +78,19 @@ func (_m *ClaimSponsorer) GetClaim(ctx context.Context, globalIndex *big.Int) (* var r0 *claimsponsor.Claim var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*claimsponsor.Claim, error)); ok { - return rf(ctx, globalIndex) + if rf, ok := ret.Get(0).(func(*big.Int) (*claimsponsor.Claim, error)); ok { + return rf(globalIndex) } - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *claimsponsor.Claim); ok { - r0 = rf(ctx, globalIndex) + if rf, ok := ret.Get(0).(func(*big.Int) *claimsponsor.Claim); ok { + r0 = rf(globalIndex) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*claimsponsor.Claim) } } - if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { - r1 = rf(ctx, globalIndex) + if rf, ok := ret.Get(1).(func(*big.Int) error); ok { + r1 = rf(globalIndex) } else { r1 = ret.Error(1) } @@ -107,15 +104,14 @@ type ClaimSponsorer_GetClaim_Call struct { } // GetClaim is a helper method to define mock.On call -// - ctx context.Context // - globalIndex *big.Int -func (_e *ClaimSponsorer_Expecter) GetClaim(ctx interface{}, globalIndex interface{}) *ClaimSponsorer_GetClaim_Call { 
- return &ClaimSponsorer_GetClaim_Call{Call: _e.mock.On("GetClaim", ctx, globalIndex)} +func (_e *ClaimSponsorer_Expecter) GetClaim(globalIndex interface{}) *ClaimSponsorer_GetClaim_Call { + return &ClaimSponsorer_GetClaim_Call{Call: _e.mock.On("GetClaim", globalIndex)} } -func (_c *ClaimSponsorer_GetClaim_Call) Run(run func(ctx context.Context, globalIndex *big.Int)) *ClaimSponsorer_GetClaim_Call { +func (_c *ClaimSponsorer_GetClaim_Call) Run(run func(globalIndex *big.Int)) *ClaimSponsorer_GetClaim_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*big.Int)) + run(args[0].(*big.Int)) }) return _c } @@ -125,7 +121,7 @@ func (_c *ClaimSponsorer_GetClaim_Call) Return(_a0 *claimsponsor.Claim, _a1 erro return _c } -func (_c *ClaimSponsorer_GetClaim_Call) RunAndReturn(run func(context.Context, *big.Int) (*claimsponsor.Claim, error)) *ClaimSponsorer_GetClaim_Call { +func (_c *ClaimSponsorer_GetClaim_Call) RunAndReturn(run func(*big.Int) (*claimsponsor.Claim, error)) *ClaimSponsorer_GetClaim_Call { _c.Call.Return(run) return _c } diff --git a/rpc/mocks/last_ge_rer.go b/rpc/mocks/last_ge_rer.go index d2e3068a..7b338e2e 100644 --- a/rpc/mocks/last_ge_rer.go +++ b/rpc/mocks/last_ge_rer.go @@ -5,8 +5,7 @@ package mocks import ( context "context" - common "github.com/ethereum/go-ethereum/common" - + lastgersync "github.com/0xPolygon/cdk/lastgersync" mock "github.com/stretchr/testify/mock" ) @@ -24,40 +23,31 @@ func (_m *LastGERer) EXPECT() *LastGERer_Expecter { } // GetFirstGERAfterL1InfoTreeIndex provides a mock function with given fields: ctx, atOrAfterL1InfoTreeIndex -func (_m *LastGERer) GetFirstGERAfterL1InfoTreeIndex(ctx context.Context, atOrAfterL1InfoTreeIndex uint32) (uint32, common.Hash, error) { +func (_m *LastGERer) GetFirstGERAfterL1InfoTreeIndex(ctx context.Context, atOrAfterL1InfoTreeIndex uint32) (lastgersync.Event, error) { ret := _m.Called(ctx, atOrAfterL1InfoTreeIndex) if len(ret) == 0 { panic("no return value specified for GetFirstGERAfterL1InfoTreeIndex") } - var r0 uint32 - var r1 common.Hash - var r2 error - if rf, ok := ret.Get(0).(func(context.Context, uint32) (uint32, common.Hash, error)); ok { + var r0 lastgersync.Event + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32) (lastgersync.Event, error)); ok { return rf(ctx, atOrAfterL1InfoTreeIndex) } - if rf, ok := ret.Get(0).(func(context.Context, uint32) uint32); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32) lastgersync.Event); ok { r0 = rf(ctx, atOrAfterL1InfoTreeIndex) } else { - r0 = ret.Get(0).(uint32) + r0 = ret.Get(0).(lastgersync.Event) } - if rf, ok := ret.Get(1).(func(context.Context, uint32) common.Hash); ok { + if rf, ok := ret.Get(1).(func(context.Context, uint32) error); ok { r1 = rf(ctx, atOrAfterL1InfoTreeIndex) } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(common.Hash) - } - } - - if rf, ok := ret.Get(2).(func(context.Context, uint32) error); ok { - r2 = rf(ctx, atOrAfterL1InfoTreeIndex) - } else { - r2 = ret.Error(2) + r1 = ret.Error(1) } - return r0, r1, r2 + return r0, r1 } // LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstGERAfterL1InfoTreeIndex' @@ -79,12 +69,12 @@ func (_c *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call) Run(run func(ctx conte return _c } -func (_c *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call) Return(injectedL1InfoTreeIndex uint32, ger common.Hash, err error) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call { - 
_c.Call.Return(injectedL1InfoTreeIndex, ger, err) +func (_c *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call) Return(_a0 lastgersync.Event, _a1 error) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call { + _c.Call.Return(_a0, _a1) return _c } -func (_c *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call) RunAndReturn(run func(context.Context, uint32) (uint32, common.Hash, error)) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call { +func (_c *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call) RunAndReturn(run func(context.Context, uint32) (lastgersync.Event, error)) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call { _c.Call.Return(run) return _c } diff --git a/test/config/kurtosis-cdk-node-config.toml.template b/test/config/kurtosis-cdk-node-config.toml.template index 68f6ec97..1d70226d 100644 --- a/test/config/kurtosis-cdk-node-config.toml.template +++ b/test/config/kurtosis-cdk-node-config.toml.template @@ -1,8 +1,6 @@ PathRWData = "{{.path_rw_data}}/" L1URL="{{.l1_rpc_url}}" L2URL="http://{{.l2_rpc_name}}{{.deployment_suffix}}:{{.zkevm_rpc_http_port}}" -L1AggOracleURL = "http://test-aggoracle-l1:8545" -L2AggOracleURL = "http://test-aggoracle-l2:8545" AggLayerURL="{{.agglayer_url}}" ForkId = {{.zkevm_rollup_fork_id}}
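
A note on the lastgersync/evmdownloader.go hunks above: the downloader now tracks the next L1 info tree index to request (nextIndex) rather than the last one seen, so already-stored GERs are not fetched again after a restart. The startup rule it applies can be condensed into the small sketch below; resumeIndex and found are illustrative names, not identifiers from the patch:

    package main

    import "fmt"

    // resumeIndex mirrors the startup logic of downloader.Download: given the
    // highest l1_info_tree_index already persisted (found == false stands for
    // db.ErrNotFound), pick the next index to ask for.
    func resumeIndex(lastIndex uint32, found bool) uint32 {
        if !found {
            return 0 // empty database: start from the first index
        }
        if lastIndex > 0 {
            return lastIndex + 1 // resume right after the last stored index
        }
        // As in the hunk above, a stored index of 0 leaves the next index at 0,
        // so index 0 is requested once more after a restart.
        return 0
    }

    func main() {
        fmt.Println(resumeIndex(0, false)) // 0
        fmt.Println(resumeIndex(41, true)) // 42
    }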
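On the new lastgersync migration and the rewritten Reorg: imported_global_exit_root references block(num) with ON DELETE CASCADE, which is why Reorg collapses to a single DELETE on block. A minimal sketch of that behavior, assuming db.NewSQLiteDB enables SQLite's foreign_keys pragma (the cascade is a no-op without it) and using the same shared in-memory path style as the e2e test:

    package main

    import (
        "fmt"

        "github.com/0xPolygon/cdk/db"
        "github.com/0xPolygon/cdk/lastgersync/migrations"
    )

    func main() {
        dbPath := "file::memory:?cache=shared" // illustrative path, as in the e2e test
        if err := migrations.RunMigrations(dbPath); err != nil {
            panic(err)
        }
        database, err := db.NewSQLiteDB(dbPath)
        if err != nil {
            panic(err)
        }
        // One block with one injected GER attached to it.
        if _, err = database.Exec(`INSERT INTO block (num) VALUES (10)`); err != nil {
            panic(err)
        }
        if _, err = database.Exec(
            `INSERT INTO imported_global_exit_root (block_num, global_exit_root, l1_info_tree_index)
             VALUES (10, '0x01', 7)`); err != nil {
            panic(err)
        }
        // What Reorg(ctx, 10) runs: the FK cascade removes the GER row as well.
        if _, err = database.Exec(`DELETE FROM block WHERE num >= 10`); err != nil {
            panic(err)
        }
        var left int
        if err = database.QueryRow(`SELECT COUNT(*) FROM imported_global_exit_root`).Scan(&left); err != nil {
            panic(err)
        }
        fmt.Println("GER rows after reorg:", left) // expected 0 with foreign keys on
    }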
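Finally, the signature change from (uint32, common.Hash, error) to (lastgersync.Event, error) is what ripples through rpc/bridge.go, rpc/bridge_interfaces.go and the regenerated mocks. A caller under the new API might look like the sketch below; firstInjectedGER is a hypothetical helper, not part of the patch:

    package demo

    import (
        "context"
        "errors"
        "fmt"

        "github.com/0xPolygon/cdk/db"
        "github.com/0xPolygon/cdk/lastgersync"
    )

    // firstInjectedGER reports the first GER injected at or after idx, using the
    // new Event-returning API.
    func firstInjectedGER(ctx context.Context, s *lastgersync.LastGERSync, idx uint32) error {
        e, err := s.GetFirstGERAfterL1InfoTreeIndex(ctx, idx)
        if errors.Is(err, db.ErrNotFound) {
            return fmt.Errorf("no GER injected at or after index %d yet", idx)
        }
        if err != nil {
            return err
        }
        // The index and the hash now travel together in one Event value.
        fmt.Printf("GER %s injected for L1 info tree index %d\n", e.GlobalExitRoot, e.L1InfoTreeIndex)
        return nil
    }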