From 2a76deb0e333de6ab79a815565ee0daf8caf5d57 Mon Sep 17 00:00:00 2001 From: Goran Rojovic <100121253+goran-ethernal@users.noreply.github.com> Date: Thu, 31 Oct 2024 11:04:52 +0100 Subject: [PATCH] feat: agg-sender (#22) Co-authored-by: Goran Rojovic Co-authored-by: Victor Castell <0x@vcastellm.xyz> Co-authored-by: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Co-authored-by: Arnau Bennassar --- .../agglayer_client.go => agglayer/client.go | 42 + agglayer/mock_agglayer_client.go | 138 ++ .../agglayer/agglayer_tx.go => agglayer/tx.go | 0 agglayer/types.go | 387 +++++ agglayer/types_test.go | 66 + aggregator/aggregator.go | 5 +- aggregator/aggregator_test.go | 7 +- aggregator/config.go | 21 - aggregator/mocks/mock_agglayer_client.go | 79 - aggsender/aggsender.go | 502 ++++++ aggsender/aggsender_test.go | 1407 +++++++++++++++++ aggsender/config.go | 23 + aggsender/db/aggsender_db_storage.go | 215 +++ aggsender/db/aggsender_db_storage_test.go | 204 +++ aggsender/db/migrations/0001.sql | 12 + aggsender/db/migrations/migrations.go | 22 + aggsender/mocks/mock_aggsender_storage.go | 354 +++++ aggsender/mocks/mock_eth_client.go | 154 ++ aggsender/mocks/mock_l1infotree_syncer.go | 217 +++ aggsender/mocks/mock_l2bridge_syncer.go | 423 +++++ aggsender/mocks/mock_logger.go | 290 ++++ aggsender/types/types.go | 65 + bridgesync/bridgesync.go | 47 +- bridgesync/bridgesync_test.go | 81 + bridgesync/claimcalldata_test.go | 3 + bridgesync/config.go | 2 + bridgesync/downloader.go | 3 + bridgesync/e2e_test.go | 2 +- bridgesync/migrations/bridgesync0001.sql | 6 +- bridgesync/mocks/bridge_contractor.go | 93 ++ bridgesync/mocks/eth_clienter.go | 1136 +++++++++++++ bridgesync/mocks/reorg_detector.go | 147 ++ bridgesync/processor.go | 78 +- bridgesync/processor_test.go | 287 +++- claimsponsor/e2e_test.go | 2 +- cmd/main.go | 3 +- cmd/run.go | 55 +- common/common.go | 21 + common/components.go | 2 + config/config.go | 5 +- config/default.go | 29 +- l1infotree/tree.go | 12 +- l1infotree/tree_test.go | 54 + l1infotreesync/processor.go | 2 +- l1infotreesync/processor_test.go | 99 +- scripts/local_config | 55 +- sonar-project.properties | 4 +- test/Makefile | 23 +- .../kurtosis-cdk-node-config.toml.template | 15 +- test/helpers/lxly-bridge-test.bash | 1 - tree/tree.go | 24 +- 51 files changed, 6741 insertions(+), 183 deletions(-) rename aggregator/agglayer/agglayer_client.go => agglayer/client.go (63%) create mode 100644 agglayer/mock_agglayer_client.go rename aggregator/agglayer/agglayer_tx.go => agglayer/tx.go (100%) create mode 100644 agglayer/types.go create mode 100644 agglayer/types_test.go delete mode 100644 aggregator/mocks/mock_agglayer_client.go create mode 100644 aggsender/aggsender.go create mode 100644 aggsender/aggsender_test.go create mode 100644 aggsender/config.go create mode 100644 aggsender/db/aggsender_db_storage.go create mode 100644 aggsender/db/aggsender_db_storage_test.go create mode 100644 aggsender/db/migrations/0001.sql create mode 100644 aggsender/db/migrations/migrations.go create mode 100644 aggsender/mocks/mock_aggsender_storage.go create mode 100644 aggsender/mocks/mock_eth_client.go create mode 100644 aggsender/mocks/mock_l1infotree_syncer.go create mode 100644 aggsender/mocks/mock_l2bridge_syncer.go create mode 100644 aggsender/mocks/mock_logger.go create mode 100644 aggsender/types/types.go create mode 100644 bridgesync/bridgesync_test.go create mode 100644 bridgesync/mocks/bridge_contractor.go create mode 100644 bridgesync/mocks/eth_clienter.go create mode 100644 
bridgesync/mocks/reorg_detector.go diff --git a/aggregator/agglayer/agglayer_client.go b/agglayer/client.go similarity index 63% rename from aggregator/agglayer/agglayer_client.go rename to agglayer/client.go index a5222571..132c2716 100644 --- a/aggregator/agglayer/agglayer_client.go +++ b/agglayer/client.go @@ -21,6 +21,8 @@ var ErrAgglayerRateLimitExceeded = fmt.Errorf("agglayer rate limit exceeded") type AgglayerClientInterface interface { SendTx(signedTx SignedTx) (common.Hash, error) WaitTxToBeMined(hash common.Hash, ctx context.Context) error + SendCertificate(certificate *SignedCertificate) (common.Hash, error) + GetCertificateHeader(certificateHash common.Hash) (*CertificateHeader, error) } // AggLayerClient is the client that will be used to interact with the AggLayer @@ -86,3 +88,43 @@ func (c *AggLayerClient) WaitTxToBeMined(hash common.Hash, ctx context.Context) } } } + +// SendCertificate sends a certificate to the AggLayer +func (c *AggLayerClient) SendCertificate(certificate *SignedCertificate) (common.Hash, error) { + response, err := rpc.JSONRPCCall(c.url, "interop_sendCertificate", certificate) + if err != nil { + return common.Hash{}, err + } + + if response.Error != nil { + return common.Hash{}, fmt.Errorf("%d %s", response.Error.Code, response.Error.Message) + } + + var result types.ArgHash + err = json.Unmarshal(response.Result, &result) + if err != nil { + return common.Hash{}, err + } + + return result.Hash(), nil +} + +// GetCertificateHeader returns the certificate header associated to the hash +func (c *AggLayerClient) GetCertificateHeader(certificateHash common.Hash) (*CertificateHeader, error) { + response, err := rpc.JSONRPCCall(c.url, "interop_getCertificateHeader", certificateHash) + if err != nil { + return nil, err + } + + if response.Error != nil { + return nil, fmt.Errorf("%d %s", response.Error.Code, response.Error.Message) + } + + var result *CertificateHeader + err = json.Unmarshal(response.Result, &result) + if err != nil { + return nil, err + } + + return result, nil +} diff --git a/agglayer/mock_agglayer_client.go b/agglayer/mock_agglayer_client.go new file mode 100644 index 00000000..43100a2e --- /dev/null +++ b/agglayer/mock_agglayer_client.go @@ -0,0 +1,138 @@ +// Code generated by mockery v2.45.0. DO NOT EDIT. 
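Illustrative sketch (not taken from the patch): how a caller might exercise the two endpoints added to AgglayerClientInterface above, SendCertificate (interop_sendCertificate) and GetCertificateHeader (interop_getCertificateHeader). The URL and the pre-built signedCert are placeholder assumptions.

package main

import (
	"fmt"
	"log"

	"github.com/0xPolygon/cdk/agglayer"
)

func main() {
	// Placeholder URL; in practice this comes from configuration (e.g. AggLayerURL).
	client := agglayer.NewAggLayerClient("http://agglayer:4444")

	var signedCert *agglayer.SignedCertificate // assumed to be built and signed elsewhere

	// Submit the certificate and keep the returned hash as its identifier.
	hash, err := client.SendCertificate(signedCert)
	if err != nil {
		log.Fatal(err)
	}

	// Query the certificate header to check its settlement status.
	header, err := client.GetCertificateHeader(hash)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("certificate %s status: %s\n", hash, header.Status)
}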
+ +package agglayer + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" +) + +// AgglayerClientMock is an autogenerated mock type for the AgglayerClientInterface type +type AgglayerClientMock struct { + mock.Mock +} + +// GetCertificateHeader provides a mock function with given fields: certificateHash +func (_m *AgglayerClientMock) GetCertificateHeader(certificateHash common.Hash) (*CertificateHeader, error) { + ret := _m.Called(certificateHash) + + if len(ret) == 0 { + panic("no return value specified for GetCertificateHeader") + } + + var r0 *CertificateHeader + var r1 error + if rf, ok := ret.Get(0).(func(common.Hash) (*CertificateHeader, error)); ok { + return rf(certificateHash) + } + if rf, ok := ret.Get(0).(func(common.Hash) *CertificateHeader); ok { + r0 = rf(certificateHash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*CertificateHeader) + } + } + + if rf, ok := ret.Get(1).(func(common.Hash) error); ok { + r1 = rf(certificateHash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SendCertificate provides a mock function with given fields: certificate +func (_m *AgglayerClientMock) SendCertificate(certificate *SignedCertificate) (common.Hash, error) { + ret := _m.Called(certificate) + + if len(ret) == 0 { + panic("no return value specified for SendCertificate") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(*SignedCertificate) (common.Hash, error)); ok { + return rf(certificate) + } + if rf, ok := ret.Get(0).(func(*SignedCertificate) common.Hash); ok { + r0 = rf(certificate) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(*SignedCertificate) error); ok { + r1 = rf(certificate) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SendTx provides a mock function with given fields: signedTx +func (_m *AgglayerClientMock) SendTx(signedTx SignedTx) (common.Hash, error) { + ret := _m.Called(signedTx) + + if len(ret) == 0 { + panic("no return value specified for SendTx") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(SignedTx) (common.Hash, error)); ok { + return rf(signedTx) + } + if rf, ok := ret.Get(0).(func(SignedTx) common.Hash); ok { + r0 = rf(signedTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(SignedTx) error); ok { + r1 = rf(signedTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WaitTxToBeMined provides a mock function with given fields: hash, ctx +func (_m *AgglayerClientMock) WaitTxToBeMined(hash common.Hash, ctx context.Context) error { + ret := _m.Called(hash, ctx) + + if len(ret) == 0 { + panic("no return value specified for WaitTxToBeMined") + } + + var r0 error + if rf, ok := ret.Get(0).(func(common.Hash, context.Context) error); ok { + r0 = rf(hash, ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewAgglayerClientMock creates a new instance of AgglayerClientMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewAgglayerClientMock(t interface { + mock.TestingT + Cleanup(func()) +}) *AgglayerClientMock { + mock := &AgglayerClientMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/agglayer/agglayer_tx.go b/agglayer/tx.go similarity index 100% rename from aggregator/agglayer/agglayer_tx.go rename to agglayer/tx.go diff --git a/agglayer/types.go b/agglayer/types.go new file mode 100644 index 00000000..e8bdb254 --- /dev/null +++ b/agglayer/types.go @@ -0,0 +1,387 @@ +package agglayer + +import ( + "encoding/json" + "fmt" + "math/big" + "strings" + + "github.com/0xPolygon/cdk/bridgesync" + cdkcommon "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/tree/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +type CertificateStatus int + +const ( + Pending CertificateStatus = iota + Proven + Candidate + InError + Settled +) + +// String representation of the enum +func (c CertificateStatus) String() string { + return [...]string{"Pending", "Proven", "Candidate", "InError", "Settled"}[c] +} + +// UnmarshalJSON is the implementation of the json.Unmarshaler interface +func (c *CertificateStatus) UnmarshalJSON(data []byte) error { + dataStr := string(data) + + var status string + if strings.Contains(dataStr, "InError") { + status = "InError" + } else { + err := json.Unmarshal(data, &status) + if err != nil { + return err + } + } + + switch status { + case "Pending": + *c = Pending + case "InError": + *c = InError + case "Proven": + *c = Proven + case "Candidate": + *c = Candidate + case "Settled": + *c = Settled + default: + return fmt.Errorf("invalid status: %s", status) + } + + return nil +} + +type LeafType uint8 + +func (l LeafType) Uint8() uint8 { + return uint8(l) +} + +func (l LeafType) String() string { + return [...]string{"Transfer", "Message"}[l] +} + +const ( + LeafTypeAsset LeafType = iota + LeafTypeMessage +) + +// Certificate is the data structure that will be sent to the agglayer +type Certificate struct { + NetworkID uint32 `json:"network_id"` + Height uint64 `json:"height"` + PrevLocalExitRoot [32]byte `json:"prev_local_exit_root"` + NewLocalExitRoot [32]byte `json:"new_local_exit_root"` + BridgeExits []*BridgeExit `json:"bridge_exits"` + ImportedBridgeExits []*ImportedBridgeExit `json:"imported_bridge_exits"` +} + +// Hash returns a hash that uniquely identifies the certificate +func (c *Certificate) Hash() common.Hash { + bridgeExitsHashes := make([][]byte, len(c.BridgeExits)) + for i, bridgeExit := range c.BridgeExits { + bridgeExitsHashes[i] = bridgeExit.Hash().Bytes() + } + + importedBridgeExitsHashes := make([][]byte, len(c.ImportedBridgeExits)) + for i, importedBridgeExit := range c.ImportedBridgeExits { + importedBridgeExitsHashes[i] = importedBridgeExit.Hash().Bytes() + } + + bridgeExitsPart := crypto.Keccak256(bridgeExitsHashes...) + importedBridgeExitsPart := crypto.Keccak256(importedBridgeExitsHashes...) 
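+	// The two Keccak digests above collapse the variable-length exit lists into
+	// fixed-size commitments; they are hashed together with the scalar header
+	// fields below to form the certificate's unique identifier.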
+ + return crypto.Keccak256Hash( + cdkcommon.Uint32ToBytes(c.NetworkID), + cdkcommon.Uint64ToBytes(c.Height), + c.PrevLocalExitRoot[:], + c.NewLocalExitRoot[:], + bridgeExitsPart, + importedBridgeExitsPart, + ) +} + +// SignedCertificate is the struct that contains the certificate and the signature of the signer +type SignedCertificate struct { + *Certificate + Signature *Signature `json:"signature"` +} + +// Signature is the data structure that will hold the signature of the given certificate +type Signature struct { + R common.Hash `json:"r"` + S common.Hash `json:"s"` + OddParity bool `json:"odd_y_parity"` +} + +// TokenInfo encapsulates the information to uniquely identify a token on the origin network. +type TokenInfo struct { + OriginNetwork uint32 `json:"origin_network"` + OriginTokenAddress common.Address `json:"origin_token_address"` +} + +// GlobalIndex represents the global index of an imported bridge exit +type GlobalIndex struct { + MainnetFlag bool `json:"mainnet_flag"` + RollupIndex uint32 `json:"rollup_index"` + LeafIndex uint32 `json:"leaf_index"` +} + +func (g *GlobalIndex) Hash() common.Hash { + return crypto.Keccak256Hash( + bridgesync.GenerateGlobalIndex(g.MainnetFlag, g.RollupIndex, g.LeafIndex).Bytes()) +} + +// BridgeExit represents a token bridge exit +type BridgeExit struct { + LeafType LeafType `json:"leaf_type"` + TokenInfo *TokenInfo `json:"token_info"` + DestinationNetwork uint32 `json:"dest_network"` + DestinationAddress common.Address `json:"dest_address"` + Amount *big.Int `json:"amount"` + Metadata []byte `json:"metadata"` +} + +// Hash returns a hash that uniquely identifies the bridge exit +func (b *BridgeExit) Hash() common.Hash { + if b.Amount == nil { + b.Amount = big.NewInt(0) + } + + return crypto.Keccak256Hash( + []byte{b.LeafType.Uint8()}, + cdkcommon.Uint32ToBytes(b.TokenInfo.OriginNetwork), + b.TokenInfo.OriginTokenAddress.Bytes(), + cdkcommon.Uint32ToBytes(b.DestinationNetwork), + b.DestinationAddress.Bytes(), + b.Amount.Bytes(), + crypto.Keccak256(b.Metadata), + ) +} + +// MarshalJSON is the implementation of the json.Marshaler interface +func (b *BridgeExit) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + LeafType string `json:"leaf_type"` + TokenInfo *TokenInfo `json:"token_info"` + DestinationNetwork uint32 `json:"dest_network"` + DestinationAddress common.Address `json:"dest_address"` + Amount string `json:"amount"` + Metadata []uint `json:"metadata"` + }{ + LeafType: b.LeafType.String(), + TokenInfo: b.TokenInfo, + DestinationNetwork: b.DestinationNetwork, + DestinationAddress: b.DestinationAddress, + Amount: b.Amount.String(), + Metadata: bytesToUints(b.Metadata), + }) +} + +// bytesToUints converts a byte slice to a slice of uints +func bytesToUints(data []byte) []uint { + uints := make([]uint, len(data)) + for i, b := range data { + uints[i] = uint(b) + } + return uints +} + +// MerkleProof represents an inclusion proof of a leaf in a Merkle tree +type MerkleProof struct { + Root common.Hash `json:"root"` + Proof [types.DefaultHeight]common.Hash `json:"proof"` +} + +// MarshalJSON is the implementation of the json.Marshaler interface +func (m *MerkleProof) MarshalJSON() ([]byte, error) { + proofsAsBytes := [types.DefaultHeight][types.DefaultHeight]byte{} + for i, proof := range m.Proof { + proofsAsBytes[i] = proof + } + + return json.Marshal(&struct { + Root [types.DefaultHeight]byte `json:"root"` + Proof map[string][types.DefaultHeight][types.DefaultHeight]byte `json:"proof"` + }{ + Root: m.Root, + Proof: 
map[string][types.DefaultHeight][types.DefaultHeight]byte{ + "siblings": proofsAsBytes, + }, + }) +} + +// Hash returns the hash of the Merkle proof struct +func (m *MerkleProof) Hash() common.Hash { + proofsAsSingleSlice := make([]byte, 0) + + for _, proof := range m.Proof { + proofsAsSingleSlice = append(proofsAsSingleSlice, proof.Bytes()...) + } + + return crypto.Keccak256Hash( + m.Root.Bytes(), + proofsAsSingleSlice, + ) +} + +// L1InfoTreeLeafInner represents the inner part of the L1 info tree leaf +type L1InfoTreeLeafInner struct { + GlobalExitRoot common.Hash `json:"global_exit_root"` + BlockHash common.Hash `json:"block_hash"` + Timestamp uint64 `json:"timestamp"` +} + +// Hash returns the hash of the L1InfoTreeLeafInner struct +func (l *L1InfoTreeLeafInner) Hash() common.Hash { + return crypto.Keccak256Hash( + l.GlobalExitRoot.Bytes(), + l.BlockHash.Bytes(), + cdkcommon.Uint64ToBytes(l.Timestamp), + ) +} + +// MarshalJSON is the implementation of the json.Marshaler interface +func (l *L1InfoTreeLeafInner) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + GlobalExitRoot [types.DefaultHeight]byte `json:"global_exit_root"` + BlockHash [types.DefaultHeight]byte `json:"block_hash"` + Timestamp uint64 `json:"timestamp"` + }{ + GlobalExitRoot: l.GlobalExitRoot, + BlockHash: l.BlockHash, + Timestamp: l.Timestamp, + }) +} + +// L1InfoTreeLeaf represents the leaf of the L1 info tree +type L1InfoTreeLeaf struct { + L1InfoTreeIndex uint32 `json:"l1_info_tree_index"` + RollupExitRoot [32]byte `json:"rer"` + MainnetExitRoot [32]byte `json:"mer"` + Inner *L1InfoTreeLeafInner `json:"inner"` +} + +// Hash returns the hash of the L1InfoTreeLeaf struct +func (l *L1InfoTreeLeaf) Hash() common.Hash { + return l.Inner.Hash() +} + +// Claim is the interface that will be implemented by the different types of claims +type Claim interface { + Type() string + Hash() common.Hash + MarshalJSON() ([]byte, error) +} + +// ClaimFromMainnnet represents a claim originating from the mainnet +type ClaimFromMainnnet struct { + ProofLeafMER *MerkleProof `json:"proof_leaf_mer"` + ProofGERToL1Root *MerkleProof `json:"proof_ger_l1root"` + L1Leaf *L1InfoTreeLeaf `json:"l1_leaf"` +} + +// Type is the implementation of Claim interface +func (c ClaimFromMainnnet) Type() string { + return "Mainnet" +} + +// MarshalJSON is the implementation of Claim interface +func (c *ClaimFromMainnnet) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + Child map[string]interface{} `json:"Mainnet"` + }{ + Child: map[string]interface{}{ + "proof_leaf_mer": c.ProofLeafMER, + "proof_ger_l1root": c.ProofGERToL1Root, + "l1_leaf": c.L1Leaf, + }, + }) +} + +// Hash is the implementation of Claim interface +func (c *ClaimFromMainnnet) Hash() common.Hash { + return crypto.Keccak256Hash( + c.ProofLeafMER.Hash().Bytes(), + c.ProofGERToL1Root.Hash().Bytes(), + c.L1Leaf.Hash().Bytes(), + ) +} + +// ClaimFromRollup represents a claim originating from a rollup +type ClaimFromRollup struct { + ProofLeafLER *MerkleProof `json:"proof_leaf_ler"` + ProofLERToRER *MerkleProof `json:"proof_ler_rer"` + ProofGERToL1Root *MerkleProof `json:"proof_ger_l1root"` + L1Leaf *L1InfoTreeLeaf `json:"l1_leaf"` +} + +// Type is the implementation of Claim interface +func (c ClaimFromRollup) Type() string { + return "Rollup" +} + +// MarshalJSON is the implementation of Claim interface +func (c *ClaimFromRollup) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + Child map[string]interface{} `json:"Rollup"` + }{ + Child: 
map[string]interface{}{ + "proof_leaf_ler": c.ProofLeafLER, + "proof_ler_rer": c.ProofLERToRER, + "proof_ger_l1root": c.ProofGERToL1Root, + "l1_leaf": c.L1Leaf, + }, + }) +} + +// Hash is the implementation of Claim interface +func (c *ClaimFromRollup) Hash() common.Hash { + return crypto.Keccak256Hash( + c.ProofLeafLER.Hash().Bytes(), + c.ProofLERToRER.Hash().Bytes(), + c.ProofGERToL1Root.Hash().Bytes(), + c.L1Leaf.Hash().Bytes(), + ) +} + +// ImportedBridgeExit represents a token bridge exit originating on another network but claimed on the current network. +type ImportedBridgeExit struct { + BridgeExit *BridgeExit `json:"bridge_exit"` + ClaimData Claim `json:"claim_data"` + GlobalIndex *GlobalIndex `json:"global_index"` +} + +// Hash returns a hash that uniquely identifies the imported bridge exit +func (c *ImportedBridgeExit) Hash() common.Hash { + return crypto.Keccak256Hash( + c.BridgeExit.Hash().Bytes(), + c.ClaimData.Hash().Bytes(), + c.GlobalIndex.Hash().Bytes(), + ) +} + +// CertificateHeader is the structure returned by the interop_getCertificateHeader RPC call +type CertificateHeader struct { + NetworkID uint32 `json:"network_id"` + Height uint64 `json:"height"` + EpochNumber *uint64 `json:"epoch_number"` + CertificateIndex *uint64 `json:"certificate_index"` + CertificateID common.Hash `json:"certificate_id"` + NewLocalExitRoot common.Hash `json:"new_local_exit_root"` + Status CertificateStatus `json:"status"` +} + +func (c CertificateHeader) String() string { + return fmt.Sprintf("Height: %d, CertificateID: %s, NewLocalExitRoot: %s", + c.Height, c.CertificateID.String(), c.NewLocalExitRoot.String()) +} diff --git a/agglayer/types_test.go b/agglayer/types_test.go new file mode 100644 index 00000000..1df1f20f --- /dev/null +++ b/agglayer/types_test.go @@ -0,0 +1,66 @@ +package agglayer + +import ( + "encoding/json" + "math/big" + "testing" + + "github.com/0xPolygon/cdk/log" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +const ( + expectedSignedCertificateEmptyMetadataJSON = `{"network_id":1,"height":1,"prev_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"new_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bridge_exits":[{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]},"claim_data":null,"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":1}}],"signature":{"r":"0x0000000000000000000000000000000000000000000000000000000000000000","s":"0x0000000000000000000000000000000000000000000000000000000000000000","odd_y_parity":false}}` + expectedSignedCertificateyMetadataJSON = 
`{"network_id":1,"height":1,"prev_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"new_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bridge_exits":[{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[1,2,3]}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]},"claim_data":null,"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":1}}],"signature":{"r":"0x0000000000000000000000000000000000000000000000000000000000000000","s":"0x0000000000000000000000000000000000000000000000000000000000000000","odd_y_parity":false}}` +) + +func TestMarshalJSON(t *testing.T) { + cert := SignedCertificate{ + Certificate: &Certificate{ + NetworkID: 1, + Height: 1, + PrevLocalExitRoot: common.Hash{}, + NewLocalExitRoot: common.Hash{}, + BridgeExits: []*BridgeExit{ + { + LeafType: LeafTypeAsset, + DestinationAddress: common.Address{}, + Amount: big.NewInt(1), + }, + }, + ImportedBridgeExits: []*ImportedBridgeExit{ + { + BridgeExit: &BridgeExit{ + LeafType: LeafTypeAsset, + DestinationAddress: common.Address{}, + Amount: big.NewInt(1), + Metadata: []byte{}, + }, + ClaimData: nil, + GlobalIndex: &GlobalIndex{ + MainnetFlag: false, + RollupIndex: 1, + LeafIndex: 1, + }, + }, + }, + }, + + Signature: &Signature{ + R: common.Hash{}, + S: common.Hash{}, + OddParity: false, + }, + } + data, err := json.Marshal(cert) + require.NoError(t, err) + log.Info(string(data)) + require.Equal(t, expectedSignedCertificateEmptyMetadataJSON, string(data)) + + cert.BridgeExits[0].Metadata = []byte{1, 2, 3} + data, err = json.Marshal(cert) + require.NoError(t, err) + log.Info(string(data)) + require.Equal(t, expectedSignedCertificateyMetadataJSON, string(data)) +} diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 1998e842..8aa78011 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -15,7 +15,7 @@ import ( "unicode" cdkTypes "github.com/0xPolygon/cdk-rpc/types" - "github.com/0xPolygon/cdk/aggregator/agglayer" + "github.com/0xPolygon/cdk/agglayer" ethmanTypes "github.com/0xPolygon/cdk/aggregator/ethmantypes" "github.com/0xPolygon/cdk/aggregator/prover" cdkcommon "github.com/0xPolygon/cdk/common" @@ -141,7 +141,7 @@ func New( if !cfg.SyncModeOnlyEnabled && cfg.SettlementBackend == AggLayer { aggLayerClient = agglayer.NewAggLayerClient(cfg.AggLayerURL) - sequencerPrivateKey, err = newKeyFromKeystore(cfg.SequencerPrivateKey) + sequencerPrivateKey, err = cdkcommon.NewKeyFromKeystore(cfg.SequencerPrivateKey) if err != nil { return nil, err } @@ -476,7 +476,6 @@ func (a *Aggregator) settleWithAggLayer( inputs ethmanTypes.FinalProofInputs) bool { proofStrNo0x := strings.TrimPrefix(inputs.FinalProof.Proof, "0x") proofBytes := common.Hex2Bytes(proofStrNo0x) - tx := agglayer.Tx{ LastVerifiedBatch: cdkTypes.ArgUint64(proof.BatchNumber - 1), NewVerifiedBatch: cdkTypes.ArgUint64(proof.BatchNumberFinal), diff --git a/aggregator/aggregator_test.go b/aggregator/aggregator_test.go index f6e27b0f..fd03315f 100644 --- a/aggregator/aggregator_test.go +++ b/aggregator/aggregator_test.go @@ -16,6 +16,7 @@ import ( "testing" "time" + "github.com/0xPolygon/cdk/agglayer" mocks "github.com/0xPolygon/cdk/aggregator/mocks" "github.com/0xPolygon/cdk/aggregator/prover" "github.com/0xPolygon/cdk/config/types" @@ -53,7 
+54,7 @@ type mox struct { ethTxManager *mocks.EthTxManagerClientMock etherman *mocks.EthermanMock proverMock *mocks.ProverInterfaceMock - aggLayerClientMock *mocks.AgglayerClientInterfaceMock + aggLayerClientMock *agglayer.AgglayerClientMock synchronizerMock *mocks.SynchronizerInterfaceMock rpcMock *mocks.RPCInterfaceMock } @@ -300,7 +301,7 @@ func Test_sendFinalProofSuccess(t *testing.T) { stateMock := mocks.NewStateInterfaceMock(t) ethTxManager := mocks.NewEthTxManagerClientMock(t) etherman := mocks.NewEthermanMock(t) - aggLayerClient := mocks.NewAgglayerClientInterfaceMock(t) + aggLayerClient := agglayer.NewAgglayerClientMock(t) rpcMock := mocks.NewRPCInterfaceMock(t) curve := elliptic.P256() @@ -489,7 +490,7 @@ func Test_sendFinalProofError(t *testing.T) { stateMock := mocks.NewStateInterfaceMock(t) ethTxManager := mocks.NewEthTxManagerClientMock(t) etherman := mocks.NewEthermanMock(t) - aggLayerClient := mocks.NewAgglayerClientInterfaceMock(t) + aggLayerClient := agglayer.NewAgglayerClientMock(t) rpcMock := mocks.NewRPCInterfaceMock(t) curve := elliptic.P256() diff --git a/aggregator/config.go b/aggregator/config.go index cdef80fd..2d7178f7 100644 --- a/aggregator/config.go +++ b/aggregator/config.go @@ -1,18 +1,14 @@ package aggregator import ( - "crypto/ecdsa" "fmt" "math/big" - "os" - "path/filepath" "github.com/0xPolygon/cdk/aggregator/db" "github.com/0xPolygon/cdk/config/types" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" syncronizerConfig "github.com/0xPolygonHermez/zkevm-synchronizer-l1/config" - "github.com/ethereum/go-ethereum/accounts/keystore" ) // SettlementBackend is the type of the settlement backend @@ -150,20 +146,3 @@ type Config struct { // When enabled, the aggregator will sync data only from L1 and will not generate or read the data stream. SyncModeOnlyEnabled bool `mapstructure:"SyncModeOnlyEnabled"` } - -// newKeyFromKeystore creates a private key from a keystore file -func newKeyFromKeystore(cfg types.KeystoreFileConfig) (*ecdsa.PrivateKey, error) { - if cfg.Path == "" && cfg.Password == "" { - return nil, nil - } - keystoreEncrypted, err := os.ReadFile(filepath.Clean(cfg.Path)) - if err != nil { - return nil, err - } - key, err := keystore.DecryptKey(keystoreEncrypted, cfg.Password) - if err != nil { - return nil, err - } - - return key.PrivateKey, nil -} diff --git a/aggregator/mocks/mock_agglayer_client.go b/aggregator/mocks/mock_agglayer_client.go deleted file mode 100644 index 2923ebe0..00000000 --- a/aggregator/mocks/mock_agglayer_client.go +++ /dev/null @@ -1,79 +0,0 @@ -// Code generated by mockery v2.39.0. DO NOT EDIT. 
- -package mocks - -import ( - agglayer "github.com/0xPolygon/cdk/aggregator/agglayer" - common "github.com/ethereum/go-ethereum/common" - - context "context" - - mock "github.com/stretchr/testify/mock" -) - -// AgglayerClientInterfaceMock is an autogenerated mock type for the AgglayerClientInterface type -type AgglayerClientInterfaceMock struct { - mock.Mock -} - -// SendTx provides a mock function with given fields: signedTx -func (_m *AgglayerClientInterfaceMock) SendTx(signedTx agglayer.SignedTx) (common.Hash, error) { - ret := _m.Called(signedTx) - - if len(ret) == 0 { - panic("no return value specified for SendTx") - } - - var r0 common.Hash - var r1 error - if rf, ok := ret.Get(0).(func(agglayer.SignedTx) (common.Hash, error)); ok { - return rf(signedTx) - } - if rf, ok := ret.Get(0).(func(agglayer.SignedTx) common.Hash); ok { - r0 = rf(signedTx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(common.Hash) - } - } - - if rf, ok := ret.Get(1).(func(agglayer.SignedTx) error); ok { - r1 = rf(signedTx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// WaitTxToBeMined provides a mock function with given fields: hash, ctx -func (_m *AgglayerClientInterfaceMock) WaitTxToBeMined(hash common.Hash, ctx context.Context) error { - ret := _m.Called(hash, ctx) - - if len(ret) == 0 { - panic("no return value specified for WaitTxToBeMined") - } - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, context.Context) error); ok { - r0 = rf(hash, ctx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// NewAgglayerClientInterfaceMock creates a new instance of AgglayerClientInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func NewAgglayerClientInterfaceMock(t interface { - mock.TestingT - Cleanup(func()) -}) *AgglayerClientInterfaceMock { - mock := &AgglayerClientInterfaceMock{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/aggsender.go b/aggsender/aggsender.go new file mode 100644 index 00000000..a228e1a9 --- /dev/null +++ b/aggsender/aggsender.go @@ -0,0 +1,502 @@ +package aggsender + +import ( + "context" + "crypto/ecdsa" + "encoding/json" + "errors" + "fmt" + "os" + "time" + + "github.com/0xPolygon/cdk/agglayer" + "github.com/0xPolygon/cdk/aggsender/db" + aggsendertypes "github.com/0xPolygon/cdk/aggsender/types" + "github.com/0xPolygon/cdk/bridgesync" + cdkcommon "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/log" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +const signatureSize = 65 + +var ( + errNoBridgesAndClaims = errors.New("no bridges and claims to build certificate") + errInvalidSignatureSize = errors.New("invalid signature size") + + zeroLER = common.HexToHash("0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757") +) + +// AggSender is a component that will send certificates to the aggLayer +type AggSender struct { + log aggsendertypes.Logger + + l2Syncer aggsendertypes.L2BridgeSyncer + l1infoTreeSyncer aggsendertypes.L1InfoTreeSyncer + + storage db.AggSenderStorage + aggLayerClient agglayer.AgglayerClientInterface + + cfg Config + + sequencerKey *ecdsa.PrivateKey +} + +// New returns a new AggSender +func New( + ctx context.Context, + logger *log.Logger, + cfg Config, + aggLayerClient agglayer.AgglayerClientInterface, + l1InfoTreeSyncer *l1infotreesync.L1InfoTreeSync, + l2Syncer *bridgesync.BridgeSync) (*AggSender, error) { + storage, err := db.NewAggSenderSQLStorage(logger, cfg.StoragePath) + if err != nil { + return nil, err + } + + sequencerPrivateKey, err := cdkcommon.NewKeyFromKeystore(cfg.AggsenderPrivateKey) + if err != nil { + return nil, err + } + + return &AggSender{ + cfg: cfg, + log: logger, + storage: storage, + l2Syncer: l2Syncer, + aggLayerClient: aggLayerClient, + l1infoTreeSyncer: l1InfoTreeSyncer, + sequencerKey: sequencerPrivateKey, + }, nil +} + +// Start starts the AggSender +func (a *AggSender) Start(ctx context.Context) { + go a.sendCertificates(ctx) + go a.checkIfCertificatesAreSettled(ctx) +} + +// sendCertificates sends certificates to the aggLayer +func (a *AggSender) sendCertificates(ctx context.Context) { + ticker := time.NewTicker(a.cfg.BlockGetInterval.Duration) + + for { + select { + case <-ticker.C: + if err := a.sendCertificate(ctx); err != nil { + log.Error(err) + } + case <-ctx.Done(): + a.log.Info("AggSender stopped") + return + } + } +} + +// sendCertificate sends certificate for a network +func (a *AggSender) sendCertificate(ctx context.Context) error { + a.log.Infof("trying to send a new certificate...") + + shouldSend, err := a.shouldSendCertificate(ctx) + if err != nil { + return err + } + + if !shouldSend { + a.log.Infof("waiting for pending certificates to be settled") + return nil + } + + lasL2BlockSynced, err := a.l2Syncer.GetLastProcessedBlock(ctx) + if err != nil { + return fmt.Errorf("error getting last processed block from l2: %w", err) + } + + lastSentCertificateInfo, err := a.storage.GetLastSentCertificate(ctx) + if err != nil { + return err + } + + previousToBlock := lastSentCertificateInfo.ToBlock + if lastSentCertificateInfo.Status == agglayer.InError { 
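+		// an InError status means the agglayer could not settle the previous
+		// certificate, so the block range it covered must be re-sent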
+ // if the last certificate was in error, we need to resend it + // from the block before the error + previousToBlock = lastSentCertificateInfo.FromBlock - 1 + } + + if previousToBlock >= lasL2BlockSynced { + a.log.Infof("no new blocks to send a certificate, last certificate block: %d, last L2 block: %d", + previousToBlock, lasL2BlockSynced) + return nil + } + + fromBlock := previousToBlock + 1 + toBlock := lasL2BlockSynced + + bridges, err := a.l2Syncer.GetBridgesPublished(ctx, fromBlock, toBlock) + if err != nil { + return fmt.Errorf("error getting bridges: %w", err) + } + + if len(bridges) == 0 { + a.log.Infof("no bridges consumed, no need to send a certificate from block: %d to block: %d", fromBlock, toBlock) + return nil + } + + claims, err := a.l2Syncer.GetClaims(ctx, fromBlock, toBlock) + if err != nil { + return fmt.Errorf("error getting claims: %w", err) + } + + a.log.Infof("building certificate for block: %d to block: %d", fromBlock, toBlock) + + certificate, err := a.buildCertificate(ctx, bridges, claims, lastSentCertificateInfo) + if err != nil { + return fmt.Errorf("error building certificate: %w", err) + } + + signedCertificate, err := a.signCertificate(certificate) + if err != nil { + return fmt.Errorf("error signing certificate: %w", err) + } + + a.saveCertificateToFile(signedCertificate) + + certificateHash, err := a.aggLayerClient.SendCertificate(signedCertificate) + if err != nil { + return fmt.Errorf("error sending certificate: %w", err) + } + log.Infof("certificate send: Height: %d hash: %s", signedCertificate.Height, certificateHash.String()) + + if err := a.storage.SaveLastSentCertificate(ctx, aggsendertypes.CertificateInfo{ + Height: certificate.Height, + CertificateID: certificateHash, + NewLocalExitRoot: certificate.NewLocalExitRoot, + FromBlock: fromBlock, + ToBlock: toBlock, + }); err != nil { + return fmt.Errorf("error saving last sent certificate in db: %w", err) + } + + a.log.Infof("certificate: %s sent successfully for range of l2 blocks (from block: %d, to block: %d)", + certificateHash, fromBlock, toBlock) + + return nil +} + +// saveCertificate saves the certificate to a tmp file +func (a *AggSender) saveCertificateToFile(signedCertificate *agglayer.SignedCertificate) { + if signedCertificate == nil || !a.cfg.SaveCertificatesToFiles { + return + } + + fn := fmt.Sprintf("/tmp/certificate_%04d.json", signedCertificate.Height) + a.log.Infof("saving certificate to file: %s", fn) + jsonData, err := json.Marshal(signedCertificate) + if err != nil { + a.log.Errorf("error marshalling certificate: %w", err) + } + + if err = os.WriteFile(fn, jsonData, 0644); err != nil { //nolint:gosec,mnd // we are writing to a tmp file + a.log.Errorf("error writing certificate to file: %w", err) + } +} + +// buildCertificate builds a certificate from the bridge events +func (a *AggSender) buildCertificate(ctx context.Context, + bridges []bridgesync.Bridge, + claims []bridgesync.Claim, + lastSentCertificateInfo aggsendertypes.CertificateInfo) (*agglayer.Certificate, error) { + if len(bridges) == 0 && len(claims) == 0 { + return nil, errNoBridgesAndClaims + } + + bridgeExits := a.getBridgeExits(bridges) + importedBridgeExits, err := a.getImportedBridgeExits(ctx, claims) + if err != nil { + return nil, fmt.Errorf("error getting imported bridge exits: %w", err) + } + + var depositCount uint32 + if len(bridges) > 0 { + depositCount = bridges[len(bridges)-1].DepositCount + } + + exitRoot, err := a.l2Syncer.GetExitRootByIndex(ctx, depositCount) + if err != nil { + return nil, 
fmt.Errorf("error getting exit root by index: %d. Error: %w", depositCount, err) + } + + height := lastSentCertificateInfo.Height + 1 + previousLER := lastSentCertificateInfo.NewLocalExitRoot + if lastSentCertificateInfo.NewLocalExitRoot == (common.Hash{}) { + // meaning this is the first certificate + height = 0 + previousLER = zeroLER + } + + return &agglayer.Certificate{ + NetworkID: a.l2Syncer.OriginNetwork(), + PrevLocalExitRoot: previousLER, + NewLocalExitRoot: exitRoot.Hash, + BridgeExits: bridgeExits, + ImportedBridgeExits: importedBridgeExits, + Height: height, + }, nil +} + +// convertClaimToImportedBridgeExit converts a claim to an ImportedBridgeExit object +func (a *AggSender) convertClaimToImportedBridgeExit(claim bridgesync.Claim) (*agglayer.ImportedBridgeExit, error) { + leafType := agglayer.LeafTypeAsset + if claim.IsMessage { + leafType = agglayer.LeafTypeMessage + } + + bridgeExit := &agglayer.BridgeExit{ + LeafType: leafType, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: claim.OriginNetwork, + OriginTokenAddress: claim.OriginAddress, + }, + DestinationNetwork: claim.DestinationNetwork, + DestinationAddress: claim.DestinationAddress, + Amount: claim.Amount, + Metadata: claim.Metadata, + } + + mainnetFlag, rollupIndex, leafIndex, err := bridgesync.DecodeGlobalIndex(claim.GlobalIndex) + if err != nil { + return nil, fmt.Errorf("error decoding global index: %w", err) + } + + return &agglayer.ImportedBridgeExit{ + BridgeExit: bridgeExit, + GlobalIndex: &agglayer.GlobalIndex{ + MainnetFlag: mainnetFlag, + RollupIndex: rollupIndex, + LeafIndex: leafIndex, + }, + }, nil +} + +// getBridgeExits converts bridges to agglayer.BridgeExit objects +func (a *AggSender) getBridgeExits(bridges []bridgesync.Bridge) []*agglayer.BridgeExit { + bridgeExits := make([]*agglayer.BridgeExit, 0, len(bridges)) + + for _, bridge := range bridges { + bridgeExits = append(bridgeExits, &agglayer.BridgeExit{ + LeafType: agglayer.LeafType(bridge.LeafType), + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: bridge.OriginNetwork, + OriginTokenAddress: bridge.OriginAddress, + }, + DestinationNetwork: bridge.DestinationNetwork, + DestinationAddress: bridge.DestinationAddress, + Amount: bridge.Amount, + Metadata: bridge.Metadata, + }) + } + + return bridgeExits +} + +// getImportedBridgeExits converts claims to agglayer.ImportedBridgeExit objects and calculates necessary proofs +func (a *AggSender) getImportedBridgeExits( + ctx context.Context, claims []bridgesync.Claim, +) ([]*agglayer.ImportedBridgeExit, error) { + if len(claims) == 0 { + // no claims to convert + return nil, nil + } + + var ( + greatestL1InfoTreeIndexUsed uint32 + importedBridgeExits = make([]*agglayer.ImportedBridgeExit, 0, len(claims)) + claimL1Info = make([]*l1infotreesync.L1InfoTreeLeaf, 0, len(claims)) + ) + + for _, claim := range claims { + info, err := a.l1infoTreeSyncer.GetInfoByGlobalExitRoot(claim.GlobalExitRoot) + if err != nil { + return nil, fmt.Errorf("error getting info by global exit root: %w", err) + } + + claimL1Info = append(claimL1Info, info) + + if info.L1InfoTreeIndex > greatestL1InfoTreeIndexUsed { + greatestL1InfoTreeIndexUsed = info.L1InfoTreeIndex + } + } + + rootToProve, err := a.l1infoTreeSyncer.GetL1InfoTreeRootByIndex(ctx, greatestL1InfoTreeIndexUsed) + if err != nil { + return nil, fmt.Errorf("error getting L1 Info tree root by index: %d. 
Error: %w", greatestL1InfoTreeIndexUsed, err) + } + + for i, claim := range claims { + l1Info := claimL1Info[i] + + a.log.Debugf("claim[%d]: destAddr: %s GER:%s", i, claim.DestinationAddress.String(), claim.GlobalExitRoot.String()) + ibe, err := a.convertClaimToImportedBridgeExit(claim) + if err != nil { + return nil, fmt.Errorf("error converting claim to imported bridge exit: %w", err) + } + + importedBridgeExits = append(importedBridgeExits, ibe) + + gerToL1Proof, err := a.l1infoTreeSyncer.GetL1InfoTreeMerkleProofFromIndexToRoot( + ctx, l1Info.L1InfoTreeIndex, rootToProve.Hash, + ) + if err != nil { + return nil, fmt.Errorf( + "error getting L1 Info tree merkle proof for leaf index: %d and root: %s. Error: %w", + l1Info.L1InfoTreeIndex, rootToProve.Hash, err, + ) + } + + claim := claims[i] + if ibe.GlobalIndex.MainnetFlag { + ibe.ClaimData = &agglayer.ClaimFromMainnnet{ + L1Leaf: &agglayer.L1InfoTreeLeaf{ + L1InfoTreeIndex: l1Info.L1InfoTreeIndex, + RollupExitRoot: claim.RollupExitRoot, + MainnetExitRoot: claim.MainnetExitRoot, + Inner: &agglayer.L1InfoTreeLeafInner{ + GlobalExitRoot: l1Info.GlobalExitRoot, + Timestamp: l1Info.Timestamp, + BlockHash: l1Info.PreviousBlockHash, + }, + }, + ProofLeafMER: &agglayer.MerkleProof{ + Root: claim.MainnetExitRoot, + Proof: claim.ProofLocalExitRoot, + }, + ProofGERToL1Root: &agglayer.MerkleProof{ + Root: rootToProve.Hash, + Proof: gerToL1Proof, + }, + } + } else { + ibe.ClaimData = &agglayer.ClaimFromRollup{ + L1Leaf: &agglayer.L1InfoTreeLeaf{ + L1InfoTreeIndex: l1Info.L1InfoTreeIndex, + RollupExitRoot: claim.RollupExitRoot, + MainnetExitRoot: claim.MainnetExitRoot, + Inner: &agglayer.L1InfoTreeLeafInner{ + GlobalExitRoot: l1Info.GlobalExitRoot, + Timestamp: l1Info.Timestamp, + BlockHash: l1Info.PreviousBlockHash, + }, + }, + ProofLeafLER: &agglayer.MerkleProof{ + Root: claim.MainnetExitRoot, + Proof: claim.ProofLocalExitRoot, + }, + ProofLERToRER: &agglayer.MerkleProof{ + Root: claim.RollupExitRoot, + Proof: claim.ProofRollupExitRoot, + }, + ProofGERToL1Root: &agglayer.MerkleProof{ + Root: rootToProve.Hash, + Proof: gerToL1Proof, + }, + } + } + } + + return importedBridgeExits, nil +} + +// signCertificate signs a certificate with the sequencer key +func (a *AggSender) signCertificate(certificate *agglayer.Certificate) (*agglayer.SignedCertificate, error) { + hashToSign := certificate.Hash() + + sig, err := crypto.Sign(hashToSign.Bytes(), a.sequencerKey) + if err != nil { + return nil, err + } + + r, s, isOddParity, err := extractSignatureData(sig) + if err != nil { + return nil, err + } + + return &agglayer.SignedCertificate{ + Certificate: certificate, + Signature: &agglayer.Signature{ + R: r, + S: s, + OddParity: isOddParity, + }, + }, nil +} + +// checkIfCertificatesAreSettled checks if certificates are settled +func (a *AggSender) checkIfCertificatesAreSettled(ctx context.Context) { + ticker := time.NewTicker(a.cfg.CheckSettledInterval.Duration) + for { + select { + case <-ticker.C: + a.checkPendingCertificatesStatus(ctx) + case <-ctx.Done(): + return + } + } +} + +// checkPendingCertificatesStatus checks the status of pending certificates +// and updates in the storage if it changed on agglayer +func (a *AggSender) checkPendingCertificatesStatus(ctx context.Context) { + pendingCertificates, err := a.storage.GetCertificatesByStatus(ctx, []agglayer.CertificateStatus{agglayer.Pending}) + if err != nil { + a.log.Errorf("error getting pending certificates: %w", err) + } + + for _, certificate := range pendingCertificates { + certificateHeader, 
err := a.aggLayerClient.GetCertificateHeader(certificate.CertificateID) + if err != nil { + a.log.Errorf("error getting header of certificate %s with height: %d from agglayer: %w", + certificate.CertificateID, certificate.Height, err) + continue + } + + if certificateHeader.Status != agglayer.Pending { + certificate.Status = certificateHeader.Status + + a.log.Infof("certificate %s changed status to %s", certificateHeader.String(), certificate.Status) + + if err := a.storage.UpdateCertificateStatus(ctx, *certificate); err != nil { + a.log.Errorf("error updating certificate status in storage: %w", err) + continue + } + } + } +} + +// shouldSendCertificate checks if a certificate should be sent at given time +// if we have pending certificates, then we wait until they are settled +func (a *AggSender) shouldSendCertificate(ctx context.Context) (bool, error) { + pendingCertificates, err := a.storage.GetCertificatesByStatus(ctx, []agglayer.CertificateStatus{agglayer.Pending}) + if err != nil { + return false, fmt.Errorf("error getting pending certificates: %w", err) + } + + return len(pendingCertificates) == 0, nil +} + +// extractSignatureData extracts the R, S, and V from a 65-byte signature +func extractSignatureData(signature []byte) (r, s common.Hash, isOddParity bool, err error) { + if len(signature) != signatureSize { + err = errInvalidSignatureSize + return + } + + r = common.BytesToHash(signature[:32]) // First 32 bytes are R + s = common.BytesToHash(signature[32:64]) // Next 32 bytes are S + isOddParity = signature[64]%2 == 1 //nolint:mnd // Last byte is V + + return +} diff --git a/aggsender/aggsender_test.go b/aggsender/aggsender_test.go new file mode 100644 index 00000000..69dc6ed1 --- /dev/null +++ b/aggsender/aggsender_test.go @@ -0,0 +1,1407 @@ +package aggsender + +import ( + "context" + "crypto/ecdsa" + "encoding/json" + "errors" + "fmt" + "math/big" + "os" + "testing" + "time" + + "github.com/0xPolygon/cdk/agglayer" + "github.com/0xPolygon/cdk/aggsender/mocks" + aggsendertypes "github.com/0xPolygon/cdk/aggsender/types" + "github.com/0xPolygon/cdk/bridgesync" + "github.com/0xPolygon/cdk/config/types" + "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/log" + treeTypes "github.com/0xPolygon/cdk/tree/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestExploratoryGetCertificateHeader(t *testing.T) { + t.Skip("This test is exploratory and should be skipped") + aggLayerClient := agglayer.NewAggLayerClient("http://localhost:32795") + certificateID := common.HexToHash("0xf153e75e24591432ac5deafaeaafba3fec0fd851261c86051b9c0d540b38c369") + certificateHeader, err := aggLayerClient.GetCertificateHeader(certificateID) + require.NoError(t, err) + fmt.Print(certificateHeader) +} + +func TestConvertClaimToImportedBridgeExit(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + claim bridgesync.Claim + expectedError bool + expectedExit *agglayer.ImportedBridgeExit + }{ + { + name: "Asset claim", + claim: bridgesync.Claim{ + IsMessage: false, + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x123"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + GlobalIndex: big.NewInt(1), + }, + expectedError: false, + expectedExit: &agglayer.ImportedBridgeExit{ + BridgeExit: &agglayer.BridgeExit{ + LeafType: agglayer.LeafTypeAsset, + 
TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 1, + OriginTokenAddress: common.HexToAddress("0x123"), + }, + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + }, + GlobalIndex: &agglayer.GlobalIndex{ + MainnetFlag: false, + RollupIndex: 0, + LeafIndex: 1, + }, + }, + }, + { + name: "Message claim", + claim: bridgesync.Claim{ + IsMessage: true, + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x123"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + GlobalIndex: big.NewInt(2), + }, + expectedError: false, + expectedExit: &agglayer.ImportedBridgeExit{ + BridgeExit: &agglayer.BridgeExit{ + LeafType: agglayer.LeafTypeMessage, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 1, + OriginTokenAddress: common.HexToAddress("0x123"), + }, + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + }, + GlobalIndex: &agglayer.GlobalIndex{ + MainnetFlag: false, + RollupIndex: 0, + LeafIndex: 2, + }, + }, + }, + { + name: "Invalid global index", + claim: bridgesync.Claim{ + IsMessage: false, + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x123"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + GlobalIndex: new(big.Int).SetBytes([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}), + }, + expectedError: true, + expectedExit: nil, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + aggSender := &AggSender{} + exit, err := aggSender.convertClaimToImportedBridgeExit(tt.claim) + + if tt.expectedError { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectedExit, exit) + } + }) + } +} + +func TestGetBridgeExits(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + bridges []bridgesync.Bridge + expectedExits []*agglayer.BridgeExit + }{ + { + name: "Single bridge", + bridges: []bridgesync.Bridge{ + { + LeafType: agglayer.LeafTypeAsset.Uint8(), + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x123"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + }, + }, + expectedExits: []*agglayer.BridgeExit{ + { + LeafType: agglayer.LeafTypeAsset, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 1, + OriginTokenAddress: common.HexToAddress("0x123"), + }, + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + }, + }, + }, + { + name: "Multiple bridges", + bridges: []bridgesync.Bridge{ + { + LeafType: agglayer.LeafTypeAsset.Uint8(), + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x123"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + }, + { + LeafType: agglayer.LeafTypeMessage.Uint8(), + OriginNetwork: 3, + OriginAddress: common.HexToAddress("0x789"), + DestinationNetwork: 4, + DestinationAddress: common.HexToAddress("0xabc"), + Amount: big.NewInt(200), + Metadata: []byte("data"), + }, + }, + expectedExits: []*agglayer.BridgeExit{ + { + LeafType: agglayer.LeafTypeAsset, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 1, + OriginTokenAddress: common.HexToAddress("0x123"), + }, + 
DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + }, + { + LeafType: agglayer.LeafTypeMessage, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 3, + OriginTokenAddress: common.HexToAddress("0x789"), + }, + DestinationNetwork: 4, + DestinationAddress: common.HexToAddress("0xabc"), + Amount: big.NewInt(200), + Metadata: []byte("data"), + }, + }, + }, + { + name: "No bridges", + bridges: []bridgesync.Bridge{}, + expectedExits: []*agglayer.BridgeExit{}, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + aggSender := &AggSender{} + exits := aggSender.getBridgeExits(tt.bridges) + + require.Equal(t, tt.expectedExits, exits) + }) + } +} + +//nolint:dupl +func TestGetImportedBridgeExits(t *testing.T) { + t.Parallel() + + mockProof := generateTestProof(t) + mockL1InfoTreeSyncer := mocks.NewL1InfoTreeSyncerMock(t) + mockL1InfoTreeSyncer.On("GetInfoByGlobalExitRoot", mock.Anything).Return(&l1infotreesync.L1InfoTreeLeaf{ + L1InfoTreeIndex: 1, + Timestamp: 123456789, + PreviousBlockHash: common.HexToHash("0xabc"), + GlobalExitRoot: common.HexToHash("0x7891"), + }, nil) + mockL1InfoTreeSyncer.On("GetL1InfoTreeRootByIndex", mock.Anything, mock.Anything).Return( + treeTypes.Root{Hash: common.HexToHash("0x7891")}, nil) + mockL1InfoTreeSyncer.On("GetL1InfoTreeMerkleProofFromIndexToRoot", mock.Anything, + mock.Anything, mock.Anything).Return(mockProof, nil) + + tests := []struct { + name string + claims []bridgesync.Claim + expectedError bool + expectedExits []*agglayer.ImportedBridgeExit + }{ + { + name: "Single claim", + claims: []bridgesync.Claim{ + { + IsMessage: false, + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x1234"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x4567"), + Amount: big.NewInt(111), + Metadata: []byte("metadata1"), + GlobalIndex: bridgesync.GenerateGlobalIndex(false, 1, 1), + GlobalExitRoot: common.HexToHash("0x7891"), + RollupExitRoot: common.HexToHash("0xaaab"), + MainnetExitRoot: common.HexToHash("0xbbba"), + ProofLocalExitRoot: mockProof, + ProofRollupExitRoot: mockProof, + }, + }, + expectedError: false, + expectedExits: []*agglayer.ImportedBridgeExit{ + { + BridgeExit: &agglayer.BridgeExit{ + LeafType: agglayer.LeafTypeAsset, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 1, + OriginTokenAddress: common.HexToAddress("0x1234"), + }, + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x4567"), + Amount: big.NewInt(111), + Metadata: []byte("metadata1"), + }, + GlobalIndex: &agglayer.GlobalIndex{ + MainnetFlag: false, + RollupIndex: 1, + LeafIndex: 1, + }, + ClaimData: &agglayer.ClaimFromRollup{ + L1Leaf: &agglayer.L1InfoTreeLeaf{ + L1InfoTreeIndex: 1, + RollupExitRoot: common.HexToHash("0xaaab"), + MainnetExitRoot: common.HexToHash("0xbbba"), + Inner: &agglayer.L1InfoTreeLeafInner{ + GlobalExitRoot: common.HexToHash("0x7891"), + Timestamp: 123456789, + BlockHash: common.HexToHash("0xabc"), + }, + }, + ProofLeafLER: &agglayer.MerkleProof{ + Root: common.HexToHash("0xbbba"), + Proof: mockProof, + }, + ProofLERToRER: &agglayer.MerkleProof{ + Root: common.HexToHash("0xaaab"), + Proof: mockProof, + }, + ProofGERToL1Root: &agglayer.MerkleProof{ + Root: common.HexToHash("0x7891"), + Proof: mockProof, + }, + }, + }, + }, + }, + { + name: "Multiple claims", + claims: []bridgesync.Claim{ + { + IsMessage: false, + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x123"), + 
DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + GlobalIndex: big.NewInt(1), + GlobalExitRoot: common.HexToHash("0x7891"), + RollupExitRoot: common.HexToHash("0xaaa"), + MainnetExitRoot: common.HexToHash("0xbbb"), + ProofLocalExitRoot: mockProof, + ProofRollupExitRoot: mockProof, + }, + { + IsMessage: true, + OriginNetwork: 3, + OriginAddress: common.HexToAddress("0x789"), + DestinationNetwork: 4, + DestinationAddress: common.HexToAddress("0xabc"), + Amount: big.NewInt(200), + Metadata: []byte("data"), + GlobalIndex: bridgesync.GenerateGlobalIndex(true, 0, 2), + GlobalExitRoot: common.HexToHash("0x7891"), + RollupExitRoot: common.HexToHash("0xbbb"), + MainnetExitRoot: common.HexToHash("0xccc"), + ProofLocalExitRoot: mockProof, + ProofRollupExitRoot: mockProof, + }, + }, + expectedError: false, + expectedExits: []*agglayer.ImportedBridgeExit{ + { + BridgeExit: &agglayer.BridgeExit{ + LeafType: agglayer.LeafTypeAsset, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 1, + OriginTokenAddress: common.HexToAddress("0x123"), + }, + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + }, + GlobalIndex: &agglayer.GlobalIndex{ + MainnetFlag: false, + RollupIndex: 0, + LeafIndex: 1, + }, + ClaimData: &agglayer.ClaimFromRollup{ + L1Leaf: &agglayer.L1InfoTreeLeaf{ + L1InfoTreeIndex: 1, + RollupExitRoot: common.HexToHash("0xaaa"), + MainnetExitRoot: common.HexToHash("0xbbb"), + Inner: &agglayer.L1InfoTreeLeafInner{ + GlobalExitRoot: common.HexToHash("0x7891"), + Timestamp: 123456789, + BlockHash: common.HexToHash("0xabc"), + }, + }, + ProofLeafLER: &agglayer.MerkleProof{ + Root: common.HexToHash("0xbbb"), + Proof: mockProof, + }, + ProofLERToRER: &agglayer.MerkleProof{ + Root: common.HexToHash("0xaaa"), + Proof: mockProof, + }, + ProofGERToL1Root: &agglayer.MerkleProof{ + Root: common.HexToHash("0x7891"), + Proof: mockProof, + }, + }, + }, + { + BridgeExit: &agglayer.BridgeExit{ + LeafType: agglayer.LeafTypeMessage, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 3, + OriginTokenAddress: common.HexToAddress("0x789"), + }, + DestinationNetwork: 4, + DestinationAddress: common.HexToAddress("0xabc"), + Amount: big.NewInt(200), + Metadata: []byte("data"), + }, + GlobalIndex: &agglayer.GlobalIndex{ + MainnetFlag: true, + RollupIndex: 0, + LeafIndex: 2, + }, + ClaimData: &agglayer.ClaimFromMainnnet{ + L1Leaf: &agglayer.L1InfoTreeLeaf{ + L1InfoTreeIndex: 1, + RollupExitRoot: common.HexToHash("0xbbb"), + MainnetExitRoot: common.HexToHash("0xccc"), + Inner: &agglayer.L1InfoTreeLeafInner{ + GlobalExitRoot: common.HexToHash("0x7891"), + Timestamp: 123456789, + BlockHash: common.HexToHash("0xabc"), + }, + }, + ProofLeafMER: &agglayer.MerkleProof{ + Root: common.HexToHash("0xccc"), + Proof: mockProof, + }, + ProofGERToL1Root: &agglayer.MerkleProof{ + Root: common.HexToHash("0x7891"), + Proof: mockProof, + }, + }, + }, + }, + }, + { + name: "No claims", + claims: []bridgesync.Claim{}, + expectedError: false, + expectedExits: nil, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + aggSender := &AggSender{ + l1infoTreeSyncer: mockL1InfoTreeSyncer, + log: log.WithFields("test", "unittest"), + } + exits, err := aggSender.getImportedBridgeExits(context.Background(), tt.claims) + + if tt.expectedError { + require.Error(t, err) + require.Nil(t, exits) + } else { + require.NoError(t, err) + 
require.Equal(t, tt.expectedExits, exits) + } + }) + } +} + +func TestBuildCertificate(t *testing.T) { + mockL2BridgeSyncer := mocks.NewL2BridgeSyncerMock(t) + mockL1InfoTreeSyncer := mocks.NewL1InfoTreeSyncerMock(t) + mockProof := generateTestProof(t) + + tests := []struct { + name string + bridges []bridgesync.Bridge + claims []bridgesync.Claim + lastSentCertificateInfo aggsendertypes.CertificateInfo + mockFn func() + expectedCert *agglayer.Certificate + expectedError bool + }{ + { + name: "Valid certificate with bridges and claims", + bridges: []bridgesync.Bridge{ + { + LeafType: agglayer.LeafTypeAsset.Uint8(), + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x123"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + DepositCount: 1, + }, + }, + claims: []bridgesync.Claim{ + { + IsMessage: false, + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x1234"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x4567"), + Amount: big.NewInt(111), + Metadata: []byte("metadata1"), + GlobalIndex: big.NewInt(1), + GlobalExitRoot: common.HexToHash("0x7891"), + RollupExitRoot: common.HexToHash("0xaaab"), + MainnetExitRoot: common.HexToHash("0xbbba"), + ProofLocalExitRoot: mockProof, + ProofRollupExitRoot: mockProof, + }, + }, + lastSentCertificateInfo: aggsendertypes.CertificateInfo{ + NewLocalExitRoot: common.HexToHash("0x123"), + Height: 1, + }, + expectedCert: &agglayer.Certificate{ + NetworkID: 1, + PrevLocalExitRoot: common.HexToHash("0x123"), + NewLocalExitRoot: common.HexToHash("0x789"), + BridgeExits: []*agglayer.BridgeExit{ + { + LeafType: agglayer.LeafTypeAsset, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 1, + OriginTokenAddress: common.HexToAddress("0x123"), + }, + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + }, + }, + ImportedBridgeExits: []*agglayer.ImportedBridgeExit{ + { + BridgeExit: &agglayer.BridgeExit{ + LeafType: agglayer.LeafTypeAsset, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 1, + OriginTokenAddress: common.HexToAddress("0x1234"), + }, + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x4567"), + Amount: big.NewInt(111), + Metadata: []byte("metadata1"), + }, + GlobalIndex: &agglayer.GlobalIndex{ + MainnetFlag: false, + RollupIndex: 0, + LeafIndex: 1, + }, + ClaimData: &agglayer.ClaimFromRollup{ + L1Leaf: &agglayer.L1InfoTreeLeaf{ + L1InfoTreeIndex: 1, + RollupExitRoot: common.HexToHash("0xaaab"), + MainnetExitRoot: common.HexToHash("0xbbba"), + Inner: &agglayer.L1InfoTreeLeafInner{ + GlobalExitRoot: common.HexToHash("0x7891"), + Timestamp: 123456789, + BlockHash: common.HexToHash("0xabc"), + }, + }, + ProofLeafLER: &agglayer.MerkleProof{ + Root: common.HexToHash("0xbbba"), + Proof: mockProof, + }, + ProofLERToRER: &agglayer.MerkleProof{ + Root: common.HexToHash("0xaaab"), + Proof: mockProof, + }, + ProofGERToL1Root: &agglayer.MerkleProof{ + Root: common.HexToHash("0x7891"), + Proof: mockProof, + }, + }, + }, + }, + Height: 2, + }, + mockFn: func() { + mockL2BridgeSyncer.On("OriginNetwork").Return(uint32(1)) + mockL2BridgeSyncer.On("GetExitRootByIndex", mock.Anything, mock.Anything).Return(treeTypes.Root{Hash: common.HexToHash("0x789")}, nil) + + mockL1InfoTreeSyncer.On("GetInfoByGlobalExitRoot", mock.Anything).Return(&l1infotreesync.L1InfoTreeLeaf{ + L1InfoTreeIndex: 1, + Timestamp: 123456789, + PreviousBlockHash: 
common.HexToHash("0xabc"), + GlobalExitRoot: common.HexToHash("0x7891"), + }, nil) + mockL1InfoTreeSyncer.On("GetL1InfoTreeRootByIndex", mock.Anything, mock.Anything).Return(treeTypes.Root{Hash: common.HexToHash("0x7891")}, nil) + mockL1InfoTreeSyncer.On("GetL1InfoTreeMerkleProofFromIndexToRoot", mock.Anything, mock.Anything, mock.Anything).Return(mockProof, nil) + }, + expectedError: false, + }, + { + name: "No bridges or claims", + bridges: []bridgesync.Bridge{}, + claims: []bridgesync.Claim{}, + lastSentCertificateInfo: aggsendertypes.CertificateInfo{ + NewLocalExitRoot: common.HexToHash("0x123"), + Height: 1, + }, + expectedCert: nil, + expectedError: true, + }, + { + name: "Error getting imported bridge exits", + bridges: []bridgesync.Bridge{ + { + LeafType: agglayer.LeafTypeAsset.Uint8(), + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x123"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + DepositCount: 1, + }, + }, + claims: []bridgesync.Claim{ + { + IsMessage: false, + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x1234"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x4567"), + Amount: big.NewInt(111), + Metadata: []byte("metadata1"), + GlobalIndex: new(big.Int).SetBytes([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}), + GlobalExitRoot: common.HexToHash("0x7891"), + RollupExitRoot: common.HexToHash("0xaaab"), + MainnetExitRoot: common.HexToHash("0xbbba"), + ProofLocalExitRoot: mockProof, + }, + }, + lastSentCertificateInfo: aggsendertypes.CertificateInfo{ + NewLocalExitRoot: common.HexToHash("0x123"), + Height: 1, + }, + mockFn: func() { + mockL1InfoTreeSyncer.On("GetInfoByGlobalExitRoot", mock.Anything).Return(&l1infotreesync.L1InfoTreeLeaf{ + L1InfoTreeIndex: 1, + Timestamp: 123456789, + PreviousBlockHash: common.HexToHash("0xabc"), + GlobalExitRoot: common.HexToHash("0x7891"), + }, nil) + mockL1InfoTreeSyncer.On("GetL1InfoTreeRootByIndex", mock.Anything, mock.Anything).Return( + treeTypes.Root{Hash: common.HexToHash("0x7891")}, nil) + }, + expectedCert: nil, + expectedError: true, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + mockL1InfoTreeSyncer.ExpectedCalls = nil + mockL2BridgeSyncer.ExpectedCalls = nil + + if tt.mockFn != nil { + tt.mockFn() + } + + aggSender := &AggSender{ + l2Syncer: mockL2BridgeSyncer, + l1infoTreeSyncer: mockL1InfoTreeSyncer, + log: log.WithFields("test", "unittest"), + } + cert, err := aggSender.buildCertificate(context.Background(), tt.bridges, tt.claims, tt.lastSentCertificateInfo) + + if tt.expectedError { + require.Error(t, err) + require.Nil(t, cert) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectedCert, cert) + } + }) + } +} + +func generateTestProof(t *testing.T) treeTypes.Proof { + t.Helper() + + proof := treeTypes.Proof{} + + for i := 0; i < int(treeTypes.DefaultHeight) && i < 10; i++ { + proof[i] = common.HexToHash(fmt.Sprintf("0x%d", i)) + } + + return proof +} + +func TestCheckIfCertificatesAreSettled(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + pendingCertificates []*aggsendertypes.CertificateInfo + certificateHeaders map[common.Hash]*agglayer.CertificateHeader + getFromDBError error + clientError error + updateDBError error + expectedErrorLogMessages []string + expectedInfoMessages []string + }{ + { + name: "All certificates settled - update successful", + pendingCertificates: []*aggsendertypes.CertificateInfo{ + 
{CertificateID: common.HexToHash("0x1"), Height: 1}, + {CertificateID: common.HexToHash("0x2"), Height: 2}, + }, + certificateHeaders: map[common.Hash]*agglayer.CertificateHeader{ + common.HexToHash("0x1"): {Status: agglayer.Settled}, + common.HexToHash("0x2"): {Status: agglayer.Settled}, + }, + expectedInfoMessages: []string{ + "certificate %s changed status to %s", + }, + }, + { + name: "Some certificates in error - update successful", + pendingCertificates: []*aggsendertypes.CertificateInfo{ + {CertificateID: common.HexToHash("0x1"), Height: 1}, + {CertificateID: common.HexToHash("0x2"), Height: 2}, + }, + certificateHeaders: map[common.Hash]*agglayer.CertificateHeader{ + common.HexToHash("0x1"): {Status: agglayer.InError}, + common.HexToHash("0x2"): {Status: agglayer.Settled}, + }, + expectedInfoMessages: []string{ + "certificate %s changed status to %s", + }, + }, + { + name: "Error getting pending certificates", + getFromDBError: fmt.Errorf("storage error"), + expectedErrorLogMessages: []string{ + "error getting pending certificates: %w", + }, + }, + { + name: "Error getting certificate header", + pendingCertificates: []*aggsendertypes.CertificateInfo{ + {CertificateID: common.HexToHash("0x1"), Height: 1}, + }, + certificateHeaders: map[common.Hash]*agglayer.CertificateHeader{ + common.HexToHash("0x1"): {Status: agglayer.InError}, + }, + clientError: fmt.Errorf("client error"), + expectedErrorLogMessages: []string{ + "error getting header of certificate %s with height: %d from agglayer: %w", + }, + }, + { + name: "Error updating certificate status", + pendingCertificates: []*aggsendertypes.CertificateInfo{ + {CertificateID: common.HexToHash("0x1"), Height: 1}, + }, + certificateHeaders: map[common.Hash]*agglayer.CertificateHeader{ + common.HexToHash("0x1"): {Status: agglayer.Settled}, + }, + updateDBError: fmt.Errorf("update error"), + expectedErrorLogMessages: []string{ + "error updating certificate status in storage: %w", + }, + expectedInfoMessages: []string{ + "certificate %s changed status to %s", + }, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + mockStorage := mocks.NewAggSenderStorageMock(t) + mockAggLayerClient := agglayer.NewAgglayerClientMock(t) + mockLogger := mocks.NewLoggerMock(t) + + mockStorage.On("GetCertificatesByStatus", mock.Anything, []agglayer.CertificateStatus{agglayer.Pending}).Return(tt.pendingCertificates, tt.getFromDBError) + for certID, header := range tt.certificateHeaders { + mockAggLayerClient.On("GetCertificateHeader", certID).Return(header, tt.clientError) + } + if tt.updateDBError != nil { + mockStorage.On("UpdateCertificateStatus", mock.Anything, mock.Anything).Return(tt.updateDBError) + } else if tt.clientError == nil && tt.getFromDBError == nil { + mockStorage.On("UpdateCertificateStatus", mock.Anything, mock.Anything).Return(nil) + } + + if tt.clientError != nil { + for _, msg := range tt.expectedErrorLogMessages { + mockLogger.On("Errorf", msg, mock.Anything, mock.Anything, mock.Anything).Return() + } + } else { + for _, msg := range tt.expectedErrorLogMessages { + mockLogger.On("Errorf", msg, mock.Anything).Return() + } + + for _, msg := range tt.expectedInfoMessages { + mockLogger.On("Infof", msg, mock.Anything, mock.Anything).Return() + } + } + + aggSender := &AggSender{ + log: mockLogger, + storage: mockStorage, + aggLayerClient: mockAggLayerClient, + cfg: Config{ + BlockGetInterval: types.Duration{Duration: time.Second}, + CheckSettledInterval: types.Duration{Duration: 
time.Second}, + }, + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go aggSender.checkIfCertificatesAreSettled(ctx) + + time.Sleep(2 * time.Second) + cancel() + + mockLogger.AssertExpectations(t) + mockAggLayerClient.AssertExpectations(t) + mockStorage.AssertExpectations(t) + }) + } +} + +func TestSendCertificate(t *testing.T) { + t.Parallel() + + privateKey, err := crypto.GenerateKey() + require.NoError(t, err) + + type testCfg struct { + name string + sequencerKey *ecdsa.PrivateKey + shouldSendCertificate []interface{} + getLastSentCertificate []interface{} + lastL2BlockProcessed []interface{} + getBridges []interface{} + getClaims []interface{} + getInfoByGlobalExitRoot []interface{} + getL1InfoTreeRootByIndex []interface{} + getL1InfoTreeMerkleProofFromIndexToRoot []interface{} + getExitRootByIndex []interface{} + originNetwork []interface{} + sendCertificate []interface{} + saveLastSentCertificate []interface{} + expectedError string + } + + setupTest := func(cfg testCfg) (*AggSender, *mocks.AggSenderStorageMock, *mocks.L2BridgeSyncerMock, + *agglayer.AgglayerClientMock, *mocks.L1InfoTreeSyncerMock) { + var ( + aggsender = &AggSender{ + log: log.WithFields("aggsender", 1), + cfg: Config{}, + sequencerKey: cfg.sequencerKey, + } + mockStorage *mocks.AggSenderStorageMock + mockL2Syncer *mocks.L2BridgeSyncerMock + mockAggLayerClient *agglayer.AgglayerClientMock + mockL1InfoTreeSyncer *mocks.L1InfoTreeSyncerMock + ) + + if cfg.shouldSendCertificate != nil || cfg.getLastSentCertificate != nil || + cfg.saveLastSentCertificate != nil { + mockStorage = mocks.NewAggSenderStorageMock(t) + mockStorage.On("GetCertificatesByStatus", mock.Anything, []agglayer.CertificateStatus{agglayer.Pending}). + Return(cfg.shouldSendCertificate...).Once() + + aggsender.storage = mockStorage + + if cfg.getLastSentCertificate != nil { + mockStorage.On("GetLastSentCertificate", mock.Anything).Return(cfg.getLastSentCertificate...).Once() + } + + if cfg.saveLastSentCertificate != nil { + mockStorage.On("SaveLastSentCertificate", mock.Anything, mock.Anything).Return(cfg.saveLastSentCertificate...).Once() + } + } + + if cfg.lastL2BlockProcessed != nil || cfg.originNetwork != nil || + cfg.getBridges != nil || cfg.getClaims != nil || cfg.getInfoByGlobalExitRoot != nil { + mockL2Syncer = mocks.NewL2BridgeSyncerMock(t) + + mockL2Syncer.On("GetLastProcessedBlock", mock.Anything).Return(cfg.lastL2BlockProcessed...).Once() + + if cfg.getBridges != nil { + mockL2Syncer.On("GetBridgesPublished", mock.Anything, mock.Anything, mock.Anything).Return(cfg.getBridges...).Once() + } + + if cfg.getClaims != nil { + mockL2Syncer.On("GetClaims", mock.Anything, mock.Anything, mock.Anything).Return(cfg.getClaims...).Once() + } + + if cfg.getExitRootByIndex != nil { + mockL2Syncer.On("GetExitRootByIndex", mock.Anything, mock.Anything).Return(cfg.getExitRootByIndex...).Once() + } + + if cfg.originNetwork != nil { + mockL2Syncer.On("OriginNetwork").Return(cfg.originNetwork...).Once() + } + + aggsender.l2Syncer = mockL2Syncer + } + + if cfg.sendCertificate != nil { + mockAggLayerClient = agglayer.NewAgglayerClientMock(t) + mockAggLayerClient.On("SendCertificate", mock.Anything).Return(cfg.sendCertificate...).Once() + + aggsender.aggLayerClient = mockAggLayerClient + } + + if cfg.getInfoByGlobalExitRoot != nil || + cfg.getL1InfoTreeRootByIndex != nil || cfg.getL1InfoTreeMerkleProofFromIndexToRoot != nil { + mockL1InfoTreeSyncer = mocks.NewL1InfoTreeSyncerMock(t) + 
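+ // Wire the return values from testCfg into the L1 info tree syncer expectations; each []interface{} field carries exactly the values the corresponding mocked call returns.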
mockL1InfoTreeSyncer.On("GetInfoByGlobalExitRoot", mock.Anything).Return(cfg.getInfoByGlobalExitRoot...).Once() + + if cfg.getL1InfoTreeRootByIndex != nil { + mockL1InfoTreeSyncer.On("GetL1InfoTreeRootByIndex", mock.Anything, mock.Anything).Return(cfg.getL1InfoTreeRootByIndex...).Once() + } + + if cfg.getL1InfoTreeMerkleProofFromIndexToRoot != nil { + mockL1InfoTreeSyncer.On("GetL1InfoTreeMerkleProofFromIndexToRoot", mock.Anything, mock.Anything, mock.Anything). + Return(cfg.getL1InfoTreeMerkleProofFromIndexToRoot...).Once() + } + + aggsender.l1infoTreeSyncer = mockL1InfoTreeSyncer + } + + return aggsender, mockStorage, mockL2Syncer, mockAggLayerClient, mockL1InfoTreeSyncer + } + + tests := []testCfg{ + { + name: "error getting pending certificates", + shouldSendCertificate: []interface{}{nil, errors.New("error getting pending")}, + expectedError: "error getting pending", + }, + { + name: "should not send certificate", + shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{ + {Status: agglayer.Pending}, + }, nil}, + }, + { + name: "error getting last sent certificate", + shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, + lastL2BlockProcessed: []interface{}{uint64(8), nil}, + getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{}, errors.New("error getting last sent certificate")}, + expectedError: "error getting last sent certificate", + }, + { + name: "no new blocks to send certificate", + shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, + lastL2BlockProcessed: []interface{}{uint64(41), nil}, + getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + Height: 41, + CertificateID: common.HexToHash("0x111"), + NewLocalExitRoot: common.HexToHash("0x13223"), + FromBlock: 31, + ToBlock: 41, + }, nil}, + }, + { + name: "get bridges error", + shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, + lastL2BlockProcessed: []interface{}{uint64(59), nil}, + getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + Height: 50, + CertificateID: common.HexToHash("0x1111"), + NewLocalExitRoot: common.HexToHash("0x132233"), + FromBlock: 40, + ToBlock: 41, + }, nil}, + getBridges: []interface{}{nil, errors.New("error getting bridges")}, + expectedError: "error getting bridges", + }, + { + name: "no bridges", + shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, + lastL2BlockProcessed: []interface{}{uint64(69), nil}, + getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + Height: 60, + CertificateID: common.HexToHash("0x11111"), + NewLocalExitRoot: common.HexToHash("0x1322233"), + FromBlock: 50, + ToBlock: 51, + }, nil}, + getBridges: []interface{}{[]bridgesync.Bridge{}, nil}, + }, + { + name: "get claims error", + shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, + lastL2BlockProcessed: []interface{}{uint64(79), nil}, + getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + Height: 70, + CertificateID: common.HexToHash("0x121111"), + NewLocalExitRoot: common.HexToHash("0x13122233"), + FromBlock: 60, + ToBlock: 61, + }, nil}, + getBridges: []interface{}{[]bridgesync.Bridge{ + { + BlockNum: 61, + BlockPos: 0, + LeafType: agglayer.LeafTypeAsset.Uint8(), + OriginNetwork: 1, + }, + }, nil}, + getClaims: []interface{}{nil, errors.New("error getting claims")}, + expectedError: "error getting claims", + }, + { + name: "error getting info by global exit root", + 
shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, + lastL2BlockProcessed: []interface{}{uint64(89), nil}, + getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + Height: 80, + CertificateID: common.HexToHash("0x1321111"), + NewLocalExitRoot: common.HexToHash("0x131122233"), + FromBlock: 70, + ToBlock: 71, + }, nil}, + getBridges: []interface{}{[]bridgesync.Bridge{ + { + BlockNum: 71, + BlockPos: 0, + LeafType: agglayer.LeafTypeAsset.Uint8(), + OriginNetwork: 1, + }, + }, nil}, + getClaims: []interface{}{[]bridgesync.Claim{ + { + IsMessage: false, + }, + }, nil}, + getInfoByGlobalExitRoot: []interface{}{nil, errors.New("error getting info by global exit root")}, + expectedError: "error getting info by global exit root", + }, + { + name: "error getting L1 Info tree root by index", + shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, + lastL2BlockProcessed: []interface{}{uint64(89), nil}, + getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + Height: 80, + CertificateID: common.HexToHash("0x1321111"), + NewLocalExitRoot: common.HexToHash("0x131122233"), + FromBlock: 70, + ToBlock: 71, + }, nil}, + getBridges: []interface{}{[]bridgesync.Bridge{ + { + BlockNum: 71, + BlockPos: 0, + LeafType: agglayer.LeafTypeAsset.Uint8(), + OriginNetwork: 1, + }, + }, nil}, + getClaims: []interface{}{[]bridgesync.Claim{ + { + IsMessage: false, + }, + }, nil}, + getInfoByGlobalExitRoot: []interface{}{&l1infotreesync.L1InfoTreeLeaf{ + L1InfoTreeIndex: 1, + BlockNumber: 1, + BlockPosition: 0, + PreviousBlockHash: common.HexToHash("0x123"), + Timestamp: 123456789, + MainnetExitRoot: common.HexToHash("0xccc"), + RollupExitRoot: common.HexToHash("0xddd"), + GlobalExitRoot: common.HexToHash("0xeee"), + }, nil}, + getL1InfoTreeRootByIndex: []interface{}{treeTypes.Root{}, errors.New("error getting L1 Info tree root by index")}, + expectedError: "error getting L1 Info tree root by index", + }, + { + name: "error getting L1 Info tree merkle proof from index to root", + shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, + lastL2BlockProcessed: []interface{}{uint64(89), nil}, + getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + Height: 80, + CertificateID: common.HexToHash("0x1321111"), + NewLocalExitRoot: common.HexToHash("0x131122233"), + FromBlock: 70, + ToBlock: 71, + }, nil}, + getBridges: []interface{}{[]bridgesync.Bridge{ + { + BlockNum: 71, + BlockPos: 0, + LeafType: agglayer.LeafTypeAsset.Uint8(), + OriginNetwork: 1, + }, + }, nil}, + getClaims: []interface{}{[]bridgesync.Claim{ + { + IsMessage: false, + GlobalIndex: big.NewInt(1), + }, + }, nil}, + getInfoByGlobalExitRoot: []interface{}{&l1infotreesync.L1InfoTreeLeaf{ + L1InfoTreeIndex: 1, + BlockNumber: 1, + BlockPosition: 0, + PreviousBlockHash: common.HexToHash("0x123"), + Timestamp: 123456789, + MainnetExitRoot: common.HexToHash("0xccc"), + RollupExitRoot: common.HexToHash("0xddd"), + GlobalExitRoot: common.HexToHash("0xeee"), + }, nil}, + getL1InfoTreeRootByIndex: []interface{}{treeTypes.Root{Hash: common.HexToHash("0xeee")}, nil}, + getL1InfoTreeMerkleProofFromIndexToRoot: []interface{}{treeTypes.Proof{}, errors.New("error getting L1 Info tree merkle proof")}, + expectedError: "error getting L1 Info tree merkle proof for leaf index", + }, + { + name: "send certificate error", + shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, + lastL2BlockProcessed: []interface{}{uint64(99), nil}, + 
getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + Height: 90, + CertificateID: common.HexToHash("0x1121111"), + NewLocalExitRoot: common.HexToHash("0x111122211"), + FromBlock: 80, + ToBlock: 81, + }, nil}, + getBridges: []interface{}{[]bridgesync.Bridge{ + { + BlockNum: 81, + BlockPos: 0, + LeafType: agglayer.LeafTypeAsset.Uint8(), + OriginNetwork: 1, + DepositCount: 1, + }, + }, nil}, + getClaims: []interface{}{[]bridgesync.Claim{}, nil}, + getExitRootByIndex: []interface{}{treeTypes.Root{}, nil}, + originNetwork: []interface{}{uint32(1), nil}, + sendCertificate: []interface{}{common.Hash{}, errors.New("error sending certificate")}, + sequencerKey: privateKey, + expectedError: "error sending certificate", + }, + { + name: "store last sent certificate error", + shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, + lastL2BlockProcessed: []interface{}{uint64(109), nil}, + getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + Height: 100, + CertificateID: common.HexToHash("0x11121111"), + NewLocalExitRoot: common.HexToHash("0x1211122211"), + FromBlock: 90, + ToBlock: 91, + }, nil}, + getBridges: []interface{}{[]bridgesync.Bridge{ + { + BlockNum: 91, + BlockPos: 0, + LeafType: agglayer.LeafTypeAsset.Uint8(), + OriginNetwork: 1, + DepositCount: 1, + }, + }, nil}, + getClaims: []interface{}{[]bridgesync.Claim{}, nil}, + getExitRootByIndex: []interface{}{treeTypes.Root{}, nil}, + originNetwork: []interface{}{uint32(1), nil}, + sendCertificate: []interface{}{common.Hash{}, nil}, + saveLastSentCertificate: []interface{}{errors.New("error saving last sent certificate in db")}, + sequencerKey: privateKey, + expectedError: "error saving last sent certificate in db", + }, + { + name: "successful sending of certificate", + shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, + lastL2BlockProcessed: []interface{}{uint64(119), nil}, + getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + Height: 110, + CertificateID: common.HexToHash("0x12121111"), + NewLocalExitRoot: common.HexToHash("0x1221122211"), + FromBlock: 100, + ToBlock: 101, + }, nil}, + getBridges: []interface{}{[]bridgesync.Bridge{ + { + BlockNum: 101, + BlockPos: 0, + LeafType: agglayer.LeafTypeAsset.Uint8(), + OriginNetwork: 1, + DepositCount: 1, + }, + }, nil}, + getClaims: []interface{}{[]bridgesync.Claim{}, nil}, + getExitRootByIndex: []interface{}{treeTypes.Root{}, nil}, + originNetwork: []interface{}{uint32(1), nil}, + sendCertificate: []interface{}{common.Hash{}, nil}, + saveLastSentCertificate: []interface{}{nil}, + sequencerKey: privateKey, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + aggsender, mockStorage, mockL2Syncer, + mockAggLayerClient, mockL1InfoTreeSyncer := setupTest(tt) + + err := aggsender.sendCertificate(context.Background()) + + if tt.expectedError != "" { + require.ErrorContains(t, err, tt.expectedError) + } else { + require.NoError(t, err) + } + + if mockStorage != nil { + mockStorage.AssertExpectations(t) + } + + if mockL2Syncer != nil { + mockL2Syncer.AssertExpectations(t) + } + + if mockAggLayerClient != nil { + mockAggLayerClient.AssertExpectations(t) + } + + if mockL1InfoTreeSyncer != nil { + mockL1InfoTreeSyncer.AssertExpectations(t) + } + }) + } +} + +func TestExtractSignatureData(t *testing.T) { + t.Parallel() + + testR := common.HexToHash("0x1") + testV := common.HexToHash("0x2") + + tests := []struct { + name string + signature 
[]byte + expectedR common.Hash + expectedS common.Hash + expectedOddParity bool + expectedError error + }{ + { + name: "Valid signature - odd parity", + signature: append(append(testR.Bytes(), testV.Bytes()...), 1), + expectedR: testR, + expectedS: testV, + expectedOddParity: true, + expectedError: nil, + }, + { + name: "Valid signature - even parity", + signature: append(append(testR.Bytes(), testV.Bytes()...), 2), + expectedR: testR, + expectedS: testV, + expectedOddParity: false, + expectedError: nil, + }, + { + name: "Invalid signature size", + signature: make([]byte, 64), // Invalid size + expectedError: errInvalidSignatureSize, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + r, s, isOddParity, err := extractSignatureData(tt.signature) + + if tt.expectedError != nil { + require.Error(t, err) + require.Equal(t, tt.expectedError, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectedR, r) + require.Equal(t, tt.expectedS, s) + require.Equal(t, tt.expectedOddParity, isOddParity) + } + }) + } +} + +func TestExploratoryGenerateCert(t *testing.T) { + t.Skip("This test is only for exploratory purposes, to generate json format of the certificate") + + key, err := crypto.GenerateKey() + require.NoError(t, err) + + signature, err := crypto.Sign(common.HexToHash("0x1").Bytes(), key) + require.NoError(t, err) + + r, s, v, err := extractSignatureData(signature) + require.NoError(t, err) + + certificate := &agglayer.SignedCertificate{ + Certificate: &agglayer.Certificate{ + NetworkID: 1, + Height: 1, + PrevLocalExitRoot: common.HexToHash("0x1"), + NewLocalExitRoot: common.HexToHash("0x2"), + BridgeExits: []*agglayer.BridgeExit{ + { + LeafType: agglayer.LeafTypeAsset, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 1, + OriginTokenAddress: common.HexToAddress("0x11"), + }, + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x22"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + }, + }, + ImportedBridgeExits: []*agglayer.ImportedBridgeExit{ + { + GlobalIndex: &agglayer.GlobalIndex{ + MainnetFlag: false, + RollupIndex: 1, + LeafIndex: 11, + }, + BridgeExit: &agglayer.BridgeExit{ + LeafType: agglayer.LeafTypeAsset, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 1, + OriginTokenAddress: common.HexToAddress("0x11"), + }, + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x22"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + }, + ClaimData: &agglayer.ClaimFromMainnnet{ + ProofLeafMER: &agglayer.MerkleProof{ + Root: common.HexToHash("0x1"), + Proof: [32]common.Hash{}, + }, + ProofGERToL1Root: &agglayer.MerkleProof{ + Root: common.HexToHash("0x3"), + Proof: [32]common.Hash{}, + }, + L1Leaf: &agglayer.L1InfoTreeLeaf{ + L1InfoTreeIndex: 1, + RollupExitRoot: common.HexToHash("0x4"), + MainnetExitRoot: common.HexToHash("0x5"), + Inner: &agglayer.L1InfoTreeLeafInner{ + GlobalExitRoot: common.HexToHash("0x6"), + BlockHash: common.HexToHash("0x7"), + Timestamp: 1231, + }, + }, + }, + }, + }, + }, + Signature: &agglayer.Signature{ + R: r, + S: s, + OddParity: v, + }, + } + + file, err := os.Create("test.json") + require.NoError(t, err) + + defer file.Close() + + encoder := json.NewEncoder(file) + encoder.SetIndent("", " ") + require.NoError(t, encoder.Encode(certificate)) +} diff --git a/aggsender/config.go b/aggsender/config.go new file mode 100644 index 00000000..506b4e9a --- /dev/null +++ b/aggsender/config.go @@ -0,0 +1,23 @@ +package aggsender + +import ( + 
"github.com/0xPolygon/cdk/config/types" +) + +// Config is the configuration for the AggSender +type Config struct { + // StoragePath is the path of the sqlite db on which the AggSender will store the data + StoragePath string `mapstructure:"StoragePath"` + // AggLayerURL is the URL of the AggLayer + AggLayerURL string `mapstructure:"AggLayerURL"` + // BlockGetInterval is the interval at which the AggSender will get the blocks from L1 + BlockGetInterval types.Duration `mapstructure:"BlockGetInterval"` + // CheckSettledInterval is the interval at which the AggSender will check if the blocks are settled + CheckSettledInterval types.Duration `mapstructure:"CheckSettledInterval"` + // AggsenderPrivateKey is the private key which is used to sign certificates + AggsenderPrivateKey types.KeystoreFileConfig `mapstructure:"AggsenderPrivateKey"` + // URLRPCL2 is the URL of the L2 RPC node + URLRPCL2 string `mapstructure:"URLRPCL2"` + // SaveCertificatesToFiles is a flag which tells the AggSender to save the certificates to a file + SaveCertificatesToFiles bool `mapstructure:"SaveCertificatesToFiles"` +} diff --git a/aggsender/db/aggsender_db_storage.go b/aggsender/db/aggsender_db_storage.go new file mode 100644 index 00000000..25b31392 --- /dev/null +++ b/aggsender/db/aggsender_db_storage.go @@ -0,0 +1,215 @@ +package db + +import ( + "context" + "database/sql" + "errors" + "fmt" + "strings" + + "github.com/0xPolygon/cdk/agglayer" + "github.com/0xPolygon/cdk/aggsender/db/migrations" + "github.com/0xPolygon/cdk/aggsender/types" + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/log" + "github.com/ethereum/go-ethereum/common" + "github.com/russross/meddler" +) + +const errWhileRollbackFormat = "error while rolling back tx: %w" + +// AggSenderStorage is the interface that defines the methods to interact with the storage +type AggSenderStorage interface { + // GetCertificateByHeight returns a certificate by its height + GetCertificateByHeight(ctx context.Context, height uint64) (types.CertificateInfo, error) + // GetLastSentCertificate returns the last certificate sent to the aggLayer + GetLastSentCertificate(ctx context.Context) (types.CertificateInfo, error) + // SaveLastSentCertificate saves the last certificate sent to the aggLayer + SaveLastSentCertificate(ctx context.Context, certificate types.CertificateInfo) error + // DeleteCertificate deletes a certificate from the storage + DeleteCertificate(ctx context.Context, certificateID common.Hash) error + // GetCertificatesByStatus returns a list of certificates by their status + GetCertificatesByStatus(ctx context.Context, status []agglayer.CertificateStatus) ([]*types.CertificateInfo, error) + // UpdateCertificateStatus updates the status of a certificate + UpdateCertificateStatus(ctx context.Context, certificate types.CertificateInfo) error +} + +var _ AggSenderStorage = (*AggSenderSQLStorage)(nil) + +// AggSenderSQLStorage is the struct that implements the AggSenderStorage interface +type AggSenderSQLStorage struct { + logger *log.Logger + db *sql.DB +} + +// NewAggSenderSQLStorage creates a new AggSenderSQLStorage +func NewAggSenderSQLStorage(logger *log.Logger, dbPath string) (*AggSenderSQLStorage, error) { + if err := migrations.RunMigrations(dbPath); err != nil { + return nil, err + } + + db, err := db.NewSQLiteDB(dbPath) + if err != nil { + return nil, err + } + + return &AggSenderSQLStorage{ + db: db, + logger: logger, + }, nil +} + +func (a *AggSenderSQLStorage) GetCertificatesByStatus(ctx context.Context, + statuses 
[]agglayer.CertificateStatus) ([]*types.CertificateInfo, error) { + query := "SELECT * FROM certificate_info" + args := make([]interface{}, len(statuses)) + + if len(statuses) > 0 { + placeholders := make([]string, len(statuses)) + // Build the WHERE clause for status filtering + for i := range statuses { + placeholders[i] = fmt.Sprintf("$%d", i+1) + args[i] = statuses[i] + } + + // Build the WHERE clause with the joined placeholders + query += " WHERE status IN (" + strings.Join(placeholders, ", ") + ")" + } + + // Add ordering by creation date (oldest first) + query += " ORDER BY height ASC" + + var certificates []*types.CertificateInfo + if err := meddler.QueryAll(a.db, &certificates, query, args...); err != nil { + return nil, err + } + + return certificates, nil +} + +// GetCertificateByHeight returns a certificate by its height +func (a *AggSenderSQLStorage) GetCertificateByHeight(ctx context.Context, + height uint64) (types.CertificateInfo, error) { + var certificateInfo types.CertificateInfo + if err := meddler.QueryRow(a.db, &certificateInfo, + "SELECT * FROM certificate_info WHERE height = $1;", height); err != nil { + return types.CertificateInfo{}, getSelectQueryError(height, err) + } + + return certificateInfo, nil +} + +// GetLastSentCertificate returns the last certificate sent to the aggLayer +func (a *AggSenderSQLStorage) GetLastSentCertificate(ctx context.Context) (types.CertificateInfo, error) { + var certificateInfo types.CertificateInfo + if err := meddler.QueryRow(a.db, &certificateInfo, + "SELECT * FROM certificate_info ORDER BY height DESC LIMIT 1;"); err != nil { + return types.CertificateInfo{}, getSelectQueryError(0, err) + } + + return certificateInfo, nil +} + +// SaveLastSentCertificate saves the last certificate sent to the aggLayer +func (a *AggSenderSQLStorage) SaveLastSentCertificate(ctx context.Context, certificate types.CertificateInfo) error { + tx, err := db.NewTx(ctx, a.db) + if err != nil { + return err + } + defer func() { + if err != nil { + if errRllbck := tx.Rollback(); errRllbck != nil { + a.logger.Errorf(errWhileRollbackFormat, errRllbck) + } + } + }() + + if err := meddler.Insert(tx, "certificate_info", &certificate); err != nil { + return fmt.Errorf("error inserting certificate info: %w", err) + } + if err := tx.Commit(); err != nil { + return err + } + + a.logger.Debugf("inserted certificate - Height: %d. 
Hash: %s", certificate.Height, certificate.CertificateID) + + return nil +} + +// DeleteCertificate deletes a certificate from the storage +func (a *AggSenderSQLStorage) DeleteCertificate(ctx context.Context, certificateID common.Hash) error { + tx, err := db.NewTx(ctx, a.db) + if err != nil { + return err + } + defer func() { + if err != nil { + if errRllbck := tx.Rollback(); errRllbck != nil { + a.logger.Errorf(errWhileRollbackFormat, errRllbck) + } + } + }() + + if _, err := tx.Exec(`DELETE FROM certificate_info WHERE certificate_id = $1;`, certificateID); err != nil { + return fmt.Errorf("error deleting certificate info: %w", err) + } + if err := tx.Commit(); err != nil { + return err + } + + a.logger.Debugf("deleted certificate - CertificateID: %s", certificateID) + + return nil +} + +// UpdateCertificateStatus updates the status of a certificate +func (a *AggSenderSQLStorage) UpdateCertificateStatus(ctx context.Context, certificate types.CertificateInfo) error { + tx, err := db.NewTx(ctx, a.db) + if err != nil { + return err + } + defer func() { + if err != nil { + if errRllbck := tx.Rollback(); errRllbck != nil { + a.logger.Errorf(errWhileRollbackFormat, errRllbck) + } + } + }() + + if _, err := tx.Exec(`UPDATE certificate_info SET status = $1 WHERE certificate_id = $2;`, + certificate.Status, certificate.CertificateID); err != nil { + return fmt.Errorf("error updating certificate info: %w", err) + } + if err := tx.Commit(); err != nil { + return err + } + + a.logger.Debugf("updated certificate status - CertificateID: %s", certificate.CertificateID) + + return nil +} + +// clean deletes all the data from the storage +// NOTE: Used only in tests +func (a *AggSenderSQLStorage) clean() error { + if _, err := a.db.Exec(`DELETE FROM certificate_info;`); err != nil { + return err + } + + return nil +} + +func getSelectQueryError(height uint64, err error) error { + errToReturn := err + if errors.Is(err, sql.ErrNoRows) { + if height == 0 { + // height 0 is never sent to the aggLayer + // so we don't return an error in this case + errToReturn = nil + } else { + errToReturn = db.ErrNotFound + } + } + + return errToReturn +} diff --git a/aggsender/db/aggsender_db_storage_test.go b/aggsender/db/aggsender_db_storage_test.go new file mode 100644 index 00000000..cfb7af7c --- /dev/null +++ b/aggsender/db/aggsender_db_storage_test.go @@ -0,0 +1,204 @@ +package db + +import ( + "context" + "path" + "testing" + + "github.com/0xPolygon/cdk/agglayer" + "github.com/0xPolygon/cdk/aggsender/db/migrations" + "github.com/0xPolygon/cdk/aggsender/types" + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/log" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func Test_Storage(t *testing.T) { + ctx := context.Background() + + path := path.Join(t.TempDir(), "file::memory:?cache=shared") + log.Debugf("sqlite path: %s", path) + require.NoError(t, migrations.RunMigrations(path)) + + storage, err := NewAggSenderSQLStorage(log.WithFields("aggsender-db"), path) + require.NoError(t, err) + + t.Run("SaveLastSentCertificate", func(t *testing.T) { + certificate := types.CertificateInfo{ + Height: 1, + CertificateID: common.HexToHash("0x1"), + NewLocalExitRoot: common.HexToHash("0x2"), + FromBlock: 1, + ToBlock: 2, + Status: agglayer.Settled, + } + require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) + + certificateFromDB, err := storage.GetCertificateByHeight(ctx, certificate.Height) + require.NoError(t, err) + + require.Equal(t, certificate, 
certificateFromDB) + require.NoError(t, storage.clean()) + }) + + t.Run("DeleteCertificate", func(t *testing.T) { + certificate := types.CertificateInfo{ + Height: 2, + CertificateID: common.HexToHash("0x3"), + NewLocalExitRoot: common.HexToHash("0x4"), + FromBlock: 3, + ToBlock: 4, + Status: agglayer.Settled, + } + require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) + + require.NoError(t, storage.DeleteCertificate(ctx, certificate.CertificateID)) + + certificateFromDB, err := storage.GetCertificateByHeight(ctx, certificate.Height) + require.ErrorIs(t, err, db.ErrNotFound) + require.Equal(t, types.CertificateInfo{}, certificateFromDB) + require.NoError(t, storage.clean()) + }) + + t.Run("GetLastSentCertificate", func(t *testing.T) { + // try getting a certificate that doesn't exist + certificateFromDB, err := storage.GetLastSentCertificate(ctx) + require.NoError(t, err) + require.Equal(t, types.CertificateInfo{}, certificateFromDB) + + // try getting a certificate that exists + certificate := types.CertificateInfo{ + Height: 3, + CertificateID: common.HexToHash("0x5"), + NewLocalExitRoot: common.HexToHash("0x6"), + FromBlock: 5, + ToBlock: 6, + Status: agglayer.Pending, + } + require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) + + certificateFromDB, err = storage.GetLastSentCertificate(ctx) + require.NoError(t, err) + + require.Equal(t, certificate, certificateFromDB) + require.NoError(t, storage.clean()) + }) + + t.Run("GetCertificateByHeight", func(t *testing.T) { + // try getting height 0 + certificateFromDB, err := storage.GetCertificateByHeight(ctx, 0) + require.NoError(t, err) + require.Equal(t, types.CertificateInfo{}, certificateFromDB) + + // try getting a certificate that doesn't exist + certificateFromDB, err = storage.GetCertificateByHeight(ctx, 4) + require.ErrorIs(t, err, db.ErrNotFound) + require.Equal(t, types.CertificateInfo{}, certificateFromDB) + + // try getting a certificate that exists + certificate := types.CertificateInfo{ + Height: 11, + CertificateID: common.HexToHash("0x17"), + NewLocalExitRoot: common.HexToHash("0x18"), + FromBlock: 17, + ToBlock: 18, + Status: agglayer.Pending, + } + require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) + + certificateFromDB, err = storage.GetCertificateByHeight(ctx, certificate.Height) + require.NoError(t, err) + + require.Equal(t, certificate, certificateFromDB) + require.NoError(t, storage.clean()) + }) + + t.Run("GetCertificatesByStatus", func(t *testing.T) { + // Insert some certificates with different statuses + certificates := []*types.CertificateInfo{ + { + Height: 7, + CertificateID: common.HexToHash("0x7"), + NewLocalExitRoot: common.HexToHash("0x8"), + FromBlock: 7, + ToBlock: 8, + Status: agglayer.Settled, + }, + { + Height: 9, + CertificateID: common.HexToHash("0x9"), + NewLocalExitRoot: common.HexToHash("0xA"), + FromBlock: 9, + ToBlock: 10, + Status: agglayer.Pending, + }, + { + Height: 11, + CertificateID: common.HexToHash("0xB"), + NewLocalExitRoot: common.HexToHash("0xC"), + FromBlock: 11, + ToBlock: 12, + Status: agglayer.InError, + }, + } + + for _, cert := range certificates { + require.NoError(t, storage.SaveLastSentCertificate(ctx, *cert)) + } + + // Test fetching certificates with status Settled + statuses := []agglayer.CertificateStatus{agglayer.Settled} + certificatesFromDB, err := storage.GetCertificatesByStatus(ctx, statuses) + require.NoError(t, err) + require.Len(t, certificatesFromDB, 1) + require.ElementsMatch(t, 
[]*types.CertificateInfo{certificates[0]}, certificatesFromDB) + + // Test fetching certificates with status Pending + statuses = []agglayer.CertificateStatus{agglayer.Pending} + certificatesFromDB, err = storage.GetCertificatesByStatus(ctx, statuses) + require.NoError(t, err) + require.Len(t, certificatesFromDB, 1) + require.ElementsMatch(t, []*types.CertificateInfo{certificates[1]}, certificatesFromDB) + + // Test fetching certificates with status InError + statuses = []agglayer.CertificateStatus{agglayer.InError} + certificatesFromDB, err = storage.GetCertificatesByStatus(ctx, statuses) + require.NoError(t, err) + require.Len(t, certificatesFromDB, 1) + require.ElementsMatch(t, []*types.CertificateInfo{certificates[2]}, certificatesFromDB) + + // Test fetching certificates with status InError and Pending + statuses = []agglayer.CertificateStatus{agglayer.InError, agglayer.Pending} + certificatesFromDB, err = storage.GetCertificatesByStatus(ctx, statuses) + require.NoError(t, err) + require.Len(t, certificatesFromDB, 2) + require.ElementsMatch(t, []*types.CertificateInfo{certificates[1], certificates[2]}, certificatesFromDB) + + require.NoError(t, storage.clean()) + }) + + t.Run("UpdateCertificateStatus", func(t *testing.T) { + // Insert a certificate + certificate := types.CertificateInfo{ + Height: 13, + CertificateID: common.HexToHash("0xD"), + NewLocalExitRoot: common.HexToHash("0xE"), + FromBlock: 13, + ToBlock: 14, + Status: agglayer.Pending, + } + require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) + + // Update the status of the certificate + certificate.Status = agglayer.Settled + require.NoError(t, storage.UpdateCertificateStatus(ctx, certificate)) + + // Fetch the certificate and verify the status has been updated + certificateFromDB, err := storage.GetCertificateByHeight(ctx, certificate.Height) + require.NoError(t, err) + require.Equal(t, certificate.Status, certificateFromDB.Status) + + require.NoError(t, storage.clean()) + }) +} diff --git a/aggsender/db/migrations/0001.sql b/aggsender/db/migrations/0001.sql new file mode 100644 index 00000000..3ed7f997 --- /dev/null +++ b/aggsender/db/migrations/0001.sql @@ -0,0 +1,12 @@ +-- +migrate Down +DROP TABLE IF EXISTS certificate_info; + +-- +migrate Up +CREATE TABLE certificate_info ( + height INTEGER NOT NULL, + certificate_id VARCHAR NOT NULL PRIMARY KEY, + status INTEGER NOT NULL, + new_local_exit_root VARCHAR NOT NULL, + from_block INTEGER NOT NULL, + to_block INTEGER NOT NULL +); \ No newline at end of file diff --git a/aggsender/db/migrations/migrations.go b/aggsender/db/migrations/migrations.go new file mode 100644 index 00000000..31f16fd2 --- /dev/null +++ b/aggsender/db/migrations/migrations.go @@ -0,0 +1,22 @@ +package migrations + +import ( + _ "embed" + + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/db/types" +) + +//go:embed 0001.sql +var mig001 string + +func RunMigrations(dbPath string) error { + migrations := []types.Migration{ + { + ID: "0001", + SQL: mig001, + }, + } + + return db.RunMigrations(dbPath, migrations) +} diff --git a/aggsender/mocks/mock_aggsender_storage.go b/aggsender/mocks/mock_aggsender_storage.go new file mode 100644 index 00000000..a5f193fc --- /dev/null +++ b/aggsender/mocks/mock_aggsender_storage.go @@ -0,0 +1,354 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + agglayer "github.com/0xPolygon/cdk/agglayer" + common "github.com/ethereum/go-ethereum/common" + + context "context" + + mock "github.com/stretchr/testify/mock" + + types "github.com/0xPolygon/cdk/aggsender/types" +) + +// AggSenderStorageMock is an autogenerated mock type for the AggSenderStorage type +type AggSenderStorageMock struct { + mock.Mock +} + +type AggSenderStorageMock_Expecter struct { + mock *mock.Mock +} + +func (_m *AggSenderStorageMock) EXPECT() *AggSenderStorageMock_Expecter { + return &AggSenderStorageMock_Expecter{mock: &_m.Mock} +} + +// DeleteCertificate provides a mock function with given fields: ctx, certificateID +func (_m *AggSenderStorageMock) DeleteCertificate(ctx context.Context, certificateID common.Hash) error { + ret := _m.Called(ctx, certificateID) + + if len(ret) == 0 { + panic("no return value specified for DeleteCertificate") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) error); ok { + r0 = rf(ctx, certificateID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AggSenderStorageMock_DeleteCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteCertificate' +type AggSenderStorageMock_DeleteCertificate_Call struct { + *mock.Call +} + +// DeleteCertificate is a helper method to define mock.On call +// - ctx context.Context +// - certificateID common.Hash +func (_e *AggSenderStorageMock_Expecter) DeleteCertificate(ctx interface{}, certificateID interface{}) *AggSenderStorageMock_DeleteCertificate_Call { + return &AggSenderStorageMock_DeleteCertificate_Call{Call: _e.mock.On("DeleteCertificate", ctx, certificateID)} +} + +func (_c *AggSenderStorageMock_DeleteCertificate_Call) Run(run func(ctx context.Context, certificateID common.Hash)) *AggSenderStorageMock_DeleteCertificate_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *AggSenderStorageMock_DeleteCertificate_Call) Return(_a0 error) *AggSenderStorageMock_DeleteCertificate_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *AggSenderStorageMock_DeleteCertificate_Call) RunAndReturn(run func(context.Context, common.Hash) error) *AggSenderStorageMock_DeleteCertificate_Call { + _c.Call.Return(run) + return _c +} + +// GetCertificateByHeight provides a mock function with given fields: ctx, height +func (_m *AggSenderStorageMock) GetCertificateByHeight(ctx context.Context, height uint64) (types.CertificateInfo, error) { + ret := _m.Called(ctx, height) + + if len(ret) == 0 { + panic("no return value specified for GetCertificateByHeight") + } + + var r0 types.CertificateInfo + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (types.CertificateInfo, error)); ok { + return rf(ctx, height) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) types.CertificateInfo); ok { + r0 = rf(ctx, height) + } else { + r0 = ret.Get(0).(types.CertificateInfo) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AggSenderStorageMock_GetCertificateByHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCertificateByHeight' +type AggSenderStorageMock_GetCertificateByHeight_Call struct { + *mock.Call +} + +// GetCertificateByHeight is a helper method to define mock.On call +// - ctx context.Context +// - height uint64 +func (_e 
*AggSenderStorageMock_Expecter) GetCertificateByHeight(ctx interface{}, height interface{}) *AggSenderStorageMock_GetCertificateByHeight_Call { + return &AggSenderStorageMock_GetCertificateByHeight_Call{Call: _e.mock.On("GetCertificateByHeight", ctx, height)} +} + +func (_c *AggSenderStorageMock_GetCertificateByHeight_Call) Run(run func(ctx context.Context, height uint64)) *AggSenderStorageMock_GetCertificateByHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *AggSenderStorageMock_GetCertificateByHeight_Call) Return(_a0 types.CertificateInfo, _a1 error) *AggSenderStorageMock_GetCertificateByHeight_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AggSenderStorageMock_GetCertificateByHeight_Call) RunAndReturn(run func(context.Context, uint64) (types.CertificateInfo, error)) *AggSenderStorageMock_GetCertificateByHeight_Call { + _c.Call.Return(run) + return _c +} + +// GetCertificatesByStatus provides a mock function with given fields: ctx, status +func (_m *AggSenderStorageMock) GetCertificatesByStatus(ctx context.Context, status []agglayer.CertificateStatus) ([]*types.CertificateInfo, error) { + ret := _m.Called(ctx, status) + + if len(ret) == 0 { + panic("no return value specified for GetCertificatesByStatus") + } + + var r0 []*types.CertificateInfo + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []agglayer.CertificateStatus) ([]*types.CertificateInfo, error)); ok { + return rf(ctx, status) + } + if rf, ok := ret.Get(0).(func(context.Context, []agglayer.CertificateStatus) []*types.CertificateInfo); ok { + r0 = rf(ctx, status) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.CertificateInfo) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []agglayer.CertificateStatus) error); ok { + r1 = rf(ctx, status) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AggSenderStorageMock_GetCertificatesByStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCertificatesByStatus' +type AggSenderStorageMock_GetCertificatesByStatus_Call struct { + *mock.Call +} + +// GetCertificatesByStatus is a helper method to define mock.On call +// - ctx context.Context +// - status []agglayer.CertificateStatus +func (_e *AggSenderStorageMock_Expecter) GetCertificatesByStatus(ctx interface{}, status interface{}) *AggSenderStorageMock_GetCertificatesByStatus_Call { + return &AggSenderStorageMock_GetCertificatesByStatus_Call{Call: _e.mock.On("GetCertificatesByStatus", ctx, status)} +} + +func (_c *AggSenderStorageMock_GetCertificatesByStatus_Call) Run(run func(ctx context.Context, status []agglayer.CertificateStatus)) *AggSenderStorageMock_GetCertificatesByStatus_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]agglayer.CertificateStatus)) + }) + return _c +} + +func (_c *AggSenderStorageMock_GetCertificatesByStatus_Call) Return(_a0 []*types.CertificateInfo, _a1 error) *AggSenderStorageMock_GetCertificatesByStatus_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AggSenderStorageMock_GetCertificatesByStatus_Call) RunAndReturn(run func(context.Context, []agglayer.CertificateStatus) ([]*types.CertificateInfo, error)) *AggSenderStorageMock_GetCertificatesByStatus_Call { + _c.Call.Return(run) + return _c +} + +// GetLastSentCertificate provides a mock function with given fields: ctx +func (_m *AggSenderStorageMock) GetLastSentCertificate(ctx context.Context) 
(types.CertificateInfo, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLastSentCertificate") + } + + var r0 types.CertificateInfo + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (types.CertificateInfo, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) types.CertificateInfo); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(types.CertificateInfo) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AggSenderStorageMock_GetLastSentCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastSentCertificate' +type AggSenderStorageMock_GetLastSentCertificate_Call struct { + *mock.Call +} + +// GetLastSentCertificate is a helper method to define mock.On call +// - ctx context.Context +func (_e *AggSenderStorageMock_Expecter) GetLastSentCertificate(ctx interface{}) *AggSenderStorageMock_GetLastSentCertificate_Call { + return &AggSenderStorageMock_GetLastSentCertificate_Call{Call: _e.mock.On("GetLastSentCertificate", ctx)} +} + +func (_c *AggSenderStorageMock_GetLastSentCertificate_Call) Run(run func(ctx context.Context)) *AggSenderStorageMock_GetLastSentCertificate_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *AggSenderStorageMock_GetLastSentCertificate_Call) Return(_a0 types.CertificateInfo, _a1 error) *AggSenderStorageMock_GetLastSentCertificate_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AggSenderStorageMock_GetLastSentCertificate_Call) RunAndReturn(run func(context.Context) (types.CertificateInfo, error)) *AggSenderStorageMock_GetLastSentCertificate_Call { + _c.Call.Return(run) + return _c +} + +// SaveLastSentCertificate provides a mock function with given fields: ctx, certificate +func (_m *AggSenderStorageMock) SaveLastSentCertificate(ctx context.Context, certificate types.CertificateInfo) error { + ret := _m.Called(ctx, certificate) + + if len(ret) == 0 { + panic("no return value specified for SaveLastSentCertificate") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.CertificateInfo) error); ok { + r0 = rf(ctx, certificate) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AggSenderStorageMock_SaveLastSentCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveLastSentCertificate' +type AggSenderStorageMock_SaveLastSentCertificate_Call struct { + *mock.Call +} + +// SaveLastSentCertificate is a helper method to define mock.On call +// - ctx context.Context +// - certificate types.CertificateInfo +func (_e *AggSenderStorageMock_Expecter) SaveLastSentCertificate(ctx interface{}, certificate interface{}) *AggSenderStorageMock_SaveLastSentCertificate_Call { + return &AggSenderStorageMock_SaveLastSentCertificate_Call{Call: _e.mock.On("SaveLastSentCertificate", ctx, certificate)} +} + +func (_c *AggSenderStorageMock_SaveLastSentCertificate_Call) Run(run func(ctx context.Context, certificate types.CertificateInfo)) *AggSenderStorageMock_SaveLastSentCertificate_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.CertificateInfo)) + }) + return _c +} + +func (_c *AggSenderStorageMock_SaveLastSentCertificate_Call) Return(_a0 error) *AggSenderStorageMock_SaveLastSentCertificate_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c 
*AggSenderStorageMock_SaveLastSentCertificate_Call) RunAndReturn(run func(context.Context, types.CertificateInfo) error) *AggSenderStorageMock_SaveLastSentCertificate_Call { + _c.Call.Return(run) + return _c +} + +// UpdateCertificateStatus provides a mock function with given fields: ctx, certificate +func (_m *AggSenderStorageMock) UpdateCertificateStatus(ctx context.Context, certificate types.CertificateInfo) error { + ret := _m.Called(ctx, certificate) + + if len(ret) == 0 { + panic("no return value specified for UpdateCertificateStatus") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.CertificateInfo) error); ok { + r0 = rf(ctx, certificate) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AggSenderStorageMock_UpdateCertificateStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateCertificateStatus' +type AggSenderStorageMock_UpdateCertificateStatus_Call struct { + *mock.Call +} + +// UpdateCertificateStatus is a helper method to define mock.On call +// - ctx context.Context +// - certificate types.CertificateInfo +func (_e *AggSenderStorageMock_Expecter) UpdateCertificateStatus(ctx interface{}, certificate interface{}) *AggSenderStorageMock_UpdateCertificateStatus_Call { + return &AggSenderStorageMock_UpdateCertificateStatus_Call{Call: _e.mock.On("UpdateCertificateStatus", ctx, certificate)} +} + +func (_c *AggSenderStorageMock_UpdateCertificateStatus_Call) Run(run func(ctx context.Context, certificate types.CertificateInfo)) *AggSenderStorageMock_UpdateCertificateStatus_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.CertificateInfo)) + }) + return _c +} + +func (_c *AggSenderStorageMock_UpdateCertificateStatus_Call) Return(_a0 error) *AggSenderStorageMock_UpdateCertificateStatus_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *AggSenderStorageMock_UpdateCertificateStatus_Call) RunAndReturn(run func(context.Context, types.CertificateInfo) error) *AggSenderStorageMock_UpdateCertificateStatus_Call { + _c.Call.Return(run) + return _c +} + +// NewAggSenderStorageMock creates a new instance of AggSenderStorageMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAggSenderStorageMock(t interface { + mock.TestingT + Cleanup(func()) +}) *AggSenderStorageMock { + mock := &AggSenderStorageMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggsender/mocks/mock_eth_client.go b/aggsender/mocks/mock_eth_client.go new file mode 100644 index 00000000..ebf618bf --- /dev/null +++ b/aggsender/mocks/mock_eth_client.go @@ -0,0 +1,154 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + big "math/big" + + coretypes "github.com/ethereum/go-ethereum/core/types" + + mock "github.com/stretchr/testify/mock" +) + +// EthClientMock is an autogenerated mock type for the EthClient type +type EthClientMock struct { + mock.Mock +} + +type EthClientMock_Expecter struct { + mock *mock.Mock +} + +func (_m *EthClientMock) EXPECT() *EthClientMock_Expecter { + return &EthClientMock_Expecter{mock: &_m.Mock} +} + +// BlockNumber provides a mock function with given fields: ctx +func (_m *EthClientMock) BlockNumber(ctx context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for BlockNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClientMock_BlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockNumber' +type EthClientMock_BlockNumber_Call struct { + *mock.Call +} + +// BlockNumber is a helper method to define mock.On call +// - ctx context.Context +func (_e *EthClientMock_Expecter) BlockNumber(ctx interface{}) *EthClientMock_BlockNumber_Call { + return &EthClientMock_BlockNumber_Call{Call: _e.mock.On("BlockNumber", ctx)} +} + +func (_c *EthClientMock_BlockNumber_Call) Run(run func(ctx context.Context)) *EthClientMock_BlockNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *EthClientMock_BlockNumber_Call) Return(_a0 uint64, _a1 error) *EthClientMock_BlockNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClientMock_BlockNumber_Call) RunAndReturn(run func(context.Context) (uint64, error)) *EthClientMock_BlockNumber_Call { + _c.Call.Return(run) + return _c +} + +// HeaderByNumber provides a mock function with given fields: ctx, number +func (_m *EthClientMock) HeaderByNumber(ctx context.Context, number *big.Int) (*coretypes.Header, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for HeaderByNumber") + } + + var r0 *coretypes.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*coretypes.Header, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *coretypes.Header); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClientMock_HeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByNumber' +type EthClientMock_HeaderByNumber_Call struct { + *mock.Call +} + +// HeaderByNumber is a helper method to define mock.On call +// - ctx context.Context +// - number *big.Int +func (_e *EthClientMock_Expecter) HeaderByNumber(ctx interface{}, number interface{}) *EthClientMock_HeaderByNumber_Call { + return &EthClientMock_HeaderByNumber_Call{Call: _e.mock.On("HeaderByNumber", ctx, number)} +} + +func (_c *EthClientMock_HeaderByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) 
*EthClientMock_HeaderByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*big.Int)) + }) + return _c +} + +func (_c *EthClientMock_HeaderByNumber_Call) Return(_a0 *coretypes.Header, _a1 error) *EthClientMock_HeaderByNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClientMock_HeaderByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*coretypes.Header, error)) *EthClientMock_HeaderByNumber_Call { + _c.Call.Return(run) + return _c +} + +// NewEthClientMock creates a new instance of EthClientMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEthClientMock(t interface { + mock.TestingT + Cleanup(func()) +}) *EthClientMock { + mock := &EthClientMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggsender/mocks/mock_l1infotree_syncer.go b/aggsender/mocks/mock_l1infotree_syncer.go new file mode 100644 index 00000000..e113d4ed --- /dev/null +++ b/aggsender/mocks/mock_l1infotree_syncer.go @@ -0,0 +1,217 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync" + + mock "github.com/stretchr/testify/mock" + + treetypes "github.com/0xPolygon/cdk/tree/types" +) + +// L1InfoTreeSyncerMock is an autogenerated mock type for the L1InfoTreeSyncer type +type L1InfoTreeSyncerMock struct { + mock.Mock +} + +type L1InfoTreeSyncerMock_Expecter struct { + mock *mock.Mock +} + +func (_m *L1InfoTreeSyncerMock) EXPECT() *L1InfoTreeSyncerMock_Expecter { + return &L1InfoTreeSyncerMock_Expecter{mock: &_m.Mock} +} + +// GetInfoByGlobalExitRoot provides a mock function with given fields: globalExitRoot +func (_m *L1InfoTreeSyncerMock) GetInfoByGlobalExitRoot(globalExitRoot common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called(globalExitRoot) + + if len(ret) == 0 { + panic("no return value specified for GetInfoByGlobalExitRoot") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf(globalExitRoot) + } + if rf, ok := ret.Get(0).(func(common.Hash) *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf(globalExitRoot) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(common.Hash) error); ok { + r1 = rf(globalExitRoot) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetInfoByGlobalExitRoot' +type L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call struct { + *mock.Call +} + +// GetInfoByGlobalExitRoot is a helper method to define mock.On call +// - globalExitRoot common.Hash +func (_e *L1InfoTreeSyncerMock_Expecter) GetInfoByGlobalExitRoot(globalExitRoot interface{}) *L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call { + return &L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call{Call: _e.mock.On("GetInfoByGlobalExitRoot", globalExitRoot)} +} + +func (_c *L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call) Run(run func(globalExitRoot common.Hash)) *L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(common.Hash)) 
+ }) + return _c +} + +func (_c *L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call) RunAndReturn(run func(common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call { + _c.Call.Return(run) + return _c +} + +// GetL1InfoTreeMerkleProofFromIndexToRoot provides a mock function with given fields: ctx, index, root +func (_m *L1InfoTreeSyncerMock) GetL1InfoTreeMerkleProofFromIndexToRoot(ctx context.Context, index uint32, root common.Hash) (treetypes.Proof, error) { + ret := _m.Called(ctx, index, root) + + if len(ret) == 0 { + panic("no return value specified for GetL1InfoTreeMerkleProofFromIndexToRoot") + } + + var r0 treetypes.Proof + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) (treetypes.Proof, error)); ok { + return rf(ctx, index, root) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) treetypes.Proof); ok { + r0 = rf(ctx, index, root) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(treetypes.Proof) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32, common.Hash) error); ok { + r1 = rf(ctx, index, root) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetL1InfoTreeMerkleProofFromIndexToRoot' +type L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call struct { + *mock.Call +} + +// GetL1InfoTreeMerkleProofFromIndexToRoot is a helper method to define mock.On call +// - ctx context.Context +// - index uint32 +// - root common.Hash +func (_e *L1InfoTreeSyncerMock_Expecter) GetL1InfoTreeMerkleProofFromIndexToRoot(ctx interface{}, index interface{}, root interface{}) *L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { + return &L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call{Call: _e.mock.On("GetL1InfoTreeMerkleProofFromIndexToRoot", ctx, index, root)} +} + +func (_c *L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call) Run(run func(ctx context.Context, index uint32, root common.Hash)) *L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32), args[2].(common.Hash)) + }) + return _c +} + +func (_c *L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call) Return(_a0 treetypes.Proof, _a1 error) *L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call) RunAndReturn(run func(context.Context, uint32, common.Hash) (treetypes.Proof, error)) *L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { + _c.Call.Return(run) + return _c +} + +// GetL1InfoTreeRootByIndex provides a mock function with given fields: ctx, index +func (_m *L1InfoTreeSyncerMock) GetL1InfoTreeRootByIndex(ctx context.Context, index uint32) (treetypes.Root, error) { + ret := _m.Called(ctx, index) + + if len(ret) == 0 { + panic("no return value specified for GetL1InfoTreeRootByIndex") + } + + var r0 treetypes.Root + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32) (treetypes.Root, error)); ok { + return 
rf(ctx, index) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32) treetypes.Root); ok { + r0 = rf(ctx, index) + } else { + r0 = ret.Get(0).(treetypes.Root) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32) error); ok { + r1 = rf(ctx, index) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetL1InfoTreeRootByIndex' +type L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call struct { + *mock.Call +} + +// GetL1InfoTreeRootByIndex is a helper method to define mock.On call +// - ctx context.Context +// - index uint32 +func (_e *L1InfoTreeSyncerMock_Expecter) GetL1InfoTreeRootByIndex(ctx interface{}, index interface{}) *L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call { + return &L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call{Call: _e.mock.On("GetL1InfoTreeRootByIndex", ctx, index)} +} + +func (_c *L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call) Run(run func(ctx context.Context, index uint32)) *L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32)) + }) + return _c +} + +func (_c *L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call) Return(_a0 treetypes.Root, _a1 error) *L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call) RunAndReturn(run func(context.Context, uint32) (treetypes.Root, error)) *L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call { + _c.Call.Return(run) + return _c +} + +// NewL1InfoTreeSyncerMock creates a new instance of L1InfoTreeSyncerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewL1InfoTreeSyncerMock(t interface { + mock.TestingT + Cleanup(func()) +}) *L1InfoTreeSyncerMock { + mock := &L1InfoTreeSyncerMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggsender/mocks/mock_l2bridge_syncer.go b/aggsender/mocks/mock_l2bridge_syncer.go new file mode 100644 index 00000000..725184c3 --- /dev/null +++ b/aggsender/mocks/mock_l2bridge_syncer.go @@ -0,0 +1,423 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + bridgesync "github.com/0xPolygon/cdk/bridgesync" + common "github.com/ethereum/go-ethereum/common" + + context "context" + + etherman "github.com/0xPolygon/cdk/etherman" + + mock "github.com/stretchr/testify/mock" + + treetypes "github.com/0xPolygon/cdk/tree/types" +) + +// L2BridgeSyncerMock is an autogenerated mock type for the L2BridgeSyncer type +type L2BridgeSyncerMock struct { + mock.Mock +} + +type L2BridgeSyncerMock_Expecter struct { + mock *mock.Mock +} + +func (_m *L2BridgeSyncerMock) EXPECT() *L2BridgeSyncerMock_Expecter { + return &L2BridgeSyncerMock_Expecter{mock: &_m.Mock} +} + +// BlockFinality provides a mock function with given fields: +func (_m *L2BridgeSyncerMock) BlockFinality() etherman.BlockNumberFinality { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BlockFinality") + } + + var r0 etherman.BlockNumberFinality + if rf, ok := ret.Get(0).(func() etherman.BlockNumberFinality); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(etherman.BlockNumberFinality) + } + + return r0 +} + +// L2BridgeSyncerMock_BlockFinality_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockFinality' +type L2BridgeSyncerMock_BlockFinality_Call struct { + *mock.Call +} + +// BlockFinality is a helper method to define mock.On call +func (_e *L2BridgeSyncerMock_Expecter) BlockFinality() *L2BridgeSyncerMock_BlockFinality_Call { + return &L2BridgeSyncerMock_BlockFinality_Call{Call: _e.mock.On("BlockFinality")} +} + +func (_c *L2BridgeSyncerMock_BlockFinality_Call) Run(run func()) *L2BridgeSyncerMock_BlockFinality_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *L2BridgeSyncerMock_BlockFinality_Call) Return(_a0 etherman.BlockNumberFinality) *L2BridgeSyncerMock_BlockFinality_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *L2BridgeSyncerMock_BlockFinality_Call) RunAndReturn(run func() etherman.BlockNumberFinality) *L2BridgeSyncerMock_BlockFinality_Call { + _c.Call.Return(run) + return _c +} + +// GetBlockByLER provides a mock function with given fields: ctx, ler +func (_m *L2BridgeSyncerMock) GetBlockByLER(ctx context.Context, ler common.Hash) (uint64, error) { + ret := _m.Called(ctx, ler) + + if len(ret) == 0 { + panic("no return value specified for GetBlockByLER") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (uint64, error)); ok { + return rf(ctx, ler) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) uint64); ok { + r0 = rf(ctx, ler) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, ler) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L2BridgeSyncerMock_GetBlockByLER_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockByLER' +type L2BridgeSyncerMock_GetBlockByLER_Call struct { + *mock.Call +} + +// GetBlockByLER is a helper method to define mock.On call +// - ctx context.Context +// - ler common.Hash +func (_e *L2BridgeSyncerMock_Expecter) GetBlockByLER(ctx interface{}, ler interface{}) *L2BridgeSyncerMock_GetBlockByLER_Call { + return &L2BridgeSyncerMock_GetBlockByLER_Call{Call: _e.mock.On("GetBlockByLER", ctx, ler)} +} + +func (_c *L2BridgeSyncerMock_GetBlockByLER_Call) Run(run func(ctx context.Context, ler common.Hash)) *L2BridgeSyncerMock_GetBlockByLER_Call { + _c.Call.Run(func(args mock.Arguments) { + 
run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *L2BridgeSyncerMock_GetBlockByLER_Call) Return(_a0 uint64, _a1 error) *L2BridgeSyncerMock_GetBlockByLER_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2BridgeSyncerMock_GetBlockByLER_Call) RunAndReturn(run func(context.Context, common.Hash) (uint64, error)) *L2BridgeSyncerMock_GetBlockByLER_Call { + _c.Call.Return(run) + return _c +} + +// GetBridgesPublished provides a mock function with given fields: ctx, fromBlock, toBlock +func (_m *L2BridgeSyncerMock) GetBridgesPublished(ctx context.Context, fromBlock uint64, toBlock uint64) ([]bridgesync.Bridge, error) { + ret := _m.Called(ctx, fromBlock, toBlock) + + if len(ret) == 0 { + panic("no return value specified for GetBridgesPublished") + } + + var r0 []bridgesync.Bridge + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) ([]bridgesync.Bridge, error)); ok { + return rf(ctx, fromBlock, toBlock) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) []bridgesync.Bridge); ok { + r0 = rf(ctx, fromBlock, toBlock) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]bridgesync.Bridge) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64) error); ok { + r1 = rf(ctx, fromBlock, toBlock) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L2BridgeSyncerMock_GetBridgesPublished_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBridgesPublished' +type L2BridgeSyncerMock_GetBridgesPublished_Call struct { + *mock.Call +} + +// GetBridgesPublished is a helper method to define mock.On call +// - ctx context.Context +// - fromBlock uint64 +// - toBlock uint64 +func (_e *L2BridgeSyncerMock_Expecter) GetBridgesPublished(ctx interface{}, fromBlock interface{}, toBlock interface{}) *L2BridgeSyncerMock_GetBridgesPublished_Call { + return &L2BridgeSyncerMock_GetBridgesPublished_Call{Call: _e.mock.On("GetBridgesPublished", ctx, fromBlock, toBlock)} +} + +func (_c *L2BridgeSyncerMock_GetBridgesPublished_Call) Run(run func(ctx context.Context, fromBlock uint64, toBlock uint64)) *L2BridgeSyncerMock_GetBridgesPublished_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(uint64)) + }) + return _c +} + +func (_c *L2BridgeSyncerMock_GetBridgesPublished_Call) Return(_a0 []bridgesync.Bridge, _a1 error) *L2BridgeSyncerMock_GetBridgesPublished_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2BridgeSyncerMock_GetBridgesPublished_Call) RunAndReturn(run func(context.Context, uint64, uint64) ([]bridgesync.Bridge, error)) *L2BridgeSyncerMock_GetBridgesPublished_Call { + _c.Call.Return(run) + return _c +} + +// GetClaims provides a mock function with given fields: ctx, fromBlock, toBlock +func (_m *L2BridgeSyncerMock) GetClaims(ctx context.Context, fromBlock uint64, toBlock uint64) ([]bridgesync.Claim, error) { + ret := _m.Called(ctx, fromBlock, toBlock) + + if len(ret) == 0 { + panic("no return value specified for GetClaims") + } + + var r0 []bridgesync.Claim + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) ([]bridgesync.Claim, error)); ok { + return rf(ctx, fromBlock, toBlock) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) []bridgesync.Claim); ok { + r0 = rf(ctx, fromBlock, toBlock) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]bridgesync.Claim) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64) 
error); ok { + r1 = rf(ctx, fromBlock, toBlock) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L2BridgeSyncerMock_GetClaims_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaims' +type L2BridgeSyncerMock_GetClaims_Call struct { + *mock.Call +} + +// GetClaims is a helper method to define mock.On call +// - ctx context.Context +// - fromBlock uint64 +// - toBlock uint64 +func (_e *L2BridgeSyncerMock_Expecter) GetClaims(ctx interface{}, fromBlock interface{}, toBlock interface{}) *L2BridgeSyncerMock_GetClaims_Call { + return &L2BridgeSyncerMock_GetClaims_Call{Call: _e.mock.On("GetClaims", ctx, fromBlock, toBlock)} +} + +func (_c *L2BridgeSyncerMock_GetClaims_Call) Run(run func(ctx context.Context, fromBlock uint64, toBlock uint64)) *L2BridgeSyncerMock_GetClaims_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(uint64)) + }) + return _c +} + +func (_c *L2BridgeSyncerMock_GetClaims_Call) Return(_a0 []bridgesync.Claim, _a1 error) *L2BridgeSyncerMock_GetClaims_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2BridgeSyncerMock_GetClaims_Call) RunAndReturn(run func(context.Context, uint64, uint64) ([]bridgesync.Claim, error)) *L2BridgeSyncerMock_GetClaims_Call { + _c.Call.Return(run) + return _c +} + +// GetExitRootByIndex provides a mock function with given fields: ctx, index +func (_m *L2BridgeSyncerMock) GetExitRootByIndex(ctx context.Context, index uint32) (treetypes.Root, error) { + ret := _m.Called(ctx, index) + + if len(ret) == 0 { + panic("no return value specified for GetExitRootByIndex") + } + + var r0 treetypes.Root + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32) (treetypes.Root, error)); ok { + return rf(ctx, index) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32) treetypes.Root); ok { + r0 = rf(ctx, index) + } else { + r0 = ret.Get(0).(treetypes.Root) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32) error); ok { + r1 = rf(ctx, index) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L2BridgeSyncerMock_GetExitRootByIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetExitRootByIndex' +type L2BridgeSyncerMock_GetExitRootByIndex_Call struct { + *mock.Call +} + +// GetExitRootByIndex is a helper method to define mock.On call +// - ctx context.Context +// - index uint32 +func (_e *L2BridgeSyncerMock_Expecter) GetExitRootByIndex(ctx interface{}, index interface{}) *L2BridgeSyncerMock_GetExitRootByIndex_Call { + return &L2BridgeSyncerMock_GetExitRootByIndex_Call{Call: _e.mock.On("GetExitRootByIndex", ctx, index)} +} + +func (_c *L2BridgeSyncerMock_GetExitRootByIndex_Call) Run(run func(ctx context.Context, index uint32)) *L2BridgeSyncerMock_GetExitRootByIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32)) + }) + return _c +} + +func (_c *L2BridgeSyncerMock_GetExitRootByIndex_Call) Return(_a0 treetypes.Root, _a1 error) *L2BridgeSyncerMock_GetExitRootByIndex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2BridgeSyncerMock_GetExitRootByIndex_Call) RunAndReturn(run func(context.Context, uint32) (treetypes.Root, error)) *L2BridgeSyncerMock_GetExitRootByIndex_Call { + _c.Call.Return(run) + return _c +} + +// GetLastProcessedBlock provides a mock function with given fields: ctx +func (_m *L2BridgeSyncerMock) GetLastProcessedBlock(ctx context.Context) (uint64, error) { + ret := 
_m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLastProcessedBlock") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L2BridgeSyncerMock_GetLastProcessedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastProcessedBlock' +type L2BridgeSyncerMock_GetLastProcessedBlock_Call struct { + *mock.Call +} + +// GetLastProcessedBlock is a helper method to define mock.On call +// - ctx context.Context +func (_e *L2BridgeSyncerMock_Expecter) GetLastProcessedBlock(ctx interface{}) *L2BridgeSyncerMock_GetLastProcessedBlock_Call { + return &L2BridgeSyncerMock_GetLastProcessedBlock_Call{Call: _e.mock.On("GetLastProcessedBlock", ctx)} +} + +func (_c *L2BridgeSyncerMock_GetLastProcessedBlock_Call) Run(run func(ctx context.Context)) *L2BridgeSyncerMock_GetLastProcessedBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *L2BridgeSyncerMock_GetLastProcessedBlock_Call) Return(_a0 uint64, _a1 error) *L2BridgeSyncerMock_GetLastProcessedBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2BridgeSyncerMock_GetLastProcessedBlock_Call) RunAndReturn(run func(context.Context) (uint64, error)) *L2BridgeSyncerMock_GetLastProcessedBlock_Call { + _c.Call.Return(run) + return _c +} + +// OriginNetwork provides a mock function with given fields: +func (_m *L2BridgeSyncerMock) OriginNetwork() uint32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for OriginNetwork") + } + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// L2BridgeSyncerMock_OriginNetwork_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OriginNetwork' +type L2BridgeSyncerMock_OriginNetwork_Call struct { + *mock.Call +} + +// OriginNetwork is a helper method to define mock.On call +func (_e *L2BridgeSyncerMock_Expecter) OriginNetwork() *L2BridgeSyncerMock_OriginNetwork_Call { + return &L2BridgeSyncerMock_OriginNetwork_Call{Call: _e.mock.On("OriginNetwork")} +} + +func (_c *L2BridgeSyncerMock_OriginNetwork_Call) Run(run func()) *L2BridgeSyncerMock_OriginNetwork_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *L2BridgeSyncerMock_OriginNetwork_Call) Return(_a0 uint32) *L2BridgeSyncerMock_OriginNetwork_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *L2BridgeSyncerMock_OriginNetwork_Call) RunAndReturn(run func() uint32) *L2BridgeSyncerMock_OriginNetwork_Call { + _c.Call.Return(run) + return _c +} + +// NewL2BridgeSyncerMock creates a new instance of L2BridgeSyncerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewL2BridgeSyncerMock(t interface { + mock.TestingT + Cleanup(func()) +}) *L2BridgeSyncerMock { + mock := &L2BridgeSyncerMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggsender/mocks/mock_logger.go b/aggsender/mocks/mock_logger.go new file mode 100644 index 00000000..5b0eb4e9 --- /dev/null +++ b/aggsender/mocks/mock_logger.go @@ -0,0 +1,290 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// LoggerMock is an autogenerated mock type for the Logger type +type LoggerMock struct { + mock.Mock +} + +type LoggerMock_Expecter struct { + mock *mock.Mock +} + +func (_m *LoggerMock) EXPECT() *LoggerMock_Expecter { + return &LoggerMock_Expecter{mock: &_m.Mock} +} + +// Debug provides a mock function with given fields: args +func (_m *LoggerMock) Debug(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// LoggerMock_Debug_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Debug' +type LoggerMock_Debug_Call struct { + *mock.Call +} + +// Debug is a helper method to define mock.On call +// - args ...interface{} +func (_e *LoggerMock_Expecter) Debug(args ...interface{}) *LoggerMock_Debug_Call { + return &LoggerMock_Debug_Call{Call: _e.mock.On("Debug", + append([]interface{}{}, args...)...)} +} + +func (_c *LoggerMock_Debug_Call) Run(run func(args ...interface{})) *LoggerMock_Debug_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-0) + for i, a := range args[0:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(variadicArgs...) + }) + return _c +} + +func (_c *LoggerMock_Debug_Call) Return() *LoggerMock_Debug_Call { + _c.Call.Return() + return _c +} + +func (_c *LoggerMock_Debug_Call) RunAndReturn(run func(...interface{})) *LoggerMock_Debug_Call { + _c.Call.Return(run) + return _c +} + +// Debugf provides a mock function with given fields: format, args +func (_m *LoggerMock) Debugf(format string, args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// LoggerMock_Debugf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Debugf' +type LoggerMock_Debugf_Call struct { + *mock.Call +} + +// Debugf is a helper method to define mock.On call +// - format string +// - args ...interface{} +func (_e *LoggerMock_Expecter) Debugf(format interface{}, args ...interface{}) *LoggerMock_Debugf_Call { + return &LoggerMock_Debugf_Call{Call: _e.mock.On("Debugf", + append([]interface{}{format}, args...)...)} +} + +func (_c *LoggerMock_Debugf_Call) Run(run func(format string, args ...interface{})) *LoggerMock_Debugf_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(args[0].(string), variadicArgs...) + }) + return _c +} + +func (_c *LoggerMock_Debugf_Call) Return() *LoggerMock_Debugf_Call { + _c.Call.Return() + return _c +} + +func (_c *LoggerMock_Debugf_Call) RunAndReturn(run func(string, ...interface{})) *LoggerMock_Debugf_Call { + _c.Call.Return(run) + return _c +} + +// Error provides a mock function with given fields: args +func (_m *LoggerMock) Error(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) 
+} + +// LoggerMock_Error_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Error' +type LoggerMock_Error_Call struct { + *mock.Call +} + +// Error is a helper method to define mock.On call +// - args ...interface{} +func (_e *LoggerMock_Expecter) Error(args ...interface{}) *LoggerMock_Error_Call { + return &LoggerMock_Error_Call{Call: _e.mock.On("Error", + append([]interface{}{}, args...)...)} +} + +func (_c *LoggerMock_Error_Call) Run(run func(args ...interface{})) *LoggerMock_Error_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-0) + for i, a := range args[0:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(variadicArgs...) + }) + return _c +} + +func (_c *LoggerMock_Error_Call) Return() *LoggerMock_Error_Call { + _c.Call.Return() + return _c +} + +func (_c *LoggerMock_Error_Call) RunAndReturn(run func(...interface{})) *LoggerMock_Error_Call { + _c.Call.Return(run) + return _c +} + +// Errorf provides a mock function with given fields: format, args +func (_m *LoggerMock) Errorf(format string, args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// LoggerMock_Errorf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Errorf' +type LoggerMock_Errorf_Call struct { + *mock.Call +} + +// Errorf is a helper method to define mock.On call +// - format string +// - args ...interface{} +func (_e *LoggerMock_Expecter) Errorf(format interface{}, args ...interface{}) *LoggerMock_Errorf_Call { + return &LoggerMock_Errorf_Call{Call: _e.mock.On("Errorf", + append([]interface{}{format}, args...)...)} +} + +func (_c *LoggerMock_Errorf_Call) Run(run func(format string, args ...interface{})) *LoggerMock_Errorf_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(args[0].(string), variadicArgs...) + }) + return _c +} + +func (_c *LoggerMock_Errorf_Call) Return() *LoggerMock_Errorf_Call { + _c.Call.Return() + return _c +} + +func (_c *LoggerMock_Errorf_Call) RunAndReturn(run func(string, ...interface{})) *LoggerMock_Errorf_Call { + _c.Call.Return(run) + return _c +} + +// Info provides a mock function with given fields: args +func (_m *LoggerMock) Info(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// LoggerMock_Info_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Info' +type LoggerMock_Info_Call struct { + *mock.Call +} + +// Info is a helper method to define mock.On call +// - args ...interface{} +func (_e *LoggerMock_Expecter) Info(args ...interface{}) *LoggerMock_Info_Call { + return &LoggerMock_Info_Call{Call: _e.mock.On("Info", + append([]interface{}{}, args...)...)} +} + +func (_c *LoggerMock_Info_Call) Run(run func(args ...interface{})) *LoggerMock_Info_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-0) + for i, a := range args[0:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(variadicArgs...) 
+ }) + return _c +} + +func (_c *LoggerMock_Info_Call) Return() *LoggerMock_Info_Call { + _c.Call.Return() + return _c +} + +func (_c *LoggerMock_Info_Call) RunAndReturn(run func(...interface{})) *LoggerMock_Info_Call { + _c.Call.Return(run) + return _c +} + +// Infof provides a mock function with given fields: format, args +func (_m *LoggerMock) Infof(format string, args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// LoggerMock_Infof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Infof' +type LoggerMock_Infof_Call struct { + *mock.Call +} + +// Infof is a helper method to define mock.On call +// - format string +// - args ...interface{} +func (_e *LoggerMock_Expecter) Infof(format interface{}, args ...interface{}) *LoggerMock_Infof_Call { + return &LoggerMock_Infof_Call{Call: _e.mock.On("Infof", + append([]interface{}{format}, args...)...)} +} + +func (_c *LoggerMock_Infof_Call) Run(run func(format string, args ...interface{})) *LoggerMock_Infof_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(args[0].(string), variadicArgs...) + }) + return _c +} + +func (_c *LoggerMock_Infof_Call) Return() *LoggerMock_Infof_Call { + _c.Call.Return() + return _c +} + +func (_c *LoggerMock_Infof_Call) RunAndReturn(run func(string, ...interface{})) *LoggerMock_Infof_Call { + _c.Call.Return(run) + return _c +} + +// NewLoggerMock creates a new instance of LoggerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewLoggerMock(t interface { + mock.TestingT + Cleanup(func()) +}) *LoggerMock { + mock := &LoggerMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggsender/types/types.go b/aggsender/types/types.go new file mode 100644 index 00000000..d6421132 --- /dev/null +++ b/aggsender/types/types.go @@ -0,0 +1,65 @@ +package types + +import ( + "context" + "fmt" + "math/big" + + "github.com/0xPolygon/cdk/agglayer" + "github.com/0xPolygon/cdk/bridgesync" + "github.com/0xPolygon/cdk/etherman" + "github.com/0xPolygon/cdk/l1infotreesync" + treeTypes "github.com/0xPolygon/cdk/tree/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +// L1InfoTreeSyncer is an interface defining functions that an L1InfoTreeSyncer should implement +type L1InfoTreeSyncer interface { + GetInfoByGlobalExitRoot(globalExitRoot common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error) + GetL1InfoTreeMerkleProofFromIndexToRoot( + ctx context.Context, index uint32, root common.Hash, + ) (treeTypes.Proof, error) + GetL1InfoTreeRootByIndex(ctx context.Context, index uint32) (treeTypes.Root, error) +} + +// L2BridgeSyncer is an interface defining functions that an L2BridgeSyncer should implement +type L2BridgeSyncer interface { + GetBlockByLER(ctx context.Context, ler common.Hash) (uint64, error) + GetExitRootByIndex(ctx context.Context, index uint32) (treeTypes.Root, error) + GetBridgesPublished(ctx context.Context, fromBlock, toBlock uint64) ([]bridgesync.Bridge, error) + GetClaims(ctx context.Context, fromBlock, toBlock uint64) ([]bridgesync.Claim, error) + OriginNetwork() uint32 + BlockFinality() etherman.BlockNumberFinality + GetLastProcessedBlock(ctx context.Context) (uint64, error) +} + +// EthClient is an interface defining functions that an EthClient should implement +type EthClient interface { + BlockNumber(ctx context.Context) (uint64, error) + HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) +} + +// Logger is an interface that defines the methods to log messages +type Logger interface { + Info(args ...interface{}) + Infof(format string, args ...interface{}) + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Debug(args ...interface{}) + Debugf(format string, args ...interface{}) +} + +type CertificateInfo struct { + Height uint64 `meddler:"height"` + CertificateID common.Hash `meddler:"certificate_id"` + NewLocalExitRoot common.Hash `meddler:"new_local_exit_root"` + FromBlock uint64 `meddler:"from_block"` + ToBlock uint64 `meddler:"to_block"` + Status agglayer.CertificateStatus `meddler:"status"` +} + +func (c CertificateInfo) String() string { + return fmt.Sprintf("Height: %d, CertificateID: %s, FromBlock: %d, ToBlock: %d, NewLocalExitRoot: %s", + c.Height, c.CertificateID.String(), c.FromBlock, c.ToBlock, c.NewLocalExitRoot.String()) +} diff --git a/bridgesync/bridgesync.go b/bridgesync/bridgesync.go index e6a61c5e..b3c3c853 100644 --- a/bridgesync/bridgesync.go +++ b/bridgesync/bridgesync.go @@ -16,10 +16,17 @@ const ( downloadBufferSize = 1000 ) +type ReorgDetector interface { + sync.ReorgDetector +} + // BridgeSync manages the state of the exit tree for the bridge contract by processing Ethereum blockchain events. 
type BridgeSync struct { processor *processor driver *sync.EVMDriver + + originNetwork uint32 + blockFinality etherman.BlockNumberFinality } // NewL1 creates a bridge syncer that synchronizes the mainnet exit tree @@ -29,12 +36,13 @@ func NewL1( bridge common.Address, syncBlockChunkSize uint64, blockFinalityType etherman.BlockNumberFinality, - rd sync.ReorgDetector, + rd ReorgDetector, ethClient EthClienter, initialBlock uint64, waitForNewBlocksPeriod time.Duration, retryAfterErrorPeriod time.Duration, maxRetryAttemptsAfterError int, + originNetwork uint32, ) (*BridgeSync, error) { return newBridgeSync( ctx, @@ -49,6 +57,7 @@ func NewL1( waitForNewBlocksPeriod, retryAfterErrorPeriod, maxRetryAttemptsAfterError, + originNetwork, false, ) } @@ -60,12 +69,13 @@ func NewL2( bridge common.Address, syncBlockChunkSize uint64, blockFinalityType etherman.BlockNumberFinality, - rd sync.ReorgDetector, + rd ReorgDetector, ethClient EthClienter, initialBlock uint64, waitForNewBlocksPeriod time.Duration, retryAfterErrorPeriod time.Duration, maxRetryAttemptsAfterError int, + originNetwork uint32, ) (*BridgeSync, error) { return newBridgeSync( ctx, @@ -80,6 +90,7 @@ func NewL2( waitForNewBlocksPeriod, retryAfterErrorPeriod, maxRetryAttemptsAfterError, + originNetwork, true, ) } @@ -90,13 +101,14 @@ func newBridgeSync( bridge common.Address, syncBlockChunkSize uint64, blockFinalityType etherman.BlockNumberFinality, - rd sync.ReorgDetector, + rd ReorgDetector, ethClient EthClienter, initialBlock uint64, l1OrL2ID string, waitForNewBlocksPeriod time.Duration, retryAfterErrorPeriod time.Duration, maxRetryAttemptsAfterError int, + originNetwork uint32, syncFullClaims bool, ) (*BridgeSync, error) { processor, err := newProcessor(dbPath, l1OrL2ID) @@ -146,8 +158,10 @@ func newBridgeSync( } return &BridgeSync{ - processor: processor, - driver: driver, + processor: processor, + driver: driver, + originNetwork: originNetwork, + blockFinality: blockFinalityType, }, nil } @@ -172,12 +186,16 @@ func (s *BridgeSync) GetBridges(ctx context.Context, fromBlock, toBlock uint64) return s.processor.GetBridges(ctx, fromBlock, toBlock) } +func (s *BridgeSync) GetBridgesPublished(ctx context.Context, fromBlock, toBlock uint64) ([]Bridge, error) { + return s.processor.GetBridgesPublished(ctx, fromBlock, toBlock) +} + func (s *BridgeSync) GetProof(ctx context.Context, depositCount uint32, localExitRoot common.Hash) (tree.Proof, error) { return s.processor.exitTree.GetProof(ctx, depositCount, localExitRoot) } -func (p *processor) GetBlockByLER(ctx context.Context, ler common.Hash) (uint64, error) { - root, err := p.exitTree.GetRootByHash(ctx, ler) +func (s *BridgeSync) GetBlockByLER(ctx context.Context, ler common.Hash) (uint64, error) { + root, err := s.processor.exitTree.GetRootByHash(ctx, ler) if err != nil { return 0, err } @@ -191,3 +209,18 @@ func (s *BridgeSync) GetRootByLER(ctx context.Context, ler common.Hash) (*tree.R } return root, nil } + +// GetExitRootByIndex returns the root of the exit tree at the moment the leaf with the given index was added +func (s *BridgeSync) GetExitRootByIndex(ctx context.Context, index uint32) (tree.Root, error) { + return s.processor.exitTree.GetRootByIndex(ctx, index) +} + +// OriginNetwork returns the network ID of the origin chain +func (s *BridgeSync) OriginNetwork() uint32 { + return s.originNetwork +} + +// BlockFinality returns the block finality type +func (s *BridgeSync) BlockFinality() etherman.BlockNumberFinality { + return s.blockFinality +} diff --git 
a/bridgesync/bridgesync_test.go b/bridgesync/bridgesync_test.go new file mode 100644 index 00000000..cb328c68 --- /dev/null +++ b/bridgesync/bridgesync_test.go @@ -0,0 +1,81 @@ +package bridgesync_test + +import ( + "context" + "testing" + "time" + + "github.com/0xPolygon/cdk/bridgesync" + mocksbridgesync "github.com/0xPolygon/cdk/bridgesync/mocks" + "github.com/0xPolygon/cdk/etherman" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +// Mock implementations for the interfaces +type MockEthClienter struct { + mock.Mock +} + +type MockBridgeContractor struct { + mock.Mock +} + +func TestNewLx(t *testing.T) { + ctx := context.Background() + dbPath := "test_db_path" + bridge := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") + syncBlockChunkSize := uint64(100) + blockFinalityType := etherman.SafeBlock + initialBlock := uint64(0) + waitForNewBlocksPeriod := time.Second * 10 + retryAfterErrorPeriod := time.Second * 5 + maxRetryAttemptsAfterError := 3 + originNetwork := uint32(1) + + mockEthClient := mocksbridgesync.NewEthClienter(t) + mockReorgDetector := mocksbridgesync.NewReorgDetector(t) + + mockReorgDetector.EXPECT().Subscribe(mock.Anything).Return(nil, nil) + + bridgeSync, err := bridgesync.NewL1( + ctx, + dbPath, + bridge, + syncBlockChunkSize, + blockFinalityType, + mockReorgDetector, + mockEthClient, + initialBlock, + waitForNewBlocksPeriod, + retryAfterErrorPeriod, + maxRetryAttemptsAfterError, + originNetwork, + ) + + assert.NoError(t, err) + assert.NotNil(t, bridgeSync) + assert.Equal(t, originNetwork, bridgeSync.OriginNetwork()) + assert.Equal(t, blockFinalityType, bridgeSync.BlockFinality()) + + bridgeSyncL2, err := bridgesync.NewL2( + ctx, + dbPath, + bridge, + syncBlockChunkSize, + blockFinalityType, + mockReorgDetector, + mockEthClient, + initialBlock, + waitForNewBlocksPeriod, + retryAfterErrorPeriod, + maxRetryAttemptsAfterError, + originNetwork, + ) + + assert.NoError(t, err) + assert.NotNil(t, bridgeSyncL2) + assert.Equal(t, originNetwork, bridgeSyncL2.OriginNetwork()) + assert.Equal(t, blockFinalityType, bridgeSyncL2.BlockFinality()) +} diff --git a/bridgesync/claimcalldata_test.go b/bridgesync/claimcalldata_test.go index b8b432ae..a4ab49de 100644 --- a/bridgesync/claimcalldata_test.go +++ b/bridgesync/claimcalldata_test.go @@ -74,6 +74,7 @@ func TestClaimCalldata(t *testing.T) { ProofRollupExitRoot: proofRollupH, DestinationNetwork: 0, Metadata: []byte{}, + GlobalExitRoot: crypto.Keccak256Hash(common.HexToHash("5ca1e").Bytes(), common.HexToHash("dead").Bytes()), } expectedClaim2 := Claim{ OriginNetwork: 87, @@ -86,6 +87,7 @@ func TestClaimCalldata(t *testing.T) { ProofRollupExitRoot: proofRollupH, DestinationNetwork: 0, Metadata: []byte{}, + GlobalExitRoot: crypto.Keccak256Hash(common.HexToHash("5ca1e").Bytes(), common.HexToHash("dead").Bytes()), } expectedClaim3 := Claim{ OriginNetwork: 69, @@ -98,6 +100,7 @@ func TestClaimCalldata(t *testing.T) { ProofRollupExitRoot: proofRollupH, DestinationNetwork: 0, Metadata: []byte{}, + GlobalExitRoot: crypto.Keccak256Hash(common.HexToHash("5ca1e").Bytes(), common.HexToHash("dead").Bytes()), } auth.GasLimit = 999999 // for some reason gas estimation fails :( diff --git a/bridgesync/config.go b/bridgesync/config.go index 66eb00ed..d2373b53 100644 --- a/bridgesync/config.go +++ b/bridgesync/config.go @@ -24,4 +24,6 @@ type Config struct { MaxRetryAttemptsAfterError int `mapstructure:"MaxRetryAttemptsAfterError"` // WaitForNewBlocksPeriod time
that will be waited when the synchronizer has reached the latest block WaitForNewBlocksPeriod types.Duration `mapstructure:"WaitForNewBlocksPeriod"` + // OriginNetwork is the id of the network where the bridge is deployed + OriginNetwork uint32 `mapstructure:"OriginNetwork"` } diff --git a/bridgesync/downloader.go b/bridgesync/downloader.go index dbea8c8f..782d5f1b 100644 --- a/bridgesync/downloader.go +++ b/bridgesync/downloader.go @@ -288,11 +288,14 @@ func decodeClaimCallDataAndSetIfFound(data []interface{}, claim *Claim) (bool, e if !ok { return false, fmt.Errorf("unexpected type for 'DestinationNetwork'. Expected 'uint32', got '%T'", data[7]) } + claim.Metadata, ok = data[10].([]byte) if !ok { return false, fmt.Errorf("unexpected type for 'claim Metadata'. Expected '[]byte', got '%T'", data[10]) } + claim.GlobalExitRoot = crypto.Keccak256Hash(claim.MainnetExitRoot.Bytes(), claim.RollupExitRoot.Bytes()) + return true, nil } } diff --git a/bridgesync/e2e_test.go b/bridgesync/e2e_test.go index c0a22484..a8868ce1 100644 --- a/bridgesync/e2e_test.go +++ b/bridgesync/e2e_test.go @@ -29,7 +29,7 @@ func TestBridgeEventE2E(t *testing.T) { go rd.Start(ctx) //nolint:errcheck testClient := helpers.TestClient{ClientRenamed: client.Client()} - syncer, err := bridgesync.NewL1(ctx, dbPathSyncer, setup.EBZkevmBridgeAddr, 10, etherman.LatestBlock, rd, testClient, 0, time.Millisecond*10, 0, 0) + syncer, err := bridgesync.NewL1(ctx, dbPathSyncer, setup.EBZkevmBridgeAddr, 10, etherman.LatestBlock, rd, testClient, 0, time.Millisecond*10, 0, 0, 1) require.NoError(t, err) go syncer.Start(ctx) diff --git a/bridgesync/migrations/bridgesync0001.sql b/bridgesync/migrations/bridgesync0001.sql index de90910c..74adc6d5 100644 --- a/bridgesync/migrations/bridgesync0001.sql +++ b/bridgesync/migrations/bridgesync0001.sql @@ -16,7 +16,7 @@ CREATE TABLE bridge ( origin_address VARCHAR NOT NULL, destination_network INTEGER NOT NULL, destination_address VARCHAR NOT NULL, - amount DECIMAL(78, 0) NOT NULL, + amount TEXT NOT NULL, metadata BLOB, deposit_count INTEGER NOT NULL, PRIMARY KEY (block_num, block_pos) @@ -25,11 +25,11 @@ CREATE TABLE bridge ( CREATE TABLE claim ( block_num INTEGER NOT NULL REFERENCES block(num) ON DELETE CASCADE, block_pos INTEGER NOT NULL, - global_index DECIMAL(78, 0) NOT NULL, + global_index TEXT NOT NULL, origin_network INTEGER NOT NULL, origin_address VARCHAR NOT NULL, destination_address VARCHAR NOT NULL, - amount DECIMAL(78, 0) NOT NULL, + amount TEXT NOT NULL, proof_local_exit_root VARCHAR, proof_rollup_exit_root VARCHAR, mainnet_exit_root VARCHAR, diff --git a/bridgesync/mocks/bridge_contractor.go b/bridgesync/mocks/bridge_contractor.go new file mode 100644 index 00000000..fd559850 --- /dev/null +++ b/bridgesync/mocks/bridge_contractor.go @@ -0,0 +1,93 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks_bridgesync + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// BridgeContractor is an autogenerated mock type for the BridgeContractor type +type BridgeContractor struct { + mock.Mock +} + +type BridgeContractor_Expecter struct { + mock *mock.Mock +} + +func (_m *BridgeContractor) EXPECT() *BridgeContractor_Expecter { + return &BridgeContractor_Expecter{mock: &_m.Mock} +} + +// LastUpdatedDepositCount provides a mock function with given fields: ctx, BlockNumber +func (_m *BridgeContractor) LastUpdatedDepositCount(ctx context.Context, BlockNumber uint64) (uint32, error) { + ret := _m.Called(ctx, BlockNumber) + + if len(ret) == 0 { + panic("no return value specified for LastUpdatedDepositCount") + } + + var r0 uint32 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (uint32, error)); ok { + return rf(ctx, BlockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) uint32); ok { + r0 = rf(ctx, BlockNumber) + } else { + r0 = ret.Get(0).(uint32) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, BlockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BridgeContractor_LastUpdatedDepositCount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LastUpdatedDepositCount' +type BridgeContractor_LastUpdatedDepositCount_Call struct { + *mock.Call +} + +// LastUpdatedDepositCount is a helper method to define mock.On call +// - ctx context.Context +// - BlockNumber uint64 +func (_e *BridgeContractor_Expecter) LastUpdatedDepositCount(ctx interface{}, BlockNumber interface{}) *BridgeContractor_LastUpdatedDepositCount_Call { + return &BridgeContractor_LastUpdatedDepositCount_Call{Call: _e.mock.On("LastUpdatedDepositCount", ctx, BlockNumber)} +} + +func (_c *BridgeContractor_LastUpdatedDepositCount_Call) Run(run func(ctx context.Context, BlockNumber uint64)) *BridgeContractor_LastUpdatedDepositCount_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *BridgeContractor_LastUpdatedDepositCount_Call) Return(_a0 uint32, _a1 error) *BridgeContractor_LastUpdatedDepositCount_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *BridgeContractor_LastUpdatedDepositCount_Call) RunAndReturn(run func(context.Context, uint64) (uint32, error)) *BridgeContractor_LastUpdatedDepositCount_Call { + _c.Call.Return(run) + return _c +} + +// NewBridgeContractor creates a new instance of BridgeContractor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBridgeContractor(t interface { + mock.TestingT + Cleanup(func()) +}) *BridgeContractor { + mock := &BridgeContractor{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/bridgesync/mocks/eth_clienter.go b/bridgesync/mocks/eth_clienter.go new file mode 100644 index 00000000..3d208e45 --- /dev/null +++ b/bridgesync/mocks/eth_clienter.go @@ -0,0 +1,1136 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks_bridgesync + +import ( + big "math/big" + + common "github.com/ethereum/go-ethereum/common" + + context "context" + + ethereum "github.com/ethereum/go-ethereum" + + mock "github.com/stretchr/testify/mock" + + rpc "github.com/ethereum/go-ethereum/rpc" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// EthClienter is an autogenerated mock type for the EthClienter type +type EthClienter struct { + mock.Mock +} + +type EthClienter_Expecter struct { + mock *mock.Mock +} + +func (_m *EthClienter) EXPECT() *EthClienter_Expecter { + return &EthClienter_Expecter{mock: &_m.Mock} +} + +// BlockByHash provides a mock function with given fields: ctx, hash +func (_m *EthClienter) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + ret := _m.Called(ctx, hash) + + if len(ret) == 0 { + panic("no return value specified for BlockByHash") + } + + var r0 *types.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Block, error)); ok { + return rf(ctx, hash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Block); ok { + r0 = rf(ctx, hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_BlockByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockByHash' +type EthClienter_BlockByHash_Call struct { + *mock.Call +} + +// BlockByHash is a helper method to define mock.On call +// - ctx context.Context +// - hash common.Hash +func (_e *EthClienter_Expecter) BlockByHash(ctx interface{}, hash interface{}) *EthClienter_BlockByHash_Call { + return &EthClienter_BlockByHash_Call{Call: _e.mock.On("BlockByHash", ctx, hash)} +} + +func (_c *EthClienter_BlockByHash_Call) Run(run func(ctx context.Context, hash common.Hash)) *EthClienter_BlockByHash_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *EthClienter_BlockByHash_Call) Return(_a0 *types.Block, _a1 error) *EthClienter_BlockByHash_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_BlockByHash_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Block, error)) *EthClienter_BlockByHash_Call { + _c.Call.Return(run) + return _c +} + +// BlockByNumber provides a mock function with given fields: ctx, number +func (_m *EthClienter) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for BlockByNumber") + } + + var r0 *types.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Block, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Block); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_BlockByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockByNumber' +type EthClienter_BlockByNumber_Call struct { + *mock.Call +} + +// BlockByNumber is a helper method to define mock.On call +// - ctx context.Context +// - number 
*big.Int +func (_e *EthClienter_Expecter) BlockByNumber(ctx interface{}, number interface{}) *EthClienter_BlockByNumber_Call { + return &EthClienter_BlockByNumber_Call{Call: _e.mock.On("BlockByNumber", ctx, number)} +} + +func (_c *EthClienter_BlockByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *EthClienter_BlockByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*big.Int)) + }) + return _c +} + +func (_c *EthClienter_BlockByNumber_Call) Return(_a0 *types.Block, _a1 error) *EthClienter_BlockByNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_BlockByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*types.Block, error)) *EthClienter_BlockByNumber_Call { + _c.Call.Return(run) + return _c +} + +// BlockNumber provides a mock function with given fields: ctx +func (_m *EthClienter) BlockNumber(ctx context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for BlockNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_BlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockNumber' +type EthClienter_BlockNumber_Call struct { + *mock.Call +} + +// BlockNumber is a helper method to define mock.On call +// - ctx context.Context +func (_e *EthClienter_Expecter) BlockNumber(ctx interface{}) *EthClienter_BlockNumber_Call { + return &EthClienter_BlockNumber_Call{Call: _e.mock.On("BlockNumber", ctx)} +} + +func (_c *EthClienter_BlockNumber_Call) Run(run func(ctx context.Context)) *EthClienter_BlockNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *EthClienter_BlockNumber_Call) Return(_a0 uint64, _a1 error) *EthClienter_BlockNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_BlockNumber_Call) RunAndReturn(run func(context.Context) (uint64, error)) *EthClienter_BlockNumber_Call { + _c.Call.Return(run) + return _c +} + +// CallContract provides a mock function with given fields: ctx, call, blockNumber +func (_m *EthClienter) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { + ret := _m.Called(ctx, call, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for CallContract") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error)); ok { + return rf(ctx, call, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) []byte); ok { + r0 = rf(ctx, call, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg, *big.Int) error); ok { + r1 = rf(ctx, call, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_CallContract_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CallContract' +type EthClienter_CallContract_Call struct { + *mock.Call +} + +// CallContract is a helper method to define mock.On call 
+// - ctx context.Context +// - call ethereum.CallMsg +// - blockNumber *big.Int +func (_e *EthClienter_Expecter) CallContract(ctx interface{}, call interface{}, blockNumber interface{}) *EthClienter_CallContract_Call { + return &EthClienter_CallContract_Call{Call: _e.mock.On("CallContract", ctx, call, blockNumber)} +} + +func (_c *EthClienter_CallContract_Call) Run(run func(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int)) *EthClienter_CallContract_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ethereum.CallMsg), args[2].(*big.Int)) + }) + return _c +} + +func (_c *EthClienter_CallContract_Call) Return(_a0 []byte, _a1 error) *EthClienter_CallContract_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_CallContract_Call) RunAndReturn(run func(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error)) *EthClienter_CallContract_Call { + _c.Call.Return(run) + return _c +} + +// Client provides a mock function with given fields: +func (_m *EthClienter) Client() *rpc.Client { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Client") + } + + var r0 *rpc.Client + if rf, ok := ret.Get(0).(func() *rpc.Client); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*rpc.Client) + } + } + + return r0 +} + +// EthClienter_Client_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Client' +type EthClienter_Client_Call struct { + *mock.Call +} + +// Client is a helper method to define mock.On call +func (_e *EthClienter_Expecter) Client() *EthClienter_Client_Call { + return &EthClienter_Client_Call{Call: _e.mock.On("Client")} +} + +func (_c *EthClienter_Client_Call) Run(run func()) *EthClienter_Client_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *EthClienter_Client_Call) Return(_a0 *rpc.Client) *EthClienter_Client_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *EthClienter_Client_Call) RunAndReturn(run func() *rpc.Client) *EthClienter_Client_Call { + _c.Call.Return(run) + return _c +} + +// CodeAt provides a mock function with given fields: ctx, contract, blockNumber +func (_m *EthClienter) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) { + ret := _m.Called(ctx, contract, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for CodeAt") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) ([]byte, error)); ok { + return rf(ctx, contract, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) []byte); ok { + r0 = rf(ctx, contract, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { + r1 = rf(ctx, contract, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_CodeAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CodeAt' +type EthClienter_CodeAt_Call struct { + *mock.Call +} + +// CodeAt is a helper method to define mock.On call +// - ctx context.Context +// - contract common.Address +// - blockNumber *big.Int +func (_e *EthClienter_Expecter) CodeAt(ctx interface{}, contract interface{}, blockNumber interface{}) *EthClienter_CodeAt_Call { + return &EthClienter_CodeAt_Call{Call: _e.mock.On("CodeAt", ctx, contract, blockNumber)} 
+} + +func (_c *EthClienter_CodeAt_Call) Run(run func(ctx context.Context, contract common.Address, blockNumber *big.Int)) *EthClienter_CodeAt_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Address), args[2].(*big.Int)) + }) + return _c +} + +func (_c *EthClienter_CodeAt_Call) Return(_a0 []byte, _a1 error) *EthClienter_CodeAt_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_CodeAt_Call) RunAndReturn(run func(context.Context, common.Address, *big.Int) ([]byte, error)) *EthClienter_CodeAt_Call { + _c.Call.Return(run) + return _c +} + +// EstimateGas provides a mock function with given fields: ctx, call +func (_m *EthClienter) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { + ret := _m.Called(ctx, call) + + if len(ret) == 0 { + panic("no return value specified for EstimateGas") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) (uint64, error)); ok { + return rf(ctx, call) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) uint64); ok { + r0 = rf(ctx, call) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg) error); ok { + r1 = rf(ctx, call) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_EstimateGas_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EstimateGas' +type EthClienter_EstimateGas_Call struct { + *mock.Call +} + +// EstimateGas is a helper method to define mock.On call +// - ctx context.Context +// - call ethereum.CallMsg +func (_e *EthClienter_Expecter) EstimateGas(ctx interface{}, call interface{}) *EthClienter_EstimateGas_Call { + return &EthClienter_EstimateGas_Call{Call: _e.mock.On("EstimateGas", ctx, call)} +} + +func (_c *EthClienter_EstimateGas_Call) Run(run func(ctx context.Context, call ethereum.CallMsg)) *EthClienter_EstimateGas_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ethereum.CallMsg)) + }) + return _c +} + +func (_c *EthClienter_EstimateGas_Call) Return(_a0 uint64, _a1 error) *EthClienter_EstimateGas_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_EstimateGas_Call) RunAndReturn(run func(context.Context, ethereum.CallMsg) (uint64, error)) *EthClienter_EstimateGas_Call { + _c.Call.Return(run) + return _c +} + +// FilterLogs provides a mock function with given fields: ctx, q +func (_m *EthClienter) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { + ret := _m.Called(ctx, q) + + if len(ret) == 0 { + panic("no return value specified for FilterLogs") + } + + var r0 []types.Log + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) ([]types.Log, error)); ok { + return rf(ctx, q) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) []types.Log); ok { + r0 = rf(ctx, q) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.Log) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery) error); ok { + r1 = rf(ctx, q) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_FilterLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FilterLogs' +type EthClienter_FilterLogs_Call struct { + *mock.Call +} + +// FilterLogs is a helper method to define mock.On call +// - ctx context.Context +// - q ethereum.FilterQuery +func (_e 
*EthClienter_Expecter) FilterLogs(ctx interface{}, q interface{}) *EthClienter_FilterLogs_Call { + return &EthClienter_FilterLogs_Call{Call: _e.mock.On("FilterLogs", ctx, q)} +} + +func (_c *EthClienter_FilterLogs_Call) Run(run func(ctx context.Context, q ethereum.FilterQuery)) *EthClienter_FilterLogs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ethereum.FilterQuery)) + }) + return _c +} + +func (_c *EthClienter_FilterLogs_Call) Return(_a0 []types.Log, _a1 error) *EthClienter_FilterLogs_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_FilterLogs_Call) RunAndReturn(run func(context.Context, ethereum.FilterQuery) ([]types.Log, error)) *EthClienter_FilterLogs_Call { + _c.Call.Return(run) + return _c +} + +// HeaderByHash provides a mock function with given fields: ctx, hash +func (_m *EthClienter) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { + ret := _m.Called(ctx, hash) + + if len(ret) == 0 { + panic("no return value specified for HeaderByHash") + } + + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Header, error)); ok { + return rf(ctx, hash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Header); ok { + r0 = rf(ctx, hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_HeaderByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByHash' +type EthClienter_HeaderByHash_Call struct { + *mock.Call +} + +// HeaderByHash is a helper method to define mock.On call +// - ctx context.Context +// - hash common.Hash +func (_e *EthClienter_Expecter) HeaderByHash(ctx interface{}, hash interface{}) *EthClienter_HeaderByHash_Call { + return &EthClienter_HeaderByHash_Call{Call: _e.mock.On("HeaderByHash", ctx, hash)} +} + +func (_c *EthClienter_HeaderByHash_Call) Run(run func(ctx context.Context, hash common.Hash)) *EthClienter_HeaderByHash_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *EthClienter_HeaderByHash_Call) Return(_a0 *types.Header, _a1 error) *EthClienter_HeaderByHash_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_HeaderByHash_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Header, error)) *EthClienter_HeaderByHash_Call { + _c.Call.Return(run) + return _c +} + +// HeaderByNumber provides a mock function with given fields: ctx, number +func (_m *EthClienter) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for HeaderByNumber") + } + + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Header, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Header); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_HeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit 
version for method 'HeaderByNumber' +type EthClienter_HeaderByNumber_Call struct { + *mock.Call +} + +// HeaderByNumber is a helper method to define mock.On call +// - ctx context.Context +// - number *big.Int +func (_e *EthClienter_Expecter) HeaderByNumber(ctx interface{}, number interface{}) *EthClienter_HeaderByNumber_Call { + return &EthClienter_HeaderByNumber_Call{Call: _e.mock.On("HeaderByNumber", ctx, number)} +} + +func (_c *EthClienter_HeaderByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *EthClienter_HeaderByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*big.Int)) + }) + return _c +} + +func (_c *EthClienter_HeaderByNumber_Call) Return(_a0 *types.Header, _a1 error) *EthClienter_HeaderByNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_HeaderByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*types.Header, error)) *EthClienter_HeaderByNumber_Call { + _c.Call.Return(run) + return _c +} + +// PendingCodeAt provides a mock function with given fields: ctx, account +func (_m *EthClienter) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { + ret := _m.Called(ctx, account) + + if len(ret) == 0 { + panic("no return value specified for PendingCodeAt") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address) ([]byte, error)); ok { + return rf(ctx, account) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address) []byte); ok { + r0 = rf(ctx, account) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { + r1 = rf(ctx, account) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_PendingCodeAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PendingCodeAt' +type EthClienter_PendingCodeAt_Call struct { + *mock.Call +} + +// PendingCodeAt is a helper method to define mock.On call +// - ctx context.Context +// - account common.Address +func (_e *EthClienter_Expecter) PendingCodeAt(ctx interface{}, account interface{}) *EthClienter_PendingCodeAt_Call { + return &EthClienter_PendingCodeAt_Call{Call: _e.mock.On("PendingCodeAt", ctx, account)} +} + +func (_c *EthClienter_PendingCodeAt_Call) Run(run func(ctx context.Context, account common.Address)) *EthClienter_PendingCodeAt_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Address)) + }) + return _c +} + +func (_c *EthClienter_PendingCodeAt_Call) Return(_a0 []byte, _a1 error) *EthClienter_PendingCodeAt_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_PendingCodeAt_Call) RunAndReturn(run func(context.Context, common.Address) ([]byte, error)) *EthClienter_PendingCodeAt_Call { + _c.Call.Return(run) + return _c +} + +// PendingNonceAt provides a mock function with given fields: ctx, account +func (_m *EthClienter) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { + ret := _m.Called(ctx, account) + + if len(ret) == 0 { + panic("no return value specified for PendingNonceAt") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address) (uint64, error)); ok { + return rf(ctx, account) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address) uint64); ok { + r0 = rf(ctx, account) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := 
ret.Get(1).(func(context.Context, common.Address) error); ok { + r1 = rf(ctx, account) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_PendingNonceAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PendingNonceAt' +type EthClienter_PendingNonceAt_Call struct { + *mock.Call +} + +// PendingNonceAt is a helper method to define mock.On call +// - ctx context.Context +// - account common.Address +func (_e *EthClienter_Expecter) PendingNonceAt(ctx interface{}, account interface{}) *EthClienter_PendingNonceAt_Call { + return &EthClienter_PendingNonceAt_Call{Call: _e.mock.On("PendingNonceAt", ctx, account)} +} + +func (_c *EthClienter_PendingNonceAt_Call) Run(run func(ctx context.Context, account common.Address)) *EthClienter_PendingNonceAt_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Address)) + }) + return _c +} + +func (_c *EthClienter_PendingNonceAt_Call) Return(_a0 uint64, _a1 error) *EthClienter_PendingNonceAt_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_PendingNonceAt_Call) RunAndReturn(run func(context.Context, common.Address) (uint64, error)) *EthClienter_PendingNonceAt_Call { + _c.Call.Return(run) + return _c +} + +// SendTransaction provides a mock function with given fields: ctx, tx +func (_m *EthClienter) SendTransaction(ctx context.Context, tx *types.Transaction) error { + ret := _m.Called(ctx, tx) + + if len(ret) == 0 { + panic("no return value specified for SendTransaction") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *types.Transaction) error); ok { + r0 = rf(ctx, tx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// EthClienter_SendTransaction_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendTransaction' +type EthClienter_SendTransaction_Call struct { + *mock.Call +} + +// SendTransaction is a helper method to define mock.On call +// - ctx context.Context +// - tx *types.Transaction +func (_e *EthClienter_Expecter) SendTransaction(ctx interface{}, tx interface{}) *EthClienter_SendTransaction_Call { + return &EthClienter_SendTransaction_Call{Call: _e.mock.On("SendTransaction", ctx, tx)} +} + +func (_c *EthClienter_SendTransaction_Call) Run(run func(ctx context.Context, tx *types.Transaction)) *EthClienter_SendTransaction_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*types.Transaction)) + }) + return _c +} + +func (_c *EthClienter_SendTransaction_Call) Return(_a0 error) *EthClienter_SendTransaction_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *EthClienter_SendTransaction_Call) RunAndReturn(run func(context.Context, *types.Transaction) error) *EthClienter_SendTransaction_Call { + _c.Call.Return(run) + return _c +} + +// SubscribeFilterLogs provides a mock function with given fields: ctx, q, ch +func (_m *EthClienter) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { + ret := _m.Called(ctx, q, ch) + + if len(ret) == 0 { + panic("no return value specified for SubscribeFilterLogs") + } + + var r0 ethereum.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error)); ok { + return rf(ctx, q, ch) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) ethereum.Subscription); ok { + r0 = rf(ctx, q, ch) + } else { 
+ if ret.Get(0) != nil { + r0 = ret.Get(0).(ethereum.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) error); ok { + r1 = rf(ctx, q, ch) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_SubscribeFilterLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeFilterLogs' +type EthClienter_SubscribeFilterLogs_Call struct { + *mock.Call +} + +// SubscribeFilterLogs is a helper method to define mock.On call +// - ctx context.Context +// - q ethereum.FilterQuery +// - ch chan<- types.Log +func (_e *EthClienter_Expecter) SubscribeFilterLogs(ctx interface{}, q interface{}, ch interface{}) *EthClienter_SubscribeFilterLogs_Call { + return &EthClienter_SubscribeFilterLogs_Call{Call: _e.mock.On("SubscribeFilterLogs", ctx, q, ch)} +} + +func (_c *EthClienter_SubscribeFilterLogs_Call) Run(run func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log)) *EthClienter_SubscribeFilterLogs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ethereum.FilterQuery), args[2].(chan<- types.Log)) + }) + return _c +} + +func (_c *EthClienter_SubscribeFilterLogs_Call) Return(_a0 ethereum.Subscription, _a1 error) *EthClienter_SubscribeFilterLogs_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_SubscribeFilterLogs_Call) RunAndReturn(run func(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error)) *EthClienter_SubscribeFilterLogs_Call { + _c.Call.Return(run) + return _c +} + +// SubscribeNewHead provides a mock function with given fields: ctx, ch +func (_m *EthClienter) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) { + ret := _m.Called(ctx, ch) + + if len(ret) == 0 { + panic("no return value specified for SubscribeNewHead") + } + + var r0 ethereum.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Header) (ethereum.Subscription, error)); ok { + return rf(ctx, ch) + } + if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Header) ethereum.Subscription); ok { + r0 = rf(ctx, ch) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(ethereum.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, chan<- *types.Header) error); ok { + r1 = rf(ctx, ch) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_SubscribeNewHead_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeNewHead' +type EthClienter_SubscribeNewHead_Call struct { + *mock.Call +} + +// SubscribeNewHead is a helper method to define mock.On call +// - ctx context.Context +// - ch chan<- *types.Header +func (_e *EthClienter_Expecter) SubscribeNewHead(ctx interface{}, ch interface{}) *EthClienter_SubscribeNewHead_Call { + return &EthClienter_SubscribeNewHead_Call{Call: _e.mock.On("SubscribeNewHead", ctx, ch)} +} + +func (_c *EthClienter_SubscribeNewHead_Call) Run(run func(ctx context.Context, ch chan<- *types.Header)) *EthClienter_SubscribeNewHead_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(chan<- *types.Header)) + }) + return _c +} + +func (_c *EthClienter_SubscribeNewHead_Call) Return(_a0 ethereum.Subscription, _a1 error) *EthClienter_SubscribeNewHead_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_SubscribeNewHead_Call) RunAndReturn(run func(context.Context, chan<- 
*types.Header) (ethereum.Subscription, error)) *EthClienter_SubscribeNewHead_Call { + _c.Call.Return(run) + return _c +} + +// SuggestGasPrice provides a mock function with given fields: ctx +func (_m *EthClienter) SuggestGasPrice(ctx context.Context) (*big.Int, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SuggestGasPrice") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_SuggestGasPrice_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SuggestGasPrice' +type EthClienter_SuggestGasPrice_Call struct { + *mock.Call +} + +// SuggestGasPrice is a helper method to define mock.On call +// - ctx context.Context +func (_e *EthClienter_Expecter) SuggestGasPrice(ctx interface{}) *EthClienter_SuggestGasPrice_Call { + return &EthClienter_SuggestGasPrice_Call{Call: _e.mock.On("SuggestGasPrice", ctx)} +} + +func (_c *EthClienter_SuggestGasPrice_Call) Run(run func(ctx context.Context)) *EthClienter_SuggestGasPrice_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *EthClienter_SuggestGasPrice_Call) Return(_a0 *big.Int, _a1 error) *EthClienter_SuggestGasPrice_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_SuggestGasPrice_Call) RunAndReturn(run func(context.Context) (*big.Int, error)) *EthClienter_SuggestGasPrice_Call { + _c.Call.Return(run) + return _c +} + +// SuggestGasTipCap provides a mock function with given fields: ctx +func (_m *EthClienter) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SuggestGasTipCap") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_SuggestGasTipCap_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SuggestGasTipCap' +type EthClienter_SuggestGasTipCap_Call struct { + *mock.Call +} + +// SuggestGasTipCap is a helper method to define mock.On call +// - ctx context.Context +func (_e *EthClienter_Expecter) SuggestGasTipCap(ctx interface{}) *EthClienter_SuggestGasTipCap_Call { + return &EthClienter_SuggestGasTipCap_Call{Call: _e.mock.On("SuggestGasTipCap", ctx)} +} + +func (_c *EthClienter_SuggestGasTipCap_Call) Run(run func(ctx context.Context)) *EthClienter_SuggestGasTipCap_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *EthClienter_SuggestGasTipCap_Call) Return(_a0 *big.Int, _a1 error) *EthClienter_SuggestGasTipCap_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_SuggestGasTipCap_Call) RunAndReturn(run func(context.Context) (*big.Int, error)) *EthClienter_SuggestGasTipCap_Call { + _c.Call.Return(run) + 
return _c +} + +// TransactionCount provides a mock function with given fields: ctx, blockHash +func (_m *EthClienter) TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) { + ret := _m.Called(ctx, blockHash) + + if len(ret) == 0 { + panic("no return value specified for TransactionCount") + } + + var r0 uint + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (uint, error)); ok { + return rf(ctx, blockHash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) uint); ok { + r0 = rf(ctx, blockHash) + } else { + r0 = ret.Get(0).(uint) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, blockHash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_TransactionCount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TransactionCount' +type EthClienter_TransactionCount_Call struct { + *mock.Call +} + +// TransactionCount is a helper method to define mock.On call +// - ctx context.Context +// - blockHash common.Hash +func (_e *EthClienter_Expecter) TransactionCount(ctx interface{}, blockHash interface{}) *EthClienter_TransactionCount_Call { + return &EthClienter_TransactionCount_Call{Call: _e.mock.On("TransactionCount", ctx, blockHash)} +} + +func (_c *EthClienter_TransactionCount_Call) Run(run func(ctx context.Context, blockHash common.Hash)) *EthClienter_TransactionCount_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *EthClienter_TransactionCount_Call) Return(_a0 uint, _a1 error) *EthClienter_TransactionCount_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_TransactionCount_Call) RunAndReturn(run func(context.Context, common.Hash) (uint, error)) *EthClienter_TransactionCount_Call { + _c.Call.Return(run) + return _c +} + +// TransactionInBlock provides a mock function with given fields: ctx, blockHash, index +func (_m *EthClienter) TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) { + ret := _m.Called(ctx, blockHash, index) + + if len(ret) == 0 { + panic("no return value specified for TransactionInBlock") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, uint) (*types.Transaction, error)); ok { + return rf(ctx, blockHash, index) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, uint) *types.Transaction); ok { + r0 = rf(ctx, blockHash, index) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash, uint) error); ok { + r1 = rf(ctx, blockHash, index) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_TransactionInBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TransactionInBlock' +type EthClienter_TransactionInBlock_Call struct { + *mock.Call +} + +// TransactionInBlock is a helper method to define mock.On call +// - ctx context.Context +// - blockHash common.Hash +// - index uint +func (_e *EthClienter_Expecter) TransactionInBlock(ctx interface{}, blockHash interface{}, index interface{}) *EthClienter_TransactionInBlock_Call { + return &EthClienter_TransactionInBlock_Call{Call: _e.mock.On("TransactionInBlock", ctx, blockHash, index)} +} + +func (_c *EthClienter_TransactionInBlock_Call) Run(run func(ctx context.Context, blockHash 
common.Hash, index uint)) *EthClienter_TransactionInBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash), args[2].(uint)) + }) + return _c +} + +func (_c *EthClienter_TransactionInBlock_Call) Return(_a0 *types.Transaction, _a1 error) *EthClienter_TransactionInBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_TransactionInBlock_Call) RunAndReturn(run func(context.Context, common.Hash, uint) (*types.Transaction, error)) *EthClienter_TransactionInBlock_Call { + _c.Call.Return(run) + return _c +} + +// NewEthClienter creates a new instance of EthClienter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEthClienter(t interface { + mock.TestingT + Cleanup(func()) +}) *EthClienter { + mock := &EthClienter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/bridgesync/mocks/reorg_detector.go b/bridgesync/mocks/reorg_detector.go new file mode 100644 index 00000000..d24f4b83 --- /dev/null +++ b/bridgesync/mocks/reorg_detector.go @@ -0,0 +1,147 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks_bridgesync + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" + + reorgdetector "github.com/0xPolygon/cdk/reorgdetector" +) + +// ReorgDetector is an autogenerated mock type for the ReorgDetector type +type ReorgDetector struct { + mock.Mock +} + +type ReorgDetector_Expecter struct { + mock *mock.Mock +} + +func (_m *ReorgDetector) EXPECT() *ReorgDetector_Expecter { + return &ReorgDetector_Expecter{mock: &_m.Mock} +} + +// AddBlockToTrack provides a mock function with given fields: ctx, id, blockNum, blockHash +func (_m *ReorgDetector) AddBlockToTrack(ctx context.Context, id string, blockNum uint64, blockHash common.Hash) error { + ret := _m.Called(ctx, id, blockNum, blockHash) + + if len(ret) == 0 { + panic("no return value specified for AddBlockToTrack") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, uint64, common.Hash) error); ok { + r0 = rf(ctx, id, blockNum, blockHash) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ReorgDetector_AddBlockToTrack_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddBlockToTrack' +type ReorgDetector_AddBlockToTrack_Call struct { + *mock.Call +} + +// AddBlockToTrack is a helper method to define mock.On call +// - ctx context.Context +// - id string +// - blockNum uint64 +// - blockHash common.Hash +func (_e *ReorgDetector_Expecter) AddBlockToTrack(ctx interface{}, id interface{}, blockNum interface{}, blockHash interface{}) *ReorgDetector_AddBlockToTrack_Call { + return &ReorgDetector_AddBlockToTrack_Call{Call: _e.mock.On("AddBlockToTrack", ctx, id, blockNum, blockHash)} +} + +func (_c *ReorgDetector_AddBlockToTrack_Call) Run(run func(ctx context.Context, id string, blockNum uint64, blockHash common.Hash)) *ReorgDetector_AddBlockToTrack_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(uint64), args[3].(common.Hash)) + }) + return _c +} + +func (_c *ReorgDetector_AddBlockToTrack_Call) Return(_a0 error) *ReorgDetector_AddBlockToTrack_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ReorgDetector_AddBlockToTrack_Call) RunAndReturn(run func(context.Context, string, uint64, 
common.Hash) error) *ReorgDetector_AddBlockToTrack_Call { + _c.Call.Return(run) + return _c +} + +// Subscribe provides a mock function with given fields: id +func (_m *ReorgDetector) Subscribe(id string) (*reorgdetector.Subscription, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Subscribe") + } + + var r0 *reorgdetector.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(string) (*reorgdetector.Subscription, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) *reorgdetector.Subscription); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*reorgdetector.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReorgDetector_Subscribe_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Subscribe' +type ReorgDetector_Subscribe_Call struct { + *mock.Call +} + +// Subscribe is a helper method to define mock.On call +// - id string +func (_e *ReorgDetector_Expecter) Subscribe(id interface{}) *ReorgDetector_Subscribe_Call { + return &ReorgDetector_Subscribe_Call{Call: _e.mock.On("Subscribe", id)} +} + +func (_c *ReorgDetector_Subscribe_Call) Run(run func(id string)) *ReorgDetector_Subscribe_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *ReorgDetector_Subscribe_Call) Return(_a0 *reorgdetector.Subscription, _a1 error) *ReorgDetector_Subscribe_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReorgDetector_Subscribe_Call) RunAndReturn(run func(string) (*reorgdetector.Subscription, error)) *ReorgDetector_Subscribe_Call { + _c.Call.Return(run) + return _c +} + +// NewReorgDetector creates a new instance of ReorgDetector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReorgDetector(t interface { + mock.TestingT + Cleanup(func()) +}) *ReorgDetector { + mock := &ReorgDetector{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/bridgesync/processor.go b/bridgesync/processor.go index e4ba5423..e8a79c1f 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -20,9 +20,14 @@ import ( _ "modernc.org/sqlite" ) +const ( + globalIndexPartSize = 4 + globalIndexMaxSize = 9 +) + var ( - // ErrBlockNotProcessed indicates that the given block(s) have not been processed yet. - ErrBlockNotProcessed = errors.New("given block(s) have not been processed yet") + // errBlockNotProcessedFormat indicates that the given block(s) have not been processed yet. 
+ errBlockNotProcessedFormat = fmt.Sprintf("block %%d not processed, last processed: %%d") ) // Bridge is the representation of a bridge event @@ -93,10 +98,15 @@ type Event struct { Claim *Claim } +type BridgeContractor interface { + LastUpdatedDepositCount(ctx context.Context, BlockNumber uint64) (uint32, error) +} + type processor struct { - db *sql.DB - exitTree *tree.AppendOnlyTree - log *log.Logger + db *sql.DB + exitTree *tree.AppendOnlyTree + log *log.Logger + bridgeContract BridgeContractor } func newProcessor(dbPath, loggerPrefix string) (*processor, error) { @@ -116,6 +126,11 @@ func newProcessor(dbPath, loggerPrefix string) (*processor, error) { log: logger, }, nil } +func (p *processor) GetBridgesPublished( + ctx context.Context, fromBlock, toBlock uint64, +) ([]Bridge, error) { + return p.GetBridges(ctx, fromBlock, toBlock) +} func (p *processor) GetBridges( ctx context.Context, fromBlock, toBlock uint64, @@ -196,7 +211,7 @@ func (p *processor) isBlockProcessed(tx db.Querier, blockNum uint64) error { return err } if lpb < blockNum { - return ErrBlockNotProcessed + return fmt.Errorf(errBlockNotProcessedFormat, blockNum, lpb) } return nil } @@ -300,7 +315,7 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { func GenerateGlobalIndex(mainnetFlag bool, rollupIndex uint32, localExitRootIndex uint32) *big.Int { var ( globalIndexBytes []byte - buf [4]byte + buf [globalIndexPartSize]byte ) if mainnetFlag { globalIndexBytes = append(globalIndexBytes, big.NewInt(1).Bytes()...) @@ -313,5 +328,52 @@ func GenerateGlobalIndex(mainnetFlag bool, rollupIndex uint32, localExitRootInde leri := big.NewInt(0).SetUint64(uint64(localExitRootIndex)).FillBytes(buf[:]) globalIndexBytes = append(globalIndexBytes, leri...) - return big.NewInt(0).SetBytes(globalIndexBytes) + result := big.NewInt(0).SetBytes(globalIndexBytes) + + return result +} + +// Decodes global index to its three parts: +// 1. mainnetFlag - first byte +// 2. rollupIndex - next 4 bytes +// 3. 
localExitRootIndex - last 4 bytes +// NOTE - mainnet flag is not in the global index bytes if it is false +// NOTE - rollup index is 0 if mainnet flag is true +// NOTE - rollup index is not in the global index bytes if mainnet flag is false and rollup index is 0 +func DecodeGlobalIndex(globalIndex *big.Int) (mainnetFlag bool, + rollupIndex uint32, localExitRootIndex uint32, err error) { + globalIndexBytes := globalIndex.Bytes() + l := len(globalIndexBytes) + if l > globalIndexMaxSize { + return false, 0, 0, errors.New("invalid global index length") + } + + if l == 0 { + // false, 0, 0 + return + } + + if l == globalIndexMaxSize { + // true, rollupIndex, localExitRootIndex + mainnetFlag = true + } + + localExitRootFromIdx := l - globalIndexPartSize + if localExitRootFromIdx < 0 { + localExitRootFromIdx = 0 + } + + rollupIndexFromIdx := localExitRootFromIdx - globalIndexPartSize + if rollupIndexFromIdx < 0 { + rollupIndexFromIdx = 0 + } + + rollupIndex = convertBytesToUint32(globalIndexBytes[rollupIndexFromIdx:localExitRootFromIdx]) + localExitRootIndex = convertBytesToUint32(globalIndexBytes[localExitRootFromIdx:]) + + return +} + +func convertBytesToUint32(bytes []byte) uint32 { + return uint32(big.NewInt(0).SetBytes(bytes).Uint64()) } diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index 2ff03c76..ab31f17d 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -3,6 +3,7 @@ package bridgesync import ( "context" "encoding/json" + "errors" "fmt" "math/big" "os" @@ -11,13 +12,71 @@ import ( "testing" migrationsBridge "github.com/0xPolygon/cdk/bridgesync/migrations" + "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sync" "github.com/0xPolygon/cdk/tree/testvectors" + "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/common" + "github.com/russross/meddler" "github.com/stretchr/testify/require" ) +func TestBigIntString(t *testing.T) { + globalIndex := GenerateGlobalIndex(true, 0, 1093) + fmt.Println(globalIndex.String()) + + _, ok := new(big.Int).SetString(globalIndex.String(), 10) + require.True(t, ok) + + dbPath := path.Join(t.TempDir(), "file::memory:?cache=shared") + + err := migrationsBridge.RunMigrations(dbPath) + require.NoError(t, err) + db, err := db.NewSQLiteDB(dbPath) + require.NoError(t, err) + + ctx := context.Background() + tx, err := db.BeginTx(ctx, nil) + require.NoError(t, err) + + claim := &Claim{ + BlockNum: 1, + BlockPos: 0, + GlobalIndex: GenerateGlobalIndex(true, 0, 1093), + OriginNetwork: 11, + Amount: big.NewInt(11), + OriginAddress: common.HexToAddress("0x11"), + DestinationAddress: common.HexToAddress("0x11"), + ProofLocalExitRoot: types.Proof{}, + ProofRollupExitRoot: types.Proof{}, + MainnetExitRoot: common.Hash{}, + RollupExitRoot: common.Hash{}, + GlobalExitRoot: common.Hash{}, + DestinationNetwork: 12, + } + + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, claim.BlockNum) + require.NoError(t, err) + require.NoError(t, meddler.Insert(tx, "claim", claim)) + + require.NoError(t, tx.Commit()) + + tx, err = db.BeginTx(ctx, nil) + require.NoError(t, err) + + rows, err := tx.Query(` + SELECT * FROM claim + WHERE block_num >= $1 AND block_num <= $2; + `, claim.BlockNum, claim.BlockNum) + require.NoError(t, err) + + claimsFromDB := []*Claim{} + require.NoError(t, meddler.ScanAll(rows, &claimsFromDB)) + require.Len(t, claimsFromDB, 1) + require.Equal(t, claim, claimsFromDB[0]) +} + func TestProceessor(t *testing.T) { path := path.Join(t.TempDir(), 
"file::memory:?cache=shared") log.Debugf("sqlite path: %s", path) @@ -53,7 +112,7 @@ func TestProceessor(t *testing.T) { fromBlock: 0, toBlock: 2, expectedClaims: nil, - expectedErr: ErrBlockNotProcessed, + expectedErr: fmt.Errorf(errBlockNotProcessedFormat, 2, 0), }, &getBridges{ p: p, @@ -62,7 +121,7 @@ func TestProceessor(t *testing.T) { fromBlock: 0, toBlock: 2, expectedBridges: nil, - expectedErr: ErrBlockNotProcessed, + expectedErr: fmt.Errorf(errBlockNotProcessedFormat, 2, 0), }, &processBlockAction{ p: p, @@ -85,7 +144,7 @@ func TestProceessor(t *testing.T) { fromBlock: 0, toBlock: 2, expectedClaims: nil, - expectedErr: ErrBlockNotProcessed, + expectedErr: fmt.Errorf(errBlockNotProcessedFormat, 2, 1), }, &getBridges{ p: p, @@ -94,7 +153,7 @@ func TestProceessor(t *testing.T) { fromBlock: 0, toBlock: 2, expectedBridges: nil, - expectedErr: ErrBlockNotProcessed, + expectedErr: fmt.Errorf(errBlockNotProcessedFormat, 2, 1), }, &getClaims{ p: p, @@ -128,7 +187,7 @@ func TestProceessor(t *testing.T) { fromBlock: 0, toBlock: 2, expectedClaims: nil, - expectedErr: ErrBlockNotProcessed, + expectedErr: fmt.Errorf(errBlockNotProcessedFormat, 2, 0), }, &getBridges{ p: p, @@ -137,7 +196,7 @@ func TestProceessor(t *testing.T) { fromBlock: 0, toBlock: 2, expectedBridges: nil, - expectedErr: ErrBlockNotProcessed, + expectedErr: fmt.Errorf(errBlockNotProcessedFormat, 2, 0), }, &processBlockAction{ p: p, @@ -582,3 +641,219 @@ func TestHashBridge(t *testing.T) { }) } } + +func TestDecodeGlobalIndex(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + globalIndex *big.Int + expectedMainnetFlag bool + expectedRollupIndex uint32 + expectedLocalIndex uint32 + expectedErr error + }{ + { + name: "Mainnet flag true, rollup index 0", + globalIndex: GenerateGlobalIndex(true, 0, 2), + expectedMainnetFlag: true, + expectedRollupIndex: 0, + expectedLocalIndex: 2, + expectedErr: nil, + }, + { + name: "Mainnet flag true, indexes 0", + globalIndex: GenerateGlobalIndex(true, 0, 0), + expectedMainnetFlag: true, + expectedRollupIndex: 0, + expectedLocalIndex: 0, + expectedErr: nil, + }, + { + name: "Mainnet flag false, rollup index 0", + globalIndex: GenerateGlobalIndex(false, 0, 2), + expectedMainnetFlag: false, + expectedRollupIndex: 0, + expectedLocalIndex: 2, + expectedErr: nil, + }, + { + name: "Mainnet flag false, rollup index non-zero", + globalIndex: GenerateGlobalIndex(false, 11, 0), + expectedMainnetFlag: false, + expectedRollupIndex: 11, + expectedLocalIndex: 0, + expectedErr: nil, + }, + { + name: "Mainnet flag false, indexes 0", + globalIndex: GenerateGlobalIndex(false, 0, 0), + expectedMainnetFlag: false, + expectedRollupIndex: 0, + expectedLocalIndex: 0, + expectedErr: nil, + }, + { + name: "Mainnet flag false, indexes non zero", + globalIndex: GenerateGlobalIndex(false, 1231, 111234), + expectedMainnetFlag: false, + expectedRollupIndex: 1231, + expectedLocalIndex: 111234, + expectedErr: nil, + }, + { + name: "Invalid global index length", + globalIndex: big.NewInt(0).SetBytes([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}), + expectedMainnetFlag: false, + expectedRollupIndex: 0, + expectedLocalIndex: 0, + expectedErr: errors.New("invalid global index length"), + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + mainnetFlag, rollupIndex, localExitRootIndex, err := DecodeGlobalIndex(tt.globalIndex) + if tt.expectedErr != nil { + require.EqualError(t, err, tt.expectedErr.Error()) + } else { + require.NoError(t, err) + } + require.Equal(t, 
tt.expectedMainnetFlag, mainnetFlag) + require.Equal(t, tt.expectedRollupIndex, rollupIndex) + require.Equal(t, tt.expectedLocalIndex, localExitRootIndex) + }) + } +} + +func TestInsertAndGetClaim(t *testing.T) { + path := path.Join(t.TempDir(), "file::memory:?cache=shared") + log.Debugf("sqlite path: %s", path) + err := migrationsBridge.RunMigrations(path) + require.NoError(t, err) + p, err := newProcessor(path, "foo") + require.NoError(t, err) + + tx, err := p.db.BeginTx(context.Background(), nil) + require.NoError(t, err) + + // insert test claim + testClaim := &Claim{ + BlockNum: 1, + BlockPos: 0, + GlobalIndex: GenerateGlobalIndex(true, 0, 1093), + OriginNetwork: 11, + OriginAddress: common.HexToAddress("0x11"), + DestinationAddress: common.HexToAddress("0x11"), + Amount: big.NewInt(11), + ProofLocalExitRoot: types.Proof{}, + ProofRollupExitRoot: types.Proof{}, + MainnetExitRoot: common.Hash{}, + RollupExitRoot: common.Hash{}, + GlobalExitRoot: common.Hash{}, + DestinationNetwork: 12, + Metadata: []byte("0x11"), + IsMessage: false, + } + + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, testClaim.BlockNum) + require.NoError(t, err) + require.NoError(t, meddler.Insert(tx, "claim", testClaim)) + + require.NoError(t, tx.Commit()) + + // get test claim + claims, err := p.GetClaims(context.Background(), 1, 1) + require.NoError(t, err) + require.Len(t, claims, 1) + require.Equal(t, testClaim, &claims[0]) +} + +type mockBridgeContract struct { + lastUpdatedDepositCount uint32 + err error +} + +func (m *mockBridgeContract) LastUpdatedDepositCount(ctx context.Context, blockNumber uint64) (uint32, error) { + return m.lastUpdatedDepositCount, m.err +} + +func TestGetBridgesPublished(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + fromBlock uint64 + toBlock uint64 + bridges []Bridge + lastUpdatedDepositCount uint32 + expectedBridges []Bridge + expectedError error + }{ + { + name: "no bridges", + fromBlock: 1, + toBlock: 10, + bridges: []Bridge{}, + lastUpdatedDepositCount: 0, + expectedBridges: []Bridge{}, + expectedError: nil, + }, + { + name: "bridges within deposit count", + fromBlock: 1, + toBlock: 10, + bridges: []Bridge{ + {DepositCount: 1, BlockNum: 1, Amount: big.NewInt(1)}, + {DepositCount: 2, BlockNum: 2, Amount: big.NewInt(1)}, + }, + lastUpdatedDepositCount: 2, + expectedBridges: []Bridge{ + {DepositCount: 1, BlockNum: 1, Amount: big.NewInt(1)}, + {DepositCount: 2, BlockNum: 2, Amount: big.NewInt(1)}, + }, + expectedError: nil, + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + path := path.Join(t.TempDir(), "file::memory:?cache=shared") + require.NoError(t, migrationsBridge.RunMigrations(path)) + p, err := newProcessor(path, "foo") + require.NoError(t, err) + + tx, err := p.db.BeginTx(context.Background(), nil) + require.NoError(t, err) + + for i := tc.fromBlock; i <= tc.toBlock; i++ { + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, i) + require.NoError(t, err) + } + + for _, bridge := range tc.bridges { + require.NoError(t, meddler.Insert(tx, "bridge", &bridge)) + } + + require.NoError(t, tx.Commit()) + + ctx := context.Background() + bridges, err := p.GetBridgesPublished(ctx, tc.fromBlock, tc.toBlock) + + if tc.expectedError != nil { + require.Equal(t, tc.expectedError, err) + } else { + require.NoError(t, err) + require.Equal(t, tc.expectedBridges, bridges) + } + }) + } +} diff --git a/claimsponsor/e2e_test.go b/claimsponsor/e2e_test.go index b4fce499..426d7b3e 100644 --- 
a/claimsponsor/e2e_test.go +++ b/claimsponsor/e2e_test.go @@ -26,7 +26,7 @@ func TestE2EL1toEVML2(t *testing.T) { env := aggoraclehelpers.SetupAggoracleWithEVMChain(t) dbPathBridgeSyncL1 := path.Join(t.TempDir(), "file::memory:?cache=shared") testClient := helpers.TestClient{ClientRenamed: env.L1Client.Client()} - bridgeSyncL1, err := bridgesync.NewL1(ctx, dbPathBridgeSyncL1, env.BridgeL1Addr, 10, etherman.LatestBlock, env.ReorgDetector, testClient, 0, time.Millisecond*10, 0, 0) + bridgeSyncL1, err := bridgesync.NewL1(ctx, dbPathBridgeSyncL1, env.BridgeL1Addr, 10, etherman.LatestBlock, env.ReorgDetector, testClient, 0, time.Millisecond*10, 0, 0, 1) require.NoError(t, err) go bridgeSyncL1.Start(ctx) diff --git a/cmd/main.go b/cmd/main.go index 23c01783..15b0fdc6 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -41,7 +41,8 @@ var ( Aliases: []string{"co"}, Usage: "List of components to run", Required: false, - Value: cli.NewStringSlice(common.SEQUENCE_SENDER, common.AGGREGATOR, common.AGGORACLE, common.RPC), + Value: cli.NewStringSlice(common.SEQUENCE_SENDER, common.AGGREGATOR, + common.AGGORACLE, common.RPC, common.AGGSENDER), } saveConfigFlag = cli.StringFlag{ Name: config.FlagSaveConfigPath, diff --git a/cmd/run.go b/cmd/run.go index 4bd4dd0d..c30da739 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -12,10 +12,12 @@ import ( zkevm "github.com/0xPolygon/cdk" dataCommitteeClient "github.com/0xPolygon/cdk-data-availability/client" jRPC "github.com/0xPolygon/cdk-rpc/rpc" + "github.com/0xPolygon/cdk/agglayer" "github.com/0xPolygon/cdk/aggoracle" "github.com/0xPolygon/cdk/aggoracle/chaingersender" "github.com/0xPolygon/cdk/aggregator" "github.com/0xPolygon/cdk/aggregator/db" + "github.com/0xPolygon/cdk/aggsender" "github.com/0xPolygon/cdk/bridgesync" "github.com/0xPolygon/cdk/claimsponsor" cdkcommon "github.com/0xPolygon/cdk/common" @@ -61,7 +63,7 @@ func start(cliCtx *cli.Context) error { components := cliCtx.StringSlice(config.FlagComponents) l1Client := runL1ClientIfNeeded(components, c.Etherman.URL) - l2Client := runL2ClientIfNeeded(components, c.AggOracle.EVMSender.URLRPCL2) + l2Client := runL2ClientIfNeeded(components, getL2RPCUrl(c)) reorgDetectorL1, errChanL1 := runReorgDetectorL1IfNeeded(cliCtx.Context, components, l1Client, &c.ReorgDetectorL1) go func() { if err := <-errChanL1; err != nil { @@ -119,6 +121,18 @@ func start(cliCtx *cli.Context) error { log.Fatal(err) } }() + case cdkcommon.AGGSENDER: + aggsender, err := createAggSender( + cliCtx.Context, + c.AggSender, + l1InfoTreeSync, + l2BridgeSync, + ) + if err != nil { + log.Fatal(err) + } + + go aggsender.Start(cliCtx.Context) } } @@ -127,6 +141,18 @@ func start(cliCtx *cli.Context) error { return nil } +func createAggSender( + ctx context.Context, + cfg aggsender.Config, + l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, + l2Syncer *bridgesync.BridgeSync, +) (*aggsender.AggSender, error) { + logger := log.WithFields("module", cdkcommon.AGGSENDER) + agglayerClient := agglayer.NewAggLayerClient(cfg.AggLayerURL) + + return aggsender.New(ctx, logger, cfg, agglayerClient, l1InfoTreeSync, l2Syncer) +} + func createAggregator(ctx context.Context, c config.Config, runMigrations bool) *aggregator.Aggregator { logger := log.WithFields("module", cdkcommon.AGGREGATOR) // Migrations @@ -479,7 +505,8 @@ func runL1InfoTreeSyncerIfNeeded( l1Client *ethclient.Client, reorgDetector *reorgdetector.ReorgDetector, ) *l1infotreesync.L1InfoTreeSync { - if !isNeeded([]string{cdkcommon.AGGORACLE, cdkcommon.RPC, cdkcommon.SEQUENCE_SENDER}, components) { + if 
!isNeeded([]string{cdkcommon.AGGORACLE, cdkcommon.RPC, + cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGSENDER}, components) { return nil } l1InfoTreeSync, err := l1infotreesync.New( @@ -509,6 +536,7 @@ func runL1ClientIfNeeded(components []string, urlRPCL1 string) *ethclient.Client if !isNeeded([]string{ cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGREGATOR, cdkcommon.AGGORACLE, cdkcommon.RPC, + cdkcommon.AGGSENDER, }, components) { return nil } @@ -522,10 +550,11 @@ func runL1ClientIfNeeded(components []string, urlRPCL1 string) *ethclient.Client } func runL2ClientIfNeeded(components []string, urlRPCL2 string) *ethclient.Client { - if !isNeeded([]string{cdkcommon.AGGORACLE, cdkcommon.RPC}, components) { + if !isNeeded([]string{cdkcommon.AGGORACLE, cdkcommon.RPC, cdkcommon.AGGSENDER}, components) { return nil } - log.Debugf("dialing L2 client at: %s", urlRPCL2) + + log.Infof("dialing L2 client at: %s", urlRPCL2) l2CLient, err := ethclient.Dial(urlRPCL2) if err != nil { log.Fatal(err) @@ -542,7 +571,7 @@ func runReorgDetectorL1IfNeeded( ) (*reorgdetector.ReorgDetector, chan error) { if !isNeeded([]string{ cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGREGATOR, - cdkcommon.AGGORACLE, cdkcommon.RPC}, + cdkcommon.AGGORACLE, cdkcommon.RPC, cdkcommon.AGGSENDER}, components) { return nil, nil } @@ -565,7 +594,7 @@ func runReorgDetectorL2IfNeeded( l2Client *ethclient.Client, cfg *reorgdetector.Config, ) (*reorgdetector.ReorgDetector, chan error) { - if !isNeeded([]string{cdkcommon.AGGORACLE, cdkcommon.RPC}, components) { + if !isNeeded([]string{cdkcommon.AGGORACLE, cdkcommon.RPC, cdkcommon.AGGSENDER}, components) { return nil, nil } rd := newReorgDetector(cfg, l2Client) @@ -675,6 +704,7 @@ func runBridgeSyncL1IfNeeded( cfg.WaitForNewBlocksPeriod.Duration, cfg.RetryAfterErrorPeriod.Duration, cfg.MaxRetryAttemptsAfterError, + cfg.OriginNetwork, ) if err != nil { log.Fatalf("error creating bridgeSyncL1: %s", err) @@ -691,10 +721,10 @@ func runBridgeSyncL2IfNeeded( reorgDetectorL2 *reorgdetector.ReorgDetector, l2Client *ethclient.Client, ) *bridgesync.BridgeSync { - // TODO: will be needed by AGGSENDER - if !isNeeded([]string{cdkcommon.RPC}, components) { + if !isNeeded([]string{cdkcommon.RPC, cdkcommon.AGGSENDER}, components) { return nil } + bridgeSyncL2, err := bridgesync.NewL2( ctx, cfg.DBPath, @@ -707,6 +737,7 @@ func runBridgeSyncL2IfNeeded( cfg.WaitForNewBlocksPeriod.Duration, cfg.RetryAfterErrorPeriod.Duration, cfg.MaxRetryAttemptsAfterError, + cfg.OriginNetwork, ) if err != nil { log.Fatalf("error creating bridgeSyncL2: %s", err) @@ -745,3 +776,11 @@ func createRPC( return jRPC.NewServer(cfg, services, jRPC.WithLogger(logger.GetSugaredLogger())) } + +func getL2RPCUrl(c *config.Config) string { + if c.AggSender.URLRPCL2 != "" { + return c.AggSender.URLRPCL2 + } + + return c.AggOracle.EVMSender.URLRPCL2 +} diff --git a/common/common.go b/common/common.go index cd5b5d70..c74f56e4 100644 --- a/common/common.go +++ b/common/common.go @@ -1,10 +1,15 @@ package common import ( + "crypto/ecdsa" "encoding/binary" "math/big" + "os" + "path/filepath" + "github.com/0xPolygon/cdk/config/types" "github.com/0xPolygon/cdk/log" + "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/common" "github.com/iden3/go-iden3-crypto/keccak256" ) @@ -88,3 +93,19 @@ func CalculateAccInputHash( return common.BytesToHash(keccak256.Hash(v1, v2, v3, v4, v5, v6)) } + +// NewKeyFromKeystore creates a private key from a keystore file +func NewKeyFromKeystore(cfg types.KeystoreFileConfig) (*ecdsa.PrivateKey, error) { 
+ if cfg.Path == "" && cfg.Password == "" { + return nil, nil + } + keystoreEncrypted, err := os.ReadFile(filepath.Clean(cfg.Path)) + if err != nil { + return nil, err + } + key, err := keystore.DecryptKey(keystoreEncrypted, cfg.Password) + if err != nil { + return nil, err + } + return key.PrivateKey, nil +} diff --git a/common/components.go b/common/components.go index 0c2df8d7..7ef9d285 100644 --- a/common/components.go +++ b/common/components.go @@ -13,4 +13,6 @@ const ( CLAIM_SPONSOR = "claim-sponsor" //nolint:stylecheck // PROVER name to identify the prover component PROVER = "prover" + // AGGSENDER name to identify the aggsender component + AGGSENDER = "aggsender" ) diff --git a/config/config.go b/config/config.go index b21ba971..9363b93b 100644 --- a/config/config.go +++ b/config/config.go @@ -10,6 +10,7 @@ import ( jRPC "github.com/0xPolygon/cdk-rpc/rpc" "github.com/0xPolygon/cdk/aggoracle" "github.com/0xPolygon/cdk/aggregator" + "github.com/0xPolygon/cdk/aggsender" "github.com/0xPolygon/cdk/bridgesync" "github.com/0xPolygon/cdk/claimsponsor" "github.com/0xPolygon/cdk/common" @@ -135,7 +136,6 @@ type Config struct { NetworkConfig NetworkConfig // Configuration of the sequence sender service SequenceSender sequencesender.Config - // Common Config that affects all the services Common common.Config // Configuration of the reorg detector service to be used for the L1 @@ -162,6 +162,9 @@ type Config struct { // LastGERSync is the config for the synchronizer in charge of syncing the last GER injected on L2. // Needed for the bridge service (RPC) LastGERSync lastgersync.Config + + // AggSender is the configuration of the agg sender service + AggSender aggsender.Config } // Load loads the configuration diff --git a/config/default.go b/config/default.go index 5e5fafcb..7f2ae8b6 100644 --- a/config/default.go +++ b/config/default.go @@ -7,7 +7,7 @@ L1URL = "http://localhost:8545" L2URL = "http://localhost:8123" L1AggOracleURL = "http://test-aggoracle-l1:8545" L2AggOracleURL = "http://test-aggoracle-l2:8545" - +AggLayerURL = "https://agglayer-dev.polygon.technology" ForkId = 9 ContractVersions = "elderberry" @@ -17,13 +17,12 @@ L2Coinbase = "0xfa3b44587990f97ba8b6ba7e230a5f0e95d14b3d" SequencerPrivateKeyPath = "/app/sequencer.keystore" SequencerPrivateKeyPassword = "test" WitnessURL = "http://localhost:8123" -AggLayerURL = "https://agglayer-dev.polygon.technology" AggregatorPrivateKeyPath = "/app/keystore/aggregator.keystore" AggregatorPrivateKeyPassword = "testonly" # Who send Proof to L1? AggLayer addr, or aggregator addr? 
SenderProofToL1Addr = "0x0000000000000000000000000000000000000000" - +polygonBridgeAddr = "0x0000000000000000000000000000000000000000" # This values can be override directly from genesis.json @@ -36,7 +35,7 @@ genesisBlockNumber = 0 polygonRollupManagerAddress = "0x0000000000000000000000000000000000000000" polTokenAddress = "0x0000000000000000000000000000000000000000" polygonZkEVMAddress = "0x0000000000000000000000000000000000000000" - polygonBridgeAddr = "0x0000000000000000000000000000000000000000" + [L2Config] GlobalExitRootAddr = "0x0000000000000000000000000000000000000000" @@ -265,7 +264,7 @@ WriteTimeout = "2s" MaxRequestsPerIPAndSecond = 10 [ClaimSponsor] -DBPath = "/{{PathRWData}}/claimsopnsor" +DBPath = "/{{PathRWData}}/claimsopnsor.sqlite" Enabled = true SenderAddr = "0xfa3b44587990f97ba8b6ba7e230a5f0e95d14b3d" BridgeAddrL2 = "0xB7098a13a48EcE087d3DA15b2D28eCE0f89819B8" @@ -297,28 +296,30 @@ GasOffset = 0 HTTPHeaders = [] [BridgeL1Sync] -DBPath = "{{PathRWData}}/bridgel1sync" +DBPath = "{{PathRWData}}/bridgel1sync.sqlite" BlockFinality = "LatestBlock" InitialBlockNum = 0 -BridgeAddr = "{{L1Config.polygonBridgeAddr}}" +BridgeAddr = "{{polygonBridgeAddr}}" SyncBlockChunkSize = 100 RetryAfterErrorPeriod = "1s" MaxRetryAttemptsAfterError = -1 WaitForNewBlocksPeriod = "3s" +OriginNetwork=0 [BridgeL2Sync] -DBPath = "{{PathRWData}}/bridgel2sync" +DBPath = "{{PathRWData}}/bridgel2sync.sqlite" BlockFinality = "LatestBlock" InitialBlockNum = 0 -BridgeAddr = "{{L1Config.polygonBridgeAddr}}" +BridgeAddr = "{{polygonBridgeAddr}}" SyncBlockChunkSize = 100 RetryAfterErrorPeriod = "1s" MaxRetryAttemptsAfterError = -1 WaitForNewBlocksPeriod = "3s" +OriginNetwork=1 [LastGERSync] # MDBX database path -DBPath = "{{PathRWData}}/lastgersync" +DBPath = "{{PathRWData}}/lastgersync.sqlite" BlockFinality = "LatestBlock" InitialBlockNum = 0 GlobalExitRootL2Addr = "{{L2Config.GlobalExitRootAddr}}" @@ -335,4 +336,12 @@ RollupManagerAddr = "{{L1Config.polygonRollupManagerAddress}}" GlobalExitRootManagerAddr = "{{L1Config.polygonZkEVMGlobalExitRootAddress}}" +[AggSender] +StoragePath = "{{PathRWData}}/aggsender.sqlite" +AggLayerURL = "{{AggLayerURL}}" +AggsenderPrivateKey = {Path = "{{SequencerPrivateKeyPath}}", Password = "{{SequencerPrivateKeyPassword}}"} +BlockGetInterval = "2s" +URLRPCL2="{{L2URL}}" +CheckSettledInterval = "2s" +SaveCertificatesToFiles = false ` diff --git a/l1infotree/tree.go b/l1infotree/tree.go index f3ad6d36..17258ba0 100644 --- a/l1infotree/tree.go +++ b/l1infotree/tree.go @@ -109,15 +109,17 @@ func (mt *L1InfoTree) ComputeMerkleProof(gerIndex uint32, leaves [][32]byte) ([] if len(leaves)%2 == 1 { leaves = append(leaves, mt.zeroHashes[h]) } - if index >= uint32(len(leaves)) { - siblings = append(siblings, mt.zeroHashes[h]) - } else { - if index%2 == 1 { // If it is odd + if index%2 == 1 { // If it is odd + siblings = append(siblings, leaves[index-1]) + } else if len(leaves) > 1 { // It is even + if index >= uint32(len(leaves)) { + // siblings = append(siblings, mt.zeroHashes[h]) siblings = append(siblings, leaves[index-1]) - } else { // It is even + } else { siblings = append(siblings, leaves[index+1]) } } + var ( nsi [][][]byte hashes [][32]byte diff --git a/l1infotree/tree_test.go b/l1infotree/tree_test.go index 6af4b8b3..a0fe9b97 100644 --- a/l1infotree/tree_test.go +++ b/l1infotree/tree_test.go @@ -3,6 +3,7 @@ package l1infotree_test import ( "encoding/hex" "encoding/json" + "fmt" "os" "testing" @@ -129,3 +130,56 @@ func TestAddLeaf2(t *testing.T) { require.Equal(t, 
testVector.NewRoot, newRoot) } } + +func TestAddLeaf2TestLastLeaf(t *testing.T) { + mt, err := l1infotree.NewL1InfoTree(log.GetDefaultLogger(), uint8(32), [][32]byte{}) + require.NoError(t, err) + leaves := [][32]byte{ + common.HexToHash("0x6a617315ffc0a6831d2de6331f8d3e053889e9385696c13f11853fdcba50e123"), + common.HexToHash("0x1cff355b898cf285bcc3f84a8d6ed51c19fe87ab654f4146f2dc7723a59fc741"), + } + siblings, root, err := mt.ComputeMerkleProof(2, leaves) + require.NoError(t, err) + fmt.Printf("Root: %s\n", root.String()) + for i := 0; i < len(siblings); i++ { + hash := common.BytesToHash(siblings[i][:]) + fmt.Printf("Sibling %d: %s\n", i, hash.String()) + } + expectedProof := []string{ + "0x1cff355b898cf285bcc3f84a8d6ed51c19fe87ab654f4146f2dc7723a59fc741", + "0x7ae3eca221dee534b82adffb8003ad3826ddf116132e4ff55c681ff723bc7e42", + "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", + "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", + "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", + "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", + "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", + "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", + "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", + "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", + "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", + "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", + "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", + "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", + "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", + "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", + "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", + "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", + "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", + "0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", + "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", + "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", + "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", + "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", + "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", + "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", + "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9"} + for i := 0; i < len(siblings); i++ { + require.Equal(t, expectedProof[i], "0x"+hex.EncodeToString(siblings[i][:])) + } + require.Equal(t, "0xb85687d05a6bdccadcc1170a0e2bbba6855c35c984a0bc91697bc066bd38a338", root.String()) +} diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go index e7115a60..2cd6190c 100644 --- a/l1infotreesync/processor.go +++ b/l1infotreesync/processor.go @@ -402,7 +402,7 @@ func (p *processor) GetInfoByGlobalExitRoot(ger common.Hash) (*L1InfoTreeLeaf, e SELECT * FROM l1info_leaf WHERE global_exit_root = $1 
LIMIT 1; - `, ger.Hex()) + `, ger.String()) return info, db.ReturnErrNotFound(err) } diff --git a/l1infotreesync/processor_test.go b/l1infotreesync/processor_test.go index 52a81ce8..34c5daef 100644 --- a/l1infotreesync/processor_test.go +++ b/l1infotreesync/processor_test.go @@ -1,10 +1,15 @@ package l1infotreesync import ( + "fmt" "testing" "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/l1infotree" + "github.com/0xPolygon/cdk/l1infotreesync/migrations" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sync" + "github.com/0xPolygon/cdk/tree" "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" @@ -124,8 +129,6 @@ func TestGetLatestInfoUntilBlockIfNotFoundReturnsErrNotFound(t *testing.T) { } func Test_processor_GetL1InfoTreeMerkleProof(t *testing.T) { - t.Parallel() - testTable := []struct { name string getProcessor func(t *testing.T) *processor @@ -184,8 +187,6 @@ func Test_processor_GetL1InfoTreeMerkleProof(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - p := tt.getProcessor(t) proof, root, err := p.GetL1InfoTreeMerkleProof(context.Background(), tt.idx) if tt.expectedErr != nil { @@ -267,3 +268,93 @@ func Test_processor_Reorg(t *testing.T) { }) } } + +func TestProofsFromDifferentTrees(t *testing.T) { + fmt.Println("aggregator L1InfoTree ===============================================") + + l1Tree, err := l1infotree.NewL1InfoTree(log.WithFields("test"), types.DefaultHeight, [][32]byte{}) + require.NoError(t, err) + + leaves := createTestLeaves(t, 2) + + aLeaves := make([][32]byte, len(leaves)) + for i, leaf := range leaves { + aLeaves[i] = l1infotree.HashLeafData( + leaf.GlobalExitRoot, + leaf.PreviousBlockHash, + leaf.Timestamp) + } + + aggregatorL1InfoTree, aggregatorRoot, err := l1Tree.ComputeMerkleProof(leaves[0].L1InfoTreeIndex, aLeaves) + require.NoError(t, err) + + aggregatorProof := types.Proof{} + for i, p := range aggregatorL1InfoTree { + aggregatorProof[i] = common.BytesToHash(p[:]) + } + + fmt.Println(aggregatorRoot) + fmt.Println(aggregatorProof) + fmt.Println("l1 info tree syncer L1InfoTree ===============================================") + + dbPath := "file:l1InfoTreeTest?mode=memory&cache=shared" + require.NoError(t, migrations.RunMigrations(dbPath)) + + dbe, err := db.NewSQLiteDB(dbPath) + require.NoError(t, err) + + l1InfoTree := tree.NewAppendOnlyTree(dbe, migrations.L1InfoTreePrefix) + + tx, err := db.NewTx(context.Background(), dbe) + require.NoError(t, err) + + for _, leaf := range leaves { + err = l1InfoTree.AddLeaf(tx, leaf.BlockNumber, leaf.BlockPosition, types.Leaf{ + Index: leaf.L1InfoTreeIndex, + Hash: leaf.Hash, + }) + + require.NoError(t, err) + } + + require.NoError(t, tx.Commit()) + + l1InfoTreeSyncerRoot, err := l1InfoTree.GetRootByIndex(context.Background(), leaves[1].L1InfoTreeIndex) + require.NoError(t, err) + l1InfoTreeSyncerProof, err := l1InfoTree.GetProof(context.Background(), leaves[0].L1InfoTreeIndex, l1InfoTreeSyncerRoot.Hash) + require.NoError(t, err) + for i, l := range aggregatorL1InfoTree { + require.Equal(t, common.Hash(l), l1InfoTreeSyncerProof[i]) + } + + fmt.Println(leaves[0].GlobalExitRoot) + fmt.Println(l1InfoTreeSyncerProof) + + require.Equal(t, aggregatorRoot, l1InfoTreeSyncerRoot.Hash) + require.Equal(t, aggregatorProof, l1InfoTreeSyncerProof) +} + +func createTestLeaves(t *testing.T, numOfLeaves int) []*L1InfoTreeLeaf { + t.Helper() + + leaves := make([]*L1InfoTreeLeaf, 0, numOfLeaves) + + for i := 0; i < numOfLeaves; 
i++ { + leaf := &L1InfoTreeLeaf{ + L1InfoTreeIndex: uint32(i), + Timestamp: uint64(i), + BlockNumber: uint64(i), + BlockPosition: uint64(i), + PreviousBlockHash: common.HexToHash(fmt.Sprintf("0x%x", i)), + MainnetExitRoot: common.HexToHash(fmt.Sprintf("0x%x", i)), + RollupExitRoot: common.HexToHash(fmt.Sprintf("0x%x", i)), + } + + leaf.GlobalExitRoot = leaf.globalExitRoot() + leaf.Hash = leaf.hash() + + leaves = append(leaves, leaf) + } + + return leaves +} diff --git a/scripts/local_config b/scripts/local_config index 6922f15e..d1a47b2c 100755 --- a/scripts/local_config +++ b/scripts/local_config @@ -30,10 +30,13 @@ function get_value_from_toml_file(){ local _LINE local _inside_section=0 local _return_next_line=0 + local _TMP_FILE=$(mktemp) + cat $_FILE > $_TMP_FILE + # Maybe the file doesnt end with a new line so we added just in case + echo >> $_TMP_FILE while read -r _LINE; do # Clean up line from spaces and tabs _LINE=$(echo $_LINE | tr -d '[:space:]') - #echo $_LINE if [ $_inside_section -eq 1 ]; then if [[ "$_LINE" == [* ]]; then return 1 @@ -51,6 +54,7 @@ function get_value_from_toml_file(){ if [ $_key_value == "[" ]; then _return_next_line=1 else + rm $_TMP_FILE # sed sentence remove quotes echo $_key_value | sed 's/^[[:space:]]*"//;s/"$//' return 0 @@ -61,7 +65,8 @@ function get_value_from_toml_file(){ fi - done < "$_FILE" + done < "$_TMP_FILE" + rm $_TMP_FILE return 2 } @@ -73,7 +78,7 @@ function export_key_from_toml_file_or_fatal(){ local _KEY="$4" local _VALUE=$(get_value_from_toml_file $_FILE $_SECTION $_KEY) if [ -z "$_VALUE" ]; then - log_fatal "$FUNCNAME: key $_KEY not found in section $_SECTION" + log_fatal "$FUNCNAME: key $_KEY not found in section $_SECTION in file $_FILE" fi export $_EXPORTED_VAR_NAME="$_VALUE" log_debug "$_EXPORTED_VAR_NAME=${!_EXPORTED_VAR_NAME} \t\t\t# file:$_FILE section:$_SECTION key:$_KEY" @@ -141,7 +146,10 @@ function export_values_of_cdk_node_config(){ export_key_from_toml_file_or_fatal aggregator_db_password $_CDK_CONFIG_FILE Aggregator.DB Password export_obj_key_from_toml_file_or_fatal zkevm_l2_aggregator_keystore_password $_CDK_CONFIG_FILE Aggregator.EthTxManager PrivateKeys Password - export_key_from_toml_file_or_fatal zkevm_rollup_fork_id $_CDK_CONFIG_FILE Aggregator ForkId + export_key_from_toml_file_or_fatal zkevm_rollup_fork_id $_CDK_CONFIG_FILE Aggregator ForkId + export_key_from_toml_file_or_fatal zkevm_l2_agglayer_keystore_password $_CDK_CONFIG_FILE AggSender.SequencerPrivateKey Password + export_key_from_toml_file_or_fatal zkevm_bridge_address $_CDK_CONFIG_FILE BridgeL1Sync BridgeAddr + export is_cdk_validium=$zkevm_is_validium export zkevm_rollup_chain_id=$l2_chain_id @@ -198,13 +206,14 @@ function export_portnum_from_kurtosis_or_fail(){ ############################################################################### function export_ports_from_kurtosis(){ export_portnum_from_kurtosis_or_fail l1_rpc_port el-1-geth-lighthouse rpc - export_portnum_from_kurtosis_or_fail zkevm_rpc_http_port cdk-erigon-node-001 rpc rpc + export_portnum_from_kurtosis_or_fail zkevm_rpc_http_port cdk-erigon-node-001 http-rpc rpc export_portnum_from_kurtosis_or_fail zkevm_data_streamer_port cdk-erigon-sequencer-001 data-streamer export_portnum_from_kurtosis_or_fail aggregator_db_port postgres-001 postgres export_portnum_from_kurtosis_or_fail agglayer_port agglayer agglayer export aggregator_db_hostname="127.0.0.1" export l1_rpc_url="http://localhost:${l1_rpc_port}" export l2_rpc_url="http://localhost:${zkevm_rpc_http_port}" + export 
agglayer_url="http://localhost:${agglayer_port}" } ############################################################################### @@ -244,8 +253,10 @@ EOF ############################################################################### function create_dest_folder(){ export DEST=${TMP_CDK_FOLDER}/local_config + export path_rw_data=${TMP_CDK_FOLDER}/runtime [ ! -d ${DEST} ] && mkdir -p ${DEST} rm $DEST/* + mkdir $path_rw_data } ############################################################################### function download_kurtosis_artifacts(){ @@ -263,6 +274,10 @@ function download_kurtosis_artifacts(){ kurtosis files download $KURTOSIS_ENCLAVE aggregator-keystore $DEST ok_or_fatal "Error downloading kurtosis artifact cdk-node-config-artifact to $DEST" export zkevm_l2_aggregator_keystore_file=$DEST/aggregator.keystore + + kurtosis files download $KURTOSIS_ENCLAVE agglayer-keystore $DEST + ok_or_fatal "Error downloading kurtosis artifact agglayer to $DEST" + export zkevm_l2_agglayer_keystore_file=$DEST/agglayer.keystore } ############################################################################### @@ -278,9 +293,31 @@ function check_generated_config_file(){ fi } ############################################################################### +function parse_command_line_args(){ + while [[ $# -gt 0 ]]; do + case $1 in + -h|--help) + echo "Usage: $0" + echo " -h: help" + exit 0 + ;; + -e|--enclave) + KURTOSIS_ENCLAVE=$2 + shift + shift + ;; + -*) + echo "Invalid Option: $1" 1>&2 + exit 1 + ;; + esac + done +} +############################################################################### # MAIN ############################################################################### set -o pipefail # enable strict command pipe error detection +parse_command_line_args $* check_requirements create_dest_folder @@ -311,6 +348,7 @@ echo "- Stop cdk-node:" echo " kurtosis service stop cdk-v1 cdk-node-001" echo " " echo "- Add next configuration to vscode launch.json" +echo " -----------------------------------------------------------" cat << EOF { "name": "Debug cdk", @@ -325,5 +363,12 @@ cat << EOF "-components", "sequence-sender,aggregator", ] }, + + To run AggSender change components to: + "-components", "aggsender", EOF +echo " -----------------------------------------------------------" +echo " " +echo " - rembember to clean previous execution data: " +echo " rm -Rf ${path_rw_data}/*" \ No newline at end of file diff --git a/sonar-project.properties b/sonar-project.properties index 815d53a8..f46e9863 100644 --- a/sonar-project.properties +++ b/sonar-project.properties @@ -7,11 +7,11 @@ sonar.projectName=cdk sonar.organization=0xpolygon sonar.sources=. -sonar.exclusions=**/test/**,**/vendor/**,**/mocks/**,**/build/**,**/target/**,**/proto/include/**,**/*.pb.go,**/docs/**,**/*.sql,**/mocks_*/*, scripts/** +sonar.exclusions=**/test/**,**/vendor/**,**/mocks/**,**/build/**,**/target/**,**/proto/include/**,**/*.pb.go,**/docs/**,**/*.sql,**/mocks_*/*,scripts/**,**/mock_*.go,**/agglayer/**,**/cmd/** sonar.tests=. 
sonar.test.inclusions=**/*_test.go -sonar.test.exclusions=**/vendor/**,**/docs/**,**/mocks/**,**/*.pb.go,**/*.yml,**/*.yaml,**/*.json,**/*.xml,**/*.toml,**/mocks_*/* +sonar.test.exclusions=**/vendor/**,**/docs/**,**/mocks/**,**/*.pb.go,**/*.yml,**/*.yaml,**/*.json,**/*.xml,**/*.toml,**/mocks_*/*,**/mock_*.go,**/agglayer/**,**/cmd/** sonar.issue.enforceSemantic=true # ===================================================== diff --git a/test/Makefile b/test/Makefile index d173c423..a864cf82 100644 --- a/test/Makefile +++ b/test/Makefile @@ -1,8 +1,8 @@ .PHONY: generate-mocks generate-mocks: generate-mocks-bridgesync generate-mocks-reorgdetector generate-mocks-sequencesender \ generate-mocks-da generate-mocks-l1infotreesync generate-mocks-helpers \ - generate-mocks-sync generate-mocks-l1infotreesync generate-mocks-aggregator - + generate-mocks-sync generate-mocks-l1infotreesync generate-mocks-aggregator \ + generate-mocks-aggsender generate-mocks-agglayer generate-mocks-bridgesync .PHONY: generate-mocks-bridgesync generate-mocks-bridgesync: ## Generates mocks for bridgesync, using mockery tool @@ -53,13 +53,30 @@ generate-mocks-aggregator: ## Generates mocks for aggregator, using mockery tool export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ProverInterface --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=ProverInterfaceMock --filename=mock_prover.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Etherman --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=EthermanMock --filename=mock_etherman.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=StateInterface --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=StateInterfaceMock --filename=mock_state.go - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=AgglayerClientInterface --dir=../aggregator/agglayer --output=../aggregator/mocks --outpkg=mocks --structname=AgglayerClientInterfaceMock --filename=mock_agglayer_client.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Synchronizer --srcpkg=github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer --output=../aggregator/mocks --outpkg=mocks --structname=SynchronizerInterfaceMock --filename=mock_synchronizer.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthTxManagerClient --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=EthTxManagerClientMock --filename=mock_eth_tx_manager.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Tx --srcpkg=github.com/jackc/pgx/v4 --output=../aggregator/mocks --outpkg=mocks --structname=DbTxMock --filename=mock_dbtx.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=RPCInterface --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=RPCInterfaceMock --filename=mock_rpc.go +.PHONY: generate-mocks-aggsender +generate-mocks-aggsender: ## Generates mocks for aggsender, using mockery tool + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=L1InfoTreeSyncer --dir=../aggsender/types --output=../aggsender/mocks --outpkg=mocks --structname=L1InfoTreeSyncerMock --filename=mock_l1infotree_syncer.go ${COMMON_MOCKERY_PARAMS} + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=L2BridgeSyncer --dir=../aggsender/types --output=../aggsender/mocks --outpkg=mocks --structname=L2BridgeSyncerMock 
--filename=mock_l2bridge_syncer.go ${COMMON_MOCKERY_PARAMS} + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Logger --dir=../aggsender/types --output=../aggsender/mocks --outpkg=mocks --structname=LoggerMock --filename=mock_logger.go ${COMMON_MOCKERY_PARAMS} + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=AggSenderStorage --dir=../aggsender/db --output=../aggsender/mocks --outpkg=mocks --structname=AggSenderStorageMock --filename=mock_aggsender_storage.go ${COMMON_MOCKERY_PARAMS} + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthClient --dir=../aggsender/types --output=../aggsender/mocks --outpkg=mocks --structname=EthClientMock --filename=mock_eth_client.go ${COMMON_MOCKERY_PARAMS} + +.PHONY: generate-mocks-agglayer +generate-mocks-agglayer: ## Generates mocks for agglayer, using mockery tool + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=AgglayerClientInterface --dir=../agglayer --output=../agglayer --outpkg=agglayer --inpackage --structname=AgglayerClientMock --filename=mock_agglayer_client.go + +.PHONY: generate-mocks-bridgesync +generate-mocks-bridgesync: ## Generates mocks for bridgesync, using mockery tool + rm -Rf ../bridgesync/mocks + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../bridgesync --output ../bridgesync/mocks --outpkg mocks_bridgesync ${COMMON_MOCKERY_PARAMS} + + .PHONY: test-e2e-fork9-validium test-e2e-fork9-validium: stop ./run-e2e.sh fork9 cdk-validium diff --git a/test/config/kurtosis-cdk-node-config.toml.template b/test/config/kurtosis-cdk-node-config.toml.template index fa01b528..68f6ec97 100644 --- a/test/config/kurtosis-cdk-node-config.toml.template +++ b/test/config/kurtosis-cdk-node-config.toml.template @@ -1,8 +1,9 @@ -PathRWData = "/data/" +PathRWData = "{{.path_rw_data}}/" L1URL="{{.l1_rpc_url}}" L2URL="http://{{.l2_rpc_name}}{{.deployment_suffix}}:{{.zkevm_rpc_http_port}}" L1AggOracleURL = "http://test-aggoracle-l1:8545" L2AggOracleURL = "http://test-aggoracle-l2:8545" +AggLayerURL="{{.agglayer_url}}" ForkId = {{.zkevm_rollup_fork_id}} IsValidiumMode = {{.is_cdk_validium}} @@ -19,13 +20,11 @@ SequencerPrivateKeyPassword = "{{.zkevm_l2_keystore_password}}" AggregatorPrivateKeyPath = "{{or .zkevm_l2_aggregator_keystore_file "/etc/cdk/aggregator.keystore"}}" AggregatorPrivateKeyPassword = "{{.zkevm_l2_keystore_password}}" SenderProofToL1Addr = "{{.zkevm_l2_agglayer_address}}" - +polygonBridgeAddr = "{{.zkevm_bridge_address}}" RPCURL = "http://{{.l2_rpc_name}}{{.deployment_suffix}}:{{.zkevm_rpc_http_port}}" WitnessURL = "http://{{.l2_rpc_name}}{{.deployment_suffix}}:{{.zkevm_rpc_http_port}}" -AggLayerURL = "http://agglayer:{{.agglayer_port}}" - # This values can be override directly from genesis.json @@ -38,8 +37,7 @@ genesisBlockNumber = "{{.zkevm_rollup_manager_block_number}}" polygonRollupManagerAddress = "{{.zkevm_rollup_manager_address}}" polTokenAddress = "{{.pol_token_address}}" polygonZkEVMAddress = "{{.zkevm_rollup_address}}" - polygonBridgeAddr = "0x0000000000000000000000000000000000000000" - + [L2Config] GlobalExitRootAddr = "{{.zkevm_global_exit_root_address}}" @@ -58,4 +56,7 @@ Outputs = ["stderr"] Host = "{{.aggregator_db.hostname}}" Port = "{{.aggregator_db.port}}" EnableLog = false - MaxConns = 200 \ No newline at end of file + MaxConns = 200 + +[AggSender] +SequencerPrivateKey = {Path = "{{or .zkevm_l2_agglayer_keystore_file "/pk/sequencer.keystore"}}", Password = 
"{{.zkevm_l2_agglayer_keystore_password}}"} diff --git a/test/helpers/lxly-bridge-test.bash b/test/helpers/lxly-bridge-test.bash index c753393a..7b3cb008 100644 --- a/test/helpers/lxly-bridge-test.bash +++ b/test/helpers/lxly-bridge-test.bash @@ -38,7 +38,6 @@ function claim() { echo "Getting full list of deposits" >&3 curl -s "$bridge_api_url/bridges/$destination_addr?limit=100&offset=0" | jq '.' | tee $bridge_deposit_file - echo "Looking for claimable deposits" >&3 jq '[.deposits[] | select(.ready_for_claim == true and .claim_tx_hash == "" and .dest_net == '$destination_net')]' $bridge_deposit_file | tee $claimable_deposit_file readonly claimable_count=$(jq '. | length' $claimable_deposit_file) diff --git a/tree/tree.go b/tree/tree.go index 0e3a0c69..6abb9e3d 100644 --- a/tree/tree.go +++ b/tree/tree.go @@ -7,8 +7,10 @@ import ( "fmt" "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" "github.com/russross/meddler" "golang.org/x/crypto/sha3" ) @@ -112,7 +114,8 @@ func (t *Tree) GetProof(ctx context.Context, index uint32, root common.Hash) (ty return types.Proof{}, err } if isErrNotFound { - return types.Proof{}, db.ErrNotFound + // TODO: Validate it. It returns a proof of a tree with missing leafs + log.Warnf("getSiblings returned proof with zero hashes for index %d and root %s", index, root.String()) } return siblings, nil } @@ -122,7 +125,7 @@ func (t *Tree) getRHTNode(tx db.Querier, nodeHash common.Hash) (*types.TreeNode, err := meddler.QueryRow( tx, node, fmt.Sprintf(`select * from %s where hash = $1`, t.rhtTable), - nodeHash.Hex(), + nodeHash.String(), ) if err != nil { if errors.Is(err, sql.ErrNoRows) { @@ -250,5 +253,20 @@ func (t *Tree) Reorg(tx db.Txer, firstReorgedBlock uint64) error { firstReorgedBlock, ) return err - // NOTE: rht is not cleaned, this could be done in the future as optimization +} + +// CalculateRoot calculates the Merkle Root based on the leaf and proof of inclusion +func CalculateRoot(leafHash common.Hash, proof [types.DefaultHeight]common.Hash, index uint32) common.Hash { + node := leafHash + + // Compute the Merkle root + for height := uint8(0); height < types.DefaultHeight; height++ { + if (index>>height)&1 == 1 { + node = crypto.Keccak256Hash(proof[height].Bytes(), node.Bytes()) + } else { + node = crypto.Keccak256Hash(node.Bytes(), proof[height].Bytes()) + } + } + + return node }